@@ -1,403 +1,405 @@
|
1 | 1 | # remotenames.py - extension to display remotenames |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2017 Augie Fackler <raf@durin42.com> |
|
4 | 4 | # Copyright 2017 Sean Farley <sean@farley.io> |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | """ showing remotebookmarks and remotebranches in UI (EXPERIMENTAL) |
|
10 | 10 | |
|
11 | 11 | By default both remotebookmarks and remotebranches are turned on. Config |
|
12 | 12 | knobs to control them individually are as follows. |
|
13 | 13 | |
|
14 | 14 | Config options to tweak the default behaviour: |
|
15 | 15 | |
|
16 | 16 | remotenames.bookmarks |
|
17 | 17 | Boolean value to enable or disable showing of remotebookmarks (default: True) |
|
18 | 18 | |
|
19 | 19 | remotenames.branches |
|
20 | 20 | Boolean value to enable or disable showing of remotebranches (default: True) |
|
21 | 21 | |
|
22 | 22 | remotenames.hoistedpeer |
|
23 | 23 | Name of the peer whose remotebookmarks should be hoisted into the top-level |
|
24 | 24 | namespace (default: 'default') |
|
25 | 25 | """ |
|
26 | 26 | |
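As a quick illustration of the three knobs documented above — a standalone sketch, not part of the diff; the lookup helper and the sample override are invented, while in Mercurial itself the values are read via `ui.configbool()`/`ui.config()`:

```python
# Standalone model of the documented knobs and their defaults (the
# userconfig dict stands in for values an hgrc would supply).
defaults = {
    ('remotenames', 'bookmarks'): True,
    ('remotenames', 'branches'): True,
    ('remotenames', 'hoistedpeer'): 'default',
}

userconfig = {('remotenames', 'branches'): False}  # hypothetical override

def getconfig(section, name):
    return userconfig.get((section, name), defaults[(section, name)])

print(getconfig('remotenames', 'branches'))  # False: the override wins
```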
|
27 | 27 | from __future__ import absolute_import |
|
28 | 28 | |
|
29 | 29 | from mercurial.i18n import _ |
|
30 | 30 | |
|
31 | 31 | from mercurial.node import ( |
|
32 | 32 | bin, |
|
33 | 33 | ) |
|
34 | 34 | from mercurial import ( |
|
35 | 35 | bookmarks, |
|
36 | 36 | error, |
|
37 | 37 | extensions, |
|
38 | 38 | logexchange, |
|
39 | 39 | namespaces, |
|
40 | 40 | pycompat, |
|
41 | 41 | registrar, |
|
42 | 42 | revsetlang, |
|
43 | 43 | smartset, |
|
44 | 44 | templateutil, |
|
45 | 45 | util, |
|
46 | 46 | ) |
|
47 | 47 | |
|
48 | 48 | from mercurial.utils import ( |
|
49 | 49 | stringutil, |
|
50 | 50 | ) |
|
51 | 51 | |
|
52 | 52 | if pycompat.ispy3: |
|
53 | 53 | import collections.abc |
|
54 | 54 | mutablemapping = collections.abc.MutableMapping |
|
55 | 55 | else: |
|
56 | 56 | import collections |
|
57 | 57 | mutablemapping = collections.MutableMapping |
|
58 | 58 | |
|
59 | 59 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
60 | 60 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
61 | 61 | # be specifying the version(s) of Mercurial they are tested with, or |
|
62 | 62 | # leave the attribute unspecified. |
|
63 | 63 | testedwith = 'ships-with-hg-core' |
|
64 | 64 | |
|
65 | 65 | configtable = {} |
|
66 | 66 | configitem = registrar.configitem(configtable) |
|
67 | 67 | templatekeyword = registrar.templatekeyword() |
|
68 | 68 | revsetpredicate = registrar.revsetpredicate() |
|
69 | 69 | |
|
70 | 70 | configitem('remotenames', 'bookmarks', |
|
71 | 71 | default=True, |
|
72 | 72 | ) |
|
73 | 73 | configitem('remotenames', 'branches', |
|
74 | 74 | default=True, |
|
75 | 75 | ) |
|
76 | 76 | configitem('remotenames', 'hoistedpeer', |
|
77 | 77 | default='default', |
|
78 | 78 | ) |
|
79 | 79 | |
|
80 | 80 | class lazyremotenamedict(mutablemapping): |
|
81 | 81 | """ |
|
82 | 82 | Read-only dict-like class to lazily resolve remotename entries |
|
83 | 83 | |
|
84 | 84 | We do this because remotenames startup was slow. |
|
85 | 85 | We lazily read the remotenames file once to figure out the potential entries |
|
86 | 86 | and store them in self.potentialentries. Then when asked to resolve an |
|
87 | 87 | entry, if it is not in self.potentialentries, then it isn't there; if it |
|
88 | 88 | is in self.potentialentries, we resolve it and store the result in |
|
89 | 89 | self.cache. We cannot be lazy when asked for all the entries (keys). |
|
90 | 90 | """ |
|
91 | 91 | def __init__(self, kind, repo): |
|
92 | 92 | self.cache = {} |
|
93 | 93 | self.potentialentries = {} |
|
94 | 94 | self._kind = kind # bookmarks or branches |
|
95 | 95 | self._repo = repo |
|
96 | 96 | self.loaded = False |
|
97 | 97 | |
|
98 | 98 | def _load(self): |
|
99 | 99 | """ Read the remotenames file, store entries matching selected kind """ |
|
100 | 100 | self.loaded = True |
|
101 | 101 | repo = self._repo |
|
102 | 102 | for node, rpath, rname in logexchange.readremotenamefile(repo, |
|
103 | 103 | self._kind): |
|
104 | 104 | name = rpath + '/' + rname |
|
105 | 105 | self.potentialentries[name] = (node, rpath, name) |
|
106 | 106 | |
|
107 | 107 | def _resolvedata(self, potentialentry): |
|
108 | 108 | """ Check that the node for potentialentry exists and return it """ |
|
109 | 109 | if potentialentry not in self.potentialentries: |
|
110 | 110 | return None |
|
111 | 111 | node, remote, name = self.potentialentries[potentialentry] |
|
112 | 112 | repo = self._repo |
|
113 | 113 | binnode = bin(node) |
|
114 | 114 | # if the node doesn't exist, skip it |
|
115 | 115 | try: |
|
116 | 116 | repo.changelog.rev(binnode) |
|
117 | 117 | except LookupError: |
|
118 | 118 | return None |
|
119 | 119 | # Skip closed branches |
|
120 | 120 | if (self._kind == 'branches' and repo[binnode].closesbranch()): |
|
121 | 121 | return None |
|
122 | 122 | return [binnode] |
|
123 | 123 | |
|
124 | 124 | def __getitem__(self, key): |
|
125 | 125 | if not self.loaded: |
|
126 | 126 | self._load() |
|
127 | 127 | val = self._fetchandcache(key) |
|
128 | 128 | if val is not None: |
|
129 | 129 | return val |
|
130 | 130 | else: |
|
131 | 131 | raise KeyError() |
|
132 | 132 | |
|
133 | 133 | def __iter__(self): |
|
134 | 134 | return iter(self.potentialentries) |
|
135 | 135 | |
|
136 | 136 | def __len__(self): |
|
137 | 137 | return len(self.potentialentries) |
|
138 | 138 | |
|
139 | 139 | def __setitem__(self): |
|
140 | 140 | raise NotImplementedError |
|
141 | 141 | |
|
142 | 142 | def __delitem__(self): |
|
143 | 143 | raise NotImplementedError |
|
144 | 144 | |
|
145 | 145 | def _fetchandcache(self, key): |
|
146 | 146 | if key in self.cache: |
|
147 | 147 | return self.cache[key] |
|
148 | 148 | val = self._resolvedata(key) |
|
149 | 149 | if val is not None: |
|
150 | 150 | self.cache[key] = val |
|
151 | 151 | return val |
|
152 | 152 | else: |
|
153 | 153 | return None |
|
154 | 154 | |
|
155 | 155 | def keys(self): |
|
156 | 156 | """ Get a list of bookmark or branch names """ |
|
157 | 157 | if not self.loaded: |
|
158 | 158 | self._load() |
|
159 | 159 | return self.potentialentries.keys() |
|
160 | 160 | |
|
161 | 161 | def iteritems(self): |
|
162 | 162 | """ Iterate over (name, node) tuples """ |
|
163 | 163 | |
|
164 | 164 | if not self.loaded: |
|
165 | 165 | self._load() |
|
166 | 166 | |
|
167 | 167 | for k, vtup in self.potentialentries.iteritems(): |
|
168 | 168 | yield (k, [bin(vtup[0])]) |
|
169 | 169 | |
|
170 | items = iteritems | |
|
171 | ||
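The `items = iteritems` alias added here matters because the Python 3 source transformer (see `__init__.py` later in this review) rewrites `obj.iteritems()` calls to `obj.items()`. A tiny standalone demonstration of why a class then needs to answer to both names:

```python
class demo(object):
    def iteritems(self):
        yield 'k', 'v'
    items = iteritems  # py3 callers rewritten to .items() keep working

print(list(demo().items()))      # [('k', 'v')]
print(list(demo().iteritems()))  # same generator under the py2 name
```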
|
170 | 172 | class remotenames(object): |
|
171 | 173 | """ |
|
172 | 174 | This class encapsulates all the remotenames state. It also contains |
|
173 | 175 | methods to access that state in convenient ways. Remotenames are lazy |
|
174 | 176 | loaded. Whenever client code needs to ensure the freshest copy of |
|
175 | 177 | remotenames, use the `clearnames` method to force an eventual load. |
|
176 | 178 | """ |
|
177 | 179 | |
|
178 | 180 | def __init__(self, repo, *args): |
|
179 | 181 | self._repo = repo |
|
180 | 182 | self.clearnames() |
|
181 | 183 | |
|
182 | 184 | def clearnames(self): |
|
183 | 185 | """ Clear all remote names state """ |
|
184 | 186 | self.bookmarks = lazyremotenamedict("bookmarks", self._repo) |
|
185 | 187 | self.branches = lazyremotenamedict("branches", self._repo) |
|
186 | 188 | self._invalidatecache() |
|
187 | 189 | |
|
188 | 190 | def _invalidatecache(self): |
|
189 | 191 | self._nodetobmarks = None |
|
190 | 192 | self._nodetobranch = None |
|
191 | 193 | self._hoisttonodes = None |
|
192 | 194 | self._nodetohoists = None |
|
193 | 195 | |
|
194 | 196 | def bmarktonodes(self): |
|
195 | 197 | return self.bookmarks |
|
196 | 198 | |
|
197 | 199 | def nodetobmarks(self): |
|
198 | 200 | if not self._nodetobmarks: |
|
199 | 201 | bmarktonodes = self.bmarktonodes() |
|
200 | 202 | self._nodetobmarks = {} |
|
201 | 203 | for name, node in bmarktonodes.iteritems(): |
|
202 | 204 | self._nodetobmarks.setdefault(node[0], []).append(name) |
|
203 | 205 | return self._nodetobmarks |
|
204 | 206 | |
|
205 | 207 | def branchtonodes(self): |
|
206 | 208 | return self.branches |
|
207 | 209 | |
|
208 | 210 | def nodetobranch(self): |
|
209 | 211 | if not self._nodetobranch: |
|
210 | 212 | branchtonodes = self.branchtonodes() |
|
211 | 213 | self._nodetobranch = {} |
|
212 | 214 | for name, nodes in branchtonodes.iteritems(): |
|
213 | 215 | for node in nodes: |
|
214 | 216 | self._nodetobranch.setdefault(node, []).append(name) |
|
215 | 217 | return self._nodetobranch |
|
216 | 218 | |
|
217 | 219 | def hoisttonodes(self, hoist): |
|
218 | 220 | if not self._hoisttonodes: |
|
219 | 221 | marktonodes = self.bmarktonodes() |
|
220 | 222 | self._hoisttonodes = {} |
|
221 | 223 | hoist += '/' |
|
222 | 224 | for name, node in marktonodes.iteritems(): |
|
223 | 225 | if name.startswith(hoist): |
|
224 | 226 | name = name[len(hoist):] |
|
225 | 227 | self._hoisttonodes[name] = node |
|
226 | 228 | return self._hoisttonodes |
|
227 | 229 | |
|
228 | 230 | def nodetohoists(self, hoist): |
|
229 | 231 | if not self._nodetohoists: |
|
230 | 232 | marktonodes = self.bmarktonodes() |
|
231 | 233 | self._nodetohoists = {} |
|
232 | 234 | hoist += '/' |
|
233 | 235 | for name, node in marktonodes.iteritems(): |
|
234 | 236 | if name.startswith(hoist): |
|
235 | 237 | name = name[len(hoist):] |
|
236 | 238 | self._nodetohoists.setdefault(node[0], []).append(name) |
|
237 | 239 | return self._nodetohoists |
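Both hoisting methods above apply the same rule: names under the hoisted peer lose their `peer/` prefix. A standalone sketch of that rule with made-up data:

```python
def hoist_names(names, hoist='default'):
    """Strip the 'hoist/' prefix from matching names; drop the rest."""
    prefix = hoist + '/'
    return {n[len(prefix):]: v for n, v in names.items()
            if n.startswith(prefix)}

print(hoist_names({'default/stable': 1, 'other/stable': 2}))
# -> {'stable': 1}: only the hoisted peer's bookmark is promoted
```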
|
238 | 240 | |
|
239 | 241 | def wrapprintbookmarks(orig, ui, repo, fm, bmarks): |
|
240 | 242 | if 'remotebookmarks' not in repo.names: |
|
241 | 243 | return |
|
242 | 244 | ns = repo.names['remotebookmarks'] |
|
243 | 245 | |
|
244 | 246 | for name in ns.listnames(repo): |
|
245 | 247 | nodes = ns.nodes(repo, name) |
|
246 | 248 | if not nodes: |
|
247 | 249 | continue |
|
248 | 250 | node = nodes[0] |
|
249 | 251 | |
|
250 | 252 | bmarks[name] = (node, ' ', '') |
|
251 | 253 | |
|
252 | 254 | return orig(ui, repo, fm, bmarks) |
|
253 | 255 | |
|
254 | 256 | def extsetup(ui): |
|
255 | 257 | extensions.wrapfunction(bookmarks, '_printbookmarks', wrapprintbookmarks) |
|
256 | 258 | |
|
257 | 259 | def reposetup(ui, repo): |
|
258 | 260 | |
|
259 | 261 | # set the config option to store remotenames |
|
260 | 262 | repo.ui.setconfig('experimental', 'remotenames', True, 'remotenames-ext') |
|
261 | 263 | |
|
262 | 264 | if not repo.local(): |
|
263 | 265 | return |
|
264 | 266 | |
|
265 | 267 | repo._remotenames = remotenames(repo) |
|
266 | 268 | ns = namespaces.namespace |
|
267 | 269 | |
|
268 | 270 | if ui.configbool('remotenames', 'bookmarks'): |
|
269 | 271 | remotebookmarkns = ns( |
|
270 | 272 | 'remotebookmarks', |
|
271 | 273 | templatename='remotebookmarks', |
|
272 | 274 | colorname='remotebookmark', |
|
273 | 275 | logfmt='remote bookmark: %s\n', |
|
274 | 276 | listnames=lambda repo: repo._remotenames.bmarktonodes().keys(), |
|
275 | 277 | namemap=lambda repo, name: |
|
276 | 278 | repo._remotenames.bmarktonodes().get(name, []), |
|
277 | 279 | nodemap=lambda repo, node: |
|
278 | 280 | repo._remotenames.nodetobmarks().get(node, [])) |
|
279 | 281 | repo.names.addnamespace(remotebookmarkns) |
|
280 | 282 | |
|
281 | 283 | # hoisting only works if there are remote bookmarks |
|
282 | 284 | hoist = ui.config('remotenames', 'hoistedpeer') |
|
283 | 285 | if hoist: |
|
284 | 286 | hoistednamens = ns( |
|
285 | 287 | 'hoistednames', |
|
286 | 288 | templatename='hoistednames', |
|
287 | 289 | colorname='hoistedname', |
|
288 | 290 | logfmt='hoisted name: %s\n', |
|
289 | 291 | listnames = lambda repo: |
|
290 | 292 | repo._remotenames.hoisttonodes(hoist).keys(), |
|
291 | 293 | namemap = lambda repo, name: |
|
292 | 294 | repo._remotenames.hoisttonodes(hoist).get(name, []), |
|
293 | 295 | nodemap = lambda repo, node: |
|
294 | 296 | repo._remotenames.nodetohoists(hoist).get(node, [])) |
|
295 | 297 | repo.names.addnamespace(hoistednamens) |
|
296 | 298 | |
|
297 | 299 | if ui.configbool('remotenames', 'branches'): |
|
298 | 300 | remotebranchns = ns( |
|
299 | 301 | 'remotebranches', |
|
300 | 302 | templatename='remotebranches', |
|
301 | 303 | colorname='remotebranch', |
|
302 | 304 | logfmt='remote branch: %s\n', |
|
303 | 305 | listnames = lambda repo: repo._remotenames.branchtonodes().keys(), |
|
304 | 306 | namemap = lambda repo, name: |
|
305 | 307 | repo._remotenames.branchtonodes().get(name, []), |
|
306 | 308 | nodemap = lambda repo, node: |
|
307 | 309 | repo._remotenames.nodetobranch().get(node, [])) |
|
308 | 310 | repo.names.addnamespace(remotebranchns) |
|
309 | 311 | |
|
310 | 312 | @templatekeyword('remotenames', requires={'repo', 'ctx'}) |
|
311 | 313 | def remotenameskw(context, mapping): |
|
312 | 314 | """List of strings. Remote names associated with the changeset.""" |
|
313 | 315 | repo = context.resource(mapping, 'repo') |
|
314 | 316 | ctx = context.resource(mapping, 'ctx') |
|
315 | 317 | |
|
316 | 318 | remotenames = [] |
|
317 | 319 | if 'remotebookmarks' in repo.names: |
|
318 | 320 | remotenames = repo.names['remotebookmarks'].names(repo, ctx.node()) |
|
319 | 321 | |
|
320 | 322 | if 'remotebranches' in repo.names: |
|
321 | 323 | remotenames += repo.names['remotebranches'].names(repo, ctx.node()) |
|
322 | 324 | |
|
323 | 325 | return templateutil.compatlist(context, mapping, 'remotename', remotenames, |
|
324 | 326 | plural='remotenames') |
|
325 | 327 | |
|
326 | 328 | @templatekeyword('remotebookmarks', requires={'repo', 'ctx'}) |
|
327 | 329 | def remotebookmarkskw(context, mapping): |
|
328 | 330 | """List of strings. Remote bookmarks associated with the changeset.""" |
|
329 | 331 | repo = context.resource(mapping, 'repo') |
|
330 | 332 | ctx = context.resource(mapping, 'ctx') |
|
331 | 333 | |
|
332 | 334 | remotebmarks = [] |
|
333 | 335 | if 'remotebookmarks' in repo.names: |
|
334 | 336 | remotebmarks = repo.names['remotebookmarks'].names(repo, ctx.node()) |
|
335 | 337 | |
|
336 | 338 | return templateutil.compatlist(context, mapping, 'remotebookmark', |
|
337 | 339 | remotebmarks, plural='remotebookmarks') |
|
338 | 340 | |
|
339 | 341 | @templatekeyword('remotebranches', requires={'repo', 'ctx'}) |
|
340 | 342 | def remotebrancheskw(context, mapping): |
|
341 | 343 | """List of strings. Remote branches associated with the changeset.""" |
|
342 | 344 | repo = context.resource(mapping, 'repo') |
|
343 | 345 | ctx = context.resource(mapping, 'ctx') |
|
344 | 346 | |
|
345 | 347 | remotebranches = [] |
|
346 | 348 | if 'remotebranches' in repo.names: |
|
347 | 349 | remotebranches = repo.names['remotebranches'].names(repo, ctx.node()) |
|
348 | 350 | |
|
349 | 351 | return templateutil.compatlist(context, mapping, 'remotebranch', |
|
350 | 352 | remotebranches, plural='remotebranches') |
|
351 | 353 | |
|
352 | 354 | def _revsetutil(repo, subset, x, rtypes): |
|
353 | 355 | """utility function to return a set of revs based on the rtypes""" |
|
354 | 356 | args = revsetlang.getargs(x, 0, 1, _('only one argument accepted')) |
|
355 | 357 | if args: |
|
356 | 358 | kind, pattern, matcher = stringutil.stringmatcher( |
|
357 | 359 | revsetlang.getstring(args[0], _('argument must be a string'))) |
|
358 | 360 | else: |
|
359 | 361 | kind = pattern = None |
|
360 | 362 | matcher = util.always |
|
361 | 363 | |
|
362 | 364 | nodes = set() |
|
363 | 365 | cl = repo.changelog |
|
364 | 366 | for rtype in rtypes: |
|
365 | 367 | if rtype in repo.names: |
|
366 | 368 | ns = repo.names[rtype] |
|
367 | 369 | for name in ns.listnames(repo): |
|
368 | 370 | if not matcher(name): |
|
369 | 371 | continue |
|
370 | 372 | nodes.update(ns.nodes(repo, name)) |
|
371 | 373 | if kind == 'literal' and not nodes: |
|
372 | 374 | raise error.RepoLookupError(_("remote name '%s' does not exist") |
|
373 | 375 | % pattern) |
|
374 | 376 | |
|
375 | 377 | revs = (cl.rev(n) for n in nodes if cl.hasnode(n)) |
|
376 | 378 | return subset & smartset.baseset(revs) |
|
377 | 379 | |
|
378 | 380 | @revsetpredicate('remotenames([name])') |
|
379 | 381 | def remotenamesrevset(repo, subset, x): |
|
380 | 382 | """All changesets which have a remotename on them. If `name` is |
|
381 | 383 | specified, only remotenames of matching remote paths are considered. |
|
382 | 384 | |
|
383 | 385 | Pattern matching is supported for `name`. See :hg:`help revisions.patterns`. |
|
384 | 386 | """ |
|
385 | 387 | return _revsetutil(repo, subset, x, ('remotebookmarks', 'remotebranches')) |
|
386 | 388 | |
|
387 | 389 | @revsetpredicate('remotebranches([name])') |
|
388 | 390 | def remotebranchesrevset(repo, subset, x): |
|
389 | 391 | """All changesets which are branch heads on remotes. If `name` is |
|
390 | 392 | specified, only remotenames of matching remote paths are considered. |
|
391 | 393 | |
|
392 | 394 | Pattern matching is supported for `name`. See :hg:`help revisions.patterns`. |
|
393 | 395 | """ |
|
394 | 396 | return _revsetutil(repo, subset, x, ('remotebranches',)) |
|
395 | 397 | |
|
396 | 398 | @revsetpredicate('remotebookmarks([name])') |
|
397 | 399 | def remotebmarksrevset(repo, subset, x): |
|
398 | 400 | """All changesets which have bookmarks on remotes. If `name` is |
|
399 | 401 | specified, only remotenames of matching remote paths are considered. |
|
400 | 402 | |
|
401 | 403 | Pattern matching is supported for `name`. See :hg:`help revisions.patterns`. |
|
402 | 404 | """ |
|
403 | 405 | return _revsetutil(repo, subset, x, ('remotebookmarks',)) |
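The literal-versus-pattern behaviour in `_revsetutil()` above (a literal name must match something, while an `re:` pattern may match nothing) can be approximated in isolation. This sketch stands in for `stringutil.stringmatcher()` and uses invented names:

```python
import re

def matchnames(pattern, names):
    if pattern.startswith('re:'):
        rx = re.compile(pattern[3:])
        return [n for n in names if rx.search(n)]  # may be empty
    matched = [n for n in names if n == pattern]
    if not matched:
        # mirrors the RepoLookupError raised for unknown literal names
        raise LookupError("remote name %r does not exist" % pattern)
    return matched

print(matchnames('re:^default/', ['default/foo', 'other/bar']))
# -> ['default/foo']
```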
@@ -1,312 +1,314 @@
|
1 | 1 | # __init__.py - Startup and module loading logic for Mercurial. |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import sys |
|
11 | 11 | |
|
12 | 12 | # Allow 'from mercurial import demandimport' to keep working. |
|
13 | 13 | import hgdemandimport |
|
14 | 14 | demandimport = hgdemandimport |
|
15 | 15 | |
|
16 | 16 | __all__ = [] |
|
17 | 17 | |
|
18 | 18 | # Python 3 uses a custom module loader that transforms source code between |
|
19 | 19 | # source file reading and compilation. This is done by registering a custom |
|
20 | 20 | # finder that changes the spec for Mercurial modules to use a custom loader. |
|
21 | 21 | if sys.version_info[0] >= 3: |
|
22 | 22 | import importlib |
|
23 | 23 | import importlib.abc |
|
24 | 24 | import io |
|
25 | 25 | import token |
|
26 | 26 | import tokenize |
|
27 | 27 | |
|
28 | 28 | class hgpathentryfinder(importlib.abc.MetaPathFinder): |
|
29 | 29 | """A sys.meta_path finder that uses a custom module loader.""" |
|
30 | 30 | def find_spec(self, fullname, path, target=None): |
|
31 | 31 | # Only handle Mercurial-related modules. |
|
32 | 32 | if not fullname.startswith(('mercurial.', 'hgext.')): |
|
33 | 33 | return None |
|
34 | 34 | # don't try to parse binary |
|
35 | 35 | if fullname.startswith('mercurial.cext.'): |
|
36 | 36 | return None |
|
37 | 37 | # third-party packages are expected to be dual-version clean |
|
38 | 38 | if fullname.startswith('mercurial.thirdparty'): |
|
39 | 39 | return None |
|
40 | 40 | # zstd is already dual-version clean, don't try and mangle it |
|
41 | 41 | if fullname.startswith('mercurial.zstd'): |
|
42 | 42 | return None |
|
43 | 43 | # rustext is built for the right python version, |
|
44 | 44 | # don't try and mangle it |
|
45 | 45 | if fullname.startswith('mercurial.rustext'): |
|
46 | 46 | return None |
|
47 | 47 | # pywatchman is already dual-version clean, don't try and mangle it |
|
48 | 48 | if fullname.startswith('hgext.fsmonitor.pywatchman'): |
|
49 | 49 | return None |
|
50 | 50 | |
|
51 | 51 | # Try to find the module using other registered finders. |
|
52 | 52 | spec = None |
|
53 | 53 | for finder in sys.meta_path: |
|
54 | 54 | if finder == self: |
|
55 | 55 | continue |
|
56 | 56 | |
|
57 | 57 | # Originally the API was a `find_module` method, but it was |
|
58 | 58 | # renamed to `find_spec` in python 3.4, with a new `target` |
|
59 | 59 | # argument. |
|
60 | 60 | find_spec_method = getattr(finder, 'find_spec', None) |
|
61 | 61 | if find_spec_method: |
|
62 | 62 | spec = find_spec_method(fullname, path, target=target) |
|
63 | 63 | else: |
|
64 | 64 | spec = finder.find_module(fullname) |
|
65 | 65 | if spec is not None: |
|
66 | 66 | spec = importlib.util.spec_from_loader(fullname, spec) |
|
67 | 67 | if spec: |
|
68 | 68 | break |
|
69 | 69 | |
|
70 | 70 | # This is a Mercurial-related module but we couldn't find it |
|
71 | 71 | # using the previously-registered finders. This likely means |
|
72 | 72 | # the module doesn't exist. |
|
73 | 73 | if not spec: |
|
74 | 74 | return None |
|
75 | 75 | |
|
76 | 76 | # TODO need to support loaders from alternate specs, like zip |
|
77 | 77 | # loaders. |
|
78 | 78 | loader = hgloader(spec.name, spec.origin) |
|
79 | 79 | # Can't use util.safehasattr here because that would require |
|
80 | 80 | # importing util, and we're in import code. |
|
81 | 81 | if hasattr(spec.loader, 'loader'): # hasattr-py3-only |
|
82 | 82 | # This is a nested loader (maybe a lazy loader?) |
|
83 | 83 | spec.loader.loader = loader |
|
84 | 84 | else: |
|
85 | 85 | spec.loader = loader |
|
86 | 86 | return spec |
|
87 | 87 | |
|
88 | 88 | def replacetokens(tokens, fullname): |
|
89 | 89 | """Transform a stream of tokens from raw to Python 3. |
|
90 | 90 | |
|
91 | 91 | It is called by the custom module loading machinery to rewrite |
|
92 | 92 | source/tokens between source decoding and compilation. |
|
93 | 93 | |
|
94 | 94 | Returns a generator of possibly rewritten tokens. |
|
95 | 95 | |
|
96 | 96 | The input token list may be mutated as part of processing. However, |
|
97 | 97 | its changes do not necessarily match the output token stream. |
|
98 | 98 | |
|
99 | 99 | REMEMBER TO CHANGE ``BYTECODEHEADER`` WHEN CHANGING THIS FUNCTION |
|
100 | 100 | OR CACHED FILES WON'T GET INVALIDATED PROPERLY. |
|
101 | 101 | """ |
|
102 | 102 | futureimpline = False |
|
103 | 103 | |
|
104 | 104 | # The following utility functions access the tokens list and i index of |
|
105 | 105 | # the 'for i, t in enumerate(tokens)' loop below |
|
106 | 106 | def _isop(j, *o): |
|
107 | 107 | """Assert that tokens[j] is an OP with one of the given values""" |
|
108 | 108 | try: |
|
109 | 109 | return tokens[j].type == token.OP and tokens[j].string in o |
|
110 | 110 | except IndexError: |
|
111 | 111 | return False |
|
112 | 112 | |
|
113 | 113 | def _findargnofcall(n): |
|
114 | 114 | """Find arg n of a call expression (start at 0) |
|
115 | 115 | |
|
116 | 116 | Returns index of the first token of that argument, or None if |
|
117 | 117 | there is not that many arguments. |
|
118 | 118 | |
|
119 | 119 | Assumes that token[i + 1] is '('. |
|
120 | 120 | |
|
121 | 121 | """ |
|
122 | 122 | nested = 0 |
|
123 | 123 | for j in range(i + 2, len(tokens)): |
|
124 | 124 | if _isop(j, ')', ']', '}'): |
|
125 | 125 | # end of call, tuple, subscription or dict / set |
|
126 | 126 | nested -= 1 |
|
127 | 127 | if nested < 0: |
|
128 | 128 | return None |
|
129 | 129 | elif n == 0: |
|
130 | 130 | # this is the starting position of arg |
|
131 | 131 | return j |
|
132 | 132 | elif _isop(j, '(', '[', '{'): |
|
133 | 133 | nested += 1 |
|
134 | 134 | elif _isop(j, ',') and nested == 0: |
|
135 | 135 | n -= 1 |
|
136 | 136 | |
|
137 | 137 | return None |
|
138 | 138 | |
|
139 | 139 | def _ensureunicode(j): |
|
140 | 140 | """Make sure the token at j is a unicode string |
|
141 | 141 | |
|
142 | 142 | This rewrites a string token to include the unicode literal prefix |
|
143 | 143 | so the string transformer won't add the byte prefix. |
|
144 | 144 | |
|
145 | 145 | Ignores tokens that are not strings. Assumes bounds checking has |
|
146 | 146 | already been done. |
|
147 | 147 | |
|
148 | 148 | """ |
|
149 | 149 | st = tokens[j] |
|
150 | 150 | if st.type == token.STRING and st.string.startswith(("'", '"')): |
|
151 | 151 | tokens[j] = st._replace(string='u%s' % st.string) |
|
152 | 152 | |
|
153 | 153 | for i, t in enumerate(tokens): |
|
154 | 154 | # Convert most string literals to byte literals. String literals |
|
155 | 155 | # in Python 2 are bytes. String literals in Python 3 are unicode. |
|
156 | 156 | # Most strings in Mercurial are bytes and unicode strings are rare. |
|
157 | 157 | # Rather than rewrite all string literals to use ``b''`` to indicate |
|
158 | 158 | # byte strings, we apply this token transformer to insert the ``b`` |
|
159 | 159 | # prefix nearly everywhere. |
|
160 | 160 | if t.type == token.STRING: |
|
161 | 161 | s = t.string |
|
162 | 162 | |
|
163 | 163 | # Preserve docstrings as string literals. This is inconsistent |
|
164 | 164 | # with regular unprefixed strings. However, the |
|
165 | 165 | # "from __future__" parsing (which allows a module docstring to |
|
166 | 166 | # exist before it) doesn't properly handle the docstring if it |
|
167 | 167 | # is b''' prefixed, leading to a SyntaxError. We leave all |
|
168 | 168 | # docstrings as unprefixed to avoid this. This means Mercurial |
|
169 | 169 | # components touching docstrings need to handle unicode, |
|
170 | 170 | # unfortunately. |
|
171 | 171 | if s[0:3] in ("'''", '"""'): |
|
172 | 172 | yield t |
|
173 | 173 | continue |
|
174 | 174 | |
|
175 | 175 | # If the first character isn't a quote, it is likely a string |
|
176 | 176 | # prefixing character (such as 'b', 'u', or 'r'). Ignore. |
|
177 | 177 | if s[0] not in ("'", '"'): |
|
178 | 178 | yield t |
|
179 | 179 | continue |
|
180 | 180 | |
|
181 | 181 | # String literal. Prefix to make a b'' string. |
|
182 | 182 | yield t._replace(string='b%s' % t.string) |
|
183 | 183 | continue |
|
184 | 184 | |
|
185 | 185 | # Insert compatibility imports at "from __future__ import" line. |
|
186 | 186 | # No '\n' should be added to preserve line numbers. |
|
187 | 187 | if (t.type == token.NAME and t.string == 'import' and |
|
188 | 188 | all(u.type == token.NAME for u in tokens[i - 2:i]) and |
|
189 | 189 | [u.string for u in tokens[i - 2:i]] == ['from', '__future__']): |
|
190 | 190 | futureimpline = True |
|
191 | 191 | if t.type == token.NEWLINE and futureimpline: |
|
192 | 192 | futureimpline = False |
|
193 | 193 | if fullname == 'mercurial.pycompat': |
|
194 | 194 | yield t |
|
195 | 195 | continue |
|
196 | 196 | r, c = t.start |
|
197 | 197 | l = (b'; from mercurial.pycompat import ' |
|
198 | 198 | b'delattr, getattr, hasattr, setattr, ' |
|
199 | 199 | b'open, unicode\n') |
|
200 | 200 | for u in tokenize.tokenize(io.BytesIO(l).readline): |
|
201 | 201 | if u.type in (tokenize.ENCODING, token.ENDMARKER): |
|
202 | 202 | continue |
|
203 | 203 | yield u._replace( |
|
204 | 204 | start=(r, c + u.start[1]), end=(r, c + u.end[1])) |
|
205 | 205 | continue |
|
206 | 206 | |
|
207 | 207 | # This looks like a function call. |
|
208 | 208 | if t.type == token.NAME and _isop(i + 1, '('): |
|
209 | 209 | fn = t.string |
|
210 | 210 | |
|
211 | 211 | # *attr() builtins don't accept byte strings as their 2nd argument. |
|
212 | 212 | if (fn in ('getattr', 'setattr', 'hasattr', 'safehasattr') and |
|
213 | 213 | not _isop(i - 1, '.')): |
|
214 | 214 | arg1idx = _findargnofcall(1) |
|
215 | 215 | if arg1idx is not None: |
|
216 | 216 | _ensureunicode(arg1idx) |
|
217 | 217 | |
|
218 | 218 | # .encode() and .decode() on str/bytes/unicode don't accept |
|
219 | 219 | # byte strings on Python 3. |
|
220 | 220 | elif fn in ('encode', 'decode') and _isop(i - 1, '.'): |
|
221 | 221 | for argn in range(2): |
|
222 | 222 | argidx = _findargnofcall(argn) |
|
223 | 223 | if argidx is not None: |
|
224 | 224 | _ensureunicode(argidx) |
|
225 | 225 | |
|
226 | 226 | # It changes iteritems/values to items/values as they are not |
|
227 | 227 | # present in the Python 3 world. |
|
228 | elif fn in ('iteritems', 'itervalues'): |
|
|
228 | elif (fn in ('iteritems', 'itervalues') and | |
|
229 | not (tokens[i - 1].type == token.NAME and | |
|
230 | tokens[i - 1].string == 'def')): | |
|
229 | 231 | yield t._replace(string=fn[4:]) |
|
230 | 232 | continue |
|
231 | 233 | |
|
232 | 234 | # Emit unmodified token. |
|
233 | 235 | yield t |
|
234 | 236 | |
|
235 | 237 | # Header to add to bytecode files. This MUST be changed when |
|
236 | 238 | # ``replacetokens`` or any mechanism that changes semantics of module |
|
237 | 239 | # loading is changed. Otherwise cached bytecode may get loaded without |
|
238 | 240 | # the new transformation mechanisms applied. |
|
239 | 241 | BYTECODEHEADER = b'HG\x00\x0b' |
|
240 | 242 | |
|
241 | 243 | class hgloader(importlib.machinery.SourceFileLoader): |
|
242 | 244 | """Custom module loader that transforms source code. |
|
243 | 245 | |
|
244 | 246 | When the source code is converted to a code object, we transform |
|
245 | 247 | certain patterns to be Python 3 compatible. This allows us to write code |
|
246 | 248 | that is natively Python 2 and compatible with Python 3 without |
|
247 | 249 | making the code excessively ugly. |
|
248 | 250 | |
|
249 | 251 | We do this by transforming the token stream between parse and compile. |
|
250 | 252 | |
|
251 | 253 | Implementing transformations invalidates caching assumptions made |
|
252 | 254 | by the built-in importer. The built-in importer stores a header on |
|
253 | 255 | saved bytecode files indicating the Python/bytecode version. If the |
|
254 | 256 | version changes, the cached bytecode is ignored. The Mercurial |
|
255 | 257 | transformations could change at any time. This means we need to check |
|
256 | 258 | that cached bytecode was generated with the current transformation |
|
257 | 259 | code or there could be a mismatch between cached bytecode and what |
|
258 | 260 | would be generated from this class. |
|
259 | 261 | |
|
260 | 262 | We supplement the bytecode caching layer by wrapping ``get_data`` |
|
261 | 263 | and ``set_data``. These functions are called when the |
|
262 | 264 | ``SourceFileLoader`` retrieves and saves bytecode cache files, |
|
263 | 265 | respectively. We simply add an additional header on the file. As |
|
264 | 266 | long as the version in this file is changed when semantics change, |
|
265 | 267 | cached bytecode should be invalidated when transformations change. |
|
266 | 268 | |
|
267 | 269 | The added header has the form ``HG<VERSION>``. That is a literal |
|
268 | 270 | ``HG`` with 2 binary bytes indicating the transformation version. |
|
269 | 271 | """ |
|
270 | 272 | def get_data(self, path): |
|
271 | 273 | data = super(hgloader, self).get_data(path) |
|
272 | 274 | |
|
273 | 275 | if not path.endswith(tuple(importlib.machinery.BYTECODE_SUFFIXES)): |
|
274 | 276 | return data |
|
275 | 277 | |
|
276 | 278 | # There should be a header indicating the Mercurial transformation |
|
277 | 279 | # version. If it doesn't exist or doesn't match the current version, |
|
278 | 280 | # we raise an OSError because that is what |
|
279 | 281 | # ``SourceFileLoader.get_code()`` expects when loading bytecode |
|
280 | 282 | # paths to indicate the cached file is "bad." |
|
281 | 283 | if data[0:2] != b'HG': |
|
282 | 284 | raise OSError('no hg header') |
|
283 | 285 | if data[0:4] != BYTECODEHEADER: |
|
284 | 286 | raise OSError('hg header version mismatch') |
|
285 | 287 | |
|
286 | 288 | return data[4:] |
|
287 | 289 | |
|
288 | 290 | def set_data(self, path, data, *args, **kwargs): |
|
289 | 291 | if path.endswith(tuple(importlib.machinery.BYTECODE_SUFFIXES)): |
|
290 | 292 | data = BYTECODEHEADER + data |
|
291 | 293 | |
|
292 | 294 | return super(hgloader, self).set_data(path, data, *args, **kwargs) |
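Together, `get_data()` and `set_data()` implement a small framing protocol around cached bytecode. A self-contained model of that round trip (the payload is invented; the header value is the one defined above):

```python
BYTECODEHEADER = b'HG\x00\x0b'

def wrap(bytecode):
    # set_data(): prepend the transformation-version header
    return BYTECODEHEADER + bytecode

def unwrap(data):
    # get_data(): OSError tells the loader the cached file is "bad"
    if data[0:2] != b'HG':
        raise OSError('no hg header')
    if data[0:4] != BYTECODEHEADER:
        raise OSError('hg header version mismatch')
    return data[4:]

assert unwrap(wrap(b'payload')) == b'payload'
```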
|
293 | 295 | |
|
294 | 296 | def source_to_code(self, data, path): |
|
295 | 297 | """Perform token transformation before compilation.""" |
|
296 | 298 | buf = io.BytesIO(data) |
|
297 | 299 | tokens = tokenize.tokenize(buf.readline) |
|
298 | 300 | data = tokenize.untokenize(replacetokens(list(tokens), self.name)) |
|
299 | 301 | # Python's built-in importer strips frames from exceptions raised |
|
300 | 302 | # for this code. Unfortunately, that mechanism isn't extensible |
|
301 | 303 | # and our frame will be blamed for the import failure. There |
|
302 | 304 | # are extremely hacky ways to do frame stripping. We haven't |
|
303 | 305 | # implemented them because they are very ugly. |
|
304 | 306 | return super(hgloader, self).source_to_code(data, path) |
|
305 | 307 | |
|
306 | 308 | # We automagically register our custom importer as a side-effect of |
|
307 | 309 | # loading. This is necessary to ensure that any entry points are able |
|
308 | 310 | # to import mercurial.* modules without having to perform this |
|
309 | 311 | # registration themselves. |
|
310 | 312 | if not any(isinstance(x, hgpathentryfinder) for x in sys.meta_path): |
|
311 | 313 | # meta_path is used before any implicit finders and before sys.path. |
|
312 | 314 | sys.meta_path.insert(0, hgpathentryfinder()) |
@@ -1,676 +1,678 @@
|
1 | 1 | # branchmap.py - logic to compute, maintain and store the branchmap for a local repo |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import struct |
|
11 | 11 | |
|
12 | 12 | from .node import ( |
|
13 | 13 | bin, |
|
14 | 14 | hex, |
|
15 | 15 | nullid, |
|
16 | 16 | nullrev, |
|
17 | 17 | ) |
|
18 | 18 | from . import ( |
|
19 | 19 | encoding, |
|
20 | 20 | error, |
|
21 | 21 | pycompat, |
|
22 | 22 | scmutil, |
|
23 | 23 | util, |
|
24 | 24 | ) |
|
25 | 25 | from .utils import ( |
|
26 | 26 | repoviewutil, |
|
27 | 27 | stringutil, |
|
28 | 28 | ) |
|
29 | 29 | |
|
30 | 30 | subsettable = repoviewutil.subsettable |
|
31 | 31 | |
|
32 | 32 | calcsize = struct.calcsize |
|
33 | 33 | pack_into = struct.pack_into |
|
34 | 34 | unpack_from = struct.unpack_from |
|
35 | 35 | |
|
36 | 36 | |
|
37 | 37 | class BranchMapCache(object): |
|
38 | 38 | """mapping of filtered views of repo with their branchcache""" |
|
39 | 39 | def __init__(self): |
|
40 | 40 | self._per_filter = {} |
|
41 | 41 | |
|
42 | 42 | def __getitem__(self, repo): |
|
43 | 43 | self.updatecache(repo) |
|
44 | 44 | return self._per_filter[repo.filtername] |
|
45 | 45 | |
|
46 | 46 | def updatecache(self, repo): |
|
47 | 47 | """Update the cache for the given filtered view on a repository""" |
|
48 | 48 | # This can trigger updates for the caches for subsets of the filtered |
|
49 | 49 | # view, e.g. when there is no cache for this filtered view or the cache |
|
50 | 50 | # is stale. |
|
51 | 51 | |
|
52 | 52 | cl = repo.changelog |
|
53 | 53 | filtername = repo.filtername |
|
54 | 54 | bcache = self._per_filter.get(filtername) |
|
55 | 55 | if bcache is None or not bcache.validfor(repo): |
|
56 | 56 | # cache object missing or cache object stale? Read from disk |
|
57 | 57 | bcache = branchcache.fromfile(repo) |
|
58 | 58 | |
|
59 | 59 | revs = [] |
|
60 | 60 | if bcache is None: |
|
61 | 61 | # no (fresh) cache available anymore, perhaps we can re-use |
|
62 | 62 | # the cache for a subset, then extend that to add info on missing |
|
63 | 63 | # revisions. |
|
64 | 64 | subsetname = subsettable.get(filtername) |
|
65 | 65 | if subsetname is not None: |
|
66 | 66 | subset = repo.filtered(subsetname) |
|
67 | 67 | bcache = self[subset].copy() |
|
68 | 68 | extrarevs = subset.changelog.filteredrevs - cl.filteredrevs |
|
69 | 69 | revs.extend(r for r in extrarevs if r <= bcache.tiprev) |
|
70 | 70 | else: |
|
71 | 71 | # nothing to fall back on, start empty. |
|
72 | 72 | bcache = branchcache() |
|
73 | 73 | |
|
74 | 74 | revs.extend(cl.revs(start=bcache.tiprev + 1)) |
|
75 | 75 | if revs: |
|
76 | 76 | bcache.update(repo, revs) |
|
77 | 77 | |
|
78 | 78 | assert bcache.validfor(repo), filtername |
|
79 | 79 | self._per_filter[repo.filtername] = bcache |
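The subset fallback in `updatecache()` is easier to see with the filter hierarchy written out. A hedged standalone model (the `subsettable` entries below are abbreviated from repoviewutil, and the cache contents are invented):

```python
# Coarser subset for each filter level, roughly as in repoviewutil.
subsettable = {'visible': 'served', 'served': 'immutable',
               'immutable': 'base'}

def basecache(filtername, caches):
    """Walk toward coarser filters until a cached subset is found."""
    while filtername is not None and filtername not in caches:
        filtername = subsettable.get(filtername)
    return filtername

print(basecache('visible', {'immutable': 'cached-branchmap'}))
# -> 'immutable': that cache would be copied and then extended
```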
|
80 | 80 | |
|
81 | 81 | def replace(self, repo, remotebranchmap): |
|
82 | 82 | """Replace the branchmap cache for a repo with a branch mapping. |
|
83 | 83 | |
|
84 | 84 | This is likely only called during clone with a branch map from a |
|
85 | 85 | remote. |
|
86 | 86 | |
|
87 | 87 | """ |
|
88 | 88 | cl = repo.changelog |
|
89 | 89 | clrev = cl.rev |
|
90 | 90 | clbranchinfo = cl.branchinfo |
|
91 | 91 | rbheads = [] |
|
92 | 92 | closed = [] |
|
93 | 93 | for bheads in remotebranchmap.itervalues(): |
|
94 | 94 | rbheads += bheads |
|
95 | 95 | for h in bheads: |
|
96 | 96 | r = clrev(h) |
|
97 | 97 | b, c = clbranchinfo(r) |
|
98 | 98 | if c: |
|
99 | 99 | closed.append(h) |
|
100 | 100 | |
|
101 | 101 | if rbheads: |
|
102 | 102 | rtiprev = max((int(clrev(node)) for node in rbheads)) |
|
103 | 103 | cache = branchcache( |
|
104 | 104 | remotebranchmap, repo[rtiprev].node(), rtiprev, |
|
105 | 105 | closednodes=closed) |
|
106 | 106 | |
|
107 | 107 | # Try to stick it as low as possible |
|
108 | 108 | # filters above 'served' are unlikely to be fetched from a clone |
|
109 | 109 | for candidate in ('base', 'immutable', 'served'): |
|
110 | 110 | rview = repo.filtered(candidate) |
|
111 | 111 | if cache.validfor(rview): |
|
112 | 112 | self._per_filter[candidate] = cache |
|
113 | 113 | cache.write(rview) |
|
114 | 114 | return |
|
115 | 115 | |
|
116 | 116 | def clear(self): |
|
117 | 117 | self._per_filter.clear() |
|
118 | 118 | |
|
119 | 119 | def _unknownnode(node): |
|
120 | 120 | """ raises ValueError when branchcache found a node which does not exists |
|
121 | 121 | """ |
|
122 | 122 | raise ValueError(r'node %s does not exist' % pycompat.sysstr(hex(node))) |
|
123 | 123 | |
|
124 | 124 | def _branchcachedesc(repo): |
|
125 | 125 | if repo.filtername is not None: |
|
126 | 126 | return 'branch cache (%s)' % repo.filtername |
|
127 | 127 | else: |
|
128 | 128 | return 'branch cache' |
|
129 | 129 | |
|
130 | 130 | class branchcache(object): |
|
131 | 131 | """A dict like object that hold branches heads cache. |
|
132 | 132 | |
|
133 | 133 | This cache is used to avoid costly computations to determine all the |
|
134 | 134 | branch heads of a repo. |
|
135 | 135 | |
|
136 | 136 | The cache is serialized on disk in the following format: |
|
137 | 137 | |
|
138 | 138 | <tip hex node> <tip rev number> [optional filtered repo hex hash] |
|
139 | 139 | <branch head hex node> <open/closed state> <branch name> |
|
140 | 140 | <branch head hex node> <open/closed state> <branch name> |
|
141 | 141 | ... |
|
142 | 142 | |
|
143 | 143 | The first line is used to check if the cache is still valid. If the |
|
144 | 144 | branch cache is for a filtered repo view, an optional third hash is |
|
145 | 145 | included that hashes the hashes of all filtered revisions. |
|
146 | 146 | |
|
147 | 147 | The open/closed state is represented by a single letter 'o' or 'c'. |
|
148 | 148 | This field can be used to avoid changelog reads when determining if a |
|
149 | 149 | branch head closes a branch or not. |
|
150 | 150 | """ |
|
151 | 151 | |
|
152 | 152 | def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev, |
|
153 | 153 | filteredhash=None, closednodes=None, hasnode=None): |
|
154 | 154 | """ hasnode is a function which can be used to verify whether changelog |
|
155 | 155 | has a given node or not. If it's not provided, we assume that every node |
|
156 | 156 | we have exists in changelog """ |
|
157 | 157 | self.tipnode = tipnode |
|
158 | 158 | self.tiprev = tiprev |
|
159 | 159 | self.filteredhash = filteredhash |
|
160 | 160 | # closednodes is a set of nodes that close their branch. If the branch |
|
161 | 161 | # cache has been updated, it may contain nodes that are no longer |
|
162 | 162 | # heads. |
|
163 | 163 | if closednodes is None: |
|
164 | 164 | self._closednodes = set() |
|
165 | 165 | else: |
|
166 | 166 | self._closednodes = closednodes |
|
167 | 167 | self._entries = dict(entries) |
|
168 | 168 | # whether closed nodes are verified or not |
|
169 | 169 | self._closedverified = False |
|
170 | 170 | # branches for which nodes are verified |
|
171 | 171 | self._verifiedbranches = set() |
|
172 | 172 | self._hasnode = hasnode |
|
173 | 173 | if self._hasnode is None: |
|
174 | 174 | self._hasnode = lambda x: True |
|
175 | 175 | |
|
176 | 176 | def _verifyclosed(self): |
|
177 | 177 | """ verify the closed nodes we have """ |
|
178 | 178 | if self._closedverified: |
|
179 | 179 | return |
|
180 | 180 | for node in self._closednodes: |
|
181 | 181 | if not self._hasnode(node): |
|
182 | 182 | _unknownnode(node) |
|
183 | 183 | |
|
184 | 184 | self._closedverified = True |
|
185 | 185 | |
|
186 | 186 | def _verifybranch(self, branch): |
|
187 | 187 | """ verify head nodes for the given branch. """ |
|
188 | 188 | if branch not in self._entries or branch in self._verifiedbranches: |
|
189 | 189 | return |
|
190 | 190 | for n in self._entries[branch]: |
|
191 | 191 | if not self._hasnode(n): |
|
192 | 192 | _unknownnode(n) |
|
193 | 193 | |
|
194 | 194 | self._verifiedbranches.add(branch) |
|
195 | 195 | |
|
196 | 196 | def _verifyall(self): |
|
197 | 197 | """ verifies nodes of all the branches """ |
|
198 | 198 | needverification = set(self._entries.keys()) - self._verifiedbranches |
|
199 | 199 | for b in needverification: |
|
200 | 200 | self._verifybranch(b) |
|
201 | 201 | |
|
202 | 202 | def __iter__(self): |
|
203 | 203 | return iter(self._entries) |
|
204 | 204 | |
|
205 | 205 | def __setitem__(self, key, value): |
|
206 | 206 | self._entries[key] = value |
|
207 | 207 | |
|
208 | 208 | def __getitem__(self, key): |
|
209 | 209 | self._verifybranch(key) |
|
210 | 210 | return self._entries[key] |
|
211 | 211 | |
|
212 | 212 | def __contains__(self, key): |
|
213 | 213 | self._verifybranch(key) |
|
214 | 214 | return key in self._entries |
|
215 | 215 | |
|
216 | 216 | def iteritems(self): |
|
217 | 217 | for k, v in self._entries.iteritems(): |
|
218 | 218 | self._verifybranch(k) |
|
219 | 219 | yield k, v |
|
220 | 220 | |
|
221 | items = iteritems | |
|
222 | ||
|
221 | 223 | def hasbranch(self, label): |
|
222 | 224 | """ checks whether a branch of this name exists or not """ |
|
223 | 225 | self._verifybranch(label) |
|
224 | 226 | return label in self._entries |
|
225 | 227 | |
|
226 | 228 | @classmethod |
|
227 | 229 | def fromfile(cls, repo): |
|
228 | 230 | f = None |
|
229 | 231 | try: |
|
230 | 232 | f = repo.cachevfs(cls._filename(repo)) |
|
231 | 233 | lineiter = iter(f) |
|
232 | 234 | cachekey = next(lineiter).rstrip('\n').split(" ", 2) |
|
233 | 235 | last, lrev = cachekey[:2] |
|
234 | 236 | last, lrev = bin(last), int(lrev) |
|
235 | 237 | filteredhash = None |
|
236 | 238 | hasnode = repo.changelog.hasnode |
|
237 | 239 | if len(cachekey) > 2: |
|
238 | 240 | filteredhash = bin(cachekey[2]) |
|
239 | 241 | bcache = cls(tipnode=last, tiprev=lrev, filteredhash=filteredhash, |
|
240 | 242 | hasnode=hasnode) |
|
241 | 243 | if not bcache.validfor(repo): |
|
242 | 244 | # invalidate the cache |
|
243 | 245 | raise ValueError(r'tip differs') |
|
244 | 246 | bcache.load(repo, lineiter) |
|
245 | 247 | except (IOError, OSError): |
|
246 | 248 | return None |
|
247 | 249 | |
|
248 | 250 | except Exception as inst: |
|
249 | 251 | if repo.ui.debugflag: |
|
250 | 252 | msg = 'invalid %s: %s\n' |
|
251 | 253 | repo.ui.debug(msg % (_branchcachedesc(repo), |
|
252 | 254 | pycompat.bytestr(inst))) |
|
253 | 255 | bcache = None |
|
254 | 256 | |
|
255 | 257 | finally: |
|
256 | 258 | if f: |
|
257 | 259 | f.close() |
|
258 | 260 | |
|
259 | 261 | return bcache |
|
260 | 262 | |
|
261 | 263 | def load(self, repo, lineiter): |
|
262 | 264 | """ fully loads the branchcache by reading from the file using the line |
|
263 | 265 | iterator passed""" |
|
264 | 266 | for line in lineiter: |
|
265 | 267 | line = line.rstrip('\n') |
|
266 | 268 | if not line: |
|
267 | 269 | continue |
|
268 | 270 | node, state, label = line.split(" ", 2) |
|
269 | 271 | if state not in 'oc': |
|
270 | 272 | raise ValueError(r'invalid branch state') |
|
271 | 273 | label = encoding.tolocal(label.strip()) |
|
272 | 274 | node = bin(node) |
|
273 | 275 | self._entries.setdefault(label, []).append(node) |
|
274 | 276 | if state == 'c': |
|
275 | 277 | self._closednodes.add(node) |
|
276 | 278 | |
|
277 | 279 | @staticmethod |
|
278 | 280 | def _filename(repo): |
|
279 | 281 | """name of a branchcache file for a given repo or repoview""" |
|
280 | 282 | filename = "branch2" |
|
281 | 283 | if repo.filtername: |
|
282 | 284 | filename = '%s-%s' % (filename, repo.filtername) |
|
283 | 285 | return filename |
|
284 | 286 | |
|
285 | 287 | def validfor(self, repo): |
|
286 | 288 | """Is the cache content valid regarding a repo |
|
287 | 289 | |
|
288 | 290 | - False when cached tipnode is unknown or if we detect a strip. |
|
289 | 291 | - True when cache is up to date or a subset of current repo.""" |
|
290 | 292 | try: |
|
291 | 293 | return ((self.tipnode == repo.changelog.node(self.tiprev)) |
|
292 | 294 | and (self.filteredhash == |
|
293 | 295 | scmutil.filteredhash(repo, self.tiprev))) |
|
294 | 296 | except IndexError: |
|
295 | 297 | return False |
|
296 | 298 | |
|
297 | 299 | def _branchtip(self, heads): |
|
298 | 300 | '''Return a tuple of the last open head in heads and False; |
|
299 | 301 | if all heads are closed, return the last closed head and True.''' |
|
300 | 302 | tip = heads[-1] |
|
301 | 303 | closed = True |
|
302 | 304 | for h in reversed(heads): |
|
303 | 305 | if h not in self._closednodes: |
|
304 | 306 | tip = h |
|
305 | 307 | closed = False |
|
306 | 308 | break |
|
307 | 309 | return tip, closed |
|
308 | 310 | |
|
309 | 311 | def branchtip(self, branch): |
|
310 | 312 | '''Return the tipmost open head on branch, otherwise return the |
|
311 | 313 | tipmost closed head on branch. |
|
312 | 314 | Raise KeyError for unknown branch.''' |
|
313 | 315 | return self._branchtip(self[branch])[0] |
|
314 | 316 | |
|
315 | 317 | def iteropen(self, nodes): |
|
316 | 318 | return (n for n in nodes if n not in self._closednodes) |
|
317 | 319 | |
|
318 | 320 | def branchheads(self, branch, closed=False): |
|
319 | 321 | self._verifybranch(branch) |
|
320 | 322 | heads = self._entries[branch] |
|
321 | 323 | if not closed: |
|
322 | 324 | heads = list(self.iteropen(heads)) |
|
323 | 325 | return heads |
|
324 | 326 | |
|
325 | 327 | def iterbranches(self): |
|
326 | 328 | for bn, heads in self.iteritems(): |
|
327 | 329 | yield (bn, heads) + self._branchtip(heads) |
|
328 | 330 | |
|
329 | 331 | def iterheads(self): |
|
330 | 332 | """ returns all the heads """ |
|
331 | 333 | self._verifyall() |
|
332 | 334 | return self._entries.itervalues() |
|
333 | 335 | |
|
334 | 336 | def copy(self): |
|
335 | 337 | """return an deep copy of the branchcache object""" |
|
336 | 338 | return type(self)( |
|
337 | 339 | self._entries, self.tipnode, self.tiprev, self.filteredhash, |
|
338 | 340 | self._closednodes) |
|
339 | 341 | |
|
340 | 342 | def write(self, repo): |
|
341 | 343 | try: |
|
342 | 344 | f = repo.cachevfs(self._filename(repo), "w", atomictemp=True) |
|
343 | 345 | cachekey = [hex(self.tipnode), '%d' % self.tiprev] |
|
344 | 346 | if self.filteredhash is not None: |
|
345 | 347 | cachekey.append(hex(self.filteredhash)) |
|
346 | 348 | f.write(" ".join(cachekey) + '\n') |
|
347 | 349 | nodecount = 0 |
|
348 | 350 | for label, nodes in sorted(self._entries.iteritems()): |
|
349 | 351 | label = encoding.fromlocal(label) |
|
350 | 352 | for node in nodes: |
|
351 | 353 | nodecount += 1 |
|
352 | 354 | if node in self._closednodes: |
|
353 | 355 | state = 'c' |
|
354 | 356 | else: |
|
355 | 357 | state = 'o' |
|
356 | 358 | f.write("%s %s %s\n" % (hex(node), state, label)) |
|
357 | 359 | f.close() |
|
358 | 360 | repo.ui.log('branchcache', 'wrote %s with %d labels and %d nodes\n', |
|
359 | 361 | _branchcachedesc(repo), len(self._entries), nodecount) |
|
360 | 362 | except (IOError, OSError, error.Abort) as inst: |
|
361 | 363 | # Abort may be raised by read only opener, so log and continue |
|
362 | 364 | repo.ui.debug("couldn't write branch cache: %s\n" % |
|
363 | 365 | stringutil.forcebytestr(inst)) |
|
364 | 366 | |
|
365 | 367 | def update(self, repo, revgen): |
|
366 | 368 | """Given a branchhead cache, self, that may have extra nodes or be |
|
367 | 369 | missing heads, and a generator of nodes that are strictly a superset of |
|
368 | 370 | the missing heads, this function updates self to be correct. |
|
369 | 371 | """ |
|
370 | 372 | starttime = util.timer() |
|
371 | 373 | cl = repo.changelog |
|
372 | 374 | # collect new branch entries |
|
373 | 375 | newbranches = {} |
|
374 | 376 | getbranchinfo = repo.revbranchcache().branchinfo |
|
375 | 377 | for r in revgen: |
|
376 | 378 | branch, closesbranch = getbranchinfo(r) |
|
377 | 379 | newbranches.setdefault(branch, []).append(r) |
|
378 | 380 | if closesbranch: |
|
379 | 381 | self._closednodes.add(cl.node(r)) |
|
380 | 382 | |
|
381 | 383 | # fetch current topological heads to speed up filtering |
|
382 | 384 | topoheads = set(cl.headrevs()) |
|
383 | 385 | |
|
384 | 386 | # new tip revision which we found after iterating items from new |
|
385 | 387 | # branches |
|
386 | 388 | ntiprev = self.tiprev |
|
387 | 389 | |
|
388 | 390 | # if older branchheads are reachable from new ones, they aren't |
|
389 | 391 | # really branchheads. Note checking parents is insufficient: |
|
390 | 392 | # 1 (branch a) -> 2 (branch b) -> 3 (branch a) |
|
391 | 393 | for branch, newheadrevs in newbranches.iteritems(): |
|
392 | 394 | bheads = self._entries.setdefault(branch, []) |
|
393 | 395 | bheadset = set(cl.rev(node) for node in bheads) |
|
394 | 396 | |
|
395 | 397 | # This has been tested True on all internal usage of this function. |
|
396 | 398 | # Run it again in case of doubt. |
|
397 | 399 | # assert not (set(bheadrevs) & set(newheadrevs)) |
|
398 | 400 | bheadset.update(newheadrevs) |
|
399 | 401 | |
|
400 | 402 | # This prunes out two kinds of heads - heads that are superseded by |
|
401 | 403 | # a head in newheadrevs, and newheadrevs that are not heads because |
|
402 | 404 | # an existing head is their descendant. |
|
403 | 405 | uncertain = bheadset - topoheads |
|
404 | 406 | if uncertain: |
|
405 | 407 | floorrev = min(uncertain) |
|
406 | 408 | ancestors = set(cl.ancestors(newheadrevs, floorrev)) |
|
407 | 409 | bheadset -= ancestors |
|
408 | 410 | bheadrevs = sorted(bheadset) |
|
409 | 411 | self[branch] = [cl.node(rev) for rev in bheadrevs] |
|
410 | 412 | tiprev = bheadrevs[-1] |
|
411 | 413 | if tiprev > ntiprev: |
|
412 | 414 | ntiprev = tiprev |
|
413 | 415 | |
|
414 | 416 | if ntiprev > self.tiprev: |
|
415 | 417 | self.tiprev = ntiprev |
|
416 | 418 | self.tipnode = cl.node(ntiprev) |
|
417 | 419 | |
|
418 | 420 | if not self.validfor(repo): |
|
419 | 421 | # cache key are not valid anymore |
|
420 | 422 | self.tipnode = nullid |
|
421 | 423 | self.tiprev = nullrev |
|
422 | 424 | for heads in self.iterheads(): |
|
423 | 425 | tiprev = max(cl.rev(node) for node in heads) |
|
424 | 426 | if tiprev > self.tiprev: |
|
425 | 427 | self.tipnode = cl.node(tiprev) |
|
426 | 428 | self.tiprev = tiprev |
|
427 | 429 | self.filteredhash = scmutil.filteredhash(repo, self.tiprev) |
|
428 | 430 | |
|
429 | 431 | duration = util.timer() - starttime |
|
430 | 432 | repo.ui.log('branchcache', 'updated %s in %.4f seconds\n', |
|
431 | 433 | _branchcachedesc(repo), duration) |
|
432 | 434 | |
|
433 | 435 | self.write(repo) |
|
434 | 436 | |
|
435 | 437 | |
|
436 | 438 | class remotebranchcache(branchcache): |
|
437 | 439 | """Branchmap info for a remote connection, should not write locally""" |
|
438 | 440 | def write(self, repo): |
|
439 | 441 | pass |
|
440 | 442 | |
|
441 | 443 | |
|
442 | 444 | # Revision branch info cache |
|
443 | 445 | |
|
444 | 446 | _rbcversion = '-v1' |
|
445 | 447 | _rbcnames = 'rbc-names' + _rbcversion |
|
446 | 448 | _rbcrevs = 'rbc-revs' + _rbcversion |
|
447 | 449 | # [4 byte hash prefix][4 byte branch name number with top bit indicating a close] |
|
448 | 450 | _rbcrecfmt = '>4sI' |
|
449 | 451 | _rbcrecsize = calcsize(_rbcrecfmt) |
|
450 | 452 | _rbcnodelen = 4 |
|
451 | 453 | _rbcbranchidxmask = 0x7fffffff |
|
452 | 454 | _rbccloseflag = 0x80000000 |
|
453 | 455 | |
|
454 | 456 | class revbranchcache(object): |
|
455 | 457 | """Persistent cache, mapping from revision number to branch name and close. |
|
456 | 458 | This is a low level cache, independent of filtering. |
|
457 | 459 | |
|
458 | 460 | Branch names are stored in rbc-names in internal encoding separated by 0. |
|
459 | 461 | rbc-names is append-only, and each branch name is only stored once and will |
|
460 | 462 | thus have a unique index. |
|
461 | 463 | |
|
462 | 464 | The branch info for each revision is stored in rbc-revs as constant size |
|
463 | 465 | records. The whole file is read into memory, but it is only 'parsed' on |
|
464 | 466 | demand. The file is usually append-only but will be truncated if repo |
|
465 | 467 | modification is detected. |
|
466 | 468 | The record for each revision contains the first 4 bytes of the |
|
467 | 469 | corresponding node hash, and the record is only used if it still matches. |
|
468 | 470 | Even a completely trashed rbc-revs file will thus still give the right result |
|
469 | 471 | while converging towards full recovery ... assuming no incorrectly matching |
|
470 | 472 | node hashes. |
|
471 | 473 | The record also contains 4 bytes where 31 bits contain the index of the |
|
472 | 474 | branch and the last bit indicates that it is a branch close commit. |
|
473 | 475 | The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i |
|
474 | 476 | and will grow with it but be 1/8th of its size. |
|
475 | 477 | """ |
|
476 | 478 | |
|
477 | 479 | def __init__(self, repo, readonly=True): |
|
478 | 480 | assert repo.filtername is None |
|
479 | 481 | self._repo = repo |
|
480 | 482 | self._names = [] # branch names in local encoding with static index |
|
481 | 483 | self._rbcrevs = bytearray() |
|
482 | 484 | self._rbcsnameslen = 0 # length of names read at _rbcsnameslen |
|
483 | 485 | try: |
|
484 | 486 | bndata = repo.cachevfs.read(_rbcnames) |
|
485 | 487 | self._rbcsnameslen = len(bndata) # for verification before writing |
|
486 | 488 | if bndata: |
|
487 | 489 | self._names = [encoding.tolocal(bn) |
|
488 | 490 | for bn in bndata.split('\0')] |
|
489 | 491 | except (IOError, OSError): |
|
490 | 492 | if readonly: |
|
491 | 493 | # don't try to use cache - fall back to the slow path |
|
492 | 494 | self.branchinfo = self._branchinfo |
|
493 | 495 | |
|
494 | 496 | if self._names: |
|
495 | 497 | try: |
|
496 | 498 | data = repo.cachevfs.read(_rbcrevs) |
|
497 | 499 | self._rbcrevs[:] = data |
|
498 | 500 | except (IOError, OSError) as inst: |
|
499 | 501 | repo.ui.debug("couldn't read revision branch cache: %s\n" % |
|
500 | 502 | stringutil.forcebytestr(inst)) |
|
501 | 503 | # remember number of good records on disk |
|
502 | 504 | self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize, |
|
503 | 505 | len(repo.changelog)) |
|
504 | 506 | if self._rbcrevslen == 0: |
|
505 | 507 | self._names = [] |
|
506 | 508 | self._rbcnamescount = len(self._names) # number of names read at |
|
507 | 509 | # _rbcsnameslen |
|
508 | 510 | |
|
509 | 511 | def _clear(self): |
|
510 | 512 | self._rbcsnameslen = 0 |
|
511 | 513 | del self._names[:] |
|
512 | 514 | self._rbcnamescount = 0 |
|
513 | 515 | self._rbcrevslen = len(self._repo.changelog) |
|
514 | 516 | self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize) |
|
515 | 517 | util.clearcachedproperty(self, '_namesreverse') |
|
516 | 518 | |
|
517 | 519 | @util.propertycache |
|
518 | 520 | def _namesreverse(self): |
|
519 | 521 | return dict((b, r) for r, b in enumerate(self._names)) |
|
520 | 522 | |
|
521 | 523 | def branchinfo(self, rev): |
|
522 | 524 | """Return branch name and close flag for rev, using and updating |
|
523 | 525 | persistent cache.""" |
|
524 | 526 | changelog = self._repo.changelog |
|
525 | 527 | rbcrevidx = rev * _rbcrecsize |
|
526 | 528 | |
|
527 | 529 | # avoid negative index, changelog.read(nullrev) is fast without cache |
|
528 | 530 | if rev == nullrev: |
|
529 | 531 | return changelog.branchinfo(rev) |
|
530 | 532 | |
|
531 | 533 | # if requested rev isn't allocated, grow and cache the rev info |
|
532 | 534 | if len(self._rbcrevs) < rbcrevidx + _rbcrecsize: |
|
533 | 535 | return self._branchinfo(rev) |
|
534 | 536 | |
|
535 | 537 | # fast path: extract data from cache, use it if node is matching |
|
536 | 538 | reponode = changelog.node(rev)[:_rbcnodelen] |
|
537 | 539 | cachenode, branchidx = unpack_from( |
|
538 | 540 | _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx) |
|
539 | 541 | close = bool(branchidx & _rbccloseflag) |
|
540 | 542 | if close: |
|
541 | 543 | branchidx &= _rbcbranchidxmask |
|
542 | 544 | if cachenode == '\0\0\0\0': |
|
543 | 545 | pass |
|
544 | 546 | elif cachenode == reponode: |
|
545 | 547 | try: |
|
546 | 548 | return self._names[branchidx], close |
|
547 | 549 | except IndexError: |
|
548 | 550 | # recover from invalid reference to unknown branch |
|
549 | 551 | self._repo.ui.debug("referenced branch names not found" |
|
550 | 552 | " - rebuilding revision branch cache from scratch\n") |
|
551 | 553 | self._clear() |
|
552 | 554 | else: |
|
553 | 555 | # rev/node map has changed, invalidate the cache from here up |
|
554 | 556 | self._repo.ui.debug("history modification detected - truncating " |
|
555 | 557 | "revision branch cache to revision %d\n" % rev) |
|
556 | 558 | truncate = rbcrevidx + _rbcrecsize |
|
557 | 559 | del self._rbcrevs[truncate:] |
|
558 | 560 | self._rbcrevslen = min(self._rbcrevslen, truncate) |
|
559 | 561 | |
|
560 | 562 | # fall back to slow path and make sure it will be written to disk |
|
561 | 563 | return self._branchinfo(rev) |
|
562 | 564 | |
|
563 | 565 | def _branchinfo(self, rev): |
|
564 | 566 | """Retrieve branch info from changelog and update _rbcrevs""" |
|
565 | 567 | changelog = self._repo.changelog |
|
566 | 568 | b, close = changelog.branchinfo(rev) |
|
567 | 569 | if b in self._namesreverse: |
|
568 | 570 | branchidx = self._namesreverse[b] |
|
569 | 571 | else: |
|
570 | 572 | branchidx = len(self._names) |
|
571 | 573 | self._names.append(b) |
|
572 | 574 | self._namesreverse[b] = branchidx |
|
573 | 575 | reponode = changelog.node(rev) |
|
574 | 576 | if close: |
|
575 | 577 | branchidx |= _rbccloseflag |
|
576 | 578 | self._setcachedata(rev, reponode, branchidx) |
|
577 | 579 | return b, close |
|
578 | 580 | |
|
579 | 581 | def setdata(self, branch, rev, node, close): |
|
580 | 582 | """add new data information to the cache""" |
|
581 | 583 | if branch in self._namesreverse: |
|
582 | 584 | branchidx = self._namesreverse[branch] |
|
583 | 585 | else: |
|
584 | 586 | branchidx = len(self._names) |
|
585 | 587 | self._names.append(branch) |
|
586 | 588 | self._namesreverse[branch] = branchidx |
|
587 | 589 | if close: |
|
588 | 590 | branchidx |= _rbccloseflag |
|
589 | 591 | self._setcachedata(rev, node, branchidx) |
|
590 | 592 | # If no cache data were readable (file missing, bad permissions, etc.) |
|
591 | 593 | # the cache was bypassing itself by setting: |
|
592 | 594 | # |
|
593 | 595 | # self.branchinfo = self._branchinfo |
|
594 | 596 | # |
|
595 | 597 | # Since we now have data in the cache, we need to drop this bypassing. |
|
596 | 598 | if r'branchinfo' in vars(self): |
|
597 | 599 | del self.branchinfo |
|
598 | 600 | |
|
599 | 601 | def _setcachedata(self, rev, node, branchidx): |
|
600 | 602 | """Writes the node's branch data to the in-memory cache data.""" |
|
601 | 603 | if rev == nullrev: |
|
602 | 604 | return |
|
603 | 605 | rbcrevidx = rev * _rbcrecsize |
|
604 | 606 | if len(self._rbcrevs) < rbcrevidx + _rbcrecsize: |
|
605 | 607 | self._rbcrevs.extend('\0' * |
|
606 | 608 | (len(self._repo.changelog) * _rbcrecsize - |
|
607 | 609 | len(self._rbcrevs))) |
|
608 | 610 | pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx) |
|
609 | 611 | self._rbcrevslen = min(self._rbcrevslen, rev) |
|
610 | 612 | |
|
611 | 613 | tr = self._repo.currenttransaction() |
|
612 | 614 | if tr: |
|
613 | 615 | tr.addfinalize('write-revbranchcache', self.write) |
|
614 | 616 | |
|
615 | 617 | def write(self, tr=None): |
|
616 | 618 | """Save branch cache if it is dirty.""" |
|
617 | 619 | repo = self._repo |
|
618 | 620 | wlock = None |
|
619 | 621 | step = '' |
|
620 | 622 | try: |
|
621 | 623 | # write the new names |
|
622 | 624 | if self._rbcnamescount < len(self._names): |
|
623 | 625 | wlock = repo.wlock(wait=False) |
|
624 | 626 | step = ' names' |
|
625 | 627 | self._writenames(repo) |
|
626 | 628 | |
|
627 | 629 | # write the new revs |
|
628 | 630 | start = self._rbcrevslen * _rbcrecsize |
|
629 | 631 | if start != len(self._rbcrevs): |
|
630 | 632 | step = '' |
|
631 | 633 | if wlock is None: |
|
632 | 634 | wlock = repo.wlock(wait=False) |
|
633 | 635 | self._writerevs(repo, start) |
|
634 | 636 | |
|
635 | 637 | except (IOError, OSError, error.Abort, error.LockError) as inst: |
|
636 | 638 | repo.ui.debug("couldn't write revision branch cache%s: %s\n" |
|
637 | 639 | % (step, stringutil.forcebytestr(inst))) |
|
638 | 640 | finally: |
|
639 | 641 | if wlock is not None: |
|
640 | 642 | wlock.release() |
|
641 | 643 | |
|
642 | 644 | def _writenames(self, repo): |
|
643 | 645 | """ write the new branch names to revbranchcache """ |
|
644 | 646 | if self._rbcnamescount != 0: |
|
645 | 647 | f = repo.cachevfs.open(_rbcnames, 'ab') |
|
646 | 648 | if f.tell() == self._rbcsnameslen: |
|
647 | 649 | f.write('\0') |
|
648 | 650 | else: |
|
649 | 651 | f.close() |
|
650 | 652 | repo.ui.debug("%s changed - rewriting it\n" % _rbcnames) |
|
651 | 653 | self._rbcnamescount = 0 |
|
652 | 654 | self._rbcrevslen = 0 |
|
653 | 655 | if self._rbcnamescount == 0: |
|
654 | 656 | # before rewriting names, make sure references are removed |
|
655 | 657 | repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True) |
|
656 | 658 | f = repo.cachevfs.open(_rbcnames, 'wb') |
|
657 | 659 | f.write('\0'.join(encoding.fromlocal(b) |
|
658 | 660 | for b in self._names[self._rbcnamescount:])) |
|
659 | 661 | self._rbcsnameslen = f.tell() |
|
660 | 662 | f.close() |
|
661 | 663 | self._rbcnamescount = len(self._names) |
|
662 | 664 | |
|
663 | 665 | def _writerevs(self, repo, start): |
|
664 | 666 | """ write the new revs to revbranchcache """ |
|
665 | 667 | revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize) |
|
666 | 668 | with repo.cachevfs.open(_rbcrevs, 'ab') as f: |
|
667 | 669 | if f.tell() != start: |
|
668 | 670 | repo.ui.debug("truncating cache/%s to %d\n" % (_rbcrevs, start)) |
|
669 | 671 | f.seek(start) |
|
670 | 672 | if f.tell() != start: |
|
671 | 673 | start = 0 |
|
672 | 674 | f.seek(start) |
|
673 | 675 | f.truncate() |
|
674 | 676 | end = revs * _rbcrecsize |
|
675 | 677 | f.write(self._rbcrevs[start:end]) |
|
676 | 678 | self._rbcrevslen = revs |