@@ -1,634 +1,634 @@
|
1 | 1 | # branchmap.py - logic to compute, maintain and store branchmap for local repo |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import struct |
|
11 | 11 | |
|
12 | 12 | from .node import ( |
|
13 | 13 | bin, |
|
14 | 14 | hex, |
|
15 | 15 | nullid, |
|
16 | 16 | nullrev, |
|
17 | 17 | ) |
|
18 | 18 | from . import ( |
|
19 | 19 | encoding, |
|
20 | 20 | error, |
|
21 | 21 | pycompat, |
|
22 | 22 | scmutil, |
|
23 | 23 | util, |
|
24 | 24 | ) |
|
25 | 25 | from .utils import ( |
|
26 | 26 | stringutil, |
|
27 | 27 | ) |
|
28 | 28 | |
|
29 | 29 | calcsize = struct.calcsize |
|
30 | 30 | pack_into = struct.pack_into |
|
31 | 31 | unpack_from = struct.unpack_from |
|
32 | 32 | |
|
33 | 33 | |
|
34 | 34 | ### Nearest subset relation |
|
35 | 35 | # Nearest subset of filter X is a filter Y so that: |
|
36 | 36 | # * Y is included in X, |
|
37 | 37 | # * X - Y is as small as possible. |
|
38 | 38 | # This creates an ordering used for branchmap purposes. |

39 | 39 | # The ordering may be partial. |
|
40 | 40 | subsettable = {None: 'visible', |
|
41 | 41 | 'visible-hidden': 'visible', |
|
42 | 42 | 'visible': 'served', |
|
43 | 43 | 'served': 'immutable', |
|
44 | 44 | 'immutable': 'base'} |
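
The mapping above drives the cache-reuse fallback in BranchMapCache.updatecache below: when no valid cache exists for a filtered view, the code borrows the cache of the nearest smaller view and extends it with the missing revisions. A minimal, self-contained sketch of the chain that walk effectively follows (the helper name fallback_chain is illustrative, not part of this module):

    subsettable = {None: 'visible',
                   'visible-hidden': 'visible',
                   'visible': 'served',
                   'served': 'immutable',
                   'immutable': 'base'}

    def fallback_chain(filtername):
        """Yield filter names from filtername down to the smallest subset,
        in the order updatecache would consider reusing their caches."""
        name = filtername
        while name in subsettable:
            name = subsettable[name]
            yield name

    # list(fallback_chain('visible')) == ['served', 'immutable', 'base']
    # list(fallback_chain(None)) == ['visible', 'served', 'immutable', 'base']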
|
45 | 45 | |
|
46 | 46 | |
|
47 | 47 | class BranchMapCache(object): |
|
48 | 48 | """mapping of filtered views of repo with their branchcache""" |
|
49 | 49 | def __init__(self): |
|
50 | 50 | self._per_filter = {} |
|
51 | 51 | |
|
52 | 52 | def __getitem__(self, repo): |
|
53 | 53 | self.updatecache(repo) |
|
54 | 54 | return self._per_filter[repo.filtername] |
|
55 | 55 | |
|
56 | 56 | def updatecache(self, repo): |
|
57 | 57 | """Update the cache for the given filtered view on a repository""" |
|
58 | 58 | # This can trigger updates for the caches for subsets of the filtered |
|
59 | 59 | # view, e.g. when there is no cache for this filtered view or the cache |
|
60 | 60 | # is stale. |
|
61 | 61 | |
|
62 | 62 | cl = repo.changelog |
|
63 | 63 | filtername = repo.filtername |
|
64 | 64 | bcache = self._per_filter.get(filtername) |
|
65 | 65 | if bcache is None or not bcache.validfor(repo): |
|
66 | 66 | # cache object missing or cache object stale? Read from disk |
|
67 | 67 | bcache = branchcache.fromfile(repo) |
|
68 | 68 | |
|
69 | 69 | revs = [] |
|
70 | 70 | if bcache is None: |
|
71 | 71 | # no (fresh) cache available anymore, perhaps we can re-use |
|
72 | 72 | # the cache for a subset, then extend that to add info on missing |
|
73 | 73 | # revisions. |
|
74 | 74 | subsetname = subsettable.get(filtername) |
|
75 | 75 | if subsetname is not None: |
|
76 | 76 | subset = repo.filtered(subsetname) |
|
77 | 77 | bcache = self[subset].copy() |
|
78 | 78 | extrarevs = subset.changelog.filteredrevs - cl.filteredrevs |
|
79 | 79 | revs.extend(r for r in extrarevs if r <= bcache.tiprev) |
|
80 | 80 | else: |
|
81 | 81 | # nothing to fall back on, start empty. |
|
82 | 82 | bcache = branchcache() |
|
83 | 83 | |
|
84 | 84 | revs.extend(cl.revs(start=bcache.tiprev + 1)) |
|
85 | 85 | if revs: |
|
86 | 86 | bcache.update(repo, revs) |
|
87 | 87 | |
|
88 | 88 | assert bcache.validfor(repo), filtername |
|
89 | 89 | self._per_filter[repo.filtername] = bcache |
|
90 | 90 | |
|
91 | 91 | def replace(self, repo, remotebranchmap): |
|
92 | 92 | """Replace the branchmap cache for a repo with a branch mapping. |
|
93 | 93 | |
|
94 | 94 | This is likely only called during clone with a branch map from a |
|
95 | 95 | remote. |
|
96 | 96 | |
|
97 | 97 | """ |
|
98 | 98 | cl = repo.changelog |
|
99 | 99 | clrev = cl.rev |
|
100 | 100 | clbranchinfo = cl.branchinfo |
|
101 | 101 | rbheads = [] |
|
102 | 102 | closed = [] |
|
103 | 103 | for bheads in remotebranchmap.itervalues(): |
|
104 | 104 | rbheads += bheads |
|
105 | 105 | for h in bheads: |
|
106 | 106 | r = clrev(h) |
|
107 | 107 | b, c = clbranchinfo(r) |
|
108 | 108 | if c: |
|
109 | 109 | closed.append(h) |
|
110 | 110 | |
|
111 | 111 | if rbheads: |
|
112 | 112 | rtiprev = max((int(clrev(node)) for node in rbheads)) |
|
113 | 113 | cache = branchcache( |
|
114 | 114 | remotebranchmap, repo[rtiprev].node(), rtiprev, |
|
115 | 115 | closednodes=closed) |
|
116 | 116 | |
|
117 | 117 | # Try to stick it as low as possible |
|
118 | 118 | # filters above served are unlikely to be fetched from a clone |
|
119 | 119 | for candidate in ('base', 'immutable', 'served'): |
|
120 | 120 | rview = repo.filtered(candidate) |
|
121 | 121 | if cache.validfor(rview): |
|
122 | 122 | self._per_filter[candidate] = cache |
|
123 | 123 | cache.write(rview) |
|
124 | 124 | return |
|
125 | 125 | |
|
126 | 126 | def clear(self): |
|
127 | 127 | self._per_filter.clear() |
|
128 | 128 | |
|
129 | 129 | |
|
130 | 130 | class branchcache(object): |
|
131 | 131 | """A dict-like object that holds the branch heads cache. |
|
132 | 132 | |
|
133 | 133 | This cache is used to avoid costly computations to determine all the |
|
134 | 134 | branch heads of a repo. |
|
135 | 135 | |
|
136 | 136 | The cache is serialized on disk in the following format: |
|
137 | 137 | |
|
138 | 138 | <tip hex node> <tip rev number> [optional filtered repo hex hash] |
|
139 | 139 | <branch head hex node> <open/closed state> <branch name> |
|
140 | 140 | <branch head hex node> <open/closed state> <branch name> |
|
141 | 141 | ... |
|
142 | 142 | |
|
143 | 143 | The first line is used to check if the cache is still valid. If the |
|
144 | 144 | branch cache is for a filtered repo view, an optional third hash is |
|
145 | 145 | included that hashes the hashes of all filtered revisions. |
|
146 | 146 | |
|
147 | 147 | The open/closed state is represented by a single letter 'o' or 'c'. |
|
148 | 148 | This field can be used to avoid changelog reads when determining if a |
|
149 | 149 | branch head closes a branch or not. |
|
150 | 150 | """ |
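
As a quick orientation for the on-disk format documented in this docstring, the sketch below parses such a file into heads per branch plus a set of closed nodes. It is a toy stand-in that assumes well-formed input; the real loading path is fromfile/load further down, which also validates every node against the changelog:

    def parse_branch2(lines):
        """Toy parser for the branch2 cache format described above."""
        cachekey = lines[0].rstrip('\n').split(' ', 2)
        tipnode, tiprev = cachekey[0], int(cachekey[1])
        filteredhash = cachekey[2] if len(cachekey) > 2 else None
        heads, closed = {}, set()
        for line in lines[1:]:
            if not line.strip():
                continue
            node, state, label = line.rstrip('\n').split(' ', 2)
            heads.setdefault(label, []).append(node)
            if state == 'c':
                closed.add(node)
        return tipnode, tiprev, filteredhash, heads, closed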
|
151 | 151 | |
|
152 | 152 | def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev, |
|
153 | 153 | filteredhash=None, closednodes=None, hasnode=None): |
|
154 | 154 | """ hasnode is a function which can be used to verify whether changelog |
|
155 | 155 | has a given node or not. If it's not provided, we assume that every node |
|
156 | 156 | we have exists in changelog """ |
|
157 | 157 | self.tipnode = tipnode |
|
158 | 158 | self.tiprev = tiprev |
|
159 | 159 | self.filteredhash = filteredhash |
|
160 | 160 | # closednodes is a set of nodes that close their branch. If the branch |
|
161 | 161 | # cache has been updated, it may contain nodes that are no longer |
|
162 | 162 | # heads. |
|
163 | 163 | if closednodes is None: |
|
164 | 164 | self._closednodes = set() |
|
165 | 165 | else: |
|
166 | 166 | self._closednodes = closednodes |
|
167 | 167 | self._entries = dict(entries) |
|
168 | 168 | # whether closed nodes are verified or not |
|
169 | 169 | self._closedverified = False |
|
170 | 170 | # branches for which nodes are verified |
|
171 | 171 | self._verifiedbranches = set() |
|
172 | 172 | self._hasnode = hasnode |
|
173 | 173 | if self._hasnode is None: |
|
174 | 174 | self._hasnode = lambda x: True |
|
175 | 175 | |
|
176 | 176 | def __iter__(self): |
|
177 | 177 | return iter(self._entries) |
|
178 | 178 | |
|
179 | 179 | def __setitem__(self, key, value): |
|
180 | 180 | self._entries[key] = value |
|
181 | 181 | |
|
182 | 182 | def __getitem__(self, key): |
|
183 | 183 | return self._entries[key] |
|
184 | 184 | |
|
185 | 185 | def iteritems(self): |
|
186 | 186 | return self._entries.iteritems() |
|
187 | 187 | |
|
188 | 188 | def hasbranch(self, label): |
|
189 | 189 | """ checks whether a branch of this name exists or not """ |
|
190 | 190 | return label in self._entries |
|
191 | 191 | |
|
192 | 192 | @classmethod |
|
193 | 193 | def fromfile(cls, repo): |
|
194 | 194 | f = None |
|
195 | 195 | try: |
|
196 | 196 | f = repo.cachevfs(cls._filename(repo)) |
|
197 | 197 | lineiter = iter(f) |
|
198 | 198 | cachekey = next(lineiter).rstrip('\n').split(" ", 2) |
|
199 | 199 | last, lrev = cachekey[:2] |
|
200 | 200 | last, lrev = bin(last), int(lrev) |
|
201 | 201 | filteredhash = None |
|
202 | 202 | hasnode = repo.changelog.hasnode |
|
203 | 203 | if len(cachekey) > 2: |
|
204 | 204 | filteredhash = bin(cachekey[2]) |
|
205 | 205 | bcache = cls(tipnode=last, tiprev=lrev, filteredhash=filteredhash, |
|
206 | 206 | hasnode=hasnode) |
|
207 | 207 | if not bcache.validfor(repo): |
|
208 | 208 | # invalidate the cache |
|
209 | 209 | raise ValueError(r'tip differs') |
|
210 | 210 | bcache.load(repo, lineiter) |
|
211 | 211 | except (IOError, OSError): |
|
212 | 212 | return None |
|
213 | 213 | |
|
214 | 214 | except Exception as inst: |
|
215 | 215 | if repo.ui.debugflag: |
|
216 | 216 | msg = 'invalid branchheads cache' |
|
217 | 217 | if repo.filtername is not None: |
|
218 | 218 | msg += ' (%s)' % repo.filtername |
|
219 | 219 | msg += ': %s\n' |
|
220 | 220 | repo.ui.debug(msg % pycompat.bytestr(inst)) |
|
221 | 221 | bcache = None |
|
222 | 222 | |
|
223 | 223 | finally: |
|
224 | 224 | if f: |
|
225 | 225 | f.close() |
|
226 | 226 | |
|
227 | 227 | return bcache |
|
228 | 228 | |
|
229 | 229 | def load(self, repo, lineiter): |
|
230 | 230 | """ fully loads the branchcache by reading from the file using the line |
|
231 | 231 | iterator passed""" |
|
232 | 232 | cl = repo.changelog |
|
233 | 233 | for line in lineiter: |
|
234 | 234 | line = line.rstrip('\n') |
|
235 | 235 | if not line: |
|
236 | 236 | continue |
|
237 | 237 | node, state, label = line.split(" ", 2) |
|
238 | 238 | if state not in 'oc': |
|
239 | 239 | raise ValueError(r'invalid branch state') |
|
240 | 240 | label = encoding.tolocal(label.strip()) |
|
241 | 241 | node = bin(node) |
|
242 | 242 | if not cl.hasnode(node): |
|
243 | 243 | raise ValueError( |
|
244 | 244 | r'node %s does not exist' % pycompat.sysstr(hex(node))) |
|
245 | 245 | self._entries.setdefault(label, []).append(node) |
|
246 | 246 | self._verifiedbranches.add(label) |
|
247 | 247 | if state == 'c': |
|
248 | 248 | self._closednodes.add(node) |
|
249 | 249 | self._closedverified = True |
|
250 | 250 | |
|
251 | 251 | @staticmethod |
|
252 | 252 | def _filename(repo): |
|
253 | 253 | """name of a branchcache file for a given repo or repoview""" |
|
254 | 254 | filename = "branch2" |
|
255 | 255 | if repo.filtername: |
|
256 | 256 | filename = '%s-%s' % (filename, repo.filtername) |
|
257 | 257 | return filename |
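
Each repoview therefore gets its own cache file, named after its filter, in the repository's cache directory (.hg/cache for a local repository). Illustrative outputs of the method above:

    # _filename(repo) yields, following the code above:
    #   unfiltered repo (filtername None)  -> 'branch2'
    #   repo.filtered('served')            -> 'branch2-served'
    #   repo.filtered('immutable')         -> 'branch2-immutable'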
|
258 | 258 | |
|
259 | 259 | def validfor(self, repo): |
|
260 | 260 | """Is the cache content valid regarding a repo |
|
261 | 261 | |
|
262 | 262 | - False when cached tipnode is unknown or if we detect a strip. |
|
263 | 263 | - True when cache is up to date or a subset of current repo.""" |
|
264 | 264 | try: |
|
265 | 265 | return ((self.tipnode == repo.changelog.node(self.tiprev)) |
|
266 | 266 | and (self.filteredhash == |
|
267 | 267 | scmutil.filteredhash(repo, self.tiprev))) |
|
268 | 268 | except IndexError: |
|
269 | 269 | return False |
|
270 | 270 | |
|
271 | 271 | def _branchtip(self, heads): |
|
272 | 272 | '''Return tuple with last open head in heads and false, |
|
273 | 273 | otherwise return last closed head and true.''' |
|
274 | 274 | tip = heads[-1] |
|
275 | 275 | closed = True |
|
276 | 276 | for h in reversed(heads): |
|
277 | 277 | if h not in self._closednodes: |
|
278 | 278 | tip = h |
|
279 | 279 | closed = False |
|
280 | 280 | break |
|
281 | 281 | return tip, closed |
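
Stated on plain values, the rule implemented by _branchtip is: scan the heads from the tip backwards and return the first open one; only when every head is closed does it fall back to the (closed) tipmost head. A standalone restatement of that rule, for illustration only:

    def branchtip_of(heads, closednodes):
        """Illustrative restatement of _branchtip on plain sequences."""
        for h in reversed(heads):
            if h not in closednodes:
                return h, False           # last open head, branch still open
        return heads[-1], True            # every head closed, branch closed

    # branchtip_of(['n1', 'n2', 'n3'], {'n3'}) == ('n2', False)
    # branchtip_of(['n1'], {'n1'}) == ('n1', True)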
|
282 | 282 | |
|
283 | 283 | def branchtip(self, branch): |
|
284 | 284 | '''Return the tipmost open head on branch, otherwise return the |
|
285 | 285 | tipmost closed head on branch. |
|
286 | 286 | Raise KeyError for unknown branch.''' |
|
287 | 287 | return self._branchtip(self[branch])[0] |
|
288 | 288 | |
|
289 | 289 | def iteropen(self, nodes): |
|
290 | 290 | return (n for n in nodes if n not in self._closednodes) |
|
291 | 291 | |
|
292 | 292 | def branchheads(self, branch, closed=False): |
|
293 | heads = self[branch] | |
|
293 | heads = self._entries[branch] | |
|
294 | 294 | if not closed: |
|
295 | 295 | heads = list(self.iteropen(heads)) |
|
296 | 296 | return heads |
|
297 | 297 | |
|
298 | 298 | def iterbranches(self): |
|
299 | 299 | for bn, heads in self.iteritems(): |
|
300 | 300 | yield (bn, heads) + self._branchtip(heads) |
|
301 | 301 | |
|
302 | 302 | def iterheads(self): |
|
303 | 303 | """ returns all the heads """ |
|
304 | 304 | return self._entries.itervalues() |
|
305 | 305 | |
|
306 | 306 | def copy(self): |
|
307 | 307 | """return a deep copy of the branchcache object""" |
|
308 | 308 | return type(self)( |
|
309 | 309 | self._entries, self.tipnode, self.tiprev, self.filteredhash, |
|
310 | 310 | self._closednodes) |
|
311 | 311 | |
|
312 | 312 | def write(self, repo): |
|
313 | 313 | try: |
|
314 | 314 | f = repo.cachevfs(self._filename(repo), "w", atomictemp=True) |
|
315 | 315 | cachekey = [hex(self.tipnode), '%d' % self.tiprev] |
|
316 | 316 | if self.filteredhash is not None: |
|
317 | 317 | cachekey.append(hex(self.filteredhash)) |
|
318 | 318 | f.write(" ".join(cachekey) + '\n') |
|
319 | 319 | nodecount = 0 |
|
320 | 320 | for label, nodes in sorted(self.iteritems()): |
|
321 | 321 | label = encoding.fromlocal(label) |
|
322 | 322 | for node in nodes: |
|
323 | 323 | nodecount += 1 |
|
324 | 324 | if node in self._closednodes: |
|
325 | 325 | state = 'c' |
|
326 | 326 | else: |
|
327 | 327 | state = 'o' |
|
328 | 328 | f.write("%s %s %s\n" % (hex(node), state, label)) |
|
329 | 329 | f.close() |
|
330 | 330 | repo.ui.log('branchcache', |
|
331 | 331 | 'wrote %s branch cache with %d labels and %d nodes\n', |
|
332 | 332 | repo.filtername, len(self._entries), nodecount) |
|
333 | 333 | except (IOError, OSError, error.Abort) as inst: |
|
334 | 334 | # Abort may be raised by read only opener, so log and continue |
|
335 | 335 | repo.ui.debug("couldn't write branch cache: %s\n" % |
|
336 | 336 | stringutil.forcebytestr(inst)) |
|
337 | 337 | |
|
338 | 338 | def update(self, repo, revgen): |
|
339 | 339 | """Given a branchhead cache, self, that may have extra nodes or be |
|
340 | 340 | missing heads, and a generator of nodes that are strictly a superset of |
|
341 | 341 | heads missing, this function updates self to be correct. |
|
342 | 342 | """ |
|
343 | 343 | starttime = util.timer() |
|
344 | 344 | cl = repo.changelog |
|
345 | 345 | # collect new branch entries |
|
346 | 346 | newbranches = {} |
|
347 | 347 | getbranchinfo = repo.revbranchcache().branchinfo |
|
348 | 348 | for r in revgen: |
|
349 | 349 | branch, closesbranch = getbranchinfo(r) |
|
350 | 350 | newbranches.setdefault(branch, []).append(r) |
|
351 | 351 | if closesbranch: |
|
352 | 352 | self._closednodes.add(cl.node(r)) |
|
353 | 353 | |
|
354 | 354 | # fetch current topological heads to speed up filtering |
|
355 | 355 | topoheads = set(cl.headrevs()) |
|
356 | 356 | |
|
357 | 357 | # if older branchheads are reachable from new ones, they aren't |
|
358 | 358 | # really branchheads. Note checking parents is insufficient: |
|
359 | 359 | # 1 (branch a) -> 2 (branch b) -> 3 (branch a) |
|
360 | 360 | for branch, newheadrevs in newbranches.iteritems(): |
|
361 | 361 | bheads = self._entries.setdefault(branch, []) |
|
362 | 362 | bheadset = set(cl.rev(node) for node in bheads) |
|
363 | 363 | |
|
364 | 364 | # This has been tested True on all internal usages of this function. |
|
365 | 365 | # run it again in case of doubt |
|
366 | 366 | # assert not (set(bheadrevs) & set(newheadrevs)) |
|
367 | 367 | bheadset.update(newheadrevs) |
|
368 | 368 | |
|
369 | 369 | # This prunes out two kinds of heads - heads that are superseded by |
|
370 | 370 | # a head in newheadrevs, and newheadrevs that are not heads because |
|
371 | 371 | # an existing head is their descendant. |
|
372 | 372 | uncertain = bheadset - topoheads |
|
373 | 373 | if uncertain: |
|
374 | 374 | floorrev = min(uncertain) |
|
375 | 375 | ancestors = set(cl.ancestors(newheadrevs, floorrev)) |
|
376 | 376 | bheadset -= ancestors |
|
377 | 377 | bheadrevs = sorted(bheadset) |
|
378 | 378 | self[branch] = [cl.node(rev) for rev in bheadrevs] |
|
379 | 379 | tiprev = bheadrevs[-1] |
|
380 | 380 | if tiprev > self.tiprev: |
|
381 | 381 | self.tipnode = cl.node(tiprev) |
|
382 | 382 | self.tiprev = tiprev |
|
383 | 383 | |
|
384 | 384 | if not self.validfor(repo): |
|
385 | 385 | # cache key is not valid anymore |
|
386 | 386 | self.tipnode = nullid |
|
387 | 387 | self.tiprev = nullrev |
|
388 | 388 | for heads in self.iterheads(): |
|
389 | 389 | tiprev = max(cl.rev(node) for node in heads) |
|
390 | 390 | if tiprev > self.tiprev: |
|
391 | 391 | self.tipnode = cl.node(tiprev) |
|
392 | 392 | self.tiprev = tiprev |
|
393 | 393 | self.filteredhash = scmutil.filteredhash(repo, self.tiprev) |
|
394 | 394 | |
|
395 | 395 | duration = util.timer() - starttime |
|
396 | 396 | repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n', |
|
397 | 397 | repo.filtername or b'None', duration) |
|
398 | 398 | |
|
399 | 399 | self.write(repo) |
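
The heart of update() is the head pruning in the loop above: candidate heads that are not topological heads may be ancestors of newly added heads, and a single ancestors() call bounded by the lowest uncertain revision removes them. A small sketch of that step on plain revision numbers; the ancestors_of callback stands in for changelog.ancestors and is an assumption of this example:

    def prune_heads(oldheadrevs, newheadrevs, topoheads, ancestors_of):
        """Return the surviving branch heads after merging in newheadrevs.

        ancestors_of(revs, floor) must return all ancestors of revs whose
        revision number is >= floor, like changelog.ancestors()."""
        bheadset = set(oldheadrevs) | set(newheadrevs)
        uncertain = bheadset - set(topoheads)
        if uncertain:
            floorrev = min(uncertain)
            bheadset -= set(ancestors_of(newheadrevs, floorrev))
        return sorted(bheadset)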
|
400 | 400 | |
|
401 | 401 | |
|
402 | 402 | class remotebranchcache(branchcache): |
|
403 | 403 | """Branchmap info for a remote connection, should not write locally""" |
|
404 | 404 | def write(self, repo): |
|
405 | 405 | pass |
|
406 | 406 | |
|
407 | 407 | |
|
408 | 408 | # Revision branch info cache |
|
409 | 409 | |
|
410 | 410 | _rbcversion = '-v1' |
|
411 | 411 | _rbcnames = 'rbc-names' + _rbcversion |
|
412 | 412 | _rbcrevs = 'rbc-revs' + _rbcversion |
|
413 | 413 | # [4 byte hash prefix][4 byte branch name number with sign bit indicating close] |
|
414 | 414 | _rbcrecfmt = '>4sI' |
|
415 | 415 | _rbcrecsize = calcsize(_rbcrecfmt) |
|
416 | 416 | _rbcnodelen = 4 |
|
417 | 417 | _rbcbranchidxmask = 0x7fffffff |
|
418 | 418 | _rbccloseflag = 0x80000000 |
|
419 | 419 | |
|
420 | 420 | class revbranchcache(object): |
|
421 | 421 | """Persistent cache, mapping from revision number to branch name and close. |
|
422 | 422 | This is a low level cache, independent of filtering. |
|
423 | 423 | |
|
424 | 424 | Branch names are stored in rbc-names in internal encoding separated by 0. |
|
425 | 425 | rbc-names is append-only, and each branch name is only stored once and will |
|
426 | 426 | thus have a unique index. |
|
427 | 427 | |
|
428 | 428 | The branch info for each revision is stored in rbc-revs as constant size |
|
429 | 429 | records. The whole file is read into memory, but it is only 'parsed' on |
|
430 | 430 | demand. The file is usually append-only but will be truncated if repo |
|
431 | 431 | modification is detected. |
|
432 | 432 | The record for each revision contains the first 4 bytes of the |
|
433 | 433 | corresponding node hash, and the record is only used if it still matches. |
|
434 | 434 | Even a completely trashed rbc-revs will thus still give the right result |
|
435 | 435 | while converging towards full recovery ... assuming no incorrectly matching |
|
436 | 436 | node hashes. |
|
437 | 437 | The record also contains 4 bytes where 31 bits contain the index of the |

438 | 438 | branch and the last bit indicates that it is a branch close commit. |
|
439 | 439 | The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i |
|
440 | 440 | and will grow with it but be 1/8th of its size. |
|
441 | 441 | """ |
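
To make the record layout described above concrete, here is a single rbc-revs record packed and unpacked by hand with the module-level constants ('>4sI', the 0x80000000 close flag, the 0x7fffffff index mask); the node prefix and branch index are made-up values for illustration:

    import struct

    node_prefix = b'\x12\x34\x56\x78'      # first 4 bytes of the node hash
    branchidx = 5                           # index into rbc-names
    record = struct.pack('>4sI', node_prefix, branchidx | 0x80000000)  # closed

    prefix, field = struct.unpack('>4sI', record)
    is_closed = bool(field & 0x80000000)    # _rbccloseflag
    index = field & 0x7fffffff              # _rbcbranchidxmask
    assert (prefix, index, is_closed) == (node_prefix, 5, True)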
|
442 | 442 | |
|
443 | 443 | def __init__(self, repo, readonly=True): |
|
444 | 444 | assert repo.filtername is None |
|
445 | 445 | self._repo = repo |
|
446 | 446 | self._names = [] # branch names in local encoding with static index |
|
447 | 447 | self._rbcrevs = bytearray() |
|
448 | 448 | self._rbcsnameslen = 0 # length of names read at _rbcsnameslen |
|
449 | 449 | try: |
|
450 | 450 | bndata = repo.cachevfs.read(_rbcnames) |
|
451 | 451 | self._rbcsnameslen = len(bndata) # for verification before writing |
|
452 | 452 | if bndata: |
|
453 | 453 | self._names = [encoding.tolocal(bn) |
|
454 | 454 | for bn in bndata.split('\0')] |
|
455 | 455 | except (IOError, OSError): |
|
456 | 456 | if readonly: |
|
457 | 457 | # don't try to use cache - fall back to the slow path |
|
458 | 458 | self.branchinfo = self._branchinfo |
|
459 | 459 | |
|
460 | 460 | if self._names: |
|
461 | 461 | try: |
|
462 | 462 | data = repo.cachevfs.read(_rbcrevs) |
|
463 | 463 | self._rbcrevs[:] = data |
|
464 | 464 | except (IOError, OSError) as inst: |
|
465 | 465 | repo.ui.debug("couldn't read revision branch cache: %s\n" % |
|
466 | 466 | stringutil.forcebytestr(inst)) |
|
467 | 467 | # remember number of good records on disk |
|
468 | 468 | self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize, |
|
469 | 469 | len(repo.changelog)) |
|
470 | 470 | if self._rbcrevslen == 0: |
|
471 | 471 | self._names = [] |
|
472 | 472 | self._rbcnamescount = len(self._names) # number of names read at |
|
473 | 473 | # _rbcsnameslen |
|
474 | 474 | |
|
475 | 475 | def _clear(self): |
|
476 | 476 | self._rbcsnameslen = 0 |
|
477 | 477 | del self._names[:] |
|
478 | 478 | self._rbcnamescount = 0 |
|
479 | 479 | self._rbcrevslen = len(self._repo.changelog) |
|
480 | 480 | self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize) |
|
481 | 481 | util.clearcachedproperty(self, '_namesreverse') |
|
482 | 482 | |
|
483 | 483 | @util.propertycache |
|
484 | 484 | def _namesreverse(self): |
|
485 | 485 | return dict((b, r) for r, b in enumerate(self._names)) |
|
486 | 486 | |
|
487 | 487 | def branchinfo(self, rev): |
|
488 | 488 | """Return branch name and close flag for rev, using and updating |
|
489 | 489 | persistent cache.""" |
|
490 | 490 | changelog = self._repo.changelog |
|
491 | 491 | rbcrevidx = rev * _rbcrecsize |
|
492 | 492 | |
|
493 | 493 | # avoid negative index, changelog.read(nullrev) is fast without cache |
|
494 | 494 | if rev == nullrev: |
|
495 | 495 | return changelog.branchinfo(rev) |
|
496 | 496 | |
|
497 | 497 | # if requested rev isn't allocated, grow and cache the rev info |
|
498 | 498 | if len(self._rbcrevs) < rbcrevidx + _rbcrecsize: |
|
499 | 499 | return self._branchinfo(rev) |
|
500 | 500 | |
|
501 | 501 | # fast path: extract data from cache, use it if node is matching |
|
502 | 502 | reponode = changelog.node(rev)[:_rbcnodelen] |
|
503 | 503 | cachenode, branchidx = unpack_from( |
|
504 | 504 | _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx) |
|
505 | 505 | close = bool(branchidx & _rbccloseflag) |
|
506 | 506 | if close: |
|
507 | 507 | branchidx &= _rbcbranchidxmask |
|
508 | 508 | if cachenode == '\0\0\0\0': |
|
509 | 509 | pass |
|
510 | 510 | elif cachenode == reponode: |
|
511 | 511 | try: |
|
512 | 512 | return self._names[branchidx], close |
|
513 | 513 | except IndexError: |
|
514 | 514 | # recover from invalid reference to unknown branch |
|
515 | 515 | self._repo.ui.debug("referenced branch names not found" |
|
516 | 516 | " - rebuilding revision branch cache from scratch\n") |
|
517 | 517 | self._clear() |
|
518 | 518 | else: |
|
519 | 519 | # rev/node map has changed, invalidate the cache from here up |
|
520 | 520 | self._repo.ui.debug("history modification detected - truncating " |
|
521 | 521 | "revision branch cache to revision %d\n" % rev) |
|
522 | 522 | truncate = rbcrevidx + _rbcrecsize |
|
523 | 523 | del self._rbcrevs[truncate:] |
|
524 | 524 | self._rbcrevslen = min(self._rbcrevslen, truncate) |
|
525 | 525 | |
|
526 | 526 | # fall back to slow path and make sure it will be written to disk |
|
527 | 527 | return self._branchinfo(rev) |
|
528 | 528 | |
|
529 | 529 | def _branchinfo(self, rev): |
|
530 | 530 | """Retrieve branch info from changelog and update _rbcrevs""" |
|
531 | 531 | changelog = self._repo.changelog |
|
532 | 532 | b, close = changelog.branchinfo(rev) |
|
533 | 533 | if b in self._namesreverse: |
|
534 | 534 | branchidx = self._namesreverse[b] |
|
535 | 535 | else: |
|
536 | 536 | branchidx = len(self._names) |
|
537 | 537 | self._names.append(b) |
|
538 | 538 | self._namesreverse[b] = branchidx |
|
539 | 539 | reponode = changelog.node(rev) |
|
540 | 540 | if close: |
|
541 | 541 | branchidx |= _rbccloseflag |
|
542 | 542 | self._setcachedata(rev, reponode, branchidx) |
|
543 | 543 | return b, close |
|
544 | 544 | |
|
545 | 545 | def setdata(self, branch, rev, node, close): |
|
546 | 546 | """add new data information to the cache""" |
|
547 | 547 | if branch in self._namesreverse: |
|
548 | 548 | branchidx = self._namesreverse[branch] |
|
549 | 549 | else: |
|
550 | 550 | branchidx = len(self._names) |
|
551 | 551 | self._names.append(branch) |
|
552 | 552 | self._namesreverse[branch] = branchidx |
|
553 | 553 | if close: |
|
554 | 554 | branchidx |= _rbccloseflag |
|
555 | 555 | self._setcachedata(rev, node, branchidx) |
|
556 | 556 | # If no cache data were readable (none exists, bad permissions, etc.) |
|
557 | 557 | # the cache was bypassing itself by setting: |
|
558 | 558 | # |
|
559 | 559 | # self.branchinfo = self._branchinfo |
|
560 | 560 | # |
|
561 | 561 | # Since we now have data in the cache, we need to drop this bypassing. |
|
562 | 562 | if r'branchinfo' in vars(self): |
|
563 | 563 | del self.branchinfo |
|
564 | 564 | |
|
565 | 565 | def _setcachedata(self, rev, node, branchidx): |
|
566 | 566 | """Writes the node's branch data to the in-memory cache data.""" |
|
567 | 567 | if rev == nullrev: |
|
568 | 568 | return |
|
569 | 569 | rbcrevidx = rev * _rbcrecsize |
|
570 | 570 | if len(self._rbcrevs) < rbcrevidx + _rbcrecsize: |
|
571 | 571 | self._rbcrevs.extend('\0' * |
|
572 | 572 | (len(self._repo.changelog) * _rbcrecsize - |
|
573 | 573 | len(self._rbcrevs))) |
|
574 | 574 | pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx) |
|
575 | 575 | self._rbcrevslen = min(self._rbcrevslen, rev) |
|
576 | 576 | |
|
577 | 577 | tr = self._repo.currenttransaction() |
|
578 | 578 | if tr: |
|
579 | 579 | tr.addfinalize('write-revbranchcache', self.write) |
|
580 | 580 | |
|
581 | 581 | def write(self, tr=None): |
|
582 | 582 | """Save branch cache if it is dirty.""" |
|
583 | 583 | repo = self._repo |
|
584 | 584 | wlock = None |
|
585 | 585 | step = '' |
|
586 | 586 | try: |
|
587 | 587 | if self._rbcnamescount < len(self._names): |
|
588 | 588 | step = ' names' |
|
589 | 589 | wlock = repo.wlock(wait=False) |
|
590 | 590 | if self._rbcnamescount != 0: |
|
591 | 591 | f = repo.cachevfs.open(_rbcnames, 'ab') |
|
592 | 592 | if f.tell() == self._rbcsnameslen: |
|
593 | 593 | f.write('\0') |
|
594 | 594 | else: |
|
595 | 595 | f.close() |
|
596 | 596 | repo.ui.debug("%s changed - rewriting it\n" % _rbcnames) |
|
597 | 597 | self._rbcnamescount = 0 |
|
598 | 598 | self._rbcrevslen = 0 |
|
599 | 599 | if self._rbcnamescount == 0: |
|
600 | 600 | # before rewriting names, make sure references are removed |
|
601 | 601 | repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True) |
|
602 | 602 | f = repo.cachevfs.open(_rbcnames, 'wb') |
|
603 | 603 | f.write('\0'.join(encoding.fromlocal(b) |
|
604 | 604 | for b in self._names[self._rbcnamescount:])) |
|
605 | 605 | self._rbcsnameslen = f.tell() |
|
606 | 606 | f.close() |
|
607 | 607 | self._rbcnamescount = len(self._names) |
|
608 | 608 | |
|
609 | 609 | start = self._rbcrevslen * _rbcrecsize |
|
610 | 610 | if start != len(self._rbcrevs): |
|
611 | 611 | step = '' |
|
612 | 612 | if wlock is None: |
|
613 | 613 | wlock = repo.wlock(wait=False) |
|
614 | 614 | revs = min(len(repo.changelog), |
|
615 | 615 | len(self._rbcrevs) // _rbcrecsize) |
|
616 | 616 | f = repo.cachevfs.open(_rbcrevs, 'ab') |
|
617 | 617 | if f.tell() != start: |
|
618 | 618 | repo.ui.debug("truncating cache/%s to %d\n" |
|
619 | 619 | % (_rbcrevs, start)) |
|
620 | 620 | f.seek(start) |
|
621 | 621 | if f.tell() != start: |
|
622 | 622 | start = 0 |
|
623 | 623 | f.seek(start) |
|
624 | 624 | f.truncate() |
|
625 | 625 | end = revs * _rbcrecsize |
|
626 | 626 | f.write(self._rbcrevs[start:end]) |
|
627 | 627 | f.close() |
|
628 | 628 | self._rbcrevslen = revs |
|
629 | 629 | except (IOError, OSError, error.Abort, error.LockError) as inst: |
|
630 | 630 | repo.ui.debug("couldn't write revision branch cache%s: %s\n" |
|
631 | 631 | % (step, stringutil.forcebytestr(inst))) |
|
632 | 632 | finally: |
|
633 | 633 | if wlock is not None: |
|
634 | 634 | wlock.release() |