##// END OF EJS Templates
repoview: add a new filtername for accessing hidden commits...
Pulkit Goyal -
r35511:07fdac1d default
parent child Browse files
Show More
@@ -1,522 +1,523
1 1 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import struct
11 11
12 12 from .node import (
13 13 bin,
14 14 hex,
15 15 nullid,
16 16 nullrev,
17 17 )
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 scmutil,
22 22 util,
23 23 )
24 24
25 25 calcsize = struct.calcsize
26 26 pack_into = struct.pack_into
27 27 unpack_from = struct.unpack_from
28 28
29 29 def _filename(repo):
30 30 """name of a branchcache file for a given repo or repoview"""
31 31 filename = "branch2"
32 32 if repo.filtername:
33 33 filename = '%s-%s' % (filename, repo.filtername)
34 34 return filename
35 35
def read(repo):
    """Read the on-disk branch cache for *repo* and return a branchcache.

    Returns None when the cache file is missing/unreadable, or when the
    cached content is stale or corrupt (any parse error simply invalidates
    the cache rather than propagating).
    """
    try:
        f = repo.cachevfs(_filename(repo))
        lines = f.read().split('\n')
        f.close()
    except (IOError, OSError):
        # no cache file (or unreadable): caller will rebuild from scratch
        return None

    try:
        # first line is the cache key:
        #   "<tip node hex> <tip rev> [optional filtered-revs hash hex]"
        cachekey = lines.pop(0).split(" ", 2)
        last, lrev = cachekey[:2]
        last, lrev = bin(last), int(lrev)
        filteredhash = None
        if len(cachekey) > 2:
            filteredhash = bin(cachekey[2])
        partial = branchcache(tipnode=last, tiprev=lrev,
                              filteredhash=filteredhash)
        if not partial.validfor(repo):
            # invalidate the cache
            raise ValueError('tip differs')
        cl = repo.changelog
        # remaining lines: "<head node hex> <o|c> <branch name>"
        for l in lines:
            if not l:
                continue
            node, state, label = l.split(" ", 2)
            if state not in 'oc':
                raise ValueError('invalid branch state')
            label = encoding.tolocal(label.strip())
            node = bin(node)
            if not cl.hasnode(node):
                raise ValueError('node %s does not exist' % hex(node))
            partial.setdefault(label, []).append(node)
            if state == 'c':
                partial._closednodes.add(node)
    except Exception as inst:
        # intentionally broad: any corruption in the cache file just means
        # we discard it and recompute
        if repo.ui.debugflag:
            msg = 'invalid branchheads cache'
            if repo.filtername is not None:
                msg += ' (%s)' % repo.filtername
            msg += ': %s\n'
            repo.ui.debug(msg % inst)
        partial = None
    return partial
79 79
### Nearest subset relation
# Nearest subset of filter X is a filter Y so that:
# * Y is included in X,
# * X - Y is as small as possible.
# This creates an ordering used for branchmap purposes.
# The ordering may be partial.
subsettable = {None: 'visible',
               'visible-hidden': 'visible',
               'visible': 'served',
               'served': 'immutable',
               'immutable': 'base'}
90 91
def updatecache(repo):
    """Ensure repo._branchcaches holds an up-to-date cache for repo's filter.

    Tries, in order: the in-memory cache, the on-disk cache, and finally a
    copy of the nearest-subset view's branchmap (per ``subsettable``),
    updating with whatever revisions are missing.
    """
    cl = repo.changelog
    filtername = repo.filtername
    bcache = repo._branchcaches.get(filtername)

    revs = []
    if bcache is None or not bcache.validfor(repo):
        bcache = read(repo)
        if bcache is None:
            subsetname = subsettable.get(filtername)
            if subsetname is None:
                # no fallback subset: start from an empty cache
                bcache = branchcache()
            else:
                # seed from the nearest subset and add the delta revisions
                subset = repo.filtered(subsetname)
                bcache = subset.branchmap().copy()
                extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
                revs.extend(r for r in extrarevs if r <= bcache.tiprev)
    revs.extend(cl.revs(start=bcache.tiprev + 1))
    if revs:
        bcache.update(repo, revs)
        bcache.write(repo)

    assert bcache.validfor(repo), filtername
    repo._branchcaches[repo.filtername] = bcache
115 116
def replacecache(repo, bm):
    """Replace the branchmap cache for a repo with a branch mapping.

    This is likely only called during clone with a branch map from a remote.
    """
    cl = repo.changelog
    rbheads = []
    closed = []
    for bheads in bm.itervalues():
        rbheads.extend(bheads)
        for h in bheads:
            branch, isclosed = cl.branchinfo(cl.rev(h))
            if isclosed:
                closed.append(h)

    if not rbheads:
        return

    rtiprev = max(int(cl.rev(node)) for node in rbheads)
    cache = branchcache(bm,
                        repo[rtiprev].node(),
                        rtiprev,
                        closednodes=closed)

    # Try to stick it as low as possible
    # filter above served are unlikely to be fetch from a clone
    for candidate in ('base', 'immutable', 'served'):
        rview = repo.filtered(candidate)
        if cache.validfor(rview):
            repo._branchcaches[candidate] = cache
            cache.write(rview)
            break
147 148
class branchcache(dict):
    """A dict like object that hold branches heads cache.

    This cache is used to avoid costly computations to determine all the
    branch heads of a repo.

    The cache is serialized on disk in the following format:

    <tip hex node> <tip rev number> [optional filtered repo hex hash]
    <branch head hex node> <open/closed state> <branch name>
    <branch head hex node> <open/closed state> <branch name>
    ...

    The first line is used to check if the cache is still valid. If the
    branch cache is for a filtered repo view, an optional third hash is
    included that hashes the hashes of all filtered revisions.

    The open/closed state is represented by a single letter 'o' or 'c'.
    This field can be used to avoid changelog reads when determining if a
    branch head closes a branch or not.
    """

    def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
                 filteredhash=None, closednodes=None):
        # entries: iterable of (branchname, heads) pairs seeding the dict
        super(branchcache, self).__init__(entries)
        # tipnode/tiprev form the cache key checked by validfor()
        self.tipnode = tipnode
        self.tiprev = tiprev
        self.filteredhash = filteredhash
        # closednodes is a set of nodes that close their branch. If the branch
        # cache has been updated, it may contain nodes that are no longer
        # heads.
        if closednodes is None:
            self._closednodes = set()
        else:
            self._closednodes = closednodes

    def validfor(self, repo):
        """Is the cache content valid regarding a repo

        - False when cached tipnode is unknown or if we detect a strip.
        - True when cache is up to date or a subset of current repo."""
        try:
            return ((self.tipnode == repo.changelog.node(self.tiprev))
                    and (self.filteredhash == \
                         scmutil.filteredhash(repo, self.tiprev)))
        except IndexError:
            # tiprev does not exist anymore (e.g. after a strip)
            return False

    def _branchtip(self, heads):
        '''Return tuple with last open head in heads and false,
        otherwise return last closed head and true.'''
        tip = heads[-1]
        closed = True
        # walk heads newest-first looking for an open one
        for h in reversed(heads):
            if h not in self._closednodes:
                tip = h
                closed = False
                break
        return tip, closed

    def branchtip(self, branch):
        '''Return the tipmost open head on branch head, otherwise return the
        tipmost closed head on branch.
        Raise KeyError for unknown branch.'''
        return self._branchtip(self[branch])[0]

    def iteropen(self, nodes):
        # generator of the given nodes that do not close their branch
        return (n for n in nodes if n not in self._closednodes)

    def branchheads(self, branch, closed=False):
        # heads of *branch*; closed heads are included only when requested
        heads = self[branch]
        if not closed:
            heads = list(self.iteropen(heads))
        return heads

    def iterbranches(self):
        # yield (branchname, heads, tipnode, isclosed) tuples
        for bn, heads in self.iteritems():
            yield (bn, heads) + self._branchtip(heads)

    def copy(self):
        """return a deep copy of the branchcache object"""
        return branchcache(self, self.tipnode, self.tiprev, self.filteredhash,
                           self._closednodes)

    def write(self, repo):
        """Serialize the cache to disk in the format documented on the class.

        Write errors (including read-only openers) are logged and ignored.
        """
        try:
            f = repo.cachevfs(_filename(repo), "w", atomictemp=True)
            cachekey = [hex(self.tipnode), '%d' % self.tiprev]
            if self.filteredhash is not None:
                cachekey.append(hex(self.filteredhash))
            f.write(" ".join(cachekey) + '\n')
            nodecount = 0
            for label, nodes in sorted(self.iteritems()):
                for node in nodes:
                    nodecount += 1
                    if node in self._closednodes:
                        state = 'c'
                    else:
                        state = 'o'
                    f.write("%s %s %s\n" % (hex(node), state,
                                            encoding.fromlocal(label)))
            f.close()
            repo.ui.log('branchcache',
                        'wrote %s branch cache with %d labels and %d nodes\n',
                        repo.filtername, len(self), nodecount)
        except (IOError, OSError, error.Abort) as inst:
            # Abort may be raised by read only opener, so log and continue
            repo.ui.debug("couldn't write branch cache: %s\n" % inst)

    def update(self, repo, revgen):
        """Given a branchhead cache, self, that may have extra nodes or be
        missing heads, and a generator of nodes that are strictly a superset of
        heads missing, this function updates self to be correct.
        """
        starttime = util.timer()
        cl = repo.changelog
        # collect new branch entries
        newbranches = {}
        getbranchinfo = repo.revbranchcache().branchinfo
        for r in revgen:
            branch, closesbranch = getbranchinfo(r)
            newbranches.setdefault(branch, []).append(r)
            if closesbranch:
                self._closednodes.add(cl.node(r))

        # fetch current topological heads to speed up filtering
        topoheads = set(cl.headrevs())

        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newheadrevs in newbranches.iteritems():
            bheads = self.setdefault(branch, [])
            bheadset = set(cl.rev(node) for node in bheads)

            # This has been tested True on all internal usage of this function.
            # run it again in case of doubt
            # assert not (set(bheadrevs) & set(newheadrevs))
            newheadrevs.sort()
            bheadset.update(newheadrevs)

            # This prunes out two kinds of heads - heads that are superseded by
            # a head in newheadrevs, and newheadrevs that are not heads because
            # an existing head is their descendant.
            uncertain = bheadset - topoheads
            if uncertain:
                floorrev = min(uncertain)
                ancestors = set(cl.ancestors(newheadrevs, floorrev))
                bheadset -= ancestors
            bheadrevs = sorted(bheadset)
            self[branch] = [cl.node(rev) for rev in bheadrevs]
            tiprev = bheadrevs[-1]
            if tiprev > self.tiprev:
                self.tipnode = cl.node(tiprev)
                self.tiprev = tiprev

        if not self.validfor(repo):
            # old cache key is not valid anymore: recompute tip from heads
            self.tipnode = nullid
            self.tiprev = nullrev
            for heads in self.values():
                tiprev = max(cl.rev(node) for node in heads)
                if tiprev > self.tiprev:
                    self.tipnode = cl.node(tiprev)
                    self.tiprev = tiprev
        self.filteredhash = scmutil.filteredhash(repo, self.tiprev)

        duration = util.timer() - starttime
        repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
                    repo.filtername, duration)
# Revision branch info cache

_rbcversion = '-v1'
_rbcnames = 'rbc-names' + _rbcversion
_rbcrevs = 'rbc-revs' + _rbcversion
# [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
_rbcrecfmt = '>4sI'
_rbcrecsize = calcsize(_rbcrecfmt)  # bytes per rbc-revs record (8)
_rbcnodelen = 4  # node-hash prefix length stored per record
_rbcbranchidxmask = 0x7fffffff  # low 31 bits: index into rbc-names
_rbccloseflag = 0x80000000  # high bit: revision closes its branch
330 331
class revbranchcache(object):
    """Persistent cache, mapping from revision number to branch name and close.
    This is a low level cache, independent of filtering.

    Branch names are stored in rbc-names in internal encoding separated by 0.
    rbc-names is append-only, and each branch name is only stored once and will
    thus have a unique index.

    The branch info for each revision is stored in rbc-revs as constant size
    records. The whole file is read into memory, but it is only 'parsed' on
    demand. The file is usually append-only but will be truncated if repo
    modification is detected.
    The record for each revision contains the first 4 bytes of the
    corresponding node hash, and the record is only used if it still matches.
    Even a completely trashed rbc-revs file thus still gives the right result
    while converging towards full recovery ... assuming no incorrectly matching
    node hashes.
    The record also contains 4 bytes where 31 bits contains the index of the
    branch and the last bit indicate that it is a branch close commit.
    The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
    and will grow with it but be 1/8th of its size.
    """

    def __init__(self, repo, readonly=True):
        # this cache is filter-independent, so it must see the whole repo
        assert repo.filtername is None
        self._repo = repo
        self._names = [] # branch names in local encoding with static index
        self._rbcrevs = bytearray()
        self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
        try:
            bndata = repo.cachevfs.read(_rbcnames)
            self._rbcsnameslen = len(bndata) # for verification before writing
            if bndata:
                self._names = [encoding.tolocal(bn)
                               for bn in bndata.split('\0')]
        except (IOError, OSError):
            if readonly:
                # don't try to use cache - fall back to the slow path
                self.branchinfo = self._branchinfo

        if self._names:
            try:
                data = repo.cachevfs.read(_rbcrevs)
                self._rbcrevs[:] = data
            except (IOError, OSError) as inst:
                repo.ui.debug("couldn't read revision branch cache: %s\n" %
                              inst)
        # remember number of good records on disk
        self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
                               len(repo.changelog))
        if self._rbcrevslen == 0:
            self._names = []
        self._rbcnamescount = len(self._names) # number of names read at
                                               # _rbcsnameslen
        self._namesreverse = dict((b, r) for r, b in enumerate(self._names))

    def _clear(self):
        # drop all in-memory state; a zero-filled rbc-revs buffer sized to
        # the changelog forces every record onto the slow path until rebuilt
        self._rbcsnameslen = 0
        del self._names[:]
        self._rbcnamescount = 0
        self._namesreverse.clear()
        self._rbcrevslen = len(self._repo.changelog)
        self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)

    def branchinfo(self, rev):
        """Return branch name and close flag for rev, using and updating
        persistent cache."""
        changelog = self._repo.changelog
        rbcrevidx = rev * _rbcrecsize

        # avoid negative index, changelog.read(nullrev) is fast without cache
        if rev == nullrev:
            return changelog.branchinfo(rev)

        # if requested rev isn't allocated, grow and cache the rev info
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            return self._branchinfo(rev)

        # fast path: extract data from cache, use it if node is matching
        reponode = changelog.node(rev)[:_rbcnodelen]
        cachenode, branchidx = unpack_from(
            _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx)
        close = bool(branchidx & _rbccloseflag)
        if close:
            branchidx &= _rbcbranchidxmask
        if cachenode == '\0\0\0\0':
            # record never written: fall through to the slow path
            pass
        elif cachenode == reponode:
            try:
                return self._names[branchidx], close
            except IndexError:
                # recover from invalid reference to unknown branch
                self._repo.ui.debug("referenced branch names not found"
                    " - rebuilding revision branch cache from scratch\n")
                self._clear()
        else:
            # rev/node map has changed, invalidate the cache from here up
            self._repo.ui.debug("history modification detected - truncating "
                "revision branch cache to revision %d\n" % rev)
            truncate = rbcrevidx + _rbcrecsize
            del self._rbcrevs[truncate:]
            self._rbcrevslen = min(self._rbcrevslen, truncate)

        # fall back to slow path and make sure it will be written to disk
        return self._branchinfo(rev)

    def _branchinfo(self, rev):
        """Retrieve branch info from changelog and update _rbcrevs"""
        changelog = self._repo.changelog
        b, close = changelog.branchinfo(rev)
        if b in self._namesreverse:
            branchidx = self._namesreverse[b]
        else:
            # first time this branch is seen: allocate a new name index
            branchidx = len(self._names)
            self._names.append(b)
            self._namesreverse[b] = branchidx
        reponode = changelog.node(rev)
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, reponode, branchidx)
        return b, close

    def _setcachedata(self, rev, node, branchidx):
        """Writes the node's branch data to the in-memory cache data."""
        if rev == nullrev:
            return
        rbcrevidx = rev * _rbcrecsize
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            # grow the buffer (zero-filled) up to the changelog length
            self._rbcrevs.extend('\0' *
                                 (len(self._repo.changelog) * _rbcrecsize -
                                  len(self._rbcrevs)))
        pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
        # everything from 'rev' onwards is now dirty on disk
        self._rbcrevslen = min(self._rbcrevslen, rev)

        tr = self._repo.currenttransaction()
        if tr:
            tr.addfinalize('write-revbranchcache', self.write)

    def write(self, tr=None):
        """Save branch cache if it is dirty."""
        repo = self._repo
        wlock = None
        step = ''
        try:
            if self._rbcnamescount < len(self._names):
                step = ' names'
                wlock = repo.wlock(wait=False)
                if self._rbcnamescount != 0:
                    f = repo.cachevfs.open(_rbcnames, 'ab')
                    if f.tell() == self._rbcsnameslen:
                        f.write('\0')
                    else:
                        # on-disk names diverged from memory: rewrite fully
                        f.close()
                        repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
                        self._rbcnamescount = 0
                        self._rbcrevslen = 0
                if self._rbcnamescount == 0:
                    # before rewriting names, make sure references are removed
                    repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
                    f = repo.cachevfs.open(_rbcnames, 'wb')
                f.write('\0'.join(encoding.fromlocal(b)
                                  for b in self._names[self._rbcnamescount:]))
                self._rbcsnameslen = f.tell()
                f.close()
                self._rbcnamescount = len(self._names)

            start = self._rbcrevslen * _rbcrecsize
            if start != len(self._rbcrevs):
                step = ''
                if wlock is None:
                    wlock = repo.wlock(wait=False)
                revs = min(len(repo.changelog),
                           len(self._rbcrevs) // _rbcrecsize)
                f = repo.cachevfs.open(_rbcrevs, 'ab')
                if f.tell() != start:
                    repo.ui.debug("truncating cache/%s to %d\n"
                                  % (_rbcrevs, start))
                    f.seek(start)
                    if f.tell() != start:
                        # seek failed (e.g. unseekable stream): rewrite all
                        start = 0
                        f.seek(start)
                    f.truncate()
                end = revs * _rbcrecsize
                f.write(self._rbcrevs[start:end])
                f.close()
                self._rbcrevslen = revs
        except (IOError, OSError, error.Abort, error.LockError) as inst:
            # best effort: cache writes must never break the operation
            repo.ui.debug("couldn't write revision branch cache%s: %s\n"
                          % (step, inst))
        finally:
            if wlock is not None:
                wlock.release()
@@ -1,273 +1,274
1 1 # repoview.py - Filtered view of a localrepo object
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import copy
12 12 import weakref
13 13
14 14 from .node import nullrev
15 15 from . import (
16 16 obsolete,
17 17 phases,
18 18 pycompat,
19 19 tags as tagsmod,
20 20 )
21 21
def hideablerevs(repo):
    """Revision candidates to be hidden

    This is a standalone function to allow extensions to wrap it.

    Because we use the set of immutable changesets as a fallback subset in
    branchmap (see mercurial.branchmap.subsettable), you cannot set "public"
    changesets as "hideable". Doing so would break multiple code assertions and
    lead to crashes."""
    # obsolete changesets are the only hideable candidates by default
    obsoleterevs = obsolete.getrevs(repo, 'obsolete')
    return obsoleterevs
32 32
def pinnedrevs(repo):
    """revisions blocking hidden changesets from being filtered
    """

    cl = repo.changelog
    # working-directory parents and bookmarked revisions are always visible
    pinned = set(par.rev() for par in repo[None].parents())
    pinned.update(cl.rev(bm) for bm in repo._bookmarks.values())

    # local tags also pin their targets (skip tags on unknown nodes)
    localtags = {}
    tagsmod.readlocaltags(repo.ui, repo, localtags, {})
    if localtags:
        rev, nodemap = cl.rev, cl.nodemap
        pinned.update(rev(t[0]) for t in localtags.values() if t[0] in nodemap)
    return pinned
48 48
49 49
50 50 def _revealancestors(pfunc, hidden, revs):
51 51 """reveals contiguous chains of hidden ancestors of 'revs' by removing them
52 52 from 'hidden'
53 53
54 54 - pfunc(r): a funtion returning parent of 'r',
55 55 - hidden: the (preliminary) hidden revisions, to be updated
56 56 - revs: iterable of revnum,
57 57
58 58 (Ancestors are revealed exclusively, i.e. the elements in 'revs' are
59 59 *not* revealed)
60 60 """
61 61 stack = list(revs)
62 62 while stack:
63 63 for p in pfunc(stack.pop()):
64 64 if p != nullrev and p in hidden:
65 65 hidden.remove(p)
66 66 stack.append(p)
67 67
def computehidden(repo, visibilityexceptions=None):
    """compute the set of hidden revision to filter

    During most operation hidden should be filtered."""
    assert not repo.changelog.filteredrevs

    hidden = hideablerevs(repo)
    if not hidden:
        return frozenset(hidden)

    hidden = set(hidden - pinnedrevs(repo))
    if visibilityexceptions:
        hidden -= visibilityexceptions
    pfunc = repo.changelog.parentrevs
    mutable = repo._phasecache.getrevset(repo, (phases.draft, phases.secret))

    # visible mutable revisions drag their hidden ancestors back into view
    visible = mutable - hidden
    _revealancestors(pfunc, hidden, visible)
    return frozenset(hidden)
86 86
def computeunserved(repo, visibilityexceptions=None):
    """compute the set of revision that should be filtered when used a server

    Secret and hidden changeset should not pretend to be here."""
    assert not repo.changelog.filteredrevs
    hiddens = filterrevs(repo, 'visible')
    # fast path in simple case to avoid impact of non optimised code
    if not phases.hassecret(repo):
        return hiddens

    cl = repo.changelog
    secret = phases.secret
    getphase = repo._phasecache.phase
    # scan only from the first secret root upward
    first = min(cl.rev(n) for n in repo._phasecache.phaseroots[secret])
    secrets = set(r for r in cl.revs(start=first)
                  if getphase(repo, r) >= secret)
    return frozenset(hiddens | secrets)
104 104
def computemutable(repo, visibilityexceptions=None):
    """compute the set of revisions that are not public"""
    assert not repo.changelog.filteredrevs
    # fast check to avoid revset call on huge repo
    if not any(repo._phasecache.phaseroots[1:]):
        return frozenset()
    getphase = repo._phasecache.phase
    maymutable = filterrevs(repo, 'base')
    return frozenset(r for r in maymutable if getphase(repo, r))
113 113
def computeimpactable(repo, visibilityexceptions=None):
    """Everything impactable by mutable revision

    The immutable filter still have some chance to get invalidated. This will
    happen when:

    - you garbage collect hidden changeset,
    - public phase is moved backward,
    - something is changed in the filtering (this could be fixed)

    This filter out any mutable changeset and any public changeset that may be
    impacted by something happening to a mutable revision.

    This is achieved by filtered everything with a revision number egal or
    higher than the first mutable changeset is filtered."""
    assert not repo.changelog.filteredrevs
    cl = repo.changelog
    # lowest revision belonging to any non-public phase root
    rootrevs = [min(cl.rev(r) for r in roots)
                for roots in repo._phasecache.phaseroots[1:] if roots]
    firstmutable = min([len(cl)] + rootrevs)
    # protect from nullrev root
    firstmutable = max(0, firstmutable)
    return frozenset(xrange(firstmutable, len(cl)))
138 138
# function to compute filtered set
#
# When adding a new filter you MUST update the table at:
#     mercurial.branchmap.subsettable
# Otherwise your filter will have to recompute all its branches cache
# from scratch (very slow).
# 'visible-hidden' reuses computehidden: visibility exceptions are applied
# dynamically in filterrevs(), not baked into the filter function.
filtertable = {'visible': computehidden,
               'visible-hidden': computehidden,
               'served': computeunserved,
               'immutable': computemutable,
               'base': computeimpactable}
149 150
def filterrevs(repo, filtername, visibilityexceptions=None):
    """returns set of filtered revision for this filter name

    visibilityexceptions is a set of revs which are exceptions for
    hidden-state and must be visible. They are dynamic and hence we should not
    cache their result"""
    if filtername not in repo.filteredrevcache:
        func = filtertable[filtername]
        if visibilityexceptions:
            # dynamic exceptions: compute fresh and skip the cache entirely.
            # Note: repo.unfiltered must be *called* — passing the bound
            # method itself would hand the filter function a method object
            # instead of a repository.
            return func(repo.unfiltered(), visibilityexceptions)
        repo.filteredrevcache[filtername] = func(repo.unfiltered())
    return repo.filteredrevcache[filtername]
162 163
class repoview(object):
    """Provide a read/write view of a repo through a filtered changelog

    This object is used to access a filtered version of a repository without
    altering the original repository object itself. We can not alter the
    original object for two main reasons:
    - It prevents the use of a repo with multiple filters at the same time. In
      particular when multiple threads are involved.
    - It makes scope of the filtering harder to control.

    This object behaves very closely to the original repository. All attribute
    operations are done on the original repository:
    - An access to `repoview.someattr` actually returns `repo.someattr`,
    - A write to `repoview.someattr` actually sets value of `repo.someattr`,
    - A deletion of `repoview.someattr` actually drops `someattr`
      from `repo.__dict__`.

    The only exception is the `changelog` property. It is overridden to return
    a (surface) copy of `repo.changelog` with some revisions filtered. The
    `filtername` attribute of the view control the revisions that need to be
    filtered. (the fact the changelog is copied is an implementation detail).

    Unlike attributes, this object intercepts all method calls. This means that
    all methods are run on the `repoview` object with the filtered `changelog`
    property. For this purpose the simple `repoview` class must be mixed with
    the actual class of the repository. This ensures that the resulting
    `repoview` object have the very same methods than the repo object. This
    leads to the property below.

        repoview.method() --> repo.__class__.method(repoview)

    The inheritance has to be done dynamically because `repo` can be of any
    subclasses of `localrepo`. Eg: `bundlerepo` or `statichttprepo`.
    """

    def __init__(self, repo, filtername, visibilityexceptions=None):
        # object.__setattr__ is required: our own __setattr__ forwards all
        # writes to the unfiltered repo
        object.__setattr__(self, r'_unfilteredrepo', repo)
        object.__setattr__(self, r'filtername', filtername)
        object.__setattr__(self, r'_clcachekey', None)
        object.__setattr__(self, r'_clcache', None)
        # revs which are exceptions and must not be hidden
        object.__setattr__(self, r'_visibilityexceptions',
                           visibilityexceptions)

    # not a propertycache on purpose we shall implement a proper cache later
    @property
    def changelog(self):
        """return a filtered version of the changeset

        this changelog must not be used for writing"""
        # some cache may be implemented later
        unfi = self._unfilteredrepo
        unfichangelog = unfi.changelog
        # bypass call to changelog.method
        unfiindex = unfichangelog.index
        unfilen = len(unfiindex) - 1
        unfinode = unfiindex[unfilen - 1][7]

        revs = filterrevs(unfi, self.filtername, self._visibilityexceptions)
        cl = self._clcache
        newkey = (unfilen, unfinode, hash(revs), unfichangelog._delayed)
        # if cl.index is not unfiindex, unfi.changelog would be
        # recreated, and our clcache refers to garbage object
        if (cl is not None and
            (cl.index is not unfiindex or newkey != self._clcachekey)):
            cl = None
        # could have been made None by the previous if
        if cl is None:
            cl = copy.copy(unfichangelog)
            cl.filteredrevs = revs
            object.__setattr__(self, r'_clcache', cl)
            object.__setattr__(self, r'_clcachekey', newkey)
        return cl

    def unfiltered(self):
        """Return an unfiltered version of a repo"""
        return self._unfilteredrepo

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        if name == self.filtername and not visibilityexceptions:
            return self
        return self.unfiltered().filtered(name, visibilityexceptions)

    def __repr__(self):
        return r'<%s:%s %r>' % (self.__class__.__name__,
                                pycompat.sysstr(self.filtername),
                                self.unfiltered())

    # everything access are forwarded to the proxied repo
    def __getattr__(self, attr):
        return getattr(self._unfilteredrepo, attr)

    def __setattr__(self, attr, value):
        return setattr(self._unfilteredrepo, attr, value)

    def __delattr__(self, attr):
        return delattr(self._unfilteredrepo, attr)
261 262
# Python <3.4 easily leaks types via __mro__. See
# https://bugs.python.org/issue17950. We cache dynamically created types
# so they won't be leaked on every invocation of repo.filtered().
# Maps a localrepo (sub)class -> its repoview-mixed subclass; weak keys let
# entries die with the base class.
_filteredrepotypes = weakref.WeakKeyDictionary()
266 267
def newtype(base):
    """Create a new type with the repoview mixin and the given base class"""
    try:
        return _filteredrepotypes[base]
    except KeyError:
        # first request for this base: build and cache the mixed class
        class filteredrepo(repoview, base):
            pass
        _filteredrepotypes[base] = filteredrepo
        return filteredrepo
General Comments 0
You need to be logged in to leave comments. Login now