##// END OF EJS Templates
repoview: move '_getdynamicblock' next to 'hideablerevs'...
marmoute -
r32426:06aa645e default
parent child Browse files
Show More
@@ -1,363 +1,363 b''
1 1 # repoview.py - Filtered view of a localrepo object
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import copy
12 12 import hashlib
13 13 import heapq
14 14 import struct
15 15
16 16 from .node import nullrev
17 17 from . import (
18 18 error,
19 19 obsolete,
20 20 phases,
21 21 tags as tagsmod,
22 22 )
23 23
def hideablerevs(repo):
    """Return the candidate revisions that may be hidden.

    Kept as a standalone function so extensions can wrap it.

    Note: "public" changesets must never be made hideable.  The set of
    immutable changesets serves as a fallback subset in branchmap (see
    mercurial.branchmap.subsettable); making public changesets hideable
    would break multiple code assertions and lead to crashes."""
    return obsolete.getrevs(repo, 'obsolete')
34 34
def _getdynamicblockers(repo):
    """Return non-cacheable revisions keeping hidden changesets visible.

    These revisions pin hidden changesets and are likely to change between
    invocations, but unlikely to create hidden blockers.  They are never
    cached, so keep this computation cheap."""
    changelog = repo.changelog
    # Working directory parents always block hiding.
    blocked = set(p.rev() for p in repo[None].parents())
    # So does anything a bookmark points at.
    blocked.update(changelog.rev(node) for node in repo._bookmarks.values())

    localtags = {}
    tagsmod.readlocaltags(repo.ui, repo, localtags, {})
    if localtags:
        torev, nodemap = changelog.rev, changelog.nodemap
        # Only consider local tags whose node is actually known here.
        blocked.update(torev(t[0]) for t in localtags.values()
                       if t[0] in nodemap)
    return blocked
53
def _getstatichidden(repo):
    """Revision to be hidden (disregarding dynamic blocker)

    To keep a consistent graph, we cannot hide any revisions with
    non-hidden descendants. This function computes the set of
    revisions that could be hidden while keeping the graph consistent.

    A second pass will be done to apply "dynamic blocker" like bookmarks or
    working directory parents.

    """
    assert not repo.changelog.filteredrevs
    hidden = set(hideablerevs(repo))
    if hidden:
        getphase = repo._phasecache.phase
        getparentrevs = repo.changelog.parentrevs
        # Skip heads which are public (guaranteed to not be hidden).
        # Revisions are pushed negated so the highest rev pops first from
        # the min-heap: children are always processed before their parents.
        heap = [-r for r in repo.changelog.headrevs() if getphase(repo, r)]
        heapq.heapify(heap)
        heappop = heapq.heappop
        heappush = heapq.heappush
        seen = set()  # no need to init it with heads, they have no children
        while heap:
            rev = -heappop(heap)
            # All children have been processed so at that point, if no children
            # removed 'rev' from the 'hidden' set, 'rev' is going to be hidden.
            blocker = rev not in hidden
            for parent in getparentrevs(rev):
                if parent == nullrev:
                    continue
                if blocker:
                    # If visible, ensure parent will be visible too
                    hidden.discard(parent)
                # - Avoid adding the same revision twice
                # - Skip nodes which are public (guaranteed to not be hidden)
                pre = len(seen)
                seen.add(parent)
                # NOTE(review): the phase test is on 'rev', not 'parent' --
                # presumably a deliberate shortcut (pushing an occasional
                # public parent is harmless); confirm this is intentional.
                if pre < len(seen) and getphase(repo, rev):
                    heappush(heap, -parent)
    return hidden
75 94
# NOTE(review): this definition duplicates _getdynamicblockers earlier in
# this chunk; the chunk appears to be a diff view showing the function at
# both its old and new location, and only one copy survives in the module.
def _getdynamicblockers(repo):
    """Non-cacheable revisions blocking hidden changesets from being filtered.

    Get revisions that will block hidden changesets and are likely to change,
    but unlikely to create hidden blockers. They won't be cached, so be careful
    with adding additional computation."""

    cl = repo.changelog
    blockers = set()
    # working directory parents must stay visible
    blockers.update([par.rev() for par in repo[None].parents()])
    # bookmarked changesets must stay visible
    blockers.update([cl.rev(bm) for bm in repo._bookmarks.values()])

    tags = {}
    tagsmod.readlocaltags(repo.ui, repo, tags, {})
    if tags:
        rev, nodemap = cl.rev, cl.nodemap
        # only consider local tags whose node is actually known locally
        blockers.update(rev(t[0]) for t in tags.values() if t[0] in nodemap)
    return blockers
94
# on-disk format version of the hidden-revs cache (bump on layout change)
cacheversion = 1
# cache file name, resolved through repo.vfs
cachefile = 'cache/hidden'
97 97
def cachehash(repo, hideable):
    """Return a sha1 digest identifying a valid hidden-revs cache.

    The digest covers the repository heads and the hideable set.  It is
    written alongside the cache; on read the stored digest is compared
    against a freshly computed one, and the cache is discarded when they
    do not match."""
    hasher = hashlib.sha1()
    hasher.update(''.join(repo.heads()))
    hasher.update('%d' % hash(frozenset(hideable)))
    return hasher.digest()
109 109
def _writehiddencache(cachefile, cachehash, hidden):
    """Serialize the hidden set into an already-open cache file.

    Layout: 2-byte big-endian version, 20-byte validation hash, then the
    hidden revisions as sorted big-endian 4-byte integers."""
    cachefile.write(struct.pack(">H", cacheversion))
    cachefile.write(cachehash)
    cachefile.write(struct.pack('>%ii' % len(hidden), *sorted(hidden)))
116 116
def trywritehiddencache(repo, hideable, hidden):
    """write cache of hidden changesets to disk

    Will not write the cache if a wlock cannot be obtained lazily.
    The cache consists of a head of 22byte:
       2 byte version number of the cache
      20 byte sha1 to validate the cache
     n*4 byte hidden revs
    """
    wlock = fh = None
    try:
        # best effort: never block waiting for the write lock
        wlock = repo.wlock(wait=False)
        # write cache to file
        newhash = cachehash(repo, hideable)
        fh = repo.vfs.open(cachefile, 'w+b', atomictemp=True)
        _writehiddencache(fh, newhash, hidden)
        fh.close()
    except (IOError, OSError):
        # the cache is opportunistic; failing to write only costs a
        # recompute next time
        repo.ui.debug('error writing hidden changesets cache\n')
        # NOTE(review): if the error occurs after the open(), 'fh' is not
        # explicitly closed here -- presumably relying on garbage
        # collection of the atomictemp file; confirm this is acceptable.
    except error.LockHeld:
        repo.ui.debug('cannot obtain lock to write hidden changesets cache\n')
    finally:
        if wlock:
            wlock.release()
141 141
142 142 def _readhiddencache(repo, cachefilename, newhash):
143 143 hidden = fh = None
144 144 try:
145 145 if repo.vfs.exists(cachefile):
146 146 fh = repo.vfs.open(cachefile, 'rb')
147 147 version, = struct.unpack(">H", fh.read(2))
148 148 oldhash = fh.read(20)
149 149 if (cacheversion, oldhash) == (version, newhash):
150 150 # cache is valid, so we can start reading the hidden revs
151 151 data = fh.read()
152 152 count = len(data) / 4
153 153 hidden = frozenset(struct.unpack('>%ii' % count, data))
154 154 return hidden
155 155 except struct.error:
156 156 repo.ui.debug('corrupted hidden cache\n')
157 157 # No need to fix the content as it will get rewritten
158 158 return None
159 159 except (IOError, OSError):
160 160 repo.ui.debug('cannot read hidden cache\n')
161 161 return None
162 162 finally:
163 163 if fh:
164 164 fh.close()
165 165
def tryreadcache(repo, hideable):
    """Return the cached hidden set when present and valid, else None."""
    return _readhiddencache(repo, cachefile, cachehash(repo, hideable))
170 170
def computehidden(repo):
    """Compute the set of revisions filtered by the 'visible' filter.

    Hidden revisions should be filtered out during most operations."""
    assert not repo.changelog.filteredrevs

    hideable = hideablerevs(repo)
    if not hideable:
        return frozenset()

    cl = repo.changelog
    hidden = tryreadcache(repo, hideable)
    if hidden is None:
        # no valid cache: recompute from scratch and try to persist it
        hidden = frozenset(_getstatichidden(repo))
        trywritehiddencache(repo, hideable, hidden)

    # Unhide anything reachable from working directory parents, bookmarks
    # or local tags that currently point at hidden changesets.
    pinned = hidden & _getdynamicblockers(repo)
    if pinned:
        visible = cl.ancestors(pinned, inclusive=True)
        hidden = frozenset(r for r in hidden if r not in visible)
    return hidden
193 193
def computeunserved(repo):
    """Compute the set of revisions filtered when the repo is served.

    Secret and hidden changesets must not be advertised to clients."""
    assert not repo.changelog.filteredrevs
    # Fast path: without secret changesets, 'served' equals 'visible'.
    hiddens = filterrevs(repo, 'visible')
    if not phases.hassecret(repo):
        return hiddens
    cl = repo.changelog
    secret = phases.secret
    getphase = repo._phasecache.phase
    # Only revisions at or after the first secret root can be secret.
    first = min(cl.rev(n) for n in repo._phasecache.phaseroots[secret])
    secrets = set(r for r in cl.revs(start=first)
                  if getphase(repo, r) >= secret)
    return frozenset(hiddens | secrets)
211 211
def computemutable(repo):
    """Compute the set of revisions filtered by the 'immutable' filter.

    Every mutable (non-public phase) changeset is filtered out.  The
    candidates are restricted to the revisions already filtered by the
    'base' filter, i.e. everything at or after the first mutable root.

    (The previous docstring was copy-pasted from computeunserved and
    described the wrong filter.)
    """
    assert not repo.changelog.filteredrevs
    # fast check to avoid revset call on huge repo
    if any(repo._phasecache.phaseroots[1:]):
        getphase = repo._phasecache.phase
        maymutable = filterrevs(repo, 'base')
        return frozenset(r for r in maymutable if getphase(repo, r))
    return frozenset()
223 223
def computeimpactable(repo):
    """Compute everything impactable by a mutable revision.

    The immutable filter can still be invalidated when:

    - hidden changesets are garbage collected,
    - the public phase is moved backward,
    - something changes in the filtering itself (this could be fixed).

    This filters out every mutable changeset and every public changeset
    that may be impacted by something happening to a mutable revision:
    any revision whose number is equal to or higher than the first
    mutable changeset is filtered."""
    assert not repo.changelog.filteredrevs
    cl = repo.changelog
    # smallest revision among the mutable phase roots, if any
    rootrevs = [min(cl.rev(r) for r in roots)
                for roots in repo._phasecache.phaseroots[1:] if roots]
    firstmutable = min([len(cl)] + rootrevs)
    # protect from nullrev root
    firstmutable = max(0, firstmutable)
    return frozenset(xrange(firstmutable, len(cl)))
248 248
# map of filter name -> function computing that filter's filtered set
#
# When adding a new filter you MUST update the table at:
#    mercurial.branchmap.subsettable
# Otherwise your filter will have to recompute all its branches cache
# from scratch (very slow).
filtertable = {'visible': computehidden,
               'served': computeunserved,
               'immutable': computemutable,
               'base': computeimpactable}
259 259
def filterrevs(repo, filtername):
    """Return the set of revisions filtered for this filter name.

    Results are memoized per-repo in ``repo.filteredrevcache``."""
    cache = repo.filteredrevcache
    if filtername not in cache:
        compute = filtertable[filtername]
        cache[filtername] = compute(repo.unfiltered())
    return cache[filtername]
266 266
class repoview(object):
    """Provide a read/write view of a repo through a filtered changelog

    This object is used to access a filtered version of a repository without
    altering the original repository object itself. We can not alter the
    original object for two main reasons:
    - It prevents the use of a repo with multiple filters at the same time. In
      particular when multiple threads are involved.
    - It makes scope of the filtering harder to control.

    This object behaves very closely to the original repository. All attribute
    operations are done on the original repository:
    - An access to `repoview.someattr` actually returns `repo.someattr`,
    - A write to `repoview.someattr` actually sets value of `repo.someattr`,
    - A deletion of `repoview.someattr` actually drops `someattr`
      from `repo.__dict__`.

    The only exception is the `changelog` property. It is overridden to return
    a (surface) copy of `repo.changelog` with some revisions filtered. The
    `filtername` attribute of the view control the revisions that need to be
    filtered. (the fact the changelog is copied is an implementation detail).

    Unlike attributes, this object intercepts all method calls. This means that
    all methods are run on the `repoview` object with the filtered `changelog`
    property. For this purpose the simple `repoview` class must be mixed with
    the actual class of the repository. This ensures that the resulting
    `repoview` object have the very same methods than the repo object. This
    leads to the property below.

        repoview.method() --> repo.__class__.method(repoview)

    The inheritance has to be done dynamically because `repo` can be of any
    subclasses of `localrepo`. Eg: `bundlerepo` or `statichttprepo`.
    """

    def __init__(self, repo, filtername):
        # use object.__setattr__ directly: our own __setattr__ forwards
        # every write to the wrapped repository
        object.__setattr__(self, r'_unfilteredrepo', repo)
        object.__setattr__(self, r'filtername', filtername)
        # one-entry cache for the filtered changelog copy; the key captures
        # (length, tip node, filtered-set hash, delayed flag)
        object.__setattr__(self, r'_clcachekey', None)
        object.__setattr__(self, r'_clcache', None)

    # not a propertycache on purpose we shall implement a proper cache later
    @property
    def changelog(self):
        """return a filtered version of the changeset

        this changelog must not be used for writing"""
        # some cache may be implemented later
        unfi = self._unfilteredrepo
        unfichangelog = unfi.changelog
        # bypass call to changelog.method
        unfiindex = unfichangelog.index
        # NOTE(review): the -1 offsets suggest the index carries a trailing
        # sentinel entry, and field 7 of an index entry is presumably the
        # node of that revision -- confirm against the revlog index layout
        unfilen = len(unfiindex) - 1
        unfinode = unfiindex[unfilen - 1][7]

        revs = filterrevs(unfi, self.filtername)
        cl = self._clcache
        newkey = (unfilen, unfinode, hash(revs), unfichangelog._delayed)
        # if cl.index is not unfiindex, unfi.changelog would be
        # recreated, and our clcache refers to garbage object
        if (cl is not None and
            (cl.index is not unfiindex or newkey != self._clcachekey)):
            cl = None
        # could have been made None by the previous if
        if cl is None:
            # shallow-copy the unfiltered changelog and apply the filtered
            # set on the copy only, leaving the original untouched
            cl = copy.copy(unfichangelog)
            cl.filteredrevs = revs
            object.__setattr__(self, r'_clcache', cl)
            object.__setattr__(self, r'_clcachekey', newkey)
        return cl

    def unfiltered(self):
        """Return an unfiltered version of a repo"""
        return self._unfilteredrepo

    def filtered(self, name):
        """Return a filtered version of a repository"""
        if name == self.filtername:
            return self
        return self.unfiltered().filtered(name)

    # everything access are forwarded to the proxied repo
    def __getattr__(self, attr):
        return getattr(self._unfilteredrepo, attr)

    def __setattr__(self, attr, value):
        return setattr(self._unfilteredrepo, attr, value)

    def __delattr__(self, attr):
        return delattr(self._unfilteredrepo, attr)

    # The `requirements` attribute is initialized during __init__. But
    # __getattr__ won't be called as it also exists on the class. We need
    # explicit forwarding to main repo here
    @property
    def requirements(self):
        return self._unfilteredrepo.requirements
General Comments 0
You need to be logged in to leave comments. Login now