##// END OF EJS Templates
repoview: directly skip public head in _getstatichidden...
Pierre-Yves David -
r24618:cde57a8d default
parent child Browse files
Show More
@@ -1,344 +1,345
1 1 # repoview.py - Filtered view of a localrepo object
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 import heapq
10 10 import copy
11 11 import error
12 12 import phases
13 13 import util
14 14 import obsolete
15 15 import struct
16 16 import tags as tagsmod
17 17 from node import nullrev
18 18
def hideablerevs(repo):
    """Return the candidate revisions that may be hidden.

    Kept as a standalone function so that extensions can wrap it."""
    candidates = obsolete.getrevs(repo, 'obsolete')
    return candidates
24 24
def _getstatichidden(repo):
    """Revision to be hidden (disregarding dynamic blocker)

    To keep a consistent graph, we cannot hide any revisions with
    non-hidden descendants. This function computes the set of
    revisions that could be hidden while keeping the graph consistent.

    A second pass will be done to apply "dynamic blocker" like bookmarks or
    working directory parents.

    """
    assert not repo.changelog.filteredrevs
    hidden = set(hideablerevs(repo))
    if hidden:
        getphase = repo._phasecache.phase
        getparentrevs = repo.changelog.parentrevs
        # Skip heads which are public (guaranteed to not be hidden)
        # Revisions are stored negated: heapq is a min-heap, so popping
        # yields the *highest* revision first, i.e. children before their
        # parents (a child always has a larger rev number than its parents).
        heap = [-r for r in repo.changelog.headrevs() if getphase(repo, r)]
        heapq.heapify(heap)
        heappop = heapq.heappop
        heappush = heapq.heappush
        while heap:
            rev = -heappop(heap)
            # Skip nodes which are public (guaranteed to not be hidden)
            if not getphase(repo, rev):
                continue
            # All children have been processed so at that point, if no children
            # removed 'rev' from the 'hidden' set, 'rev' is going to be hidden.
            blocker = rev not in hidden
            for parent in getparentrevs(rev):
                if parent == nullrev:
                    continue
                if blocker:
                    # If visible, ensure parent will be visible too
                    hidden.discard(parent)
                # NOTE: a rev may be pushed (and popped) several times, once
                # per child; the phase check above makes the re-visits cheap.
                heappush(heap, -parent)
    return hidden
61 62
def _getdynamicblockers(repo):
    """Non-cacheable revisions blocking hidden changesets from being filtered.

    Collect the revisions that must stay visible even though they are
    hideable, because something volatile points at them. They change often
    but rarely, so they are never cached; keep the computation cheap."""
    cl = repo.changelog
    # working directory parents always stay visible
    blockers = set(p.rev() for p in repo[None].parents())
    # so does anything a bookmark points to
    for node in repo._bookmarks.values():
        blockers.add(cl.rev(node))
    # ... and local tags whose target still exists in the changelog
    localtags = {}
    tagsmod.readlocaltags(repo.ui, repo, localtags, {})
    if localtags:
        nodemap = cl.nodemap
        for entry in localtags.values():
            if entry[0] in nodemap:
                blockers.add(cl.rev(entry[0]))
    return blockers
80 81
# version number of the on-disk hidden-revs cache format; stored in the
# first two bytes of the file and checked on read (see tryreadcache)
cacheversion = 1
# location of the hidden-revs cache, opened through repo.vfs
cachefile = 'cache/hidden'
83 84
def cachehash(repo, hideable):
    """return sha1 hash of repository data to identify a valid cache.

    The hash covers the repository heads and the set of hideable revisions.
    It is stored alongside the cache; on read, a mismatch between the stored
    and the freshly computed hash invalidates the cache."""
    key = util.sha1()
    for piece in (''.join(repo.heads()),
                  str(hash(frozenset(hideable)))):
        key.update(piece)
    return key.digest()
95 96
def _writehiddencache(cachefile, cachehash, hidden):
    """write hidden data to a cache file"""
    # layout: 2-byte version, 20-byte hash, then one 4-byte big-endian
    # signed int per hidden revision, in ascending order
    revs = sorted(hidden)
    header = struct.pack(">H", cacheversion)
    body = struct.pack('>%ii' % len(revs), *revs)
    cachefile.write(header)
    cachefile.write(cachehash)
    cachefile.write(body)
102 103
def trywritehiddencache(repo, hideable, hidden):
    """write cache of hidden changesets to disk

    Will not write the cache if a wlock cannot be obtained lazily.
    The cache consists of a head of 22byte:
       2 byte version number of the cache
       20 byte sha1 to validate the cache
       n*4 byte hidden revs
    """
    wlock = fh = None
    try:
        try:
            # wait=False: never block a read operation on cache writing;
            # error.LockHeld is caught below and the write is simply skipped
            wlock = repo.wlock(wait=False)
            # write cache to file
            newhash = cachehash(repo, hideable)
            fh = repo.vfs.open(cachefile, 'w+b', atomictemp=True)
            _writehiddencache(fh, newhash, hidden)
        except (IOError, OSError):
            # best-effort cache: I/O trouble must not abort the caller
            # NOTE(review): debug messages lack a trailing '\n' — confirm
            # against ui.debug conventions elsewhere in the codebase
            repo.ui.debug('error writing hidden changesets cache')
        except error.LockHeld:
            repo.ui.debug('cannot obtain lock to write hidden changesets cache')
    finally:
        # release resources even when an unexpected exception propagates
        if fh:
            fh.close()
        if wlock:
            wlock.release()
129 130
def tryreadcache(repo, hideable):
    """read a cache if the cache exists and is valid, otherwise returns None.

    The cache is valid when both the stored format version matches
    ``cacheversion`` and the stored hash matches ``cachehash(repo,
    hideable)``; otherwise the stale cache is ignored."""
    hidden = fh = None
    try:
        if repo.vfs.exists(cachefile):
            fh = repo.vfs.open(cachefile, 'rb')
            version, = struct.unpack(">H", fh.read(2))
            oldhash = fh.read(20)
            newhash = cachehash(repo, hideable)
            if (cacheversion, oldhash) == (version, newhash):
                # cache is valid, so we can start reading the hidden revs
                data = fh.read()
                # each rev is a 4-byte big-endian int; use floor division so
                # the count stays an int even under true division semantics
                count = len(data) // 4
                hidden = frozenset(struct.unpack('>%ii' % count, data))
        return hidden
    finally:
        if fh:
            fh.close()
148 149
def computehidden(repo):
    """compute the set of hidden revision to filter

    During most operation hidden should be filtered."""
    assert not repo.changelog.filteredrevs

    hideable = hideablerevs(repo)
    if not hideable:
        # nothing hideable: the 'visible' filter hides nothing
        return frozenset()

    cl = repo.changelog
    hidden = tryreadcache(repo, hideable)
    if hidden is None:
        # no valid cache: compute from scratch and try to persist the result
        hidden = frozenset(_getstatichidden(repo))
        trywritehiddencache(repo, hideable, hidden)

    # check if we have wd parents, bookmarks or tags pointing to hidden
    # changesets and remove those (and all their ancestors) from the set.
    dynamic = hidden & _getdynamicblockers(repo)
    if dynamic:
        blocked = cl.ancestors(dynamic, inclusive=True)
        hidden = frozenset(r for r in hidden if r not in blocked)
    return hidden
171 172
def computeunserved(repo):
    """compute the set of revisions filtered when the repo acts as a server

    Neither secret nor hidden changesets should pretend to be there."""
    assert not repo.changelog.filteredrevs
    hiddens = filterrevs(repo, 'visible')
    # fast path: without any secret changeset, 'served' equals 'visible'
    if not phases.hassecret(repo):
        return hiddens
    cl = repo.changelog
    secret = phases.secret
    getphase = repo._phasecache.phase
    # no revision below the first secret root can be secret
    first = min(cl.rev(n) for n in repo._phasecache.phaseroots[secret])
    secrets = set(r for r in cl.revs(start=first)
                  if getphase(repo, r) >= secret)
    return frozenset(hiddens | secrets)
189 190
def computemutable(repo):
    """compute the set of revisions to filter for the 'immutable' view

    Every non-public (i.e. mutable: draft or secret) changeset is filtered
    out, on top of what the 'base' filter already hides.

    (The previous docstring was copy-pasted from computeunserved and
    described the server filter instead.)"""
    assert not repo.changelog.filteredrevs
    # fast check to avoid revset call on huge repo
    if util.any(repo._phasecache.phaseroots[1:]):
        getphase = repo._phasecache.phase
        maymutable = filterrevs(repo, 'base')
        # non-zero phase == non-public == mutable
        return frozenset(r for r in maymutable if getphase(repo, r))
    return frozenset()
201 202
def computeimpactable(repo):
    """Everything impactable by mutable revision

    The immutable filter still have some chance to get invalidated. This will
    happen when:

    - you garbage collect hidden changeset,
    - public phase is moved backward,
    - something is changed in the filtering (this could be fixed)

    This filters out every mutable changeset, plus every public changeset
    that may be impacted by something happening to a mutable revision.

    This is achieved by filtering out everything whose revision number is
    equal to or higher than the first mutable changeset."""
    assert not repo.changelog.filteredrevs
    cl = repo.changelog
    # revision numbers of every non-public phase root
    rootrevs = [cl.rev(node)
                for roots in repo._phasecache.phaseroots[1:]
                for node in roots]
    # len(cl) as default: with no mutable root, nothing gets filtered
    firstmutable = min([len(cl)] + rootrevs)
    # protect from nullrev root
    firstmutable = max(0, firstmutable)
    return frozenset(xrange(firstmutable, len(cl)))
226 227
# function to compute filtered set
#
# Maps a filter name to the function computing its frozenset of filtered
# revisions; results are memoized in repo.filteredrevcache by filterrevs.
#
# When adding a new filter you MUST update the table at:
#    mercurial.branchmap.subsettable
# Otherwise your filter will have to recompute all its branches cache
# from scratch (very slow).
filtertable = {'visible': computehidden,
               'served': computeunserved,
               'immutable': computemutable,
               'base': computeimpactable}
237 238
def filterrevs(repo, filtername):
    """returns set of filtered revision for this filter name"""
    cache = repo.filteredrevcache
    if filtername not in cache:
        # always compute against the unfiltered repo to avoid recursion
        compute = filtertable[filtername]
        cache[filtername] = compute(repo.unfiltered())
    return cache[filtername]
244 245
class repoview(object):
    """Provide a read/write view of a repo through a filtered changelog

    This object is used to access a filtered version of a repository without
    altering the original repository object itself. We can not alter the
    original object for two main reasons:
    - It prevents the use of a repo with multiple filters at the same time. In
      particular when multiple threads are involved.
    - It makes scope of the filtering harder to control.

    This object behaves very closely to the original repository. All attribute
    operations are done on the original repository:
    - An access to `repoview.someattr` actually returns `repo.someattr`,
    - A write to `repoview.someattr` actually sets value of `repo.someattr`,
    - A deletion of `repoview.someattr` actually drops `someattr`
      from `repo.__dict__`.

    The only exception is the `changelog` property. It is overridden to return
    a (surface) copy of `repo.changelog` with some revisions filtered. The
    `filtername` attribute of the view control the revisions that need to be
    filtered. (the fact the changelog is copied is an implementation detail).

    Unlike attributes, this object intercepts all method calls. This means that
    all methods are run on the `repoview` object with the filtered `changelog`
    property. For this purpose the simple `repoview` class must be mixed with
    the actual class of the repository. This ensures that the resulting
    `repoview` object have the very same methods than the repo object. This
    leads to the property below.

        repoview.method() --> repo.__class__.method(repoview)

    The inheritance has to be done dynamically because `repo` can be of any
    subclasses of `localrepo`. Eg: `bundlerepo` or `statichttprepo`.
    """

    def __init__(self, repo, filtername):
        # object.__setattr__ is required: our own __setattr__ below forwards
        # every write to the wrapped repo, which is not what we want here
        object.__setattr__(self, '_unfilteredrepo', repo)
        object.__setattr__(self, 'filtername', filtername)
        # cache of the last filtered changelog copy and its validity key
        object.__setattr__(self, '_clcachekey', None)
        object.__setattr__(self, '_clcache', None)

    # not a propertycache on purpose we shall implement a proper cache later
    @property
    def changelog(self):
        """return a filtered version of the changeset

        this changelog must not be used for writing"""
        # some cache may be implemented later
        unfi = self._unfilteredrepo
        unfichangelog = unfi.changelog
        revs = filterrevs(unfi, self.filtername)
        cl = self._clcache
        # validity key: changelog length, tip, filtered-set identity and
        # pending-write state of the unfiltered changelog
        newkey = (len(unfichangelog), unfichangelog.tip(), hash(revs),
                  unfichangelog._delayed)
        if cl is not None:
            # we need to check curkey too for some obscure reason.
            # MQ test show a corruption of the underlying repo (in _clcache)
            # without change in the cachekey.
            oldfilter = cl.filteredrevs
            try:
                cl.filteredrevs = ()  # disable filtering for tip
                curkey = (len(cl), cl.tip(), hash(oldfilter), cl._delayed)
            finally:
                cl.filteredrevs = oldfilter
            if newkey != self._clcachekey or newkey != curkey:
                cl = None
        # could have been made None by the previous if
        if cl is None:
            # shallow copy so the filtered view never mutates the original
            cl = copy.copy(unfichangelog)
            cl.filteredrevs = revs
            object.__setattr__(self, '_clcache', cl)
            object.__setattr__(self, '_clcachekey', newkey)
        return cl

    def unfiltered(self):
        """Return an unfiltered version of a repo"""
        return self._unfilteredrepo

    def filtered(self, name):
        """Return a filtered version of a repository"""
        if name == self.filtername:
            return self
        return self.unfiltered().filtered(name)

    # everything access are forwarded to the proxied repo
    def __getattr__(self, attr):
        return getattr(self._unfilteredrepo, attr)

    def __setattr__(self, attr, value):
        return setattr(self._unfilteredrepo, attr, value)

    def __delattr__(self, attr):
        return delattr(self._unfilteredrepo, attr)

    # The `requirements` attribute is initialized during __init__. But
    # __getattr__ won't be called as it also exists on the class. We need
    # explicit forwarding to main repo here
    @property
    def requirements(self):
        return self._unfilteredrepo.requirements
General Comments 0
You need to be logged in to leave comments. Login now