##// END OF EJS Templates
repoview: update documentation of _getstatichidden...
Pierre-Yves David -
r24615:9e558b78 default
parent child Browse files
Show More
@@ -1,339 +1,345 b''
1 1 # repoview.py - Filtered view of a localrepo object
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 import collections
10 10 import copy
11 11 import error
12 12 import phases
13 13 import util
14 14 import obsolete
15 15 import struct
16 16 import tags as tagsmod
17 17 from node import nullrev
18 18
def hideablerevs(repo):
    """Return the revisions that are candidates for hiding.

    Kept as a standalone function so that extensions can wrap it."""
    return obsolete.getrevs(repo, 'obsolete')
24 24
def _getstatichidden(repo):
    """Revision to be hidden (disregarding dynamic blocker)

    To keep a consistent graph, we cannot hide any revisions with
    non-hidden descendants. This function computes the set of
    revisions that could be hidden while keeping the graph consistent.

    A second pass will be done to apply "dynamic blocker" like bookmarks or
    working directory parents.

    """
    assert not repo.changelog.filteredrevs
    # rev -> True when hideable and unblocked so far, False once a visible
    # descendant has been seen. Initialized unconditionally so the final
    # return does not raise NameError when there is nothing hideable.
    actuallyhidden = {}
    hideable = hideablerevs(repo)
    if hideable:
        getphase = repo._phasecache.phase
        getparentrevs = repo.changelog.parentrevs
        # Walk from the heads toward the roots; the boolean carried with each
        # revision tells whether a non-hideable descendant blocks hiding it.
        queue = collections.deque((r, False) for r in repo.changelog.headrevs())
        while queue:
            rev, blocked = queue.popleft()
            phase = getphase(repo, rev)
            # Skip nodes which are public (guaranteed to not be hidden) and
            # nodes which have already been processed and won't be blocked by
            # the previous node.
            if phase == 0 or (not blocked and rev in actuallyhidden):
                continue
            if rev in hideable:
                if blocked:
                    actuallyhidden[rev] = False
                else:
                    actuallyhidden.setdefault(rev, True)
            else:
                blocked = True

            for parent in (p for p in getparentrevs(rev) if p != nullrev):
                queue.append((parent, blocked))
    return set(rev for rev, hidden in actuallyhidden.iteritems() if hidden)
56 62
def _getdynamicblockers(repo):
    """Non-cacheable revisions blocking hidden changesets from being filtered.

    Get revisions that will block hidden changesets and are likely to change,
    but unlikely to create hidden blockers. They won't be cached, so be careful
    with adding additional computation."""
    cl = repo.changelog
    blockers = set()
    # working directory parents must stay visible
    for parctx in repo[None].parents():
        blockers.add(parctx.rev())
    # bookmarked changesets must stay visible
    for node in repo._bookmarks.values():
        blockers.add(cl.rev(node))

    localtags = {}
    tagsmod.readlocaltags(repo.ui, repo, localtags, {})
    if localtags:
        rev, nodemap = cl.rev, cl.nodemap
        # entry[0] is the tagged node; skip tags pointing at unknown nodes
        for entry in localtags.values():
            if entry[0] in nodemap:
                blockers.add(rev(entry[0]))
    return blockers
75 81
# version number of the on-disk hidden-revisions cache format
cacheversion = 1
# repository-relative path of the hidden-revisions cache file
cachefile = 'cache/hidden'
78 84
def cachehash(repo, hideable):
    """return sha1 hash of repository data to identify a valid cache.

    We calculate a sha1 of repo heads and the content of the obsstore and write
    it to the cache. Upon reading we can easily validate by checking the hash
    against the stored one and discard the cache in case the hashes don't match.
    """
    hasher = util.sha1()
    # fold in the current heads and a digest of the hideable set
    hasher.update(''.join(repo.heads()))
    hasher.update(str(hash(frozenset(hideable))))
    return hasher.digest()
90 96
def _writehiddencache(cachefile, cachehash, hidden):
    """write hidden data to a cache file"""
    # layout: 2-byte version, 20-byte validation hash, then sorted revs
    # as big-endian 4-byte integers
    revs = sorted(hidden)
    header = struct.pack(">H", cacheversion)
    body = struct.pack('>%ii' % len(revs), *revs)
    cachefile.write(header)
    cachefile.write(cachehash)
    cachefile.write(body)
97 103
def trywritehiddencache(repo, hideable, hidden):
    """write cache of hidden changesets to disk

    Will not write the cache if a wlock cannot be obtained lazily.
    The cache consists of a head of 22byte:
       2 byte version number of the cache
      20 byte sha1 to validate the cache
       n*4 byte hidden revs
    """
    wlock = fh = None
    try:
        try:
            wlock = repo.wlock(wait=False)
            # write cache to file
            newhash = cachehash(repo, hideable)
            fh = repo.vfs.open(cachefile, 'w+b', atomictemp=True)
            _writehiddencache(fh, newhash, hidden)
        except (IOError, OSError):
            # best effort: failing to write the cache is not fatal.
            # ui.debug messages must be newline-terminated.
            repo.ui.debug('error writing hidden changesets cache\n')
    except error.LockHeld:
        repo.ui.debug('cannot obtain lock to write '
                      'hidden changesets cache\n')
    finally:
        if fh:
            fh.close()
        if wlock:
            wlock.release()
124 130
def tryreadcache(repo, hideable):
    """read a cache if the cache exists and is valid, otherwise returns None."""
    fh = None
    try:
        if not repo.vfs.exists(cachefile):
            return None
        fh = repo.vfs.open(cachefile, 'rb')
        version, = struct.unpack(">H", fh.read(2))
        oldhash = fh.read(20)
        newhash = cachehash(repo, hideable)
        # stale format version or repo changed since write -> cache invalid
        if version != cacheversion or oldhash != newhash:
            return None
        # cache is valid, so we can start reading the hidden revs
        data = fh.read()
        count = len(data) / 4
        return frozenset(struct.unpack('>%ii' % count, data))
    finally:
        if fh:
            fh.close()
143 149
def computehidden(repo):
    """compute the set of hidden revision to filter

    During most operation hidden should be filtered."""
    assert not repo.changelog.filteredrevs

    hideable = hideablerevs(repo)
    if not hideable:
        return frozenset()

    cl = repo.changelog
    # try the disk cache first; recompute and write it back on a miss
    hidden = tryreadcache(repo, hideable)
    if hidden is None:
        hidden = frozenset(_getstatichidden(repo))
        trywritehiddencache(repo, hideable, hidden)

    # check if we have wd parents, bookmarks or tags pointing to hidden
    # changesets and remove those.
    dynamic = hidden & _getdynamicblockers(repo)
    if dynamic:
        blocked = cl.ancestors(dynamic, inclusive=True)
        hidden = frozenset(r for r in hidden if r not in blocked)
    return hidden
166 172
def computeunserved(repo):
    """compute the set of revision that should be filtered when used a server

    Secret and hidden changeset should not pretend to be here."""
    assert not repo.changelog.filteredrevs
    # fast path in simple case to avoid impact of non optimised code
    hiddens = filterrevs(repo, 'visible')
    if not phases.hassecret(repo):
        return hiddens
    cl = repo.changelog
    secret = phases.secret
    getphase = repo._phasecache.phase
    # scan only from the first secret root onward
    first = min(cl.rev(n) for n in repo._phasecache.phaseroots[secret])
    secrets = set(r for r in cl.revs(start=first)
                  if getphase(repo, r) >= secret)
    return frozenset(hiddens | secrets)
184 190
def computemutable(repo):
    """compute the set of revision that should be filtered when used a server

    Secret and hidden changeset should not pretend to be here."""
    assert not repo.changelog.filteredrevs
    # fast check to avoid revset call on huge repo
    if not util.any(repo._phasecache.phaseroots[1:]):
        return frozenset()
    getphase = repo._phasecache.phase
    maymutable = filterrevs(repo, 'base')
    return frozenset(r for r in maymutable if getphase(repo, r))
196 202
def computeimpactable(repo):
    """Everything impactable by mutable revision

    The immutable filter still have some chance to get invalidated. This will
    happen when:

    - you garbage collect hidden changeset,
    - public phase is moved backward,
    - something is changed in the filtering (this could be fixed)

    This filter out any mutable changeset and any public changeset that may be
    impacted by something happening to a mutable revision.

    This is achieved by filtered everything with a revision number egal or
    higher than the first mutable changeset is filtered."""
    assert not repo.changelog.filteredrevs
    cl = repo.changelog
    firstmutable = len(cl)
    # lowest revision among all non-public phase roots
    for roots in repo._phasecache.phaseroots[1:]:
        if roots:
            candidate = min(cl.rev(r) for r in roots)
            if candidate < firstmutable:
                firstmutable = candidate
    # protect from nullrev root
    if firstmutable < 0:
        firstmutable = 0
    return frozenset(xrange(firstmutable, len(cl)))
221 227
# function to compute filtered set
#
# maps each filter name to the function computing its filtered revisions
#
# When adding a new filter you MUST update the table at:
#    mercurial.branchmap.subsettable
# Otherwise your filter will have to recompute all its branches cache
# from scratch (very slow).
filtertable = {'visible': computehidden,
               'served': computeunserved,
               'immutable': computemutable,
               'base': computeimpactable}
232 238
def filterrevs(repo, filtername):
    """returns set of filtered revision for this filter name"""
    cache = repo.filteredrevcache
    if filtername not in cache:
        # always compute against the unfiltered repository
        compute = filtertable[filtername]
        cache[filtername] = compute(repo.unfiltered())
    return cache[filtername]
239 245
class repoview(object):
    """Provide a read/write view of a repo through a filtered changelog

    This object is used to access a filtered version of a repository without
    altering the original repository object itself. We can not alter the
    original object for two main reasons:
    - It prevents the use of a repo with multiple filters at the same time. In
      particular when multiple threads are involved.
    - It makes scope of the filtering harder to control.

    This object behaves very closely to the original repository. All attribute
    operations are done on the original repository:
    - An access to `repoview.someattr` actually returns `repo.someattr`,
    - A write to `repoview.someattr` actually sets value of `repo.someattr`,
    - A deletion of `repoview.someattr` actually drops `someattr`
      from `repo.__dict__`.

    The only exception is the `changelog` property. It is overridden to return
    a (surface) copy of `repo.changelog` with some revisions filtered. The
    `filtername` attribute of the view control the revisions that need to be
    filtered. (the fact the changelog is copied is an implementation detail).

    Unlike attributes, this object intercepts all method calls. This means that
    all methods are run on the `repoview` object with the filtered `changelog`
    property. For this purpose the simple `repoview` class must be mixed with
    the actual class of the repository. This ensures that the resulting
    `repoview` object have the very same methods than the repo object. This
    leads to the property below.

        repoview.method() --> repo.__class__.method(repoview)

    The inheritance has to be done dynamically because `repo` can be of any
    subclasses of `localrepo`. Eg: `bundlerepo` or `statichttprepo`.
    """

    def __init__(self, repo, filtername):
        # use object.__setattr__ to bypass our own __setattr__, which would
        # otherwise forward these writes to the unfiltered repo
        object.__setattr__(self, '_unfilteredrepo', repo)
        object.__setattr__(self, 'filtername', filtername)
        # cached filtered changelog copy and the key it was built from
        object.__setattr__(self, '_clcachekey', None)
        object.__setattr__(self, '_clcache', None)

    # not a propertycache on purpose we shall implement a proper cache later
    @property
    def changelog(self):
        """return a filtered version of the changeset

        this changelog must not be used for writing"""
        # some cache may be implemented later
        unfi = self._unfilteredrepo
        unfichangelog = unfi.changelog
        revs = filterrevs(unfi, self.filtername)
        cl = self._clcache
        # key capturing the unfiltered changelog state and the filter content;
        # a mismatch on any component invalidates the cached copy
        newkey = (len(unfichangelog), unfichangelog.tip(), hash(revs),
                  unfichangelog._delayed)
        if cl is not None:
            # we need to check curkey too for some obscure reason.
            # MQ test show a corruption of the underlying repo (in _clcache)
            # without change in the cachekey.
            oldfilter = cl.filteredrevs
            try:
                cl.filteredrevs = ()  # disable filtering for tip
                curkey = (len(cl), cl.tip(), hash(oldfilter), cl._delayed)
            finally:
                cl.filteredrevs = oldfilter
            if newkey != self._clcachekey or newkey != curkey:
                cl = None
        # could have been made None by the previous if
        if cl is None:
            # shallow copy so we can install our own filteredrevs on it
            cl = copy.copy(unfichangelog)
            cl.filteredrevs = revs
            object.__setattr__(self, '_clcache', cl)
            object.__setattr__(self, '_clcachekey', newkey)
        return cl

    def unfiltered(self):
        """Return an unfiltered version of a repo"""
        return self._unfilteredrepo

    def filtered(self, name):
        """Return a filtered version of a repository"""
        if name == self.filtername:
            return self
        return self.unfiltered().filtered(name)

    # everything access are forwarded to the proxied repo
    def __getattr__(self, attr):
        return getattr(self._unfilteredrepo, attr)

    def __setattr__(self, attr, value):
        return setattr(self._unfilteredrepo, attr, value)

    def __delattr__(self, attr):
        return delattr(self._unfilteredrepo, attr)

    # The `requirements` attribute is initialized during __init__. But
    # __getattr__ won't be called as it also exists on the class. We need
    # explicit forwarding to main repo here
    @property
    def requirements(self):
        return self._unfilteredrepo.requirements
General Comments 0
You need to be logged in to leave comments. Login now