repoview: use absolute_import
Gregory Szorc
r25972:f2791911 default
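
For context on the change itself: on Python 2, `from __future__ import absolute_import` disables implicit relative imports, so a bare `import error` inside the mercurial package no longer resolves to the sibling mercurial/error.py, and intra-package modules have to be named relative to the package explicitly. A minimal sketch of the convention this series applies, using a hypothetical package `mypkg` rather than Mercurial itself:

# mypkg/consumer.py - hypothetical module illustrating the import style
from __future__ import absolute_import

# Without the __future__ line, Python 2 would resolve "import sibling" to
# mypkg/sibling.py; with it, that statement only searches sys.path.
from .sibling import helper        # explicit relative import of one name
from . import (                    # grouped relative imports, one per line,
    othersibling,                  # as adopted by repoview.py below
)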
@@ -1,347 +1,352 @@
 # repoview.py - Filtered view of a localrepo object
 #
 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
 #                Logilab SA        <contact@logilab.fr>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
-import heapq
-import copy
-import error
-import phases
-import util
-import obsolete
-import struct
-import tags as tagsmod
-from node import nullrev
+from __future__ import absolute_import
+
+import copy
+import heapq
+import struct
+
+from .node import nullrev
+from . import (
+    error,
+    obsolete,
+    phases,
+    tags as tagsmod,
+    util,
+)
 
 def hideablerevs(repo):
     """Revisions candidates to be hidden
 
     This is a standalone function to help extensions to wrap it."""
     return obsolete.getrevs(repo, 'obsolete')
 
 def _getstatichidden(repo):
     """Revision to be hidden (disregarding dynamic blocker)
 
     To keep a consistent graph, we cannot hide any revisions with
     non-hidden descendants. This function computes the set of
     revisions that could be hidden while keeping the graph consistent.
 
     A second pass will be done to apply "dynamic blocker" like bookmarks or
     working directory parents.
 
     """
     assert not repo.changelog.filteredrevs
     hidden = set(hideablerevs(repo))
     if hidden:
         getphase = repo._phasecache.phase
         getparentrevs = repo.changelog.parentrevs
         # Skip heads which are public (guaranteed to not be hidden)
         heap = [-r for r in repo.changelog.headrevs() if getphase(repo, r)]
         heapq.heapify(heap)
         heappop = heapq.heappop
         heappush = heapq.heappush
         seen = set() # no need to init it with heads, they have no children
         while heap:
             rev = -heappop(heap)
             # All children have been processed so at that point, if no children
             # removed 'rev' from the 'hidden' set, 'rev' is going to be hidden.
             blocker = rev not in hidden
             for parent in getparentrevs(rev):
                 if parent == nullrev:
                     continue
                 if blocker:
                     # If visible, ensure parent will be visible too
                     hidden.discard(parent)
                 # - Avoid adding the same revision twice
                 # - Skip nodes which are public (guaranteed to not be hidden)
                 pre = len(seen)
                 seen.add(parent)
                 if pre < len(seen) and getphase(repo, rev):
                     heappush(heap, -parent)
     return hidden
 
 def _getdynamicblockers(repo):
     """Non-cacheable revisions blocking hidden changesets from being filtered.
 
     Get revisions that will block hidden changesets and are likely to change,
     but unlikely to create hidden blockers. They won't be cached, so be careful
     with adding additional computation."""
 
     cl = repo.changelog
     blockers = set()
     blockers.update([par.rev() for par in repo[None].parents()])
     blockers.update([cl.rev(bm) for bm in repo._bookmarks.values()])
 
     tags = {}
     tagsmod.readlocaltags(repo.ui, repo, tags, {})
     if tags:
         rev, nodemap = cl.rev, cl.nodemap
         blockers.update(rev(t[0]) for t in tags.values() if t[0] in nodemap)
     return blockers
 
 cacheversion = 1
 cachefile = 'cache/hidden'
 
 def cachehash(repo, hideable):
     """return sha1 hash of repository data to identify a valid cache.
 
     We calculate a sha1 of repo heads and the content of the obsstore and write
     it to the cache. Upon reading we can easily validate by checking the hash
     against the stored one and discard the cache in case the hashes don't match.
     """
     h = util.sha1()
     h.update(''.join(repo.heads()))
     h.update(str(hash(frozenset(hideable))))
     return h.digest()
 
 def _writehiddencache(cachefile, cachehash, hidden):
     """write hidden data to a cache file"""
     data = struct.pack('>%ii' % len(hidden), *sorted(hidden))
     cachefile.write(struct.pack(">H", cacheversion))
     cachefile.write(cachehash)
     cachefile.write(data)
 
 def trywritehiddencache(repo, hideable, hidden):
     """write cache of hidden changesets to disk
 
     Will not write the cache if a wlock cannot be obtained lazily.
     The cache consists of a head of 22byte:
        2 byte    version number of the cache
       20 byte    sha1 to validate the cache
        n*4 byte  hidden revs
     """
     wlock = fh = None
     try:
         wlock = repo.wlock(wait=False)
         # write cache to file
         newhash = cachehash(repo, hideable)
         fh = repo.vfs.open(cachefile, 'w+b', atomictemp=True)
         _writehiddencache(fh, newhash, hidden)
     except (IOError, OSError):
         repo.ui.debug('error writing hidden changesets cache')
     except error.LockHeld:
         repo.ui.debug('cannot obtain lock to write hidden changesets cache')
     finally:
         if fh:
             fh.close()
         if wlock:
             wlock.release()
 
 def tryreadcache(repo, hideable):
     """read a cache if the cache exists and is valid, otherwise returns None."""
     hidden = fh = None
     try:
         if repo.vfs.exists(cachefile):
             fh = repo.vfs.open(cachefile, 'rb')
             version, = struct.unpack(">H", fh.read(2))
             oldhash = fh.read(20)
             newhash = cachehash(repo, hideable)
             if (cacheversion, oldhash) == (version, newhash):
                 # cache is valid, so we can start reading the hidden revs
                 data = fh.read()
                 count = len(data) / 4
                 hidden = frozenset(struct.unpack('>%ii' % count, data))
         return hidden
     finally:
         if fh:
             fh.close()
 
 def computehidden(repo):
     """compute the set of hidden revision to filter
 
     During most operation hidden should be filtered."""
     assert not repo.changelog.filteredrevs
 
     hidden = frozenset()
     hideable = hideablerevs(repo)
     if hideable:
         cl = repo.changelog
         hidden = tryreadcache(repo, hideable)
         if hidden is None:
             hidden = frozenset(_getstatichidden(repo))
             trywritehiddencache(repo, hideable, hidden)
 
         # check if we have wd parents, bookmarks or tags pointing to hidden
         # changesets and remove those.
         dynamic = hidden & _getdynamicblockers(repo)
         if dynamic:
             blocked = cl.ancestors(dynamic, inclusive=True)
             hidden = frozenset(r for r in hidden if r not in blocked)
     return hidden
 
 def computeunserved(repo):
     """compute the set of revision that should be filtered when used a server
 
     Secret and hidden changeset should not pretend to be here."""
     assert not repo.changelog.filteredrevs
     # fast path in simple case to avoid impact of non optimised code
     hiddens = filterrevs(repo, 'visible')
     if phases.hassecret(repo):
         cl = repo.changelog
         secret = phases.secret
         getphase = repo._phasecache.phase
         first = min(cl.rev(n) for n in repo._phasecache.phaseroots[secret])
         revs = cl.revs(start=first)
         secrets = set(r for r in revs if getphase(repo, r) >= secret)
         return frozenset(hiddens | secrets)
     else:
         return hiddens
 
 def computemutable(repo):
     """compute the set of revision that should be filtered when used a server
 
     Secret and hidden changeset should not pretend to be here."""
     assert not repo.changelog.filteredrevs
     # fast check to avoid revset call on huge repo
     if any(repo._phasecache.phaseroots[1:]):
         getphase = repo._phasecache.phase
         maymutable = filterrevs(repo, 'base')
         return frozenset(r for r in maymutable if getphase(repo, r))
     return frozenset()
 
 def computeimpactable(repo):
     """Everything impactable by mutable revision
 
     The immutable filter still have some chance to get invalidated. This will
     happen when:
 
     - you garbage collect hidden changeset,
     - public phase is moved backward,
     - something is changed in the filtering (this could be fixed)
 
     This filter out any mutable changeset and any public changeset that may be
     impacted by something happening to a mutable revision.
 
     This is achieved by filtered everything with a revision number egal or
     higher than the first mutable changeset is filtered."""
     assert not repo.changelog.filteredrevs
     cl = repo.changelog
     firstmutable = len(cl)
     for roots in repo._phasecache.phaseroots[1:]:
         if roots:
             firstmutable = min(firstmutable, min(cl.rev(r) for r in roots))
     # protect from nullrev root
     firstmutable = max(0, firstmutable)
     return frozenset(xrange(firstmutable, len(cl)))
 
 # function to compute filtered set
 #
 # When adding a new filter you MUST update the table at:
 #     mercurial.branchmap.subsettable
 # Otherwise your filter will have to recompute all its branches cache
 # from scratch (very slow).
 filtertable = {'visible': computehidden,
                'served': computeunserved,
                'immutable': computemutable,
                'base': computeimpactable}
 
 def filterrevs(repo, filtername):
     """returns set of filtered revision for this filter name"""
     if filtername not in repo.filteredrevcache:
         func = filtertable[filtername]
         repo.filteredrevcache[filtername] = func(repo.unfiltered())
     return repo.filteredrevcache[filtername]
 
 class repoview(object):
     """Provide a read/write view of a repo through a filtered changelog
 
     This object is used to access a filtered version of a repository without
     altering the original repository object itself. We can not alter the
     original object for two main reasons:
     - It prevents the use of a repo with multiple filters at the same time. In
       particular when multiple threads are involved.
     - It makes scope of the filtering harder to control.
 
     This object behaves very closely to the original repository. All attribute
     operations are done on the original repository:
     - An access to `repoview.someattr` actually returns `repo.someattr`,
     - A write to `repoview.someattr` actually sets value of `repo.someattr`,
     - A deletion of `repoview.someattr` actually drops `someattr`
       from `repo.__dict__`.
 
     The only exception is the `changelog` property. It is overridden to return
     a (surface) copy of `repo.changelog` with some revisions filtered. The
     `filtername` attribute of the view control the revisions that need to be
     filtered. (the fact the changelog is copied is an implementation detail).
 
     Unlike attributes, this object intercepts all method calls. This means that
     all methods are run on the `repoview` object with the filtered `changelog`
     property. For this purpose the simple `repoview` class must be mixed with
     the actual class of the repository. This ensures that the resulting
     `repoview` object have the very same methods than the repo object. This
     leads to the property below.
 
         repoview.method() --> repo.__class__.method(repoview)
 
     The inheritance has to be done dynamically because `repo` can be of any
     subclasses of `localrepo`. Eg: `bundlerepo` or `statichttprepo`.
     """
 
     def __init__(self, repo, filtername):
         object.__setattr__(self, '_unfilteredrepo', repo)
         object.__setattr__(self, 'filtername', filtername)
         object.__setattr__(self, '_clcachekey', None)
         object.__setattr__(self, '_clcache', None)
 
     # not a propertycache on purpose we shall implement a proper cache later
     @property
     def changelog(self):
         """return a filtered version of the changeset
 
         this changelog must not be used for writing"""
         # some cache may be implemented later
         unfi = self._unfilteredrepo
         unfichangelog = unfi.changelog
         revs = filterrevs(unfi, self.filtername)
         cl = self._clcache
         newkey = (len(unfichangelog), unfichangelog.tip(), hash(revs),
                   unfichangelog._delayed)
         if cl is not None:
             # we need to check curkey too for some obscure reason.
             # MQ test show a corruption of the underlying repo (in _clcache)
             # without change in the cachekey.
             oldfilter = cl.filteredrevs
             try:
                 cl.filteredrevs = () # disable filtering for tip
                 curkey = (len(cl), cl.tip(), hash(oldfilter), cl._delayed)
             finally:
                 cl.filteredrevs = oldfilter
             if newkey != self._clcachekey or newkey != curkey:
                 cl = None
         # could have been made None by the previous if
         if cl is None:
             cl = copy.copy(unfichangelog)
             cl.filteredrevs = revs
             object.__setattr__(self, '_clcache', cl)
             object.__setattr__(self, '_clcachekey', newkey)
         return cl
 
     def unfiltered(self):
         """Return an unfiltered version of a repo"""
         return self._unfilteredrepo
 
     def filtered(self, name):
         """Return a filtered version of a repository"""
         if name == self.filtername:
             return self
         return self.unfiltered().filtered(name)
 
     # everything access are forwarded to the proxied repo
     def __getattr__(self, attr):
         return getattr(self._unfilteredrepo, attr)
 
     def __setattr__(self, attr, value):
         return setattr(self._unfilteredrepo, attr, value)
 
     def __delattr__(self, attr):
         return delattr(self._unfilteredrepo, attr)
 
     # The `requirements` attribute is initialized during __init__. But
     # __getattr__ won't be called as it also exists on the class. We need
     # explicit forwarding to main repo here
     @property
     def requirements(self):
         return self._unfilteredrepo.requirements
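
The cache layout documented in trywritehiddencache above (a 22-byte head of a 2-byte version plus a 20-byte sha1, followed by 4-byte big-endian revision numbers) can be exercised with nothing but the struct module. A minimal sketch; the revision numbers and the zeroed hash are made-up stand-ins, not values produced by a real repository:

import struct

cacheversion = 1
hidden = [5, 9, 12]                # hypothetical hidden revision numbers
sha = b'\x00' * 20                 # stand-in for the 20-byte cachehash() digest

# Write side: 2-byte version, 20-byte hash, then n 4-byte big-endian ints,
# mirroring _writehiddencache().
payload = (struct.pack('>H', cacheversion) + sha +
           struct.pack('>%ii' % len(hidden), *sorted(hidden)))

# Read side: the same steps tryreadcache() performs on the cache file.
version, = struct.unpack('>H', payload[:2])
oldhash = payload[2:22]
data = payload[22:]
count = len(data) // 4             # the module itself uses "/", fine on Python 2
revs = frozenset(struct.unpack('>%ii' % count, data))
assert (version, revs) == (cacheversion, frozenset(hidden))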
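
The repoview docstring describes mixing the proxy class with the concrete repository class at runtime so that inherited methods see the filtered changelog (repoview.method() --> repo.__class__.method(repoview)). A simplified, self-contained sketch of that idea; the classes below are illustrative stand-ins, not Mercurial APIs, and the real mixing happens where localrepo builds its filtered proxy class:

class baserepo(object):
    """Stand-in for a concrete localrepo subclass."""
    def tip(self):
        # Inherited methods read self.changelog, whichever class provides it.
        return self.changelog[-1]

class view(object):
    """Stand-in for repoview: forwards attributes, overrides changelog."""
    def __init__(self, repo):
        object.__setattr__(self, '_unfilteredrepo', repo)

    @property
    def changelog(self):
        # Pretend odd revisions are filtered out.
        return [r for r in self._unfilteredrepo.changelog if r % 2 == 0]

    def __getattr__(self, attr):
        return getattr(self._unfilteredrepo, attr)

# Dynamic mix-in: baserepo methods now run against the view instance, so
# self.changelog inside tip() resolves to the filtering property above.
filteredcls = type('filteredbaserepo', (view, baserepo), {})

repo = baserepo()
repo.changelog = [0, 1, 2, 3, 4]
assert filteredcls(repo).tip() == 4   # tip() only sees the filtered revisions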