##// END OF EJS Templates
repoview: add a new filtername for accessing hidden commits...
Pulkit Goyal -
r35511:07fdac1d default
parent child Browse files
Show More
@@ -1,522 +1,523
1 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
1 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import struct
10 import struct
11
11
12 from .node import (
12 from .node import (
13 bin,
13 bin,
14 hex,
14 hex,
15 nullid,
15 nullid,
16 nullrev,
16 nullrev,
17 )
17 )
18 from . import (
18 from . import (
19 encoding,
19 encoding,
20 error,
20 error,
21 scmutil,
21 scmutil,
22 util,
22 util,
23 )
23 )
24
24
25 calcsize = struct.calcsize
25 calcsize = struct.calcsize
26 pack_into = struct.pack_into
26 pack_into = struct.pack_into
27 unpack_from = struct.unpack_from
27 unpack_from = struct.unpack_from
28
28
29 def _filename(repo):
29 def _filename(repo):
30 """name of a branchcache file for a given repo or repoview"""
30 """name of a branchcache file for a given repo or repoview"""
31 filename = "branch2"
31 filename = "branch2"
32 if repo.filtername:
32 if repo.filtername:
33 filename = '%s-%s' % (filename, repo.filtername)
33 filename = '%s-%s' % (filename, repo.filtername)
34 return filename
34 return filename
35
35
def read(repo):
    """Load the on-disk branch cache for *repo*, or None if unusable.

    Returns a ``branchcache`` instance, or None when the cache file is
    missing, stale (tip moved or filtered set changed), or corrupt.
    Corruption details are only reported when --debug is in effect.
    """
    try:
        fp = repo.cachevfs(_filename(repo))
        lines = fp.read().split('\n')
        fp.close()
    except (IOError, OSError):
        # no readable cache file: caller recomputes from scratch
        return None

    try:
        # first line: "<tipnode hex> <tiprev> [filteredhash hex]"
        cachekey = lines.pop(0).split(" ", 2)
        last, lrev = bin(cachekey[0]), int(cachekey[1])
        filteredhash = bin(cachekey[2]) if len(cachekey) > 2 else None
        cache = branchcache(tipnode=last, tiprev=lrev,
                            filteredhash=filteredhash)
        if not cache.validfor(repo):
            # invalidate the cache
            raise ValueError('tip differs')
        cl = repo.changelog
        # remaining lines: "<head hex node> <o|c> <branch name>"
        for line in lines:
            if not line:
                continue
            node, state, label = line.split(" ", 2)
            if state not in 'oc':
                raise ValueError('invalid branch state')
            label = encoding.tolocal(label.strip())
            node = bin(node)
            if not cl.hasnode(node):
                raise ValueError('node %s does not exist' % hex(node))
            cache.setdefault(label, []).append(node)
            if state == 'c':
                cache._closednodes.add(node)
    except Exception as inst:
        # any parse/validation failure just discards the cache
        if repo.ui.debugflag:
            msg = 'invalid branchheads cache'
            if repo.filtername is not None:
                msg += ' (%s)' % repo.filtername
            msg += ': %s\n'
            repo.ui.debug(msg % inst)
        cache = None
    return cache
79
79
### Nearest subset relation
# The nearest subset of filter X is a filter Y such that:
# * Y is included in X,
# * X - Y is as small as possible.
# This creates an ordering used for branchmap purposes.
# The ordering may be partial.
subsettable = {
    None: 'visible',
    'visible-hidden': 'visible',
    'visible': 'served',
    'served': 'immutable',
    'immutable': 'base',
}
90
91
def updatecache(repo):
    """Bring the branch cache for repo's current view up to date.

    Lookup order: the in-memory cache (if still valid), then the on-disk
    cache, then a copy of the nearest subset's branchmap (see
    ``subsettable``), and finally an empty cache. Missing revisions are
    then folded in; when anything changed the cache is written back to
    disk. The result is stored on ``repo._branchcaches``.
    """
    cl = repo.changelog
    filtername = repo.filtername
    bcache = repo._branchcaches.get(filtername)

    revs = []
    if bcache is None or not bcache.validfor(repo):
        bcache = read(repo)
        if bcache is None:
            subsetname = subsettable.get(filtername)
            if subsetname is None:
                # no subset to seed from: start empty
                bcache = branchcache()
            else:
                # seed from the nearest subset and replay the extra revs
                subset = repo.filtered(subsetname)
                bcache = subset.branchmap().copy()
                extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
                revs.extend(r for r in extrarevs if r <= bcache.tiprev)
    revs.extend(cl.revs(start=bcache.tiprev + 1))
    if revs:
        bcache.update(repo, revs)
        bcache.write(repo)

    assert bcache.validfor(repo), filtername
    repo._branchcaches[repo.filtername] = bcache
115
116
def replacecache(repo, bm):
    """Replace the branchmap cache for a repo with a branch mapping.

    This is likely only called during clone with a branch map from a
    remote.
    """
    allheads = []
    closed = []
    for branchheads in bm.itervalues():
        allheads.extend(branchheads)
        for head in branchheads:
            rev = repo.changelog.rev(head)
            branch, closes = repo.changelog.branchinfo(rev)
            if closes:
                closed.append(head)

    if allheads:
        rtiprev = max(int(repo.changelog.rev(node)) for node in allheads)
        cache = branchcache(bm,
                            repo[rtiprev].node(),
                            rtiprev,
                            closednodes=closed)

        # Try to stick it as low as possible
        # filters above "served" are unlikely to be fetched from a clone
        for candidate in ('base', 'immutable', 'served'):
            rview = repo.filtered(candidate)
            if cache.validfor(rview):
                repo._branchcaches[candidate] = cache
                cache.write(rview)
                break
147
148
class branchcache(dict):
    """A dict like object that hold branches heads cache.

    This cache is used to avoid costly computations to determine all the
    branch heads of a repo.

    The cache is serialized on disk in the following format:

    <tip hex node> <tip rev number> [optional filtered repo hex hash]
    <branch head hex node> <open/closed state> <branch name>
    <branch head hex node> <open/closed state> <branch name>
    ...

    The first line is used to check if the cache is still valid. If the
    branch cache is for a filtered repo view, an optional third hash is
    included that hashes the hashes of all filtered revisions.

    The open/closed state is represented by a single letter 'o' or 'c'.
    This field can be used to avoid changelog reads when determining if a
    branch head closes a branch or not.
    """

    def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
                 filteredhash=None, closednodes=None):
        super(branchcache, self).__init__(entries)
        # cache key: node/rev of the tip at the time the cache was built
        self.tipnode = tipnode
        self.tiprev = tiprev
        self.filteredhash = filteredhash
        # closednodes is a set of nodes that close their branch. If the branch
        # cache has been updated, it may contain nodes that are no longer
        # heads.
        if closednodes is None:
            self._closednodes = set()
        else:
            self._closednodes = closednodes

    def validfor(self, repo):
        """Is the cache content valid regarding a repo

        - False when cached tipnode is unknown or if we detect a strip.
        - True when cache is up to date or a subset of current repo."""
        try:
            # both the tip node and the filtered-set hash must still match
            return ((self.tipnode == repo.changelog.node(self.tiprev))
                    and (self.filteredhash == \
                         scmutil.filteredhash(repo, self.tiprev)))
        except IndexError:
            # tiprev no longer exists (e.g. after a strip)
            return False

    def _branchtip(self, heads):
        '''Return tuple with last open head in heads and false,
        otherwise return last closed head and true.'''
        tip = heads[-1]
        closed = True
        # scan from the newest head backwards for the first open one
        for h in reversed(heads):
            if h not in self._closednodes:
                tip = h
                closed = False
                break
        return tip, closed

    def branchtip(self, branch):
        '''Return the tipmost open head on branch head, otherwise return the
        tipmost closed head on branch.
        Raise KeyError for unknown branch.'''
        return self._branchtip(self[branch])[0]

    def iteropen(self, nodes):
        # lazily filter out heads that close their branch
        return (n for n in nodes if n not in self._closednodes)

    def branchheads(self, branch, closed=False):
        # with closed=False (default), branch-closing heads are dropped
        heads = self[branch]
        if not closed:
            heads = list(self.iteropen(heads))
        return heads

    def iterbranches(self):
        # yields (branchname, heads, tipnode, isclosed) 4-tuples
        for bn, heads in self.iteritems():
            yield (bn, heads) + self._branchtip(heads)

    def copy(self):
        """return an deep copy of the branchcache object"""
        return branchcache(self, self.tipnode, self.tiprev, self.filteredhash,
                           self._closednodes)

    def write(self, repo):
        # serialize to the per-view cache file (see class docstring for
        # the format); failures are logged and otherwise ignored
        try:
            f = repo.cachevfs(_filename(repo), "w", atomictemp=True)
            # cache key line: "<tipnode> <tiprev> [filteredhash]"
            cachekey = [hex(self.tipnode), '%d' % self.tiprev]
            if self.filteredhash is not None:
                cachekey.append(hex(self.filteredhash))
            f.write(" ".join(cachekey) + '\n')
            nodecount = 0
            for label, nodes in sorted(self.iteritems()):
                for node in nodes:
                    nodecount += 1
                    if node in self._closednodes:
                        state = 'c'
                    else:
                        state = 'o'
                    f.write("%s %s %s\n" % (hex(node), state,
                                            encoding.fromlocal(label)))
            f.close()
            repo.ui.log('branchcache',
                        'wrote %s branch cache with %d labels and %d nodes\n',
                        repo.filtername, len(self), nodecount)
        except (IOError, OSError, error.Abort) as inst:
            # Abort may be raised by read only opener, so log and continue
            repo.ui.debug("couldn't write branch cache: %s\n" % inst)

    def update(self, repo, revgen):
        """Given a branchhead cache, self, that may have extra nodes or be
        missing heads, and a generator of nodes that are strictly a superset of
        heads missing, this function updates self to be correct.
        """
        # NOTE(review): despite the docstring saying "nodes", revgen yields
        # revision numbers (ints) — they are passed to getbranchinfo(r) and
        # cl.node(r) below. Confirm against callers.
        starttime = util.timer()
        cl = repo.changelog
        # collect new branch entries
        newbranches = {}
        getbranchinfo = repo.revbranchcache().branchinfo
        for r in revgen:
            branch, closesbranch = getbranchinfo(r)
            newbranches.setdefault(branch, []).append(r)
            if closesbranch:
                self._closednodes.add(cl.node(r))

        # fetch current topological heads to speed up filtering
        topoheads = set(cl.headrevs())

        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newheadrevs in newbranches.iteritems():
            bheads = self.setdefault(branch, [])
            bheadset = set(cl.rev(node) for node in bheads)

            # This have been tested True on all internal usage of this function.
            # run it again in case of doubt
            # assert not (set(bheadrevs) & set(newheadrevs))
            newheadrevs.sort()
            bheadset.update(newheadrevs)

            # This prunes out two kinds of heads - heads that are superseded by
            # a head in newheadrevs, and newheadrevs that are not heads because
            # an existing head is their descendant.
            uncertain = bheadset - topoheads
            if uncertain:
                floorrev = min(uncertain)
                ancestors = set(cl.ancestors(newheadrevs, floorrev))
                bheadset -= ancestors
            # store heads back in topological (rev) order
            bheadrevs = sorted(bheadset)
            self[branch] = [cl.node(rev) for rev in bheadrevs]
            tiprev = bheadrevs[-1]
            if tiprev > self.tiprev:
                self.tipnode = cl.node(tiprev)
                self.tiprev = tiprev

        if not self.validfor(repo):
            # cache key are not valid anymore
            self.tipnode = nullid
            self.tiprev = nullrev
            # recompute the tip from the heads we actually hold
            for heads in self.values():
                tiprev = max(cl.rev(node) for node in heads)
                if tiprev > self.tiprev:
                    self.tipnode = cl.node(tiprev)
                    self.tiprev = tiprev
        self.filteredhash = scmutil.filteredhash(repo, self.tiprev)

        duration = util.timer() - starttime
        repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
                    repo.filtername, duration)
318
319
# Revision branch info cache
#
# Constants for the rev -> (branch, close-flag) cache implemented by
# revbranchcache below.

_rbcversion = '-v1'
_rbcnames = 'rbc-names%s' % _rbcversion
_rbcrevs = 'rbc-revs%s' % _rbcversion
# record layout:
# [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
_rbcrecfmt = '>4sI'
_rbcrecsize = struct.calcsize(_rbcrecfmt)
_rbcnodelen = 4
_rbcbranchidxmask = 0x7fffffff
_rbccloseflag = 0x80000000
330
331
class revbranchcache(object):
    """Persistent cache, mapping from revision number to branch name and close.
    This is a low level cache, independent of filtering.

    Branch names are stored in rbc-names in internal encoding separated by 0.
    rbc-names is append-only, and each branch name is only stored once and will
    thus have a unique index.

    The branch info for each revision is stored in rbc-revs as constant size
    records. The whole file is read into memory, but it is only 'parsed' on
    demand. The file is usually append-only but will be truncated if repo
    modification is detected.
    The record for each revision contains the first 4 bytes of the
    corresponding node hash, and the record is only used if it still matches.
    Even a completely trashed rbc-revs fill thus still give the right result
    while converging towards full recovery ... assuming no incorrectly matching
    node hashes.
    The record also contains 4 bytes where 31 bits contains the index of the
    branch and the last bit indicate that it is a branch close commit.
    The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
    and will grow with it but be 1/8th of its size.
    """

    def __init__(self, repo, readonly=True):
        # only valid on the unfiltered repo (cache is filtering-independent)
        assert repo.filtername is None
        self._repo = repo
        self._names = [] # branch names in local encoding with static index
        self._rbcrevs = bytearray()
        self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
        try:
            bndata = repo.cachevfs.read(_rbcnames)
            self._rbcsnameslen = len(bndata) # for verification before writing
            if bndata:
                self._names = [encoding.tolocal(bn)
                               for bn in bndata.split('\0')]
        except (IOError, OSError):
            if readonly:
                # don't try to use cache - fall back to the slow path
                self.branchinfo = self._branchinfo

        if self._names:
            try:
                data = repo.cachevfs.read(_rbcrevs)
                self._rbcrevs[:] = data
            except (IOError, OSError) as inst:
                repo.ui.debug("couldn't read revision branch cache: %s\n" %
                              inst)
        # remember number of good records on disk
        self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
                               len(repo.changelog))
        if self._rbcrevslen == 0:
            self._names = []
        self._rbcnamescount = len(self._names) # number of names read at
                                               # _rbcsnameslen
        # reverse lookup: branch name -> index in self._names
        self._namesreverse = dict((b, r) for r, b in enumerate(self._names))

    def _clear(self):
        # discard everything and start over with a zero-filled record area
        self._rbcsnameslen = 0
        del self._names[:]
        self._rbcnamescount = 0
        self._namesreverse.clear()
        self._rbcrevslen = len(self._repo.changelog)
        self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)

    def branchinfo(self, rev):
        """Return branch name and close flag for rev, using and updating
        persistent cache."""
        changelog = self._repo.changelog
        rbcrevidx = rev * _rbcrecsize

        # avoid negative index, changelog.read(nullrev) is fast without cache
        if rev == nullrev:
            return changelog.branchinfo(rev)

        # if requested rev isn't allocated, grow and cache the rev info
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            return self._branchinfo(rev)

        # fast path: extract data from cache, use it if node is matching
        reponode = changelog.node(rev)[:_rbcnodelen]
        cachenode, branchidx = unpack_from(
            _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx)
        close = bool(branchidx & _rbccloseflag)
        if close:
            branchidx &= _rbcbranchidxmask
        if cachenode == '\0\0\0\0':
            # all-zero hash prefix: record was allocated but never filled in;
            # fall through to the slow path below
            pass
        elif cachenode == reponode:
            try:
                return self._names[branchidx], close
            except IndexError:
                # recover from invalid reference to unknown branch
                self._repo.ui.debug("referenced branch names not found"
                    " - rebuilding revision branch cache from scratch\n")
                self._clear()
        else:
            # rev/node map has changed, invalidate the cache from here up
            self._repo.ui.debug("history modification detected - truncating "
                "revision branch cache to revision %d\n" % rev)
            truncate = rbcrevidx + _rbcrecsize
            del self._rbcrevs[truncate:]
            self._rbcrevslen = min(self._rbcrevslen, truncate)

        # fall back to slow path and make sure it will be written to disk
        return self._branchinfo(rev)

    def _branchinfo(self, rev):
        """Retrieve branch info from changelog and update _rbcrevs"""
        changelog = self._repo.changelog
        b, close = changelog.branchinfo(rev)
        if b in self._namesreverse:
            branchidx = self._namesreverse[b]
        else:
            # first time we see this branch name: assign the next index
            branchidx = len(self._names)
            self._names.append(b)
            self._namesreverse[b] = branchidx
        reponode = changelog.node(rev)
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, reponode, branchidx)
        return b, close

    def _setcachedata(self, rev, node, branchidx):
        """Writes the node's branch data to the in-memory cache data."""
        if rev == nullrev:
            return
        rbcrevidx = rev * _rbcrecsize
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            # zero-fill up to the current changelog length so the record
            # at rbcrevidx can be packed in place
            self._rbcrevs.extend('\0' *
                                 (len(self._repo.changelog) * _rbcrecsize -
                                  len(self._rbcrevs)))
        pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
        # everything from rev onward is now dirty relative to disk
        self._rbcrevslen = min(self._rbcrevslen, rev)

        tr = self._repo.currenttransaction()
        if tr:
            tr.addfinalize('write-revbranchcache', self.write)

    def write(self, tr=None):
        """Save branch cache if it is dirty."""
        repo = self._repo
        wlock = None
        step = ''
        try:
            # step 1: append (or rewrite) new branch names to rbc-names
            if self._rbcnamescount < len(self._names):
                step = ' names'
                wlock = repo.wlock(wait=False)
                if self._rbcnamescount != 0:
                    f = repo.cachevfs.open(_rbcnames, 'ab')
                    if f.tell() == self._rbcsnameslen:
                        f.write('\0')
                    else:
                        # on-disk file no longer matches what we read:
                        # rewrite both files from scratch
                        f.close()
                        repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
                        self._rbcnamescount = 0
                        self._rbcrevslen = 0
                if self._rbcnamescount == 0:
                    # before rewriting names, make sure references are removed
                    repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
                    f = repo.cachevfs.open(_rbcnames, 'wb')
                f.write('\0'.join(encoding.fromlocal(b)
                                  for b in self._names[self._rbcnamescount:]))
                self._rbcsnameslen = f.tell()
                f.close()
                self._rbcnamescount = len(self._names)

            # step 2: append dirty records to rbc-revs
            start = self._rbcrevslen * _rbcrecsize
            if start != len(self._rbcrevs):
                step = ''
                if wlock is None:
                    wlock = repo.wlock(wait=False)
                revs = min(len(repo.changelog),
                           len(self._rbcrevs) // _rbcrecsize)
                f = repo.cachevfs.open(_rbcrevs, 'ab')
                if f.tell() != start:
                    # the on-disk file is not where we expect: truncate and
                    # rewrite from the last known-good position (or from 0)
                    repo.ui.debug("truncating cache/%s to %d\n"
                                  % (_rbcrevs, start))
                    f.seek(start)
                    if f.tell() != start:
                        start = 0
                        f.seek(start)
                    f.truncate()
                end = revs * _rbcrecsize
                f.write(self._rbcrevs[start:end])
                f.close()
                self._rbcrevslen = revs
        except (IOError, OSError, error.Abort, error.LockError) as inst:
            # best-effort cache: failing to write is not fatal
            repo.ui.debug("couldn't write revision branch cache%s: %s\n"
                          % (step, inst))
        finally:
            if wlock is not None:
                wlock.release()
@@ -1,273 +1,274
1 # repoview.py - Filtered view of a localrepo object
1 # repoview.py - Filtered view of a localrepo object
2 #
2 #
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 # Logilab SA <contact@logilab.fr>
4 # Logilab SA <contact@logilab.fr>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import copy
11 import copy
12 import weakref
12 import weakref
13
13
14 from .node import nullrev
14 from .node import nullrev
15 from . import (
15 from . import (
16 obsolete,
16 obsolete,
17 phases,
17 phases,
18 pycompat,
18 pycompat,
19 tags as tagsmod,
19 tags as tagsmod,
20 )
20 )
21
21
22 def hideablerevs(repo):
22 def hideablerevs(repo):
23 """Revision candidates to be hidden
23 """Revision candidates to be hidden
24
24
25 This is a standalone function to allow extensions to wrap it.
25 This is a standalone function to allow extensions to wrap it.
26
26
27 Because we use the set of immutable changesets as a fallback subset in
27 Because we use the set of immutable changesets as a fallback subset in
28 branchmap (see mercurial.branchmap.subsettable), you cannot set "public"
28 branchmap (see mercurial.branchmap.subsettable), you cannot set "public"
29 changesets as "hideable". Doing so would break multiple code assertions and
29 changesets as "hideable". Doing so would break multiple code assertions and
30 lead to crashes."""
30 lead to crashes."""
31 return obsolete.getrevs(repo, 'obsolete')
31 return obsolete.getrevs(repo, 'obsolete')
32
32
33 def pinnedrevs(repo):
33 def pinnedrevs(repo):
34 """revisions blocking hidden changesets from being filtered
34 """revisions blocking hidden changesets from being filtered
35 """
35 """
36
36
37 cl = repo.changelog
37 cl = repo.changelog
38 pinned = set()
38 pinned = set()
39 pinned.update([par.rev() for par in repo[None].parents()])
39 pinned.update([par.rev() for par in repo[None].parents()])
40 pinned.update([cl.rev(bm) for bm in repo._bookmarks.values()])
40 pinned.update([cl.rev(bm) for bm in repo._bookmarks.values()])
41
41
42 tags = {}
42 tags = {}
43 tagsmod.readlocaltags(repo.ui, repo, tags, {})
43 tagsmod.readlocaltags(repo.ui, repo, tags, {})
44 if tags:
44 if tags:
45 rev, nodemap = cl.rev, cl.nodemap
45 rev, nodemap = cl.rev, cl.nodemap
46 pinned.update(rev(t[0]) for t in tags.values() if t[0] in nodemap)
46 pinned.update(rev(t[0]) for t in tags.values() if t[0] in nodemap)
47 return pinned
47 return pinned
48
48
49
49
50 def _revealancestors(pfunc, hidden, revs):
50 def _revealancestors(pfunc, hidden, revs):
51 """reveals contiguous chains of hidden ancestors of 'revs' by removing them
51 """reveals contiguous chains of hidden ancestors of 'revs' by removing them
52 from 'hidden'
52 from 'hidden'
53
53
54 - pfunc(r): a funtion returning parent of 'r',
54 - pfunc(r): a funtion returning parent of 'r',
55 - hidden: the (preliminary) hidden revisions, to be updated
55 - hidden: the (preliminary) hidden revisions, to be updated
56 - revs: iterable of revnum,
56 - revs: iterable of revnum,
57
57
58 (Ancestors are revealed exclusively, i.e. the elements in 'revs' are
58 (Ancestors are revealed exclusively, i.e. the elements in 'revs' are
59 *not* revealed)
59 *not* revealed)
60 """
60 """
61 stack = list(revs)
61 stack = list(revs)
62 while stack:
62 while stack:
63 for p in pfunc(stack.pop()):
63 for p in pfunc(stack.pop()):
64 if p != nullrev and p in hidden:
64 if p != nullrev and p in hidden:
65 hidden.remove(p)
65 hidden.remove(p)
66 stack.append(p)
66 stack.append(p)
67
67
68 def computehidden(repo, visibilityexceptions=None):
68 def computehidden(repo, visibilityexceptions=None):
69 """compute the set of hidden revision to filter
69 """compute the set of hidden revision to filter
70
70
71 During most operation hidden should be filtered."""
71 During most operation hidden should be filtered."""
72 assert not repo.changelog.filteredrevs
72 assert not repo.changelog.filteredrevs
73
73
74 hidden = hideablerevs(repo)
74 hidden = hideablerevs(repo)
75 if hidden:
75 if hidden:
76 hidden = set(hidden - pinnedrevs(repo))
76 hidden = set(hidden - pinnedrevs(repo))
77 if visibilityexceptions:
77 if visibilityexceptions:
78 hidden -= visibilityexceptions
78 hidden -= visibilityexceptions
79 pfunc = repo.changelog.parentrevs
79 pfunc = repo.changelog.parentrevs
80 mutablephases = (phases.draft, phases.secret)
80 mutablephases = (phases.draft, phases.secret)
81 mutable = repo._phasecache.getrevset(repo, mutablephases)
81 mutable = repo._phasecache.getrevset(repo, mutablephases)
82
82
83 visible = mutable - hidden
83 visible = mutable - hidden
84 _revealancestors(pfunc, hidden, visible)
84 _revealancestors(pfunc, hidden, visible)
85 return frozenset(hidden)
85 return frozenset(hidden)
86
86
87 def computeunserved(repo, visibilityexceptions=None):
87 def computeunserved(repo, visibilityexceptions=None):
88 """compute the set of revision that should be filtered when used a server
88 """compute the set of revision that should be filtered when used a server
89
89
90 Secret and hidden changeset should not pretend to be here."""
90 Secret and hidden changeset should not pretend to be here."""
91 assert not repo.changelog.filteredrevs
91 assert not repo.changelog.filteredrevs
92 # fast path in simple case to avoid impact of non optimised code
92 # fast path in simple case to avoid impact of non optimised code
93 hiddens = filterrevs(repo, 'visible')
93 hiddens = filterrevs(repo, 'visible')
94 if phases.hassecret(repo):
94 if phases.hassecret(repo):
95 cl = repo.changelog
95 cl = repo.changelog
96 secret = phases.secret
96 secret = phases.secret
97 getphase = repo._phasecache.phase
97 getphase = repo._phasecache.phase
98 first = min(cl.rev(n) for n in repo._phasecache.phaseroots[secret])
98 first = min(cl.rev(n) for n in repo._phasecache.phaseroots[secret])
99 revs = cl.revs(start=first)
99 revs = cl.revs(start=first)
100 secrets = set(r for r in revs if getphase(repo, r) >= secret)
100 secrets = set(r for r in revs if getphase(repo, r) >= secret)
101 return frozenset(hiddens | secrets)
101 return frozenset(hiddens | secrets)
102 else:
102 else:
103 return hiddens
103 return hiddens
104
104
105 def computemutable(repo, visibilityexceptions=None):
105 def computemutable(repo, visibilityexceptions=None):
106 assert not repo.changelog.filteredrevs
106 assert not repo.changelog.filteredrevs
107 # fast check to avoid revset call on huge repo
107 # fast check to avoid revset call on huge repo
108 if any(repo._phasecache.phaseroots[1:]):
108 if any(repo._phasecache.phaseroots[1:]):
109 getphase = repo._phasecache.phase
109 getphase = repo._phasecache.phase
110 maymutable = filterrevs(repo, 'base')
110 maymutable = filterrevs(repo, 'base')
111 return frozenset(r for r in maymutable if getphase(repo, r))
111 return frozenset(r for r in maymutable if getphase(repo, r))
112 return frozenset()
112 return frozenset()
113
113
114 def computeimpactable(repo, visibilityexceptions=None):
114 def computeimpactable(repo, visibilityexceptions=None):
115 """Everything impactable by mutable revision
115 """Everything impactable by mutable revision
116
116
117 The immutable filter still have some chance to get invalidated. This will
117 The immutable filter still have some chance to get invalidated. This will
118 happen when:
118 happen when:
119
119
120 - you garbage collect hidden changeset,
120 - you garbage collect hidden changeset,
121 - public phase is moved backward,
121 - public phase is moved backward,
122 - something is changed in the filtering (this could be fixed)
122 - something is changed in the filtering (this could be fixed)
123
123
124 This filter out any mutable changeset and any public changeset that may be
124 This filter out any mutable changeset and any public changeset that may be
125 impacted by something happening to a mutable revision.
125 impacted by something happening to a mutable revision.
126
126
127 This is achieved by filtered everything with a revision number egal or
127 This is achieved by filtered everything with a revision number egal or
128 higher than the first mutable changeset is filtered."""
128 higher than the first mutable changeset is filtered."""
129 assert not repo.changelog.filteredrevs
129 assert not repo.changelog.filteredrevs
130 cl = repo.changelog
130 cl = repo.changelog
131 firstmutable = len(cl)
131 firstmutable = len(cl)
132 for roots in repo._phasecache.phaseroots[1:]:
132 for roots in repo._phasecache.phaseroots[1:]:
133 if roots:
133 if roots:
134 firstmutable = min(firstmutable, min(cl.rev(r) for r in roots))
134 firstmutable = min(firstmutable, min(cl.rev(r) for r in roots))
135 # protect from nullrev root
135 # protect from nullrev root
136 firstmutable = max(0, firstmutable)
136 firstmutable = max(0, firstmutable)
137 return frozenset(xrange(firstmutable, len(cl)))
137 return frozenset(xrange(firstmutable, len(cl)))
138
138
139 # function to compute filtered set
139 # function to compute filtered set
140 #
140 #
141 # When adding a new filter you MUST update the table at:
141 # When adding a new filter you MUST update the table at:
142 # mercurial.branchmap.subsettable
142 # mercurial.branchmap.subsettable
143 # Otherwise your filter will have to recompute all its branches cache
143 # Otherwise your filter will have to recompute all its branches cache
144 # from scratch (very slow).
144 # from scratch (very slow).
145 filtertable = {'visible': computehidden,
145 filtertable = {'visible': computehidden,
146 'visible-hidden': computehidden,
146 'served': computeunserved,
147 'served': computeunserved,
147 'immutable': computemutable,
148 'immutable': computemutable,
148 'base': computeimpactable}
149 'base': computeimpactable}
149
150
150 def filterrevs(repo, filtername, visibilityexceptions=None):
151 def filterrevs(repo, filtername, visibilityexceptions=None):
151 """returns set of filtered revision for this filter name
152 """returns set of filtered revision for this filter name
152
153
153 visibilityexceptions is a set of revs which must are exceptions for
154 visibilityexceptions is a set of revs which must are exceptions for
154 hidden-state and must be visible. They are dynamic and hence we should not
155 hidden-state and must be visible. They are dynamic and hence we should not
155 cache it's result"""
156 cache it's result"""
156 if filtername not in repo.filteredrevcache:
157 if filtername not in repo.filteredrevcache:
157 func = filtertable[filtername]
158 func = filtertable[filtername]
158 if visibilityexceptions:
159 if visibilityexceptions:
159 return func(repo.unfiltered, visibilityexceptions)
160 return func(repo.unfiltered, visibilityexceptions)
160 repo.filteredrevcache[filtername] = func(repo.unfiltered())
161 repo.filteredrevcache[filtername] = func(repo.unfiltered())
161 return repo.filteredrevcache[filtername]
162 return repo.filteredrevcache[filtername]
162
163
163 class repoview(object):
164 class repoview(object):
164 """Provide a read/write view of a repo through a filtered changelog
165 """Provide a read/write view of a repo through a filtered changelog
165
166
166 This object is used to access a filtered version of a repository without
167 This object is used to access a filtered version of a repository without
167 altering the original repository object itself. We can not alter the
168 altering the original repository object itself. We can not alter the
168 original object for two main reasons:
169 original object for two main reasons:
169 - It prevents the use of a repo with multiple filters at the same time. In
170 - It prevents the use of a repo with multiple filters at the same time. In
170 particular when multiple threads are involved.
171 particular when multiple threads are involved.
171 - It makes scope of the filtering harder to control.
172 - It makes scope of the filtering harder to control.
172
173
173 This object behaves very closely to the original repository. All attribute
174 This object behaves very closely to the original repository. All attribute
174 operations are done on the original repository:
175 operations are done on the original repository:
175 - An access to `repoview.someattr` actually returns `repo.someattr`,
176 - An access to `repoview.someattr` actually returns `repo.someattr`,
176 - A write to `repoview.someattr` actually sets value of `repo.someattr`,
177 - A write to `repoview.someattr` actually sets value of `repo.someattr`,
177 - A deletion of `repoview.someattr` actually drops `someattr`
178 - A deletion of `repoview.someattr` actually drops `someattr`
178 from `repo.__dict__`.
179 from `repo.__dict__`.
179
180
180 The only exception is the `changelog` property. It is overridden to return
181 The only exception is the `changelog` property. It is overridden to return
181 a (surface) copy of `repo.changelog` with some revisions filtered. The
182 a (surface) copy of `repo.changelog` with some revisions filtered. The
182 `filtername` attribute of the view control the revisions that need to be
183 `filtername` attribute of the view control the revisions that need to be
183 filtered. (the fact the changelog is copied is an implementation detail).
184 filtered. (the fact the changelog is copied is an implementation detail).
184
185
185 Unlike attributes, this object intercepts all method calls. This means that
186 Unlike attributes, this object intercepts all method calls. This means that
186 all methods are run on the `repoview` object with the filtered `changelog`
187 all methods are run on the `repoview` object with the filtered `changelog`
187 property. For this purpose the simple `repoview` class must be mixed with
188 property. For this purpose the simple `repoview` class must be mixed with
188 the actual class of the repository. This ensures that the resulting
189 the actual class of the repository. This ensures that the resulting
189 `repoview` object have the very same methods than the repo object. This
190 `repoview` object have the very same methods than the repo object. This
190 leads to the property below.
191 leads to the property below.
191
192
192 repoview.method() --> repo.__class__.method(repoview)
193 repoview.method() --> repo.__class__.method(repoview)
193
194
194 The inheritance has to be done dynamically because `repo` can be of any
195 The inheritance has to be done dynamically because `repo` can be of any
195 subclasses of `localrepo`. Eg: `bundlerepo` or `statichttprepo`.
196 subclasses of `localrepo`. Eg: `bundlerepo` or `statichttprepo`.
196 """
197 """
197
198
198 def __init__(self, repo, filtername, visibilityexceptions=None):
199 def __init__(self, repo, filtername, visibilityexceptions=None):
199 object.__setattr__(self, r'_unfilteredrepo', repo)
200 object.__setattr__(self, r'_unfilteredrepo', repo)
200 object.__setattr__(self, r'filtername', filtername)
201 object.__setattr__(self, r'filtername', filtername)
201 object.__setattr__(self, r'_clcachekey', None)
202 object.__setattr__(self, r'_clcachekey', None)
202 object.__setattr__(self, r'_clcache', None)
203 object.__setattr__(self, r'_clcache', None)
203 # revs which are exceptions and must not be hidden
204 # revs which are exceptions and must not be hidden
204 object.__setattr__(self, r'_visibilityexceptions',
205 object.__setattr__(self, r'_visibilityexceptions',
205 visibilityexceptions)
206 visibilityexceptions)
206
207
207 # not a propertycache on purpose we shall implement a proper cache later
208 # not a propertycache on purpose we shall implement a proper cache later
208 @property
209 @property
209 def changelog(self):
210 def changelog(self):
210 """return a filtered version of the changeset
211 """return a filtered version of the changeset
211
212
212 this changelog must not be used for writing"""
213 this changelog must not be used for writing"""
213 # some cache may be implemented later
214 # some cache may be implemented later
214 unfi = self._unfilteredrepo
215 unfi = self._unfilteredrepo
215 unfichangelog = unfi.changelog
216 unfichangelog = unfi.changelog
216 # bypass call to changelog.method
217 # bypass call to changelog.method
217 unfiindex = unfichangelog.index
218 unfiindex = unfichangelog.index
218 unfilen = len(unfiindex) - 1
219 unfilen = len(unfiindex) - 1
219 unfinode = unfiindex[unfilen - 1][7]
220 unfinode = unfiindex[unfilen - 1][7]
220
221
221 revs = filterrevs(unfi, self.filtername, self._visibilityexceptions)
222 revs = filterrevs(unfi, self.filtername, self._visibilityexceptions)
222 cl = self._clcache
223 cl = self._clcache
223 newkey = (unfilen, unfinode, hash(revs), unfichangelog._delayed)
224 newkey = (unfilen, unfinode, hash(revs), unfichangelog._delayed)
224 # if cl.index is not unfiindex, unfi.changelog would be
225 # if cl.index is not unfiindex, unfi.changelog would be
225 # recreated, and our clcache refers to garbage object
226 # recreated, and our clcache refers to garbage object
226 if (cl is not None and
227 if (cl is not None and
227 (cl.index is not unfiindex or newkey != self._clcachekey)):
228 (cl.index is not unfiindex or newkey != self._clcachekey)):
228 cl = None
229 cl = None
229 # could have been made None by the previous if
230 # could have been made None by the previous if
230 if cl is None:
231 if cl is None:
231 cl = copy.copy(unfichangelog)
232 cl = copy.copy(unfichangelog)
232 cl.filteredrevs = revs
233 cl.filteredrevs = revs
233 object.__setattr__(self, r'_clcache', cl)
234 object.__setattr__(self, r'_clcache', cl)
234 object.__setattr__(self, r'_clcachekey', newkey)
235 object.__setattr__(self, r'_clcachekey', newkey)
235 return cl
236 return cl
236
237
237 def unfiltered(self):
238 def unfiltered(self):
238 """Return an unfiltered version of a repo"""
239 """Return an unfiltered version of a repo"""
239 return self._unfilteredrepo
240 return self._unfilteredrepo
240
241
241 def filtered(self, name, visibilityexceptions=None):
242 def filtered(self, name, visibilityexceptions=None):
242 """Return a filtered version of a repository"""
243 """Return a filtered version of a repository"""
243 if name == self.filtername and not visibilityexceptions:
244 if name == self.filtername and not visibilityexceptions:
244 return self
245 return self
245 return self.unfiltered().filtered(name, visibilityexceptions)
246 return self.unfiltered().filtered(name, visibilityexceptions)
246
247
247 def __repr__(self):
248 def __repr__(self):
248 return r'<%s:%s %r>' % (self.__class__.__name__,
249 return r'<%s:%s %r>' % (self.__class__.__name__,
249 pycompat.sysstr(self.filtername),
250 pycompat.sysstr(self.filtername),
250 self.unfiltered())
251 self.unfiltered())
251
252
252 # everything access are forwarded to the proxied repo
253 # everything access are forwarded to the proxied repo
253 def __getattr__(self, attr):
254 def __getattr__(self, attr):
254 return getattr(self._unfilteredrepo, attr)
255 return getattr(self._unfilteredrepo, attr)
255
256
256 def __setattr__(self, attr, value):
257 def __setattr__(self, attr, value):
257 return setattr(self._unfilteredrepo, attr, value)
258 return setattr(self._unfilteredrepo, attr, value)
258
259
259 def __delattr__(self, attr):
260 def __delattr__(self, attr):
260 return delattr(self._unfilteredrepo, attr)
261 return delattr(self._unfilteredrepo, attr)
261
262
262 # Python <3.4 easily leaks types via __mro__. See
263 # Python <3.4 easily leaks types via __mro__. See
263 # https://bugs.python.org/issue17950. We cache dynamically created types
264 # https://bugs.python.org/issue17950. We cache dynamically created types
264 # so they won't be leaked on every invocation of repo.filtered().
265 # so they won't be leaked on every invocation of repo.filtered().
265 _filteredrepotypes = weakref.WeakKeyDictionary()
266 _filteredrepotypes = weakref.WeakKeyDictionary()
266
267
267 def newtype(base):
268 def newtype(base):
268 """Create a new type with the repoview mixin and the given base class"""
269 """Create a new type with the repoview mixin and the given base class"""
269 if base not in _filteredrepotypes:
270 if base not in _filteredrepotypes:
270 class filteredrepo(repoview, base):
271 class filteredrepo(repoview, base):
271 pass
272 pass
272 _filteredrepotypes[base] = filteredrepo
273 _filteredrepotypes[base] = filteredrepo
273 return _filteredrepotypes[base]
274 return _filteredrepotypes[base]
General Comments 0
You need to be logged in to leave comments. Login now