##// END OF EJS Templates
rawdata: forward `rawdata` call on `manifestlog`...
marmoute -
r42949:2128c76c default
parent child Browse files
Show More
@@ -1,2100 +1,2103
1 # manifest.py - manifest revision class for mercurial
1 # manifest.py - manifest revision class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import heapq
10 import heapq
11 import itertools
11 import itertools
12 import struct
12 import struct
13 import weakref
13 import weakref
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 bin,
17 bin,
18 hex,
18 hex,
19 nullid,
19 nullid,
20 nullrev,
20 nullrev,
21 )
21 )
22 from . import (
22 from . import (
23 error,
23 error,
24 mdiff,
24 mdiff,
25 policy,
25 policy,
26 pycompat,
26 pycompat,
27 repository,
27 repository,
28 revlog,
28 revlog,
29 util,
29 util,
30 )
30 )
31 from .utils import (
31 from .utils import (
32 interfaceutil,
32 interfaceutil,
33 )
33 )
34
34
35 parsers = policy.importmod(r'parsers')
35 parsers = policy.importmod(r'parsers')
36 propertycache = util.propertycache
36 propertycache = util.propertycache
37
37
38 # Allow tests to more easily test the alternate path in manifestdict.fastdelta()
38 # Allow tests to more easily test the alternate path in manifestdict.fastdelta()
39 FASTDELTA_TEXTDIFF_THRESHOLD = 1000
39 FASTDELTA_TEXTDIFF_THRESHOLD = 1000
40
40
def _parse(data):
    """Parse manifest text, yielding (path, node, flags) tuples.

    This does a little excessive-looking precondition checking so that
    its behavior exactly matches the C counterpart, preventing surprise
    breakage for anyone developing against the pure version.
    """
    if data and data[-1:] != '\n':
        raise ValueError('Manifest did not end in a newline.')
    prev = None
    for line in data.splitlines():
        if prev is not None and prev > line:
            raise ValueError('Manifest lines not in sorted order.')
        prev = line
        path, rest = line.split('\0')
        # the 40 hex digits of the node may be followed by a one-char
        # flag; slicing covers both cases (rest[40:] is '' when absent)
        yield path, bin(rest[:40]), rest[40:]
59
59
def _text(it):
    """Render an iterable of (file, node, flags) tuples as manifest text."""
    # if this is changed to support newlines in filenames,
    # be sure to check the templates/ dir again (especially *-raw.tmpl)
    entries = [(f, hex(n), fl) for f, n, fl in it]
    _checkforbidden([f for f, hexnode, fl in entries])
    return ''.join("%s\0%s%s\n" % e for e in entries)
71
71
class lazymanifestiter(object):
    """Iterator over the filenames of a ``_lazymanifest``.

    Entries are read either from the manifest's backing byte string or,
    for added/modified files, from its ``extradata`` tuples.
    """

    def __init__(self, lm):
        self.pos = 0
        self.lm = lm

    def __iter__(self):
        return self

    def next(self):
        try:
            data, offset = self.lm._get(self.pos)
        except IndexError:
            raise StopIteration
        self.pos += 1
        if offset == -1:
            # entry lives in extradata as a (file, node, flags) tuple
            return data[0]
        # entry lives in the data string; the filename runs up to the NUL
        return data[offset:data.find('\x00', offset)]

    __next__ = next
93
93
class lazymanifestiterentries(object):
    """Iterator over (filename, node, flags) tuples of a ``_lazymanifest``."""

    def __init__(self, lm):
        self.lm = lm
        self.pos = 0

    def __iter__(self):
        return self

    def next(self):
        try:
            data, offset = self.lm._get(self.pos)
        except IndexError:
            raise StopIteration
        if offset == -1:
            # extradata entries already are (file, node, flags) tuples
            self.pos += 1
            return data
        # in-buffer entry: filename, NUL, 40 hex digits, optional flags
        zeropos = data.find('\x00', offset)
        nodeval = unhexlify(data, self.lm.extrainfo[self.pos],
                            zeropos + 1, 40)
        flags = self.lm._getflags(data, self.pos, zeropos)
        self.pos += 1
        return (data[offset:zeropos], nodeval, flags)

    __next__ = next
118
118
def unhexlify(data, extra, pos, length):
    """Decode ``length`` hex digits of ``data`` starting at ``pos``.

    If ``extra`` is nonzero its low byte is appended to the decoded
    node (used for nodes carrying an extra 21st byte).
    """
    node = bin(data[pos:pos + length])
    return node + chr(extra & 0xff) if extra else node
124
124
125 def _cmp(a, b):
125 def _cmp(a, b):
126 return (a > b) - (a < b)
126 return (a > b) - (a < b)
127
127
128 class _lazymanifest(object):
128 class _lazymanifest(object):
129 """A pure python manifest backed by a byte string. It is supplimented with
129 """A pure python manifest backed by a byte string. It is supplimented with
130 internal lists as it is modified, until it is compacted back to a pure byte
130 internal lists as it is modified, until it is compacted back to a pure byte
131 string.
131 string.
132
132
133 ``data`` is the initial manifest data.
133 ``data`` is the initial manifest data.
134
134
135 ``positions`` is a list of offsets, one per manifest entry. Positive
135 ``positions`` is a list of offsets, one per manifest entry. Positive
136 values are offsets into ``data``, negative values are offsets into the
136 values are offsets into ``data``, negative values are offsets into the
137 ``extradata`` list. When an entry is removed, its entry is dropped from
137 ``extradata`` list. When an entry is removed, its entry is dropped from
138 ``positions``. The values are encoded such that when walking the list and
138 ``positions``. The values are encoded such that when walking the list and
139 indexing into ``data`` or ``extradata`` as appropriate, the entries are
139 indexing into ``data`` or ``extradata`` as appropriate, the entries are
140 sorted by filename.
140 sorted by filename.
141
141
142 ``extradata`` is a list of (key, hash, flags) for entries that were added or
142 ``extradata`` is a list of (key, hash, flags) for entries that were added or
143 modified since the manifest was created or compacted.
143 modified since the manifest was created or compacted.
144 """
144 """
145 def __init__(self, data, positions=None, extrainfo=None, extradata=None,
145 def __init__(self, data, positions=None, extrainfo=None, extradata=None,
146 hasremovals=False):
146 hasremovals=False):
147 if positions is None:
147 if positions is None:
148 self.positions = self.findlines(data)
148 self.positions = self.findlines(data)
149 self.extrainfo = [0] * len(self.positions)
149 self.extrainfo = [0] * len(self.positions)
150 self.data = data
150 self.data = data
151 self.extradata = []
151 self.extradata = []
152 self.hasremovals = False
152 self.hasremovals = False
153 else:
153 else:
154 self.positions = positions[:]
154 self.positions = positions[:]
155 self.extrainfo = extrainfo[:]
155 self.extrainfo = extrainfo[:]
156 self.extradata = extradata[:]
156 self.extradata = extradata[:]
157 self.data = data
157 self.data = data
158 self.hasremovals = hasremovals
158 self.hasremovals = hasremovals
159
159
160 def findlines(self, data):
160 def findlines(self, data):
161 if not data:
161 if not data:
162 return []
162 return []
163 pos = data.find("\n")
163 pos = data.find("\n")
164 if pos == -1 or data[-1:] != '\n':
164 if pos == -1 or data[-1:] != '\n':
165 raise ValueError("Manifest did not end in a newline.")
165 raise ValueError("Manifest did not end in a newline.")
166 positions = [0]
166 positions = [0]
167 prev = data[:data.find('\x00')]
167 prev = data[:data.find('\x00')]
168 while pos < len(data) - 1 and pos != -1:
168 while pos < len(data) - 1 and pos != -1:
169 positions.append(pos + 1)
169 positions.append(pos + 1)
170 nexts = data[pos + 1:data.find('\x00', pos + 1)]
170 nexts = data[pos + 1:data.find('\x00', pos + 1)]
171 if nexts < prev:
171 if nexts < prev:
172 raise ValueError("Manifest lines not in sorted order.")
172 raise ValueError("Manifest lines not in sorted order.")
173 prev = nexts
173 prev = nexts
174 pos = data.find("\n", pos + 1)
174 pos = data.find("\n", pos + 1)
175 return positions
175 return positions
176
176
177 def _get(self, index):
177 def _get(self, index):
178 # get the position encoded in pos:
178 # get the position encoded in pos:
179 # positive number is an index in 'data'
179 # positive number is an index in 'data'
180 # negative number is in extrapieces
180 # negative number is in extrapieces
181 pos = self.positions[index]
181 pos = self.positions[index]
182 if pos >= 0:
182 if pos >= 0:
183 return self.data, pos
183 return self.data, pos
184 return self.extradata[-pos - 1], -1
184 return self.extradata[-pos - 1], -1
185
185
186 def _getkey(self, pos):
186 def _getkey(self, pos):
187 if pos >= 0:
187 if pos >= 0:
188 return self.data[pos:self.data.find('\x00', pos + 1)]
188 return self.data[pos:self.data.find('\x00', pos + 1)]
189 return self.extradata[-pos - 1][0]
189 return self.extradata[-pos - 1][0]
190
190
191 def bsearch(self, key):
191 def bsearch(self, key):
192 first = 0
192 first = 0
193 last = len(self.positions) - 1
193 last = len(self.positions) - 1
194
194
195 while first <= last:
195 while first <= last:
196 midpoint = (first + last)//2
196 midpoint = (first + last)//2
197 nextpos = self.positions[midpoint]
197 nextpos = self.positions[midpoint]
198 candidate = self._getkey(nextpos)
198 candidate = self._getkey(nextpos)
199 r = _cmp(key, candidate)
199 r = _cmp(key, candidate)
200 if r == 0:
200 if r == 0:
201 return midpoint
201 return midpoint
202 else:
202 else:
203 if r < 0:
203 if r < 0:
204 last = midpoint - 1
204 last = midpoint - 1
205 else:
205 else:
206 first = midpoint + 1
206 first = midpoint + 1
207 return -1
207 return -1
208
208
209 def bsearch2(self, key):
209 def bsearch2(self, key):
210 # same as the above, but will always return the position
210 # same as the above, but will always return the position
211 # done for performance reasons
211 # done for performance reasons
212 first = 0
212 first = 0
213 last = len(self.positions) - 1
213 last = len(self.positions) - 1
214
214
215 while first <= last:
215 while first <= last:
216 midpoint = (first + last)//2
216 midpoint = (first + last)//2
217 nextpos = self.positions[midpoint]
217 nextpos = self.positions[midpoint]
218 candidate = self._getkey(nextpos)
218 candidate = self._getkey(nextpos)
219 r = _cmp(key, candidate)
219 r = _cmp(key, candidate)
220 if r == 0:
220 if r == 0:
221 return (midpoint, True)
221 return (midpoint, True)
222 else:
222 else:
223 if r < 0:
223 if r < 0:
224 last = midpoint - 1
224 last = midpoint - 1
225 else:
225 else:
226 first = midpoint + 1
226 first = midpoint + 1
227 return (first, False)
227 return (first, False)
228
228
229 def __contains__(self, key):
229 def __contains__(self, key):
230 return self.bsearch(key) != -1
230 return self.bsearch(key) != -1
231
231
232 def _getflags(self, data, needle, pos):
232 def _getflags(self, data, needle, pos):
233 start = pos + 41
233 start = pos + 41
234 end = data.find("\n", start)
234 end = data.find("\n", start)
235 if end == -1:
235 if end == -1:
236 end = len(data) - 1
236 end = len(data) - 1
237 if start == end:
237 if start == end:
238 return ''
238 return ''
239 return self.data[start:end]
239 return self.data[start:end]
240
240
241 def __getitem__(self, key):
241 def __getitem__(self, key):
242 if not isinstance(key, bytes):
242 if not isinstance(key, bytes):
243 raise TypeError("getitem: manifest keys must be a bytes.")
243 raise TypeError("getitem: manifest keys must be a bytes.")
244 needle = self.bsearch(key)
244 needle = self.bsearch(key)
245 if needle == -1:
245 if needle == -1:
246 raise KeyError
246 raise KeyError
247 data, pos = self._get(needle)
247 data, pos = self._get(needle)
248 if pos == -1:
248 if pos == -1:
249 return (data[1], data[2])
249 return (data[1], data[2])
250 zeropos = data.find('\x00', pos)
250 zeropos = data.find('\x00', pos)
251 assert 0 <= needle <= len(self.positions)
251 assert 0 <= needle <= len(self.positions)
252 assert len(self.extrainfo) == len(self.positions)
252 assert len(self.extrainfo) == len(self.positions)
253 hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, 40)
253 hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, 40)
254 flags = self._getflags(data, needle, zeropos)
254 flags = self._getflags(data, needle, zeropos)
255 return (hashval, flags)
255 return (hashval, flags)
256
256
257 def __delitem__(self, key):
257 def __delitem__(self, key):
258 needle, found = self.bsearch2(key)
258 needle, found = self.bsearch2(key)
259 if not found:
259 if not found:
260 raise KeyError
260 raise KeyError
261 cur = self.positions[needle]
261 cur = self.positions[needle]
262 self.positions = self.positions[:needle] + self.positions[needle + 1:]
262 self.positions = self.positions[:needle] + self.positions[needle + 1:]
263 self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1:]
263 self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1:]
264 if cur >= 0:
264 if cur >= 0:
265 # This does NOT unsort the list as far as the search functions are
265 # This does NOT unsort the list as far as the search functions are
266 # concerned, as they only examine lines mapped by self.positions.
266 # concerned, as they only examine lines mapped by self.positions.
267 self.data = self.data[:cur] + '\x00' + self.data[cur + 1:]
267 self.data = self.data[:cur] + '\x00' + self.data[cur + 1:]
268 self.hasremovals = True
268 self.hasremovals = True
269
269
270 def __setitem__(self, key, value):
270 def __setitem__(self, key, value):
271 if not isinstance(key, bytes):
271 if not isinstance(key, bytes):
272 raise TypeError("setitem: manifest keys must be a byte string.")
272 raise TypeError("setitem: manifest keys must be a byte string.")
273 if not isinstance(value, tuple) or len(value) != 2:
273 if not isinstance(value, tuple) or len(value) != 2:
274 raise TypeError("Manifest values must be a tuple of (node, flags).")
274 raise TypeError("Manifest values must be a tuple of (node, flags).")
275 hashval = value[0]
275 hashval = value[0]
276 if not isinstance(hashval, bytes) or not 20 <= len(hashval) <= 22:
276 if not isinstance(hashval, bytes) or not 20 <= len(hashval) <= 22:
277 raise TypeError("node must be a 20-byte byte string")
277 raise TypeError("node must be a 20-byte byte string")
278 flags = value[1]
278 flags = value[1]
279 if len(hashval) == 22:
279 if len(hashval) == 22:
280 hashval = hashval[:-1]
280 hashval = hashval[:-1]
281 if not isinstance(flags, bytes) or len(flags) > 1:
281 if not isinstance(flags, bytes) or len(flags) > 1:
282 raise TypeError("flags must a 0 or 1 byte string, got %r", flags)
282 raise TypeError("flags must a 0 or 1 byte string, got %r", flags)
283 needle, found = self.bsearch2(key)
283 needle, found = self.bsearch2(key)
284 if found:
284 if found:
285 # put the item
285 # put the item
286 pos = self.positions[needle]
286 pos = self.positions[needle]
287 if pos < 0:
287 if pos < 0:
288 self.extradata[-pos - 1] = (key, hashval, value[1])
288 self.extradata[-pos - 1] = (key, hashval, value[1])
289 else:
289 else:
290 # just don't bother
290 # just don't bother
291 self.extradata.append((key, hashval, value[1]))
291 self.extradata.append((key, hashval, value[1]))
292 self.positions[needle] = -len(self.extradata)
292 self.positions[needle] = -len(self.extradata)
293 else:
293 else:
294 # not found, put it in with extra positions
294 # not found, put it in with extra positions
295 self.extradata.append((key, hashval, value[1]))
295 self.extradata.append((key, hashval, value[1]))
296 self.positions = (self.positions[:needle] + [-len(self.extradata)]
296 self.positions = (self.positions[:needle] + [-len(self.extradata)]
297 + self.positions[needle:])
297 + self.positions[needle:])
298 self.extrainfo = (self.extrainfo[:needle] + [0] +
298 self.extrainfo = (self.extrainfo[:needle] + [0] +
299 self.extrainfo[needle:])
299 self.extrainfo[needle:])
300
300
301 def copy(self):
301 def copy(self):
302 # XXX call _compact like in C?
302 # XXX call _compact like in C?
303 return _lazymanifest(self.data, self.positions, self.extrainfo,
303 return _lazymanifest(self.data, self.positions, self.extrainfo,
304 self.extradata, self.hasremovals)
304 self.extradata, self.hasremovals)
305
305
306 def _compact(self):
306 def _compact(self):
307 # hopefully not called TOO often
307 # hopefully not called TOO often
308 if len(self.extradata) == 0 and not self.hasremovals:
308 if len(self.extradata) == 0 and not self.hasremovals:
309 return
309 return
310 l = []
310 l = []
311 i = 0
311 i = 0
312 offset = 0
312 offset = 0
313 self.extrainfo = [0] * len(self.positions)
313 self.extrainfo = [0] * len(self.positions)
314 while i < len(self.positions):
314 while i < len(self.positions):
315 if self.positions[i] >= 0:
315 if self.positions[i] >= 0:
316 cur = self.positions[i]
316 cur = self.positions[i]
317 last_cut = cur
317 last_cut = cur
318
318
319 # Collect all contiguous entries in the buffer at the current
319 # Collect all contiguous entries in the buffer at the current
320 # offset, breaking out only for added/modified items held in
320 # offset, breaking out only for added/modified items held in
321 # extradata, or a deleted line prior to the next position.
321 # extradata, or a deleted line prior to the next position.
322 while True:
322 while True:
323 self.positions[i] = offset
323 self.positions[i] = offset
324 i += 1
324 i += 1
325 if i == len(self.positions) or self.positions[i] < 0:
325 if i == len(self.positions) or self.positions[i] < 0:
326 break
326 break
327
327
328 # A removed file has no positions[] entry, but does have an
328 # A removed file has no positions[] entry, but does have an
329 # overwritten first byte. Break out and find the end of the
329 # overwritten first byte. Break out and find the end of the
330 # current good entry/entries if there is a removed file
330 # current good entry/entries if there is a removed file
331 # before the next position.
331 # before the next position.
332 if (self.hasremovals
332 if (self.hasremovals
333 and self.data.find('\n\x00', cur,
333 and self.data.find('\n\x00', cur,
334 self.positions[i]) != -1):
334 self.positions[i]) != -1):
335 break
335 break
336
336
337 offset += self.positions[i] - cur
337 offset += self.positions[i] - cur
338 cur = self.positions[i]
338 cur = self.positions[i]
339 end_cut = self.data.find('\n', cur)
339 end_cut = self.data.find('\n', cur)
340 if end_cut != -1:
340 if end_cut != -1:
341 end_cut += 1
341 end_cut += 1
342 offset += end_cut - cur
342 offset += end_cut - cur
343 l.append(self.data[last_cut:end_cut])
343 l.append(self.data[last_cut:end_cut])
344 else:
344 else:
345 while i < len(self.positions) and self.positions[i] < 0:
345 while i < len(self.positions) and self.positions[i] < 0:
346 cur = self.positions[i]
346 cur = self.positions[i]
347 t = self.extradata[-cur - 1]
347 t = self.extradata[-cur - 1]
348 l.append(self._pack(t))
348 l.append(self._pack(t))
349 self.positions[i] = offset
349 self.positions[i] = offset
350 if len(t[1]) > 20:
350 if len(t[1]) > 20:
351 self.extrainfo[i] = ord(t[1][21])
351 self.extrainfo[i] = ord(t[1][21])
352 offset += len(l[-1])
352 offset += len(l[-1])
353 i += 1
353 i += 1
354 self.data = ''.join(l)
354 self.data = ''.join(l)
355 self.hasremovals = False
355 self.hasremovals = False
356 self.extradata = []
356 self.extradata = []
357
357
358 def _pack(self, d):
358 def _pack(self, d):
359 return d[0] + '\x00' + hex(d[1][:20]) + d[2] + '\n'
359 return d[0] + '\x00' + hex(d[1][:20]) + d[2] + '\n'
360
360
361 def text(self):
361 def text(self):
362 self._compact()
362 self._compact()
363 return self.data
363 return self.data
364
364
365 def diff(self, m2, clean=False):
365 def diff(self, m2, clean=False):
366 '''Finds changes between the current manifest and m2.'''
366 '''Finds changes between the current manifest and m2.'''
367 # XXX think whether efficiency matters here
367 # XXX think whether efficiency matters here
368 diff = {}
368 diff = {}
369
369
370 for fn, e1, flags in self.iterentries():
370 for fn, e1, flags in self.iterentries():
371 if fn not in m2:
371 if fn not in m2:
372 diff[fn] = (e1, flags), (None, '')
372 diff[fn] = (e1, flags), (None, '')
373 else:
373 else:
374 e2 = m2[fn]
374 e2 = m2[fn]
375 if (e1, flags) != e2:
375 if (e1, flags) != e2:
376 diff[fn] = (e1, flags), e2
376 diff[fn] = (e1, flags), e2
377 elif clean:
377 elif clean:
378 diff[fn] = None
378 diff[fn] = None
379
379
380 for fn, e2, flags in m2.iterentries():
380 for fn, e2, flags in m2.iterentries():
381 if fn not in self:
381 if fn not in self:
382 diff[fn] = (None, ''), (e2, flags)
382 diff[fn] = (None, ''), (e2, flags)
383
383
384 return diff
384 return diff
385
385
386 def iterentries(self):
386 def iterentries(self):
387 return lazymanifestiterentries(self)
387 return lazymanifestiterentries(self)
388
388
389 def iterkeys(self):
389 def iterkeys(self):
390 return lazymanifestiter(self)
390 return lazymanifestiter(self)
391
391
392 def __iter__(self):
392 def __iter__(self):
393 return lazymanifestiter(self)
393 return lazymanifestiter(self)
394
394
395 def __len__(self):
395 def __len__(self):
396 return len(self.positions)
396 return len(self.positions)
397
397
398 def filtercopy(self, filterfn):
398 def filtercopy(self, filterfn):
399 # XXX should be optimized
399 # XXX should be optimized
400 c = _lazymanifest('')
400 c = _lazymanifest('')
401 for f, n, fl in self.iterentries():
401 for f, n, fl in self.iterentries():
402 if filterfn(f):
402 if filterfn(f):
403 c[f] = n, fl
403 c[f] = n, fl
404 return c
404 return c
405
405
try:
    # Prefer the C implementation of lazymanifest when the compiled
    # parsers module provides one; otherwise keep the pure-Python class
    # defined above.
    _lazymanifest = parsers.lazymanifest
except AttributeError:
    pass
410
410
411 @interfaceutil.implementer(repository.imanifestdict)
411 @interfaceutil.implementer(repository.imanifestdict)
412 class manifestdict(object):
412 class manifestdict(object):
    def __init__(self, data=''):
        # Parse the manifest text into the (possibly C-backed) lazymanifest.
        self._lm = _lazymanifest(data)
415
415
416 def __getitem__(self, key):
416 def __getitem__(self, key):
417 return self._lm[key][0]
417 return self._lm[key][0]
418
418
    def find(self, key):
        """Return the (node, flags) pair recorded for ``key``.

        Raises KeyError if ``key`` is not in the manifest.
        """
        return self._lm[key]
421
421
    def __len__(self):
        # number of files tracked by the manifest
        return len(self._lm)
424
424
425 def __nonzero__(self):
425 def __nonzero__(self):
426 # nonzero is covered by the __len__ function, but implementing it here
426 # nonzero is covered by the __len__ function, but implementing it here
427 # makes it easier for extensions to override.
427 # makes it easier for extensions to override.
428 return len(self._lm) != 0
428 return len(self._lm) != 0
429
429
430 __bool__ = __nonzero__
430 __bool__ = __nonzero__
431
431
    def __setitem__(self, key, node):
        # assigning a node preserves any flags previously recorded for key
        self._lm[key] = node, self.flags(key, '')
434
434
435 def __contains__(self, key):
435 def __contains__(self, key):
436 if key is None:
436 if key is None:
437 return False
437 return False
438 return key in self._lm
438 return key in self._lm
439
439
    def __delitem__(self, key):
        # raises KeyError if key is absent, mirroring dict semantics
        del self._lm[key]
442
442
443 def __iter__(self):
443 def __iter__(self):
444 return self._lm.__iter__()
444 return self._lm.__iter__()
445
445
    def iterkeys(self):
        """Iterate over filenames (sorted; see _lazymanifest's ordering)."""
        return self._lm.iterkeys()
448
448
449 def keys(self):
449 def keys(self):
450 return list(self.iterkeys())
450 return list(self.iterkeys())
451
451
452 def filesnotin(self, m2, match=None):
452 def filesnotin(self, m2, match=None):
453 '''Set of files in this manifest that are not in the other'''
453 '''Set of files in this manifest that are not in the other'''
454 if match:
454 if match:
455 m1 = self.matches(match)
455 m1 = self.matches(match)
456 m2 = m2.matches(match)
456 m2 = m2.matches(match)
457 return m1.filesnotin(m2)
457 return m1.filesnotin(m2)
458 diff = self.diff(m2)
458 diff = self.diff(m2)
459 files = set(filepath
459 files = set(filepath
460 for filepath, hashflags in diff.iteritems()
460 for filepath, hashflags in diff.iteritems()
461 if hashflags[1][0] is None)
461 if hashflags[1][0] is None)
462 return files
462 return files
463
463
    @propertycache
    def _dirs(self):
        # lazily-built util.dirs object over the manifest's filenames;
        # supports 'in' checks (see hasdir) — semantics live in util.dirs
        return util.dirs(self)
467
467
    def dirs(self):
        """Return the cached util.dirs object for this manifest."""
        return self._dirs
470
470
    def hasdir(self, dir):
        """True if ``dir`` is a directory of this manifest (per util.dirs)."""
        return dir in self._dirs
473
473
474 def _filesfastpath(self, match):
474 def _filesfastpath(self, match):
475 '''Checks whether we can correctly and quickly iterate over matcher
475 '''Checks whether we can correctly and quickly iterate over matcher
476 files instead of over manifest files.'''
476 files instead of over manifest files.'''
477 files = match.files()
477 files = match.files()
478 return (len(files) < 100 and (match.isexact() or
478 return (len(files) < 100 and (match.isexact() or
479 (match.prefix() and all(fn in self for fn in files))))
479 (match.prefix() and all(fn in self for fn in files))))
480
480
481 def walk(self, match):
481 def walk(self, match):
482 '''Generates matching file names.
482 '''Generates matching file names.
483
483
484 Equivalent to manifest.matches(match).iterkeys(), but without creating
484 Equivalent to manifest.matches(match).iterkeys(), but without creating
485 an entirely new manifest.
485 an entirely new manifest.
486
486
487 It also reports nonexistent files by marking them bad with match.bad().
487 It also reports nonexistent files by marking them bad with match.bad().
488 '''
488 '''
489 if match.always():
489 if match.always():
490 for f in iter(self):
490 for f in iter(self):
491 yield f
491 yield f
492 return
492 return
493
493
494 fset = set(match.files())
494 fset = set(match.files())
495
495
496 # avoid the entire walk if we're only looking for specific files
496 # avoid the entire walk if we're only looking for specific files
497 if self._filesfastpath(match):
497 if self._filesfastpath(match):
498 for fn in sorted(fset):
498 for fn in sorted(fset):
499 yield fn
499 yield fn
500 return
500 return
501
501
502 for fn in self:
502 for fn in self:
503 if fn in fset:
503 if fn in fset:
504 # specified pattern is the exact name
504 # specified pattern is the exact name
505 fset.remove(fn)
505 fset.remove(fn)
506 if match(fn):
506 if match(fn):
507 yield fn
507 yield fn
508
508
509 # for dirstate.walk, files=[''] means "walk the whole tree".
509 # for dirstate.walk, files=[''] means "walk the whole tree".
510 # follow that here, too
510 # follow that here, too
511 fset.discard('')
511 fset.discard('')
512
512
513 for fn in sorted(fset):
513 for fn in sorted(fset):
514 if not self.hasdir(fn):
514 if not self.hasdir(fn):
515 match.bad(fn, None)
515 match.bad(fn, None)
516
516
517 def matches(self, match):
517 def matches(self, match):
518 '''generate a new manifest filtered by the match argument'''
518 '''generate a new manifest filtered by the match argument'''
519 if match.always():
519 if match.always():
520 return self.copy()
520 return self.copy()
521
521
522 if self._filesfastpath(match):
522 if self._filesfastpath(match):
523 m = manifestdict()
523 m = manifestdict()
524 lm = self._lm
524 lm = self._lm
525 for fn in match.files():
525 for fn in match.files():
526 if fn in lm:
526 if fn in lm:
527 m._lm[fn] = lm[fn]
527 m._lm[fn] = lm[fn]
528 return m
528 return m
529
529
530 m = manifestdict()
530 m = manifestdict()
531 m._lm = self._lm.filtercopy(match)
531 m._lm = self._lm.filtercopy(match)
532 return m
532 return m
533
533
534 def diff(self, m2, match=None, clean=False):
534 def diff(self, m2, match=None, clean=False):
535 '''Finds changes between the current manifest and m2.
535 '''Finds changes between the current manifest and m2.
536
536
537 Args:
537 Args:
538 m2: the manifest to which this manifest should be compared.
538 m2: the manifest to which this manifest should be compared.
539 clean: if true, include files unchanged between these manifests
539 clean: if true, include files unchanged between these manifests
540 with a None value in the returned dictionary.
540 with a None value in the returned dictionary.
541
541
542 The result is returned as a dict with filename as key and
542 The result is returned as a dict with filename as key and
543 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
543 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
544 nodeid in the current/other manifest and fl1/fl2 is the flag
544 nodeid in the current/other manifest and fl1/fl2 is the flag
545 in the current/other manifest. Where the file does not exist,
545 in the current/other manifest. Where the file does not exist,
546 the nodeid will be None and the flags will be the empty
546 the nodeid will be None and the flags will be the empty
547 string.
547 string.
548 '''
548 '''
549 if match:
549 if match:
550 m1 = self.matches(match)
550 m1 = self.matches(match)
551 m2 = m2.matches(match)
551 m2 = m2.matches(match)
552 return m1.diff(m2, clean=clean)
552 return m1.diff(m2, clean=clean)
553 return self._lm.diff(m2._lm, clean)
553 return self._lm.diff(m2._lm, clean)
554
554
555 def setflag(self, key, flag):
555 def setflag(self, key, flag):
556 self._lm[key] = self[key], flag
556 self._lm[key] = self[key], flag
557
557
558 def get(self, key, default=None):
558 def get(self, key, default=None):
559 try:
559 try:
560 return self._lm[key][0]
560 return self._lm[key][0]
561 except KeyError:
561 except KeyError:
562 return default
562 return default
563
563
564 def flags(self, key, default=''):
564 def flags(self, key, default=''):
565 try:
565 try:
566 return self._lm[key][1]
566 return self._lm[key][1]
567 except KeyError:
567 except KeyError:
568 return default
568 return default
569
569
570 def copy(self):
570 def copy(self):
571 c = manifestdict()
571 c = manifestdict()
572 c._lm = self._lm.copy()
572 c._lm = self._lm.copy()
573 return c
573 return c
574
574
575 def items(self):
575 def items(self):
576 return (x[:2] for x in self._lm.iterentries())
576 return (x[:2] for x in self._lm.iterentries())
577
577
578 def iteritems(self):
578 def iteritems(self):
579 return (x[:2] for x in self._lm.iterentries())
579 return (x[:2] for x in self._lm.iterentries())
580
580
581 def iterentries(self):
581 def iterentries(self):
582 return self._lm.iterentries()
582 return self._lm.iterentries()
583
583
584 def text(self):
584 def text(self):
585 # most likely uses native version
585 # most likely uses native version
586 return self._lm.text()
586 return self._lm.text()
587
587
    def fastdelta(self, base, changes):
        """Given a base manifest text as a bytearray and a list of changes
        relative to that text, compute a delta that can be used by revlog.

        base: manifest text (bytearray) the delta is computed against
        changes: iterable of (filename, todelete) pairs; the forward-moving
            ``start`` passed to _msearch assumes they are sorted by filename

        Returns (arraytext, deltatext): the patched text and the binary
        delta suitable for addrevision.
        """
        delta = []
        dstart = None
        dend = None
        # lines of the chunk currently being accumulated; seeded with an
        # empty string so "".join(dline) is always safe
        dline = [""]
        start = 0
        # zero copy representation of base as a buffer
        addbuf = util.buffer(base)

        changes = list(changes)
        if len(changes) < FASTDELTA_TEXTDIFF_THRESHOLD:
            # start with a readonly loop that finds the offset of
            # each line and creates the deltas
            for f, todelete in changes:
                # bs will either be the index of the item or the insert point
                start, end = _msearch(addbuf, f, start)
                if not todelete:
                    h, fl = self._lm[f]
                    l = "%s\0%s%s\n" % (f, hex(h), fl)
                else:
                    if start == end:
                        # item we want to delete was not found, error out
                        raise AssertionError(
                            _("failed to remove %s from manifest") % f)
                    l = ""
                if dstart is not None and dstart <= start and dend >= start:
                    # this edit overlaps or abuts the pending chunk: merge it
                    if dend < end:
                        dend = end
                    if l:
                        dline.append(l)
                else:
                    # disjoint edit: flush the pending chunk, start a new one
                    if dstart is not None:
                        delta.append([dstart, dend, "".join(dline)])
                    dstart = start
                    dend = end
                    dline = [l]

            if dstart is not None:
                delta.append([dstart, dend, "".join(dline)])
            # apply the delta to the base, and get a delta for addrevision
            deltatext, arraytext = _addlistdelta(base, delta)
        else:
            # For large changes, it's much cheaper to just build the text and
            # diff it.
            arraytext = bytearray(self.text())
            deltatext = mdiff.textdiff(
                util.buffer(base), util.buffer(arraytext))

        return arraytext, deltatext
640
640
def _msearch(m, s, lo=0, hi=None):
    '''return a tuple (start, end) that says where to find s within m.

    If the string is found m[start:end] are the line containing
    that string.  If start == end the string was not found and
    they indicate the proper sorted insertion point.

    m should be a buffer, a memoryview or a byte string.
    s is a byte string'''
    def advance(i, c):
        # scan forward from i to the next occurrence of character c
        # (or to the end of m)
        while i < lenm and m[i:i + 1] != c:
            i += 1
        return i
    if not s:
        return (lo, lo)
    lenm = len(m)
    if not hi:
        hi = lenm
    # bisect over the newline-separated, NUL-delimited manifest lines
    while lo < hi:
        mid = (lo + hi) // 2
        # rewind mid to the beginning of its line
        start = mid
        while start > 0 and m[start - 1:start] != '\n':
            start -= 1
        end = advance(start, '\0')
        if bytes(m[start:end]) < s:
            # we know that after the null there are 40 bytes of sha1
            # this translates to the bisect lo = mid + 1
            lo = advance(end + 40, '\n') + 1
        else:
            # this translates to the bisect hi = mid
            hi = start
    # lo is now the offset of the first line >= s; check for an exact hit
    end = advance(lo, '\0')
    found = m[lo:end]
    if s == found:
        # we know that after the null there are 40 bytes of sha1
        end = advance(end + 40, '\n')
        return (lo, end + 1)
    else:
        return (lo, lo)
680
680
def _checkforbidden(l):
    """Raise StorageError if any filename contains '\\n' or '\\r'."""
    for f in l:
        if '\n' not in f and '\r' not in f:
            continue
        raise error.StorageError(
            _("'\\n' and '\\r' disallowed in filenames: %r")
            % pycompat.bytestr(f))
688
688
689
689
# apply the changes collected during the bisect loop to our addlist
# return a delta suitable for addrevision
def _addlistdelta(addlist, x):
    """Splice edit triples (start, end, content) from *x* into *addlist*.

    Builds a fresh bytearray rather than mutating in place, which is
    cheaper for large inputs.  Returns (deltatext, newaddlist), where
    deltatext is the binary encoding of the edits.
    """
    pos = 0
    patched = bytearray()
    for start, end, content in x:
        patched += addlist[pos:start]
        if content:
            patched += bytearray(content)
        pos = end
    patched += addlist[pos:]

    deltatext = "".join(struct.pack(">lll", start, end, len(content))
                        + content for start, end, content in x)
    return deltatext, patched
710
710
def _splittopdir(f):
    """Split *f* into (topmost directory + '/', remainder).

    The directory part is the empty string when *f* has no '/'.
    """
    if '/' not in f:
        return '', f
    dir, subpath = f.split('/', 1)
    return dir + '/', subpath
717
717
def _noop(s):
    """Do nothing.

    Sentinel callback for treemanifest._loadfunc/_copyfunc; callers
    compare against it with ``is``, which works identically for a
    ``def`` as for the previous lambda (PEP 8 discourages assigning
    a lambda to a name, E731).
    """
719
719
720 class treemanifest(object):
720 class treemanifest(object):
721 def __init__(self, dir='', text=''):
721 def __init__(self, dir='', text=''):
722 self._dir = dir
722 self._dir = dir
723 self._node = nullid
723 self._node = nullid
724 self._loadfunc = _noop
724 self._loadfunc = _noop
725 self._copyfunc = _noop
725 self._copyfunc = _noop
726 self._dirty = False
726 self._dirty = False
727 self._dirs = {}
727 self._dirs = {}
728 self._lazydirs = {}
728 self._lazydirs = {}
729 # Using _lazymanifest here is a little slower than plain old dicts
729 # Using _lazymanifest here is a little slower than plain old dicts
730 self._files = {}
730 self._files = {}
731 self._flags = {}
731 self._flags = {}
732 if text:
732 if text:
733 def readsubtree(subdir, subm):
733 def readsubtree(subdir, subm):
734 raise AssertionError('treemanifest constructor only accepts '
734 raise AssertionError('treemanifest constructor only accepts '
735 'flat manifests')
735 'flat manifests')
736 self.parse(text, readsubtree)
736 self.parse(text, readsubtree)
737 self._dirty = True # Mark flat manifest dirty after parsing
737 self._dirty = True # Mark flat manifest dirty after parsing
738
738
739 def _subpath(self, path):
739 def _subpath(self, path):
740 return self._dir + path
740 return self._dir + path
741
741
742 def _loadalllazy(self):
742 def _loadalllazy(self):
743 selfdirs = self._dirs
743 selfdirs = self._dirs
744 for d, (path, node, readsubtree, docopy) in self._lazydirs.iteritems():
744 for d, (path, node, readsubtree, docopy) in self._lazydirs.iteritems():
745 if docopy:
745 if docopy:
746 selfdirs[d] = readsubtree(path, node).copy()
746 selfdirs[d] = readsubtree(path, node).copy()
747 else:
747 else:
748 selfdirs[d] = readsubtree(path, node)
748 selfdirs[d] = readsubtree(path, node)
749 self._lazydirs = {}
749 self._lazydirs = {}
750
750
751 def _loadlazy(self, d):
751 def _loadlazy(self, d):
752 v = self._lazydirs.get(d)
752 v = self._lazydirs.get(d)
753 if v:
753 if v:
754 path, node, readsubtree, docopy = v
754 path, node, readsubtree, docopy = v
755 if docopy:
755 if docopy:
756 self._dirs[d] = readsubtree(path, node).copy()
756 self._dirs[d] = readsubtree(path, node).copy()
757 else:
757 else:
758 self._dirs[d] = readsubtree(path, node)
758 self._dirs[d] = readsubtree(path, node)
759 del self._lazydirs[d]
759 del self._lazydirs[d]
760
760
761 def _loadchildrensetlazy(self, visit):
761 def _loadchildrensetlazy(self, visit):
762 if not visit:
762 if not visit:
763 return None
763 return None
764 if visit == 'all' or visit == 'this':
764 if visit == 'all' or visit == 'this':
765 self._loadalllazy()
765 self._loadalllazy()
766 return None
766 return None
767
767
768 loadlazy = self._loadlazy
768 loadlazy = self._loadlazy
769 for k in visit:
769 for k in visit:
770 loadlazy(k + '/')
770 loadlazy(k + '/')
771 return visit
771 return visit
772
772
773 def _loaddifflazy(self, t1, t2):
773 def _loaddifflazy(self, t1, t2):
774 """load items in t1 and t2 if they're needed for diffing.
774 """load items in t1 and t2 if they're needed for diffing.
775
775
776 The criteria currently is:
776 The criteria currently is:
777 - if it's not present in _lazydirs in either t1 or t2, load it in the
777 - if it's not present in _lazydirs in either t1 or t2, load it in the
778 other (it may already be loaded or it may not exist, doesn't matter)
778 other (it may already be loaded or it may not exist, doesn't matter)
779 - if it's present in _lazydirs in both, compare the nodeid; if it
779 - if it's present in _lazydirs in both, compare the nodeid; if it
780 differs, load it in both
780 differs, load it in both
781 """
781 """
782 toloadlazy = []
782 toloadlazy = []
783 for d, v1 in t1._lazydirs.iteritems():
783 for d, v1 in t1._lazydirs.iteritems():
784 v2 = t2._lazydirs.get(d)
784 v2 = t2._lazydirs.get(d)
785 if not v2 or v2[1] != v1[1]:
785 if not v2 or v2[1] != v1[1]:
786 toloadlazy.append(d)
786 toloadlazy.append(d)
787 for d, v1 in t2._lazydirs.iteritems():
787 for d, v1 in t2._lazydirs.iteritems():
788 if d not in t1._lazydirs:
788 if d not in t1._lazydirs:
789 toloadlazy.append(d)
789 toloadlazy.append(d)
790
790
791 for d in toloadlazy:
791 for d in toloadlazy:
792 t1._loadlazy(d)
792 t1._loadlazy(d)
793 t2._loadlazy(d)
793 t2._loadlazy(d)
794
794
795 def __len__(self):
795 def __len__(self):
796 self._load()
796 self._load()
797 size = len(self._files)
797 size = len(self._files)
798 self._loadalllazy()
798 self._loadalllazy()
799 for m in self._dirs.values():
799 for m in self._dirs.values():
800 size += m.__len__()
800 size += m.__len__()
801 return size
801 return size
802
802
803 def __nonzero__(self):
803 def __nonzero__(self):
804 # Faster than "__len() != 0" since it avoids loading sub-manifests
804 # Faster than "__len() != 0" since it avoids loading sub-manifests
805 return not self._isempty()
805 return not self._isempty()
806
806
807 __bool__ = __nonzero__
807 __bool__ = __nonzero__
808
808
809 def _isempty(self):
809 def _isempty(self):
810 self._load() # for consistency; already loaded by all callers
810 self._load() # for consistency; already loaded by all callers
811 # See if we can skip loading everything.
811 # See if we can skip loading everything.
812 if self._files or (self._dirs and
812 if self._files or (self._dirs and
813 any(not m._isempty() for m in self._dirs.values())):
813 any(not m._isempty() for m in self._dirs.values())):
814 return False
814 return False
815 self._loadalllazy()
815 self._loadalllazy()
816 return (not self._dirs or
816 return (not self._dirs or
817 all(m._isempty() for m in self._dirs.values()))
817 all(m._isempty() for m in self._dirs.values()))
818
818
819 def __repr__(self):
819 def __repr__(self):
820 return ('<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>' %
820 return ('<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>' %
821 (self._dir, hex(self._node),
821 (self._dir, hex(self._node),
822 bool(self._loadfunc is _noop),
822 bool(self._loadfunc is _noop),
823 self._dirty, id(self)))
823 self._dirty, id(self)))
824
824
825 def dir(self):
825 def dir(self):
826 '''The directory that this tree manifest represents, including a
826 '''The directory that this tree manifest represents, including a
827 trailing '/'. Empty string for the repo root directory.'''
827 trailing '/'. Empty string for the repo root directory.'''
828 return self._dir
828 return self._dir
829
829
830 def node(self):
830 def node(self):
831 '''This node of this instance. nullid for unsaved instances. Should
831 '''This node of this instance. nullid for unsaved instances. Should
832 be updated when the instance is read or written from a revlog.
832 be updated when the instance is read or written from a revlog.
833 '''
833 '''
834 assert not self._dirty
834 assert not self._dirty
835 return self._node
835 return self._node
836
836
837 def setnode(self, node):
837 def setnode(self, node):
838 self._node = node
838 self._node = node
839 self._dirty = False
839 self._dirty = False
840
840
841 def iterentries(self):
841 def iterentries(self):
842 self._load()
842 self._load()
843 self._loadalllazy()
843 self._loadalllazy()
844 for p, n in sorted(itertools.chain(self._dirs.items(),
844 for p, n in sorted(itertools.chain(self._dirs.items(),
845 self._files.items())):
845 self._files.items())):
846 if p in self._files:
846 if p in self._files:
847 yield self._subpath(p), n, self._flags.get(p, '')
847 yield self._subpath(p), n, self._flags.get(p, '')
848 else:
848 else:
849 for x in n.iterentries():
849 for x in n.iterentries():
850 yield x
850 yield x
851
851
852 def items(self):
852 def items(self):
853 self._load()
853 self._load()
854 self._loadalllazy()
854 self._loadalllazy()
855 for p, n in sorted(itertools.chain(self._dirs.items(),
855 for p, n in sorted(itertools.chain(self._dirs.items(),
856 self._files.items())):
856 self._files.items())):
857 if p in self._files:
857 if p in self._files:
858 yield self._subpath(p), n
858 yield self._subpath(p), n
859 else:
859 else:
860 for f, sn in n.iteritems():
860 for f, sn in n.iteritems():
861 yield f, sn
861 yield f, sn
862
862
863 iteritems = items
863 iteritems = items
864
864
865 def iterkeys(self):
865 def iterkeys(self):
866 self._load()
866 self._load()
867 self._loadalllazy()
867 self._loadalllazy()
868 for p in sorted(itertools.chain(self._dirs, self._files)):
868 for p in sorted(itertools.chain(self._dirs, self._files)):
869 if p in self._files:
869 if p in self._files:
870 yield self._subpath(p)
870 yield self._subpath(p)
871 else:
871 else:
872 for f in self._dirs[p]:
872 for f in self._dirs[p]:
873 yield f
873 yield f
874
874
875 def keys(self):
875 def keys(self):
876 return list(self.iterkeys())
876 return list(self.iterkeys())
877
877
878 def __iter__(self):
878 def __iter__(self):
879 return self.iterkeys()
879 return self.iterkeys()
880
880
881 def __contains__(self, f):
881 def __contains__(self, f):
882 if f is None:
882 if f is None:
883 return False
883 return False
884 self._load()
884 self._load()
885 dir, subpath = _splittopdir(f)
885 dir, subpath = _splittopdir(f)
886 if dir:
886 if dir:
887 self._loadlazy(dir)
887 self._loadlazy(dir)
888
888
889 if dir not in self._dirs:
889 if dir not in self._dirs:
890 return False
890 return False
891
891
892 return self._dirs[dir].__contains__(subpath)
892 return self._dirs[dir].__contains__(subpath)
893 else:
893 else:
894 return f in self._files
894 return f in self._files
895
895
896 def get(self, f, default=None):
896 def get(self, f, default=None):
897 self._load()
897 self._load()
898 dir, subpath = _splittopdir(f)
898 dir, subpath = _splittopdir(f)
899 if dir:
899 if dir:
900 self._loadlazy(dir)
900 self._loadlazy(dir)
901
901
902 if dir not in self._dirs:
902 if dir not in self._dirs:
903 return default
903 return default
904 return self._dirs[dir].get(subpath, default)
904 return self._dirs[dir].get(subpath, default)
905 else:
905 else:
906 return self._files.get(f, default)
906 return self._files.get(f, default)
907
907
908 def __getitem__(self, f):
908 def __getitem__(self, f):
909 self._load()
909 self._load()
910 dir, subpath = _splittopdir(f)
910 dir, subpath = _splittopdir(f)
911 if dir:
911 if dir:
912 self._loadlazy(dir)
912 self._loadlazy(dir)
913
913
914 return self._dirs[dir].__getitem__(subpath)
914 return self._dirs[dir].__getitem__(subpath)
915 else:
915 else:
916 return self._files[f]
916 return self._files[f]
917
917
918 def flags(self, f):
918 def flags(self, f):
919 self._load()
919 self._load()
920 dir, subpath = _splittopdir(f)
920 dir, subpath = _splittopdir(f)
921 if dir:
921 if dir:
922 self._loadlazy(dir)
922 self._loadlazy(dir)
923
923
924 if dir not in self._dirs:
924 if dir not in self._dirs:
925 return ''
925 return ''
926 return self._dirs[dir].flags(subpath)
926 return self._dirs[dir].flags(subpath)
927 else:
927 else:
928 if f in self._lazydirs or f in self._dirs:
928 if f in self._lazydirs or f in self._dirs:
929 return ''
929 return ''
930 return self._flags.get(f, '')
930 return self._flags.get(f, '')
931
931
932 def find(self, f):
932 def find(self, f):
933 self._load()
933 self._load()
934 dir, subpath = _splittopdir(f)
934 dir, subpath = _splittopdir(f)
935 if dir:
935 if dir:
936 self._loadlazy(dir)
936 self._loadlazy(dir)
937
937
938 return self._dirs[dir].find(subpath)
938 return self._dirs[dir].find(subpath)
939 else:
939 else:
940 return self._files[f], self._flags.get(f, '')
940 return self._files[f], self._flags.get(f, '')
941
941
942 def __delitem__(self, f):
942 def __delitem__(self, f):
943 self._load()
943 self._load()
944 dir, subpath = _splittopdir(f)
944 dir, subpath = _splittopdir(f)
945 if dir:
945 if dir:
946 self._loadlazy(dir)
946 self._loadlazy(dir)
947
947
948 self._dirs[dir].__delitem__(subpath)
948 self._dirs[dir].__delitem__(subpath)
949 # If the directory is now empty, remove it
949 # If the directory is now empty, remove it
950 if self._dirs[dir]._isempty():
950 if self._dirs[dir]._isempty():
951 del self._dirs[dir]
951 del self._dirs[dir]
952 else:
952 else:
953 del self._files[f]
953 del self._files[f]
954 if f in self._flags:
954 if f in self._flags:
955 del self._flags[f]
955 del self._flags[f]
956 self._dirty = True
956 self._dirty = True
957
957
958 def __setitem__(self, f, n):
958 def __setitem__(self, f, n):
959 assert n is not None
959 assert n is not None
960 self._load()
960 self._load()
961 dir, subpath = _splittopdir(f)
961 dir, subpath = _splittopdir(f)
962 if dir:
962 if dir:
963 self._loadlazy(dir)
963 self._loadlazy(dir)
964 if dir not in self._dirs:
964 if dir not in self._dirs:
965 self._dirs[dir] = treemanifest(self._subpath(dir))
965 self._dirs[dir] = treemanifest(self._subpath(dir))
966 self._dirs[dir].__setitem__(subpath, n)
966 self._dirs[dir].__setitem__(subpath, n)
967 else:
967 else:
968 self._files[f] = n[:21] # to match manifestdict's behavior
968 self._files[f] = n[:21] # to match manifestdict's behavior
969 self._dirty = True
969 self._dirty = True
970
970
971 def _load(self):
971 def _load(self):
972 if self._loadfunc is not _noop:
972 if self._loadfunc is not _noop:
973 lf, self._loadfunc = self._loadfunc, _noop
973 lf, self._loadfunc = self._loadfunc, _noop
974 lf(self)
974 lf(self)
975 elif self._copyfunc is not _noop:
975 elif self._copyfunc is not _noop:
976 cf, self._copyfunc = self._copyfunc, _noop
976 cf, self._copyfunc = self._copyfunc, _noop
977 cf(self)
977 cf(self)
978
978
979 def setflag(self, f, flags):
979 def setflag(self, f, flags):
980 """Set the flags (symlink, executable) for path f."""
980 """Set the flags (symlink, executable) for path f."""
981 self._load()
981 self._load()
982 dir, subpath = _splittopdir(f)
982 dir, subpath = _splittopdir(f)
983 if dir:
983 if dir:
984 self._loadlazy(dir)
984 self._loadlazy(dir)
985 if dir not in self._dirs:
985 if dir not in self._dirs:
986 self._dirs[dir] = treemanifest(self._subpath(dir))
986 self._dirs[dir] = treemanifest(self._subpath(dir))
987 self._dirs[dir].setflag(subpath, flags)
987 self._dirs[dir].setflag(subpath, flags)
988 else:
988 else:
989 self._flags[f] = flags
989 self._flags[f] = flags
990 self._dirty = True
990 self._dirty = True
991
991
    def copy(self):
        """Return an independent copy of this tree.

        The duplication of a not-yet-loaded tree is deferred: a closure
        is installed as the copy's _copyfunc and runs on first access
        (via _load).  Loaded subtrees are copied recursively; lazy
        entries are re-flagged (docopy=True) so they get copied when
        they are eventually read.
        """
        copy = treemanifest(self._dir)
        copy._node = self._node
        copy._dirty = self._dirty
        if self._copyfunc is _noop:
            def _copyfunc(s):
                self._load()
                # re-mark lazy entries with docopy=True so loading them
                # in the copy does not alias the source's subtrees
                s._lazydirs = {d: (p, n, r, True) for
                               d, (p, n, r, c) in self._lazydirs.iteritems()}
                sdirs = s._dirs
                for d, v in self._dirs.iteritems():
                    sdirs[d] = v.copy()
                s._files = dict.copy(self._files)
                s._flags = dict.copy(self._flags)
            if self._loadfunc is _noop:
                # already loaded: populate the copy eagerly
                _copyfunc(copy)
            else:
                # not loaded yet: defer until the copy is first used
                copy._copyfunc = _copyfunc
        else:
            # self is itself an unrealized copy; chain its copier
            copy._copyfunc = self._copyfunc
        return copy
1013
1013
1014 def filesnotin(self, m2, match=None):
1014 def filesnotin(self, m2, match=None):
1015 '''Set of files in this manifest that are not in the other'''
1015 '''Set of files in this manifest that are not in the other'''
1016 if match and not match.always():
1016 if match and not match.always():
1017 m1 = self.matches(match)
1017 m1 = self.matches(match)
1018 m2 = m2.matches(match)
1018 m2 = m2.matches(match)
1019 return m1.filesnotin(m2)
1019 return m1.filesnotin(m2)
1020
1020
1021 files = set()
1021 files = set()
1022 def _filesnotin(t1, t2):
1022 def _filesnotin(t1, t2):
1023 if t1._node == t2._node and not t1._dirty and not t2._dirty:
1023 if t1._node == t2._node and not t1._dirty and not t2._dirty:
1024 return
1024 return
1025 t1._load()
1025 t1._load()
1026 t2._load()
1026 t2._load()
1027 self._loaddifflazy(t1, t2)
1027 self._loaddifflazy(t1, t2)
1028 for d, m1 in t1._dirs.iteritems():
1028 for d, m1 in t1._dirs.iteritems():
1029 if d in t2._dirs:
1029 if d in t2._dirs:
1030 m2 = t2._dirs[d]
1030 m2 = t2._dirs[d]
1031 _filesnotin(m1, m2)
1031 _filesnotin(m1, m2)
1032 else:
1032 else:
1033 files.update(m1.iterkeys())
1033 files.update(m1.iterkeys())
1034
1034
1035 for fn in t1._files:
1035 for fn in t1._files:
1036 if fn not in t2._files:
1036 if fn not in t2._files:
1037 files.add(t1._subpath(fn))
1037 files.add(t1._subpath(fn))
1038
1038
1039 _filesnotin(self, m2)
1039 _filesnotin(self, m2)
1040 return files
1040 return files
1041
1041
    @propertycache
    def _alldirs(self):
        # computed once per instance (propertycache); util.dirs derives
        # the directory set from this manifest's paths
        return util.dirs(self)
1045
1045
1046 def dirs(self):
1046 def dirs(self):
1047 return self._alldirs
1047 return self._alldirs
1048
1048
1049 def hasdir(self, dir):
1049 def hasdir(self, dir):
1050 self._load()
1050 self._load()
1051 topdir, subdir = _splittopdir(dir)
1051 topdir, subdir = _splittopdir(dir)
1052 if topdir:
1052 if topdir:
1053 self._loadlazy(topdir)
1053 self._loadlazy(topdir)
1054 if topdir in self._dirs:
1054 if topdir in self._dirs:
1055 return self._dirs[topdir].hasdir(subdir)
1055 return self._dirs[topdir].hasdir(subdir)
1056 return False
1056 return False
1057 dirslash = dir + '/'
1057 dirslash = dir + '/'
1058 return dirslash in self._dirs or dirslash in self._lazydirs
1058 return dirslash in self._dirs or dirslash in self._lazydirs
1059
1059
1060 def walk(self, match):
1060 def walk(self, match):
1061 '''Generates matching file names.
1061 '''Generates matching file names.
1062
1062
1063 Equivalent to manifest.matches(match).iterkeys(), but without creating
1063 Equivalent to manifest.matches(match).iterkeys(), but without creating
1064 an entirely new manifest.
1064 an entirely new manifest.
1065
1065
1066 It also reports nonexistent files by marking them bad with match.bad().
1066 It also reports nonexistent files by marking them bad with match.bad().
1067 '''
1067 '''
1068 if match.always():
1068 if match.always():
1069 for f in iter(self):
1069 for f in iter(self):
1070 yield f
1070 yield f
1071 return
1071 return
1072
1072
1073 fset = set(match.files())
1073 fset = set(match.files())
1074
1074
1075 for fn in self._walk(match):
1075 for fn in self._walk(match):
1076 if fn in fset:
1076 if fn in fset:
1077 # specified pattern is the exact name
1077 # specified pattern is the exact name
1078 fset.remove(fn)
1078 fset.remove(fn)
1079 yield fn
1079 yield fn
1080
1080
1081 # for dirstate.walk, files=[''] means "walk the whole tree".
1081 # for dirstate.walk, files=[''] means "walk the whole tree".
1082 # follow that here, too
1082 # follow that here, too
1083 fset.discard('')
1083 fset.discard('')
1084
1084
1085 for fn in sorted(fset):
1085 for fn in sorted(fset):
1086 if not self.hasdir(fn):
1086 if not self.hasdir(fn):
1087 match.bad(fn, None)
1087 match.bad(fn, None)
1088
1088
1089 def _walk(self, match):
1089 def _walk(self, match):
1090 '''Recursively generates matching file names for walk().'''
1090 '''Recursively generates matching file names for walk().'''
1091 visit = match.visitchildrenset(self._dir[:-1])
1091 visit = match.visitchildrenset(self._dir[:-1])
1092 if not visit:
1092 if not visit:
1093 return
1093 return
1094
1094
1095 # yield this dir's files and walk its submanifests
1095 # yield this dir's files and walk its submanifests
1096 self._load()
1096 self._load()
1097 visit = self._loadchildrensetlazy(visit)
1097 visit = self._loadchildrensetlazy(visit)
1098 for p in sorted(list(self._dirs) + list(self._files)):
1098 for p in sorted(list(self._dirs) + list(self._files)):
1099 if p in self._files:
1099 if p in self._files:
1100 fullp = self._subpath(p)
1100 fullp = self._subpath(p)
1101 if match(fullp):
1101 if match(fullp):
1102 yield fullp
1102 yield fullp
1103 else:
1103 else:
1104 if not visit or p[:-1] in visit:
1104 if not visit or p[:-1] in visit:
1105 for f in self._dirs[p]._walk(match):
1105 for f in self._dirs[p]._walk(match):
1106 yield f
1106 yield f
1107
1107
1108 def matches(self, match):
1108 def matches(self, match):
1109 '''generate a new manifest filtered by the match argument'''
1109 '''generate a new manifest filtered by the match argument'''
1110 if match.always():
1110 if match.always():
1111 return self.copy()
1111 return self.copy()
1112
1112
1113 return self._matches(match)
1113 return self._matches(match)
1114
1114
    def _matches(self, match):
        '''recursively generate a new manifest filtered by the match argument.
        '''
        visit = match.visitchildrenset(self._dir[:-1])
        if visit == 'all':
            # everything below matches: a plain copy is cheapest
            return self.copy()
        ret = treemanifest(self._dir)
        if not visit:
            return ret

        self._load()
        for fn in self._files:
            # While visitchildrenset *usually* lists only subdirs, this is
            # actually up to the matcher and may have some files in the set().
            # If visit == 'this', we should obviously look at the files in this
            # directory; if visit is a set, and fn is in it, we should inspect
            # fn (but no need to inspect things not in the set).
            if visit != 'this' and fn not in visit:
                continue
            fullp = self._subpath(fn)
            # visitchildrenset isn't perfect, we still need to call the regular
            # matcher code to further filter results.
            if not match(fullp):
                continue
            ret._files[fn] = self._files[fn]
            if fn in self._flags:
                ret._flags[fn] = self._flags[fn]

        # a None result here means "no per-child filtering needed"
        visit = self._loadchildrensetlazy(visit)
        for dir, subm in self._dirs.iteritems():
            if visit and dir[:-1] not in visit:
                continue
            m = subm._matches(match)
            if not m._isempty():
                ret._dirs[dir] = m

        if not ret._isempty():
            ret._dirty = True
        return ret
1155
1155
    def diff(self, m2, match=None, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          match: optional matcher; when given and not match.always(), both
            manifests are first narrowed to the matching files and the
            narrowed manifests are diffed instead.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
        if match and not match.always():
            # Narrow both sides first, then diff without the matcher.
            m1 = self.matches(match)
            m2 = m2.matches(match)
            return m1.diff(m2, clean=clean)
        result = {}
        emptytree = treemanifest()

        def _iterativediff(t1, t2, stack):
            """compares two tree manifests and append new tree-manifests which
            needs to be compared to stack"""
            # Same stored node and neither side modified in memory: the
            # subtrees are identical and cannot contribute differences.
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                return
            t1._load()
            t2._load()
            self._loaddifflazy(t1, t2)

            # Queue every subdirectory pair for later comparison; a
            # directory missing on one side is paired with an empty tree.
            for d, m1 in t1._dirs.iteritems():
                m2 = t2._dirs.get(d, emptytree)
                stack.append((m1, m2))

            for d, m2 in t2._dirs.iteritems():
                if d not in t1._dirs:
                    stack.append((emptytree, m2))

            # Compare the files stored directly in this directory.
            for fn, n1 in t1._files.iteritems():
                fl1 = t1._flags.get(fn, '')
                n2 = t2._files.get(fn, None)
                fl2 = t2._flags.get(fn, '')
                if n1 != n2 or fl1 != fl2:
                    result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
                elif clean:
                    result[t1._subpath(fn)] = None

            # Files present only on the t2 side.
            for fn, n2 in t2._files.iteritems():
                if fn not in t1._files:
                    fl2 = t2._flags.get(fn, '')
                    result[t2._subpath(fn)] = ((None, ''), (n2, fl2))

        # Iterative, stack-driven traversal instead of recursion, so deep
        # directory trees cannot blow the Python call stack.
        stackls = []
        _iterativediff(self, m2, stackls)
        while stackls:
            t1, t2 = stackls.pop()
            # stackls is populated in the function call
            _iterativediff(t1, t2, stackls)
        return result
1216
1216
1217 def unmodifiedsince(self, m2):
1217 def unmodifiedsince(self, m2):
1218 return not self._dirty and not m2._dirty and self._node == m2._node
1218 return not self._dirty and not m2._dirty and self._node == m2._node
1219
1219
1220 def parse(self, text, readsubtree):
1220 def parse(self, text, readsubtree):
1221 selflazy = self._lazydirs
1221 selflazy = self._lazydirs
1222 subpath = self._subpath
1222 subpath = self._subpath
1223 for f, n, fl in _parse(text):
1223 for f, n, fl in _parse(text):
1224 if fl == 't':
1224 if fl == 't':
1225 f = f + '/'
1225 f = f + '/'
1226 # False below means "doesn't need to be copied" and can use the
1226 # False below means "doesn't need to be copied" and can use the
1227 # cached value from readsubtree directly.
1227 # cached value from readsubtree directly.
1228 selflazy[f] = (subpath(f), n, readsubtree, False)
1228 selflazy[f] = (subpath(f), n, readsubtree, False)
1229 elif '/' in f:
1229 elif '/' in f:
1230 # This is a flat manifest, so use __setitem__ and setflag rather
1230 # This is a flat manifest, so use __setitem__ and setflag rather
1231 # than assigning directly to _files and _flags, so we can
1231 # than assigning directly to _files and _flags, so we can
1232 # assign a path in a subdirectory, and to mark dirty (compared
1232 # assign a path in a subdirectory, and to mark dirty (compared
1233 # to nullid).
1233 # to nullid).
1234 self[f] = n
1234 self[f] = n
1235 if fl:
1235 if fl:
1236 self.setflag(f, fl)
1236 self.setflag(f, fl)
1237 else:
1237 else:
1238 # Assigning to _files and _flags avoids marking as dirty,
1238 # Assigning to _files and _flags avoids marking as dirty,
1239 # and should be a little faster.
1239 # and should be a little faster.
1240 self._files[f] = n
1240 self._files[f] = n
1241 if fl:
1241 if fl:
1242 self._flags[f] = fl
1242 self._flags[f] = fl
1243
1243
1244 def text(self):
1244 def text(self):
1245 """Get the full data of this manifest as a bytestring."""
1245 """Get the full data of this manifest as a bytestring."""
1246 self._load()
1246 self._load()
1247 return _text(self.iterentries())
1247 return _text(self.iterentries())
1248
1248
1249 def dirtext(self):
1249 def dirtext(self):
1250 """Get the full data of this directory as a bytestring. Make sure that
1250 """Get the full data of this directory as a bytestring. Make sure that
1251 any submanifests have been written first, so their nodeids are correct.
1251 any submanifests have been written first, so their nodeids are correct.
1252 """
1252 """
1253 self._load()
1253 self._load()
1254 flags = self.flags
1254 flags = self.flags
1255 lazydirs = [(d[:-1], v[1], 't') for d, v in self._lazydirs.iteritems()]
1255 lazydirs = [(d[:-1], v[1], 't') for d, v in self._lazydirs.iteritems()]
1256 dirs = [(d[:-1], self._dirs[d]._node, 't') for d in self._dirs]
1256 dirs = [(d[:-1], self._dirs[d]._node, 't') for d in self._dirs]
1257 files = [(f, self._files[f], flags(f)) for f in self._files]
1257 files = [(f, self._files[f], flags(f)) for f in self._files]
1258 return _text(sorted(dirs + files + lazydirs))
1258 return _text(sorted(dirs + files + lazydirs))
1259
1259
1260 def read(self, gettext, readsubtree):
1260 def read(self, gettext, readsubtree):
1261 def _load_for_read(s):
1261 def _load_for_read(s):
1262 s.parse(gettext(), readsubtree)
1262 s.parse(gettext(), readsubtree)
1263 s._dirty = False
1263 s._dirty = False
1264 self._loadfunc = _load_for_read
1264 self._loadfunc = _load_for_read
1265
1265
    def writesubtrees(self, m1, m2, writesubtree, match):
        """Call `writesubtree(subm, subp1, subp2, match)` for each loaded
        subdirectory that `match` may care about, where subp1/subp2 are the
        nodes of the corresponding subtree in parents m1/m2.
        """
        self._load() # for consistency; should never have any effect here
        m1._load()
        m2._load()
        emptytree = treemanifest()
        def getnode(m, d):
            # A still-lazy subdir has its node cached in the lazydirs tuple;
            # otherwise take the node of the loaded subdir (or of an empty
            # tree when the directory is absent from that parent).
            ld = m._lazydirs.get(d)
            if ld:
                return ld[1]
            return m._dirs.get(d, emptytree)._node

        # let's skip investigating things that `match` says we do not need.
        visit = match.visitchildrenset(self._dir[:-1])
        visit = self._loadchildrensetlazy(visit)
        if visit == 'this' or visit == 'all':
            visit = None
        for d, subm in self._dirs.iteritems():
            if visit and d[:-1] not in visit:
                continue
            subp1 = getnode(m1, d)
            subp2 = getnode(m2, d)
            if subp1 == nullid:
                # Keep the null parent, if any, in the second position.
                subp1, subp2 = subp2, subp1
            writesubtree(subm, subp1, subp2, match)
1290
1290
1291 def walksubtrees(self, matcher=None):
1291 def walksubtrees(self, matcher=None):
1292 """Returns an iterator of the subtrees of this manifest, including this
1292 """Returns an iterator of the subtrees of this manifest, including this
1293 manifest itself.
1293 manifest itself.
1294
1294
1295 If `matcher` is provided, it only returns subtrees that match.
1295 If `matcher` is provided, it only returns subtrees that match.
1296 """
1296 """
1297 if matcher and not matcher.visitdir(self._dir[:-1]):
1297 if matcher and not matcher.visitdir(self._dir[:-1]):
1298 return
1298 return
1299 if not matcher or matcher(self._dir[:-1]):
1299 if not matcher or matcher(self._dir[:-1]):
1300 yield self
1300 yield self
1301
1301
1302 self._load()
1302 self._load()
1303 # OPT: use visitchildrenset to avoid loading everything.
1303 # OPT: use visitchildrenset to avoid loading everything.
1304 self._loadalllazy()
1304 self._loadalllazy()
1305 for d, subm in self._dirs.iteritems():
1305 for d, subm in self._dirs.iteritems():
1306 for subtree in subm.walksubtrees(matcher=matcher):
1306 for subtree in subm.walksubtrees(matcher=matcher):
1307 yield subtree
1307 yield subtree
1308
1308
class manifestfulltextcache(util.lrucachedict):
    """File-backed LRU cache for the manifest cache

    File consists of entries, up to EOF:

    - 20 bytes node, 4 bytes length, <length> manifest data

    These are written in reverse cache order (oldest to newest).

    """

    # Name of the backing file, opened through self._opener.
    _file = 'manifestfulltextcache'

    def __init__(self, max):
        super(manifestfulltextcache, self).__init__(max)
        # True when the in-memory state diverges from the on-disk file.
        self._dirty = False
        # True once the backing file has been read (or found missing).
        self._read = False
        # vfs-style opener for the backing file; None until configured.
        self._opener = None

    def read(self):
        """Load cache entries from the backing file, at most once."""
        if self._read or self._opener is None:
            return

        try:
            with self._opener(self._file) as fp:
                # Bypass our own __setitem__ so loading does not mark the
                # cache dirty.
                set = super(manifestfulltextcache, self).__setitem__
                # ignore trailing data, this is a cache, corruption is skipped
                while True:
                    node = fp.read(20)
                    if len(node) < 20:
                        break
                    try:
                        size = struct.unpack('>L', fp.read(4))[0]
                    except struct.error:
                        break
                    value = bytearray(fp.read(size))
                    if len(value) != size:
                        # Truncated entry: stop, keep what we have so far.
                        break
                    set(node, value)
        except IOError:
            # the file is allowed to be missing
            pass

        self._read = True
        self._dirty = False

    def write(self):
        """Persist the cache to the backing file if anything changed."""
        if not self._dirty or self._opener is None:
            return
        # rotate backwards to the first used node
        with self._opener(self._file, 'w', atomictemp=True, checkambig=True
                          ) as fp:
            # Walk the LRU ring from oldest to newest so a later read()
            # reconstructs the same recency order.
            node = self._head.prev
            while True:
                if node.key in self._cache:
                    fp.write(node.key)
                    fp.write(struct.pack('>L', len(node.value)))
                    fp.write(node.value)
                if node is self._head:
                    break
                node = node.prev

    # All read accessors lazily trigger read() so the on-disk contents are
    # visible before the first query.

    def __len__(self):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).__len__()

    def __contains__(self, k):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).__contains__(k)

    def __iter__(self):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).__iter__()

    def __getitem__(self, k):
        if not self._read:
            self.read()
        # the cache lru order can change on read
        setdirty = self._cache.get(k) is not self._head
        value = super(manifestfulltextcache, self).__getitem__(k)
        if setdirty:
            self._dirty = True
        return value

    def __setitem__(self, k, v):
        if not self._read:
            self.read()
        super(manifestfulltextcache, self).__setitem__(k, v)
        self._dirty = True

    def __delitem__(self, k):
        if not self._read:
            self.read()
        super(manifestfulltextcache, self).__delitem__(k)
        self._dirty = True

    def get(self, k, default=None):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).get(k, default=default)

    def clear(self, clear_persisted_data=False):
        super(manifestfulltextcache, self).clear()
        if clear_persisted_data:
            # Flush the now-empty cache so the on-disk file is emptied too.
            self._dirty = True
            self.write()
        self._read = False
1419
1419
# an upper bound of what we expect from compression
# (real live value seems to be "3"); passed to the revlog as
# `upperboundcomp` by manifestrevlog below
MAXCOMPRESSION = 3
1423
1423
@interfaceutil.implementer(repository.imanifeststorage)
class manifestrevlog(object):
    '''A revlog that stores manifest texts. This is responsible for caching the
    full-text manifest contents.
    '''
    def __init__(self, opener, tree='', dirlogcache=None, indexfile=None,
                 treemanifest=False):
        """Constructs a new manifest revlog

        `indexfile` - used by extensions to have two manifests at once, like
        when transitioning between flatmanifeset and treemanifests.

        `treemanifest` - used to indicate this is a tree manifest revlog. Opener
        options can also be used to make this a tree manifest revlog. The opener
        option takes precedence, so if it is set to True, we ignore whatever
        value is passed in to the constructor.
        """
        # During normal operations, we expect to deal with not more than four
        # revs at a time (such as during commit --amend). When rebasing large
        # stacks of commits, the number can go up, hence the config knob below.
        cachesize = 4
        optiontreemanifest = False
        opts = getattr(opener, 'options', None)
        if opts is not None:
            cachesize = opts.get('manifestcachesize', cachesize)
            optiontreemanifest = opts.get('treemanifest', False)

        self._treeondisk = optiontreemanifest or treemanifest

        self._fulltextcache = manifestfulltextcache(cachesize)

        if tree:
            assert self._treeondisk, 'opts is %r' % opts

        if indexfile is None:
            indexfile = '00manifest.i'
            if tree:
                # Per-directory tree manifests live under meta/<tree>.
                indexfile = "meta/" + tree + indexfile

        self.tree = tree

        # The dirlogcache is kept on the root manifest log
        if tree:
            self._dirlogcache = dirlogcache
        else:
            self._dirlogcache = {'': self}

        self._revlog = revlog.revlog(opener, indexfile,
                                     # only root indexfile is cached
                                     checkambig=not bool(tree),
                                     mmaplargeindex=True,
                                     upperboundcomp=MAXCOMPRESSION)

        # Expose a few revlog attributes directly for callers' convenience.
        self.index = self._revlog.index
        self.version = self._revlog.version
        self._generaldelta = self._revlog._generaldelta

    def _setupmanifestcachehooks(self, repo):
        """Persist the manifestfulltextcache on lock release"""
        if not util.safehasattr(repo, '_wlockref'):
            return

        self._fulltextcache._opener = repo.wcachevfs
        if repo._currentlock(repo._wlockref) is None:
            # No write lock held: nothing will be persisted on release.
            return

        # Weak references keep this hook from pinning the repo or this
        # revlog alive past their normal lifetime.
        reporef = weakref.ref(repo)
        manifestrevlogref = weakref.ref(self)

        def persistmanifestcache():
            repo = reporef()
            self = manifestrevlogref()
            if repo is None or self is None:
                return
            if repo.manifestlog.getstorage(b'') is not self:
                # there's a different manifest in play now, abort
                return
            self._fulltextcache.write()

        repo._afterlock(persistmanifestcache)

    @property
    def fulltextcache(self):
        # The file-backed full-text cache (manifestfulltextcache instance).
        return self._fulltextcache

    def clearcaches(self, clear_persisted_data=False):
        """Drop revlog and full-text caches; optionally wipe the on-disk
        full-text cache as well."""
        self._revlog.clearcaches()
        self._fulltextcache.clear(clear_persisted_data=clear_persisted_data)
        self._dirlogcache = {self.tree: self}

    def dirlog(self, d):
        """Return (creating and caching if needed) the manifestrevlog for
        subdirectory `d`; '' returns the root log itself."""
        if d:
            assert self._treeondisk
        if d not in self._dirlogcache:
            mfrevlog = manifestrevlog(self.opener, d,
                                      self._dirlogcache,
                                      treemanifest=self._treeondisk)
            self._dirlogcache[d] = mfrevlog
        return self._dirlogcache[d]

    def add(self, m, transaction, link, p1, p2, added, removed, readtree=None,
            match=None):
        """Store manifest `m` with parents p1/p2 and linkrev `link`; returns
        the node of the new revision. `added`/`removed` describe the change
        relative to p1 and enable the fast-delta path."""
        if p1 in self.fulltextcache and util.safehasattr(m, 'fastdelta'):
            # If our first parent is in the manifest cache, we can
            # compute a delta here using properties we know about the
            # manifest up-front, which may save time later for the
            # revlog layer.

            _checkforbidden(added)
            # combine the changed lists into one sorted iterator
            work = heapq.merge([(x, False) for x in sorted(added)],
                               [(x, True) for x in sorted(removed)])

            arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
            cachedelta = self._revlog.rev(p1), deltatext
            text = util.buffer(arraytext)
            n = self._revlog.addrevision(text, transaction, link, p1, p2,
                                         cachedelta)
        else:
            # The first parent manifest isn't already loaded, so we'll
            # just encode a fulltext of the manifest and pass that
            # through to the revlog layer, and let it handle the delta
            # process.
            if self._treeondisk:
                assert readtree, "readtree must be set for treemanifest writes"
                assert match, "match must be specified for treemanifest writes"
                m1 = readtree(self.tree, p1)
                m2 = readtree(self.tree, p2)
                n = self._addtree(m, transaction, link, m1, m2, readtree,
                                  match=match)
                arraytext = None
            else:
                text = m.text()
                n = self._revlog.addrevision(text, transaction, link, p1, p2)
                arraytext = bytearray(text)

        if arraytext is not None:
            self.fulltextcache[n] = arraytext

        return n

    def _addtree(self, m, transaction, link, m1, m2, readtree, match):
        """Recursively store tree manifest `m` (subtrees first), returning
        its node."""
        # If the manifest is unchanged compared to one parent,
        # don't write a new revision
        if self.tree != '' and (m.unmodifiedsince(m1) or m.unmodifiedsince(
            m2)):
            return m.node()
        def writesubtree(subm, subp1, subp2, match):
            # Each subtree goes into its own per-directory revlog.
            sublog = self.dirlog(subm.dir())
            sublog.add(subm, transaction, link, subp1, subp2, None, None,
                       readtree=readtree, match=match)
        m.writesubtrees(m1, m2, writesubtree, match)
        text = m.dirtext()
        n = None
        if self.tree != '':
            # Double-check whether contents are unchanged to one parent
            if text == m1.dirtext():
                n = m1.node()
            elif text == m2.dirtext():
                n = m2.node()

        if not n:
            n = self._revlog.addrevision(text, transaction, link, m1.node(),
                                         m2.node())

        # Save nodeid so parent manifest can calculate its nodeid
        m.setnode(n)
        return n

    # The methods below forward to the underlying revlog, fulfilling the
    # imanifeststorage interface.

    def __len__(self):
        return len(self._revlog)

    def __iter__(self):
        return self._revlog.__iter__()

    def rev(self, node):
        return self._revlog.rev(node)

    def node(self, rev):
        return self._revlog.node(rev)

    def lookup(self, value):
        return self._revlog.lookup(value)

    def parentrevs(self, rev):
        return self._revlog.parentrevs(rev)

    def parents(self, node):
        return self._revlog.parents(node)

    def linkrev(self, rev):
        return self._revlog.linkrev(rev)

    def checksize(self):
        return self._revlog.checksize()

    def revision(self, node, _df=None, raw=False):
        return self._revlog.revision(node, _df=_df, raw=raw)

    def rawdata(self, node, _df=None):
        return self._revlog.rawdata(node, _df=_df)

    def revdiff(self, rev1, rev2):
        return self._revlog.revdiff(rev1, rev2)

    def cmp(self, node, text):
        return self._revlog.cmp(node, text)

    def deltaparent(self, rev):
        return self._revlog.deltaparent(rev)

    def emitrevisions(self, nodes, nodesorder=None,
                      revisiondata=False, assumehaveparentrevisions=False,
                      deltamode=repository.CG_DELTAMODE_STD):
        return self._revlog.emitrevisions(
            nodes, nodesorder=nodesorder, revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
            deltamode=deltamode)

    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        return self._revlog.addgroup(deltas, linkmapper, transaction,
                                     addrevisioncb=addrevisioncb)

    def rawsize(self, rev):
        return self._revlog.rawsize(rev)

    def getstrippoint(self, minlink):
        return self._revlog.getstrippoint(minlink)

    def strip(self, minlink, transaction):
        return self._revlog.strip(minlink, transaction)

    def files(self):
        return self._revlog.files()

    def clone(self, tr, destrevlog, **kwargs):
        # Cloning only makes sense onto another manifest revlog.
        if not isinstance(destrevlog, manifestrevlog):
            raise error.ProgrammingError('expected manifestrevlog to clone()')

        return self._revlog.clone(tr, destrevlog._revlog, **kwargs)

    def storageinfo(self, exclusivefiles=False, sharedfiles=False,
                    revisionscount=False, trackedsize=False,
                    storedsize=False):
        return self._revlog.storageinfo(
            exclusivefiles=exclusivefiles, sharedfiles=sharedfiles,
            revisionscount=revisionscount, trackedsize=trackedsize,
            storedsize=storedsize)

    # indexfile/opener are proxied so extensions that swap them on this
    # object affect the underlying revlog.

    @property
    def indexfile(self):
        return self._revlog.indexfile

    @indexfile.setter
    def indexfile(self, value):
        self._revlog.indexfile = value

    @property
    def opener(self):
        return self._revlog.opener

    @opener.setter
    def opener(self, value):
        self._revlog.opener = value
1685
1688
1686 @interfaceutil.implementer(repository.imanifestlog)
1689 @interfaceutil.implementer(repository.imanifestlog)
1687 class manifestlog(object):
1690 class manifestlog(object):
1688 """A collection class representing the collection of manifest snapshots
1691 """A collection class representing the collection of manifest snapshots
1689 referenced by commits in the repository.
1692 referenced by commits in the repository.
1690
1693
1691 In this situation, 'manifest' refers to the abstract concept of a snapshot
1694 In this situation, 'manifest' refers to the abstract concept of a snapshot
1692 of the list of files in the given commit. Consumers of the output of this
1695 of the list of files in the given commit. Consumers of the output of this
1693 class do not care about the implementation details of the actual manifests
1696 class do not care about the implementation details of the actual manifests
1694 they receive (i.e. tree or flat or lazily loaded, etc)."""
1697 they receive (i.e. tree or flat or lazily loaded, etc)."""
1695 def __init__(self, opener, repo, rootstore, narrowmatch):
1698 def __init__(self, opener, repo, rootstore, narrowmatch):
1696 usetreemanifest = False
1699 usetreemanifest = False
1697 cachesize = 4
1700 cachesize = 4
1698
1701
1699 opts = getattr(opener, 'options', None)
1702 opts = getattr(opener, 'options', None)
1700 if opts is not None:
1703 if opts is not None:
1701 usetreemanifest = opts.get('treemanifest', usetreemanifest)
1704 usetreemanifest = opts.get('treemanifest', usetreemanifest)
1702 cachesize = opts.get('manifestcachesize', cachesize)
1705 cachesize = opts.get('manifestcachesize', cachesize)
1703
1706
1704 self._treemanifests = usetreemanifest
1707 self._treemanifests = usetreemanifest
1705
1708
1706 self._rootstore = rootstore
1709 self._rootstore = rootstore
1707 self._rootstore._setupmanifestcachehooks(repo)
1710 self._rootstore._setupmanifestcachehooks(repo)
1708 self._narrowmatch = narrowmatch
1711 self._narrowmatch = narrowmatch
1709
1712
1710 # A cache of the manifestctx or treemanifestctx for each directory
1713 # A cache of the manifestctx or treemanifestctx for each directory
1711 self._dirmancache = {}
1714 self._dirmancache = {}
1712 self._dirmancache[''] = util.lrucachedict(cachesize)
1715 self._dirmancache[''] = util.lrucachedict(cachesize)
1713
1716
1714 self._cachesize = cachesize
1717 self._cachesize = cachesize
1715
1718
1716 def __getitem__(self, node):
1719 def __getitem__(self, node):
1717 """Retrieves the manifest instance for the given node. Throws a
1720 """Retrieves the manifest instance for the given node. Throws a
1718 LookupError if not found.
1721 LookupError if not found.
1719 """
1722 """
1720 return self.get('', node)
1723 return self.get('', node)
1721
1724
1722 def get(self, tree, node, verify=True):
1725 def get(self, tree, node, verify=True):
1723 """Retrieves the manifest instance for the given node. Throws a
1726 """Retrieves the manifest instance for the given node. Throws a
1724 LookupError if not found.
1727 LookupError if not found.
1725
1728
1726 `verify` - if True an exception will be thrown if the node is not in
1729 `verify` - if True an exception will be thrown if the node is not in
1727 the revlog
1730 the revlog
1728 """
1731 """
1729 if node in self._dirmancache.get(tree, ()):
1732 if node in self._dirmancache.get(tree, ()):
1730 return self._dirmancache[tree][node]
1733 return self._dirmancache[tree][node]
1731
1734
1732 if not self._narrowmatch.always():
1735 if not self._narrowmatch.always():
1733 if not self._narrowmatch.visitdir(tree[:-1]):
1736 if not self._narrowmatch.visitdir(tree[:-1]):
1734 return excludeddirmanifestctx(tree, node)
1737 return excludeddirmanifestctx(tree, node)
1735 if tree:
1738 if tree:
1736 if self._rootstore._treeondisk:
1739 if self._rootstore._treeondisk:
1737 if verify:
1740 if verify:
1738 # Side-effect is LookupError is raised if node doesn't
1741 # Side-effect is LookupError is raised if node doesn't
1739 # exist.
1742 # exist.
1740 self.getstorage(tree).rev(node)
1743 self.getstorage(tree).rev(node)
1741
1744
1742 m = treemanifestctx(self, tree, node)
1745 m = treemanifestctx(self, tree, node)
1743 else:
1746 else:
1744 raise error.Abort(
1747 raise error.Abort(
1745 _("cannot ask for manifest directory '%s' in a flat "
1748 _("cannot ask for manifest directory '%s' in a flat "
1746 "manifest") % tree)
1749 "manifest") % tree)
1747 else:
1750 else:
1748 if verify:
1751 if verify:
1749 # Side-effect is LookupError is raised if node doesn't exist.
1752 # Side-effect is LookupError is raised if node doesn't exist.
1750 self._rootstore.rev(node)
1753 self._rootstore.rev(node)
1751
1754
1752 if self._treemanifests:
1755 if self._treemanifests:
1753 m = treemanifestctx(self, '', node)
1756 m = treemanifestctx(self, '', node)
1754 else:
1757 else:
1755 m = manifestctx(self, node)
1758 m = manifestctx(self, node)
1756
1759
1757 if node != nullid:
1760 if node != nullid:
1758 mancache = self._dirmancache.get(tree)
1761 mancache = self._dirmancache.get(tree)
1759 if not mancache:
1762 if not mancache:
1760 mancache = util.lrucachedict(self._cachesize)
1763 mancache = util.lrucachedict(self._cachesize)
1761 self._dirmancache[tree] = mancache
1764 self._dirmancache[tree] = mancache
1762 mancache[node] = m
1765 mancache[node] = m
1763 return m
1766 return m
1764
1767
1765 def getstorage(self, tree):
1768 def getstorage(self, tree):
1766 return self._rootstore.dirlog(tree)
1769 return self._rootstore.dirlog(tree)
1767
1770
1768 def clearcaches(self, clear_persisted_data=False):
1771 def clearcaches(self, clear_persisted_data=False):
1769 self._dirmancache.clear()
1772 self._dirmancache.clear()
1770 self._rootstore.clearcaches(clear_persisted_data=clear_persisted_data)
1773 self._rootstore.clearcaches(clear_persisted_data=clear_persisted_data)
1771
1774
1772 def rev(self, node):
1775 def rev(self, node):
1773 return self._rootstore.rev(node)
1776 return self._rootstore.rev(node)
1774
1777
1775 @interfaceutil.implementer(repository.imanifestrevisionwritable)
1778 @interfaceutil.implementer(repository.imanifestrevisionwritable)
1776 class memmanifestctx(object):
1779 class memmanifestctx(object):
1777 def __init__(self, manifestlog):
1780 def __init__(self, manifestlog):
1778 self._manifestlog = manifestlog
1781 self._manifestlog = manifestlog
1779 self._manifestdict = manifestdict()
1782 self._manifestdict = manifestdict()
1780
1783
1781 def _storage(self):
1784 def _storage(self):
1782 return self._manifestlog.getstorage(b'')
1785 return self._manifestlog.getstorage(b'')
1783
1786
1784 def new(self):
1787 def new(self):
1785 return memmanifestctx(self._manifestlog)
1788 return memmanifestctx(self._manifestlog)
1786
1789
1787 def copy(self):
1790 def copy(self):
1788 memmf = memmanifestctx(self._manifestlog)
1791 memmf = memmanifestctx(self._manifestlog)
1789 memmf._manifestdict = self.read().copy()
1792 memmf._manifestdict = self.read().copy()
1790 return memmf
1793 return memmf
1791
1794
1792 def read(self):
1795 def read(self):
1793 return self._manifestdict
1796 return self._manifestdict
1794
1797
1795 def write(self, transaction, link, p1, p2, added, removed, match=None):
1798 def write(self, transaction, link, p1, p2, added, removed, match=None):
1796 return self._storage().add(self._manifestdict, transaction, link,
1799 return self._storage().add(self._manifestdict, transaction, link,
1797 p1, p2, added, removed, match=match)
1800 p1, p2, added, removed, match=match)
1798
1801
1799 @interfaceutil.implementer(repository.imanifestrevisionstored)
1802 @interfaceutil.implementer(repository.imanifestrevisionstored)
1800 class manifestctx(object):
1803 class manifestctx(object):
1801 """A class representing a single revision of a manifest, including its
1804 """A class representing a single revision of a manifest, including its
1802 contents, its parent revs, and its linkrev.
1805 contents, its parent revs, and its linkrev.
1803 """
1806 """
1804 def __init__(self, manifestlog, node):
1807 def __init__(self, manifestlog, node):
1805 self._manifestlog = manifestlog
1808 self._manifestlog = manifestlog
1806 self._data = None
1809 self._data = None
1807
1810
1808 self._node = node
1811 self._node = node
1809
1812
1810 # TODO: We eventually want p1, p2, and linkrev exposed on this class,
1813 # TODO: We eventually want p1, p2, and linkrev exposed on this class,
1811 # but let's add it later when something needs it and we can load it
1814 # but let's add it later when something needs it and we can load it
1812 # lazily.
1815 # lazily.
1813 #self.p1, self.p2 = store.parents(node)
1816 #self.p1, self.p2 = store.parents(node)
1814 #rev = store.rev(node)
1817 #rev = store.rev(node)
1815 #self.linkrev = store.linkrev(rev)
1818 #self.linkrev = store.linkrev(rev)
1816
1819
1817 def _storage(self):
1820 def _storage(self):
1818 return self._manifestlog.getstorage(b'')
1821 return self._manifestlog.getstorage(b'')
1819
1822
1820 def node(self):
1823 def node(self):
1821 return self._node
1824 return self._node
1822
1825
1823 def new(self):
1826 def new(self):
1824 return memmanifestctx(self._manifestlog)
1827 return memmanifestctx(self._manifestlog)
1825
1828
1826 def copy(self):
1829 def copy(self):
1827 memmf = memmanifestctx(self._manifestlog)
1830 memmf = memmanifestctx(self._manifestlog)
1828 memmf._manifestdict = self.read().copy()
1831 memmf._manifestdict = self.read().copy()
1829 return memmf
1832 return memmf
1830
1833
1831 @propertycache
1834 @propertycache
1832 def parents(self):
1835 def parents(self):
1833 return self._storage().parents(self._node)
1836 return self._storage().parents(self._node)
1834
1837
1835 def read(self):
1838 def read(self):
1836 if self._data is None:
1839 if self._data is None:
1837 if self._node == nullid:
1840 if self._node == nullid:
1838 self._data = manifestdict()
1841 self._data = manifestdict()
1839 else:
1842 else:
1840 store = self._storage()
1843 store = self._storage()
1841 if self._node in store.fulltextcache:
1844 if self._node in store.fulltextcache:
1842 text = pycompat.bytestr(store.fulltextcache[self._node])
1845 text = pycompat.bytestr(store.fulltextcache[self._node])
1843 else:
1846 else:
1844 text = store.revision(self._node)
1847 text = store.revision(self._node)
1845 arraytext = bytearray(text)
1848 arraytext = bytearray(text)
1846 store.fulltextcache[self._node] = arraytext
1849 store.fulltextcache[self._node] = arraytext
1847 self._data = manifestdict(text)
1850 self._data = manifestdict(text)
1848 return self._data
1851 return self._data
1849
1852
1850 def readfast(self, shallow=False):
1853 def readfast(self, shallow=False):
1851 '''Calls either readdelta or read, based on which would be less work.
1854 '''Calls either readdelta or read, based on which would be less work.
1852 readdelta is called if the delta is against the p1, and therefore can be
1855 readdelta is called if the delta is against the p1, and therefore can be
1853 read quickly.
1856 read quickly.
1854
1857
1855 If `shallow` is True, nothing changes since this is a flat manifest.
1858 If `shallow` is True, nothing changes since this is a flat manifest.
1856 '''
1859 '''
1857 store = self._storage()
1860 store = self._storage()
1858 r = store.rev(self._node)
1861 r = store.rev(self._node)
1859 deltaparent = store.deltaparent(r)
1862 deltaparent = store.deltaparent(r)
1860 if deltaparent != nullrev and deltaparent in store.parentrevs(r):
1863 if deltaparent != nullrev and deltaparent in store.parentrevs(r):
1861 return self.readdelta()
1864 return self.readdelta()
1862 return self.read()
1865 return self.read()
1863
1866
1864 def readdelta(self, shallow=False):
1867 def readdelta(self, shallow=False):
1865 '''Returns a manifest containing just the entries that are present
1868 '''Returns a manifest containing just the entries that are present
1866 in this manifest, but not in its p1 manifest. This is efficient to read
1869 in this manifest, but not in its p1 manifest. This is efficient to read
1867 if the revlog delta is already p1.
1870 if the revlog delta is already p1.
1868
1871
1869 Changing the value of `shallow` has no effect on flat manifests.
1872 Changing the value of `shallow` has no effect on flat manifests.
1870 '''
1873 '''
1871 store = self._storage()
1874 store = self._storage()
1872 r = store.rev(self._node)
1875 r = store.rev(self._node)
1873 d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
1876 d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
1874 return manifestdict(d)
1877 return manifestdict(d)
1875
1878
1876 def find(self, key):
1879 def find(self, key):
1877 return self.read().find(key)
1880 return self.read().find(key)
1878
1881
1879 @interfaceutil.implementer(repository.imanifestrevisionwritable)
1882 @interfaceutil.implementer(repository.imanifestrevisionwritable)
1880 class memtreemanifestctx(object):
1883 class memtreemanifestctx(object):
1881 def __init__(self, manifestlog, dir=''):
1884 def __init__(self, manifestlog, dir=''):
1882 self._manifestlog = manifestlog
1885 self._manifestlog = manifestlog
1883 self._dir = dir
1886 self._dir = dir
1884 self._treemanifest = treemanifest()
1887 self._treemanifest = treemanifest()
1885
1888
1886 def _storage(self):
1889 def _storage(self):
1887 return self._manifestlog.getstorage(b'')
1890 return self._manifestlog.getstorage(b'')
1888
1891
1889 def new(self, dir=''):
1892 def new(self, dir=''):
1890 return memtreemanifestctx(self._manifestlog, dir=dir)
1893 return memtreemanifestctx(self._manifestlog, dir=dir)
1891
1894
1892 def copy(self):
1895 def copy(self):
1893 memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
1896 memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
1894 memmf._treemanifest = self._treemanifest.copy()
1897 memmf._treemanifest = self._treemanifest.copy()
1895 return memmf
1898 return memmf
1896
1899
1897 def read(self):
1900 def read(self):
1898 return self._treemanifest
1901 return self._treemanifest
1899
1902
1900 def write(self, transaction, link, p1, p2, added, removed, match=None):
1903 def write(self, transaction, link, p1, p2, added, removed, match=None):
1901 def readtree(dir, node):
1904 def readtree(dir, node):
1902 return self._manifestlog.get(dir, node).read()
1905 return self._manifestlog.get(dir, node).read()
1903 return self._storage().add(self._treemanifest, transaction, link,
1906 return self._storage().add(self._treemanifest, transaction, link,
1904 p1, p2, added, removed, readtree=readtree,
1907 p1, p2, added, removed, readtree=readtree,
1905 match=match)
1908 match=match)
1906
1909
1907 @interfaceutil.implementer(repository.imanifestrevisionstored)
1910 @interfaceutil.implementer(repository.imanifestrevisionstored)
1908 class treemanifestctx(object):
1911 class treemanifestctx(object):
1909 def __init__(self, manifestlog, dir, node):
1912 def __init__(self, manifestlog, dir, node):
1910 self._manifestlog = manifestlog
1913 self._manifestlog = manifestlog
1911 self._dir = dir
1914 self._dir = dir
1912 self._data = None
1915 self._data = None
1913
1916
1914 self._node = node
1917 self._node = node
1915
1918
1916 # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so that
1919 # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so that
1917 # we can instantiate treemanifestctx objects for directories we don't
1920 # we can instantiate treemanifestctx objects for directories we don't
1918 # have on disk.
1921 # have on disk.
1919 #self.p1, self.p2 = store.parents(node)
1922 #self.p1, self.p2 = store.parents(node)
1920 #rev = store.rev(node)
1923 #rev = store.rev(node)
1921 #self.linkrev = store.linkrev(rev)
1924 #self.linkrev = store.linkrev(rev)
1922
1925
1923 def _storage(self):
1926 def _storage(self):
1924 narrowmatch = self._manifestlog._narrowmatch
1927 narrowmatch = self._manifestlog._narrowmatch
1925 if not narrowmatch.always():
1928 if not narrowmatch.always():
1926 if not narrowmatch.visitdir(self._dir[:-1]):
1929 if not narrowmatch.visitdir(self._dir[:-1]):
1927 return excludedmanifestrevlog(self._dir)
1930 return excludedmanifestrevlog(self._dir)
1928 return self._manifestlog.getstorage(self._dir)
1931 return self._manifestlog.getstorage(self._dir)
1929
1932
1930 def read(self):
1933 def read(self):
1931 if self._data is None:
1934 if self._data is None:
1932 store = self._storage()
1935 store = self._storage()
1933 if self._node == nullid:
1936 if self._node == nullid:
1934 self._data = treemanifest()
1937 self._data = treemanifest()
1935 # TODO accessing non-public API
1938 # TODO accessing non-public API
1936 elif store._treeondisk:
1939 elif store._treeondisk:
1937 m = treemanifest(dir=self._dir)
1940 m = treemanifest(dir=self._dir)
1938 def gettext():
1941 def gettext():
1939 return store.revision(self._node)
1942 return store.revision(self._node)
1940 def readsubtree(dir, subm):
1943 def readsubtree(dir, subm):
1941 # Set verify to False since we need to be able to create
1944 # Set verify to False since we need to be able to create
1942 # subtrees for trees that don't exist on disk.
1945 # subtrees for trees that don't exist on disk.
1943 return self._manifestlog.get(dir, subm, verify=False).read()
1946 return self._manifestlog.get(dir, subm, verify=False).read()
1944 m.read(gettext, readsubtree)
1947 m.read(gettext, readsubtree)
1945 m.setnode(self._node)
1948 m.setnode(self._node)
1946 self._data = m
1949 self._data = m
1947 else:
1950 else:
1948 if self._node in store.fulltextcache:
1951 if self._node in store.fulltextcache:
1949 text = pycompat.bytestr(store.fulltextcache[self._node])
1952 text = pycompat.bytestr(store.fulltextcache[self._node])
1950 else:
1953 else:
1951 text = store.revision(self._node)
1954 text = store.revision(self._node)
1952 arraytext = bytearray(text)
1955 arraytext = bytearray(text)
1953 store.fulltextcache[self._node] = arraytext
1956 store.fulltextcache[self._node] = arraytext
1954 self._data = treemanifest(dir=self._dir, text=text)
1957 self._data = treemanifest(dir=self._dir, text=text)
1955
1958
1956 return self._data
1959 return self._data
1957
1960
1958 def node(self):
1961 def node(self):
1959 return self._node
1962 return self._node
1960
1963
1961 def new(self, dir=''):
1964 def new(self, dir=''):
1962 return memtreemanifestctx(self._manifestlog, dir=dir)
1965 return memtreemanifestctx(self._manifestlog, dir=dir)
1963
1966
1964 def copy(self):
1967 def copy(self):
1965 memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
1968 memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
1966 memmf._treemanifest = self.read().copy()
1969 memmf._treemanifest = self.read().copy()
1967 return memmf
1970 return memmf
1968
1971
1969 @propertycache
1972 @propertycache
1970 def parents(self):
1973 def parents(self):
1971 return self._storage().parents(self._node)
1974 return self._storage().parents(self._node)
1972
1975
1973 def readdelta(self, shallow=False):
1976 def readdelta(self, shallow=False):
1974 '''Returns a manifest containing just the entries that are present
1977 '''Returns a manifest containing just the entries that are present
1975 in this manifest, but not in its p1 manifest. This is efficient to read
1978 in this manifest, but not in its p1 manifest. This is efficient to read
1976 if the revlog delta is already p1.
1979 if the revlog delta is already p1.
1977
1980
1978 If `shallow` is True, this will read the delta for this directory,
1981 If `shallow` is True, this will read the delta for this directory,
1979 without recursively reading subdirectory manifests. Instead, any
1982 without recursively reading subdirectory manifests. Instead, any
1980 subdirectory entry will be reported as it appears in the manifest, i.e.
1983 subdirectory entry will be reported as it appears in the manifest, i.e.
1981 the subdirectory will be reported among files and distinguished only by
1984 the subdirectory will be reported among files and distinguished only by
1982 its 't' flag.
1985 its 't' flag.
1983 '''
1986 '''
1984 store = self._storage()
1987 store = self._storage()
1985 if shallow:
1988 if shallow:
1986 r = store.rev(self._node)
1989 r = store.rev(self._node)
1987 d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
1990 d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
1988 return manifestdict(d)
1991 return manifestdict(d)
1989 else:
1992 else:
1990 # Need to perform a slow delta
1993 # Need to perform a slow delta
1991 r0 = store.deltaparent(store.rev(self._node))
1994 r0 = store.deltaparent(store.rev(self._node))
1992 m0 = self._manifestlog.get(self._dir, store.node(r0)).read()
1995 m0 = self._manifestlog.get(self._dir, store.node(r0)).read()
1993 m1 = self.read()
1996 m1 = self.read()
1994 md = treemanifest(dir=self._dir)
1997 md = treemanifest(dir=self._dir)
1995 for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
1998 for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
1996 if n1:
1999 if n1:
1997 md[f] = n1
2000 md[f] = n1
1998 if fl1:
2001 if fl1:
1999 md.setflag(f, fl1)
2002 md.setflag(f, fl1)
2000 return md
2003 return md
2001
2004
2002 def readfast(self, shallow=False):
2005 def readfast(self, shallow=False):
2003 '''Calls either readdelta or read, based on which would be less work.
2006 '''Calls either readdelta or read, based on which would be less work.
2004 readdelta is called if the delta is against the p1, and therefore can be
2007 readdelta is called if the delta is against the p1, and therefore can be
2005 read quickly.
2008 read quickly.
2006
2009
2007 If `shallow` is True, it only returns the entries from this manifest,
2010 If `shallow` is True, it only returns the entries from this manifest,
2008 and not any submanifests.
2011 and not any submanifests.
2009 '''
2012 '''
2010 store = self._storage()
2013 store = self._storage()
2011 r = store.rev(self._node)
2014 r = store.rev(self._node)
2012 deltaparent = store.deltaparent(r)
2015 deltaparent = store.deltaparent(r)
2013 if (deltaparent != nullrev and
2016 if (deltaparent != nullrev and
2014 deltaparent in store.parentrevs(r)):
2017 deltaparent in store.parentrevs(r)):
2015 return self.readdelta(shallow=shallow)
2018 return self.readdelta(shallow=shallow)
2016
2019
2017 if shallow:
2020 if shallow:
2018 return manifestdict(store.revision(self._node))
2021 return manifestdict(store.revision(self._node))
2019 else:
2022 else:
2020 return self.read()
2023 return self.read()
2021
2024
2022 def find(self, key):
2025 def find(self, key):
2023 return self.read().find(key)
2026 return self.read().find(key)
2024
2027
2025 class excludeddir(treemanifest):
2028 class excludeddir(treemanifest):
2026 """Stand-in for a directory that is excluded from the repository.
2029 """Stand-in for a directory that is excluded from the repository.
2027
2030
2028 With narrowing active on a repository that uses treemanifests,
2031 With narrowing active on a repository that uses treemanifests,
2029 some of the directory revlogs will be excluded from the resulting
2032 some of the directory revlogs will be excluded from the resulting
2030 clone. This is a huge storage win for clients, but means we need
2033 clone. This is a huge storage win for clients, but means we need
2031 some sort of pseudo-manifest to surface to internals so we can
2034 some sort of pseudo-manifest to surface to internals so we can
2032 detect a merge conflict outside the narrowspec. That's what this
2035 detect a merge conflict outside the narrowspec. That's what this
2033 class is: it stands in for a directory whose node is known, but
2036 class is: it stands in for a directory whose node is known, but
2034 whose contents are unknown.
2037 whose contents are unknown.
2035 """
2038 """
2036 def __init__(self, dir, node):
2039 def __init__(self, dir, node):
2037 super(excludeddir, self).__init__(dir)
2040 super(excludeddir, self).__init__(dir)
2038 self._node = node
2041 self._node = node
2039 # Add an empty file, which will be included by iterators and such,
2042 # Add an empty file, which will be included by iterators and such,
2040 # appearing as the directory itself (i.e. something like "dir/")
2043 # appearing as the directory itself (i.e. something like "dir/")
2041 self._files[''] = node
2044 self._files[''] = node
2042 self._flags[''] = 't'
2045 self._flags[''] = 't'
2043
2046
2044 # Manifests outside the narrowspec should never be modified, so avoid
2047 # Manifests outside the narrowspec should never be modified, so avoid
2045 # copying. This makes a noticeable difference when there are very many
2048 # copying. This makes a noticeable difference when there are very many
2046 # directories outside the narrowspec. Also, it makes sense for the copy to
2049 # directories outside the narrowspec. Also, it makes sense for the copy to
2047 # be of the same type as the original, which would not happen with the
2050 # be of the same type as the original, which would not happen with the
2048 # super type's copy().
2051 # super type's copy().
2049 def copy(self):
2052 def copy(self):
2050 return self
2053 return self
2051
2054
2052 class excludeddirmanifestctx(treemanifestctx):
2055 class excludeddirmanifestctx(treemanifestctx):
2053 """context wrapper for excludeddir - see that docstring for rationale"""
2056 """context wrapper for excludeddir - see that docstring for rationale"""
2054 def __init__(self, dir, node):
2057 def __init__(self, dir, node):
2055 self._dir = dir
2058 self._dir = dir
2056 self._node = node
2059 self._node = node
2057
2060
2058 def read(self):
2061 def read(self):
2059 return excludeddir(self._dir, self._node)
2062 return excludeddir(self._dir, self._node)
2060
2063
2061 def write(self, *args):
2064 def write(self, *args):
2062 raise error.ProgrammingError(
2065 raise error.ProgrammingError(
2063 'attempt to write manifest from excluded dir %s' % self._dir)
2066 'attempt to write manifest from excluded dir %s' % self._dir)
2064
2067
2065 class excludedmanifestrevlog(manifestrevlog):
2068 class excludedmanifestrevlog(manifestrevlog):
2066 """Stand-in for excluded treemanifest revlogs.
2069 """Stand-in for excluded treemanifest revlogs.
2067
2070
2068 When narrowing is active on a treemanifest repository, we'll have
2071 When narrowing is active on a treemanifest repository, we'll have
2069 references to directories we can't see due to the revlog being
2072 references to directories we can't see due to the revlog being
2070 skipped. This class exists to conform to the manifestrevlog
2073 skipped. This class exists to conform to the manifestrevlog
2071 interface for those directories and proactively prevent writes to
2074 interface for those directories and proactively prevent writes to
2072 outside the narrowspec.
2075 outside the narrowspec.
2073 """
2076 """
2074
2077
2075 def __init__(self, dir):
2078 def __init__(self, dir):
2076 self._dir = dir
2079 self._dir = dir
2077
2080
2078 def __len__(self):
2081 def __len__(self):
2079 raise error.ProgrammingError(
2082 raise error.ProgrammingError(
2080 'attempt to get length of excluded dir %s' % self._dir)
2083 'attempt to get length of excluded dir %s' % self._dir)
2081
2084
2082 def rev(self, node):
2085 def rev(self, node):
2083 raise error.ProgrammingError(
2086 raise error.ProgrammingError(
2084 'attempt to get rev from excluded dir %s' % self._dir)
2087 'attempt to get rev from excluded dir %s' % self._dir)
2085
2088
2086 def linkrev(self, node):
2089 def linkrev(self, node):
2087 raise error.ProgrammingError(
2090 raise error.ProgrammingError(
2088 'attempt to get linkrev from excluded dir %s' % self._dir)
2091 'attempt to get linkrev from excluded dir %s' % self._dir)
2089
2092
2090 def node(self, rev):
2093 def node(self, rev):
2091 raise error.ProgrammingError(
2094 raise error.ProgrammingError(
2092 'attempt to get node from excluded dir %s' % self._dir)
2095 'attempt to get node from excluded dir %s' % self._dir)
2093
2096
2094 def add(self, *args, **kwargs):
2097 def add(self, *args, **kwargs):
2095 # We should never write entries in dirlogs outside the narrow clone.
2098 # We should never write entries in dirlogs outside the narrow clone.
2096 # However, the method still gets called from writesubtree() in
2099 # However, the method still gets called from writesubtree() in
2097 # _addtree(), so we need to handle it. We should possibly make that
2100 # _addtree(), so we need to handle it. We should possibly make that
2098 # avoid calling add() with a clean manifest (_dirty is always False
2101 # avoid calling add() with a clean manifest (_dirty is always False
2099 # in excludeddir instances).
2102 # in excludeddir instances).
2100 pass
2103 pass
@@ -1,1870 +1,1873
1 # repository.py - Interfaces and base classes for repositories and peers.
1 # repository.py - Interfaces and base classes for repositories and peers.
2 #
2 #
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from .i18n import _
10 from .i18n import _
11 from . import (
11 from . import (
12 error,
12 error,
13 )
13 )
14 from .utils import (
14 from .utils import (
15 interfaceutil,
15 interfaceutil,
16 )
16 )
17
17
18 # When narrowing is finalized and no longer subject to format changes,
18 # When narrowing is finalized and no longer subject to format changes,
19 # we should move this to just "narrow" or similar.
19 # we should move this to just "narrow" or similar.
20 NARROW_REQUIREMENT = 'narrowhg-experimental'
20 NARROW_REQUIREMENT = 'narrowhg-experimental'
21
21
22 # Local repository feature string.
22 # Local repository feature string.
23
23
24 # Revlogs are being used for file storage.
24 # Revlogs are being used for file storage.
25 REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
25 REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
26 # The storage part of the repository is shared from an external source.
26 # The storage part of the repository is shared from an external source.
27 REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
27 REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
28 # LFS supported for backing file storage.
28 # LFS supported for backing file storage.
29 REPO_FEATURE_LFS = b'lfs'
29 REPO_FEATURE_LFS = b'lfs'
30 # Repository supports being stream cloned.
30 # Repository supports being stream cloned.
31 REPO_FEATURE_STREAM_CLONE = b'streamclone'
31 REPO_FEATURE_STREAM_CLONE = b'streamclone'
32 # Files storage may lack data for all ancestors.
32 # Files storage may lack data for all ancestors.
33 REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'
33 REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'
34
34
35 REVISION_FLAG_CENSORED = 1 << 15
35 REVISION_FLAG_CENSORED = 1 << 15
36 REVISION_FLAG_ELLIPSIS = 1 << 14
36 REVISION_FLAG_ELLIPSIS = 1 << 14
37 REVISION_FLAG_EXTSTORED = 1 << 13
37 REVISION_FLAG_EXTSTORED = 1 << 13
38
38
39 REVISION_FLAGS_KNOWN = (
39 REVISION_FLAGS_KNOWN = (
40 REVISION_FLAG_CENSORED | REVISION_FLAG_ELLIPSIS | REVISION_FLAG_EXTSTORED)
40 REVISION_FLAG_CENSORED | REVISION_FLAG_ELLIPSIS | REVISION_FLAG_EXTSTORED)
41
41
42 CG_DELTAMODE_STD = b'default'
42 CG_DELTAMODE_STD = b'default'
43 CG_DELTAMODE_PREV = b'previous'
43 CG_DELTAMODE_PREV = b'previous'
44 CG_DELTAMODE_FULL = b'fulltext'
44 CG_DELTAMODE_FULL = b'fulltext'
45 CG_DELTAMODE_P1 = b'p1'
45 CG_DELTAMODE_P1 = b'p1'
46
46
47 class ipeerconnection(interfaceutil.Interface):
47 class ipeerconnection(interfaceutil.Interface):
48 """Represents a "connection" to a repository.
48 """Represents a "connection" to a repository.
49
49
50 This is the base interface for representing a connection to a repository.
50 This is the base interface for representing a connection to a repository.
51 It holds basic properties and methods applicable to all peer types.
51 It holds basic properties and methods applicable to all peer types.
52
52
53 This is not a complete interface definition and should not be used
53 This is not a complete interface definition and should not be used
54 outside of this module.
54 outside of this module.
55 """
55 """
56 ui = interfaceutil.Attribute("""ui.ui instance""")
56 ui = interfaceutil.Attribute("""ui.ui instance""")
57
57
58 def url():
58 def url():
59 """Returns a URL string representing this peer.
59 """Returns a URL string representing this peer.
60
60
61 Currently, implementations expose the raw URL used to construct the
61 Currently, implementations expose the raw URL used to construct the
62 instance. It may contain credentials as part of the URL. The
62 instance. It may contain credentials as part of the URL. The
63 expectations of the value aren't well-defined and this could lead to
63 expectations of the value aren't well-defined and this could lead to
64 data leakage.
64 data leakage.
65
65
66 TODO audit/clean consumers and more clearly define the contents of this
66 TODO audit/clean consumers and more clearly define the contents of this
67 value.
67 value.
68 """
68 """
69
69
70 def local():
70 def local():
71 """Returns a local repository instance.
71 """Returns a local repository instance.
72
72
73 If the peer represents a local repository, returns an object that
73 If the peer represents a local repository, returns an object that
74 can be used to interface with it. Otherwise returns ``None``.
74 can be used to interface with it. Otherwise returns ``None``.
75 """
75 """
76
76
77 def peer():
77 def peer():
78 """Returns an object conforming to this interface.
78 """Returns an object conforming to this interface.
79
79
80 Most implementations will ``return self``.
80 Most implementations will ``return self``.
81 """
81 """
82
82
83 def canpush():
83 def canpush():
84 """Returns a boolean indicating if this peer can be pushed to."""
84 """Returns a boolean indicating if this peer can be pushed to."""
85
85
86 def close():
86 def close():
87 """Close the connection to this peer.
87 """Close the connection to this peer.
88
88
89 This is called when the peer will no longer be used. Resources
89 This is called when the peer will no longer be used. Resources
90 associated with the peer should be cleaned up.
90 associated with the peer should be cleaned up.
91 """
91 """
92
92
93 class ipeercapabilities(interfaceutil.Interface):
93 class ipeercapabilities(interfaceutil.Interface):
94 """Peer sub-interface related to capabilities."""
94 """Peer sub-interface related to capabilities."""
95
95
96 def capable(name):
96 def capable(name):
97 """Determine support for a named capability.
97 """Determine support for a named capability.
98
98
99 Returns ``False`` if capability not supported.
99 Returns ``False`` if capability not supported.
100
100
101 Returns ``True`` if boolean capability is supported. Returns a string
101 Returns ``True`` if boolean capability is supported. Returns a string
102 if capability support is non-boolean.
102 if capability support is non-boolean.
103
103
104 Capability strings may or may not map to wire protocol capabilities.
104 Capability strings may or may not map to wire protocol capabilities.
105 """
105 """
106
106
107 def requirecap(name, purpose):
107 def requirecap(name, purpose):
108 """Require a capability to be present.
108 """Require a capability to be present.
109
109
110 Raises a ``CapabilityError`` if the capability isn't present.
110 Raises a ``CapabilityError`` if the capability isn't present.
111 """
111 """
112
112
113 class ipeercommands(interfaceutil.Interface):
113 class ipeercommands(interfaceutil.Interface):
114 """Client-side interface for communicating over the wire protocol.
114 """Client-side interface for communicating over the wire protocol.
115
115
116 This interface is used as a gateway to the Mercurial wire protocol.
116 This interface is used as a gateway to the Mercurial wire protocol.
117 methods commonly call wire protocol commands of the same name.
117 methods commonly call wire protocol commands of the same name.
118 """
118 """
119
119
120 def branchmap():
120 def branchmap():
121 """Obtain heads in named branches.
121 """Obtain heads in named branches.
122
122
123 Returns a dict mapping branch name to an iterable of nodes that are
123 Returns a dict mapping branch name to an iterable of nodes that are
124 heads on that branch.
124 heads on that branch.
125 """
125 """
126
126
127 def capabilities():
127 def capabilities():
128 """Obtain capabilities of the peer.
128 """Obtain capabilities of the peer.
129
129
130 Returns a set of string capabilities.
130 Returns a set of string capabilities.
131 """
131 """
132
132
133 def clonebundles():
133 def clonebundles():
134 """Obtains the clone bundles manifest for the repo.
134 """Obtains the clone bundles manifest for the repo.
135
135
136 Returns the manifest as unparsed bytes.
136 Returns the manifest as unparsed bytes.
137 """
137 """
138
138
139 def debugwireargs(one, two, three=None, four=None, five=None):
139 def debugwireargs(one, two, three=None, four=None, five=None):
140 """Used to facilitate debugging of arguments passed over the wire."""
140 """Used to facilitate debugging of arguments passed over the wire."""
141
141
142 def getbundle(source, **kwargs):
142 def getbundle(source, **kwargs):
143 """Obtain remote repository data as a bundle.
143 """Obtain remote repository data as a bundle.
144
144
145 This command is how the bulk of repository data is transferred from
145 This command is how the bulk of repository data is transferred from
146 the peer to the local repository
146 the peer to the local repository
147
147
148 Returns a generator of bundle data.
148 Returns a generator of bundle data.
149 """
149 """
150
150
151 def heads():
151 def heads():
152 """Determine all known head revisions in the peer.
152 """Determine all known head revisions in the peer.
153
153
154 Returns an iterable of binary nodes.
154 Returns an iterable of binary nodes.
155 """
155 """
156
156
157 def known(nodes):
157 def known(nodes):
158 """Determine whether multiple nodes are known.
158 """Determine whether multiple nodes are known.
159
159
160 Accepts an iterable of nodes whose presence to check for.
160 Accepts an iterable of nodes whose presence to check for.
161
161
162 Returns an iterable of booleans indicating of the corresponding node
162 Returns an iterable of booleans indicating of the corresponding node
163 at that index is known to the peer.
163 at that index is known to the peer.
164 """
164 """
165
165
166 def listkeys(namespace):
166 def listkeys(namespace):
167 """Obtain all keys in a pushkey namespace.
167 """Obtain all keys in a pushkey namespace.
168
168
169 Returns an iterable of key names.
169 Returns an iterable of key names.
170 """
170 """
171
171
172 def lookup(key):
172 def lookup(key):
173 """Resolve a value to a known revision.
173 """Resolve a value to a known revision.
174
174
175 Returns a binary node of the resolved revision on success.
175 Returns a binary node of the resolved revision on success.
176 """
176 """
177
177
178 def pushkey(namespace, key, old, new):
178 def pushkey(namespace, key, old, new):
179 """Set a value using the ``pushkey`` protocol.
179 """Set a value using the ``pushkey`` protocol.
180
180
181 Arguments correspond to the pushkey namespace and key to operate on and
181 Arguments correspond to the pushkey namespace and key to operate on and
182 the old and new values for that key.
182 the old and new values for that key.
183
183
184 Returns a string with the peer result. The value inside varies by the
184 Returns a string with the peer result. The value inside varies by the
185 namespace.
185 namespace.
186 """
186 """
187
187
188 def stream_out():
188 def stream_out():
189 """Obtain streaming clone data.
189 """Obtain streaming clone data.
190
190
191 Successful result should be a generator of data chunks.
191 Successful result should be a generator of data chunks.
192 """
192 """
193
193
194 def unbundle(bundle, heads, url):
194 def unbundle(bundle, heads, url):
195 """Transfer repository data to the peer.
195 """Transfer repository data to the peer.
196
196
197 This is how the bulk of data during a push is transferred.
197 This is how the bulk of data during a push is transferred.
198
198
199 Returns the integer number of heads added to the peer.
199 Returns the integer number of heads added to the peer.
200 """
200 """
201
201
202 class ipeerlegacycommands(interfaceutil.Interface):
202 class ipeerlegacycommands(interfaceutil.Interface):
203 """Interface for implementing support for legacy wire protocol commands.
203 """Interface for implementing support for legacy wire protocol commands.
204
204
205 Wire protocol commands transition to legacy status when they are no longer
205 Wire protocol commands transition to legacy status when they are no longer
206 used by modern clients. To facilitate identifying which commands are
206 used by modern clients. To facilitate identifying which commands are
207 legacy, the interfaces are split.
207 legacy, the interfaces are split.
208 """
208 """
209
209
210 def between(pairs):
210 def between(pairs):
211 """Obtain nodes between pairs of nodes.
211 """Obtain nodes between pairs of nodes.
212
212
213 ``pairs`` is an iterable of node pairs.
213 ``pairs`` is an iterable of node pairs.
214
214
215 Returns an iterable of iterables of nodes corresponding to each
215 Returns an iterable of iterables of nodes corresponding to each
216 requested pair.
216 requested pair.
217 """
217 """
218
218
219 def branches(nodes):
219 def branches(nodes):
220 """Obtain ancestor changesets of specific nodes back to a branch point.
220 """Obtain ancestor changesets of specific nodes back to a branch point.
221
221
222 For each requested node, the peer finds the first ancestor node that is
222 For each requested node, the peer finds the first ancestor node that is
223 a DAG root or is a merge.
223 a DAG root or is a merge.
224
224
225 Returns an iterable of iterables with the resolved values for each node.
225 Returns an iterable of iterables with the resolved values for each node.
226 """
226 """
227
227
228 def changegroup(nodes, source):
228 def changegroup(nodes, source):
229 """Obtain a changegroup with data for descendants of specified nodes."""
229 """Obtain a changegroup with data for descendants of specified nodes."""
230
230
231 def changegroupsubset(bases, heads, source):
231 def changegroupsubset(bases, heads, source):
232 pass
232 pass
233
233
234 class ipeercommandexecutor(interfaceutil.Interface):
234 class ipeercommandexecutor(interfaceutil.Interface):
235 """Represents a mechanism to execute remote commands.
235 """Represents a mechanism to execute remote commands.
236
236
237 This is the primary interface for requesting that wire protocol commands
237 This is the primary interface for requesting that wire protocol commands
238 be executed. Instances of this interface are active in a context manager
238 be executed. Instances of this interface are active in a context manager
239 and have a well-defined lifetime. When the context manager exits, all
239 and have a well-defined lifetime. When the context manager exits, all
240 outstanding requests are waited on.
240 outstanding requests are waited on.
241 """
241 """
242
242
243 def callcommand(name, args):
243 def callcommand(name, args):
244 """Request that a named command be executed.
244 """Request that a named command be executed.
245
245
246 Receives the command name and a dictionary of command arguments.
246 Receives the command name and a dictionary of command arguments.
247
247
248 Returns a ``concurrent.futures.Future`` that will resolve to the
248 Returns a ``concurrent.futures.Future`` that will resolve to the
249 result of that command request. That exact value is left up to
249 result of that command request. That exact value is left up to
250 the implementation and possibly varies by command.
250 the implementation and possibly varies by command.
251
251
252 Not all commands can coexist with other commands in an executor
252 Not all commands can coexist with other commands in an executor
253 instance: it depends on the underlying wire protocol transport being
253 instance: it depends on the underlying wire protocol transport being
254 used and the command itself.
254 used and the command itself.
255
255
256 Implementations MAY call ``sendcommands()`` automatically if the
256 Implementations MAY call ``sendcommands()`` automatically if the
257 requested command can not coexist with other commands in this executor.
257 requested command can not coexist with other commands in this executor.
258
258
259 Implementations MAY call ``sendcommands()`` automatically when the
259 Implementations MAY call ``sendcommands()`` automatically when the
260 future's ``result()`` is called. So, consumers using multiple
260 future's ``result()`` is called. So, consumers using multiple
261 commands with an executor MUST ensure that ``result()`` is not called
261 commands with an executor MUST ensure that ``result()`` is not called
262 until all command requests have been issued.
262 until all command requests have been issued.
263 """
263 """
264
264
265 def sendcommands():
265 def sendcommands():
266 """Trigger submission of queued command requests.
266 """Trigger submission of queued command requests.
267
267
268 Not all transports submit commands as soon as they are requested to
268 Not all transports submit commands as soon as they are requested to
269 run. When called, this method forces queued command requests to be
269 run. When called, this method forces queued command requests to be
270 issued. It will no-op if all commands have already been sent.
270 issued. It will no-op if all commands have already been sent.
271
271
272 When called, no more new commands may be issued with this executor.
272 When called, no more new commands may be issued with this executor.
273 """
273 """
274
274
275 def close():
275 def close():
276 """Signal that this command request is finished.
276 """Signal that this command request is finished.
277
277
278 When called, no more new commands may be issued. All outstanding
278 When called, no more new commands may be issued. All outstanding
279 commands that have previously been issued are waited on before
279 commands that have previously been issued are waited on before
280 returning. This not only includes waiting for the futures to resolve,
280 returning. This not only includes waiting for the futures to resolve,
281 but also waiting for all response data to arrive. In other words,
281 but also waiting for all response data to arrive. In other words,
282 calling this waits for all on-wire state for issued command requests
282 calling this waits for all on-wire state for issued command requests
283 to finish.
283 to finish.
284
284
285 When used as a context manager, this method is called when exiting the
285 When used as a context manager, this method is called when exiting the
286 context manager.
286 context manager.
287
287
288 This method may call ``sendcommands()`` if there are buffered commands.
288 This method may call ``sendcommands()`` if there are buffered commands.
289 """
289 """
290
290
291 class ipeerrequests(interfaceutil.Interface):
291 class ipeerrequests(interfaceutil.Interface):
292 """Interface for executing commands on a peer."""
292 """Interface for executing commands on a peer."""
293
293
294 limitedarguments = interfaceutil.Attribute(
294 limitedarguments = interfaceutil.Attribute(
295 """True if the peer cannot receive large argument value for commands."""
295 """True if the peer cannot receive large argument value for commands."""
296 )
296 )
297
297
298 def commandexecutor():
298 def commandexecutor():
299 """A context manager that resolves to an ipeercommandexecutor.
299 """A context manager that resolves to an ipeercommandexecutor.
300
300
301 The object this resolves to can be used to issue command requests
301 The object this resolves to can be used to issue command requests
302 to the peer.
302 to the peer.
303
303
304 Callers should call its ``callcommand`` method to issue command
304 Callers should call its ``callcommand`` method to issue command
305 requests.
305 requests.
306
306
307 A new executor should be obtained for each distinct set of commands
307 A new executor should be obtained for each distinct set of commands
308 (possibly just a single command) that the consumer wants to execute
308 (possibly just a single command) that the consumer wants to execute
309 as part of a single operation or round trip. This is because some
309 as part of a single operation or round trip. This is because some
310 peers are half-duplex and/or don't support persistent connections.
310 peers are half-duplex and/or don't support persistent connections.
311 e.g. in the case of HTTP peers, commands sent to an executor represent
311 e.g. in the case of HTTP peers, commands sent to an executor represent
312 a single HTTP request. While some peers may support multiple command
312 a single HTTP request. While some peers may support multiple command
313 sends over the wire per executor, consumers need to code to the least
313 sends over the wire per executor, consumers need to code to the least
314 capable peer. So it should be assumed that command executors buffer
314 capable peer. So it should be assumed that command executors buffer
315 called commands until they are told to send them and that each
315 called commands until they are told to send them and that each
316 command executor could result in a new connection or wire-level request
316 command executor could result in a new connection or wire-level request
317 being issued.
317 being issued.
318 """
318 """
319
319
320 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
320 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
321 """Unified interface for peer repositories.
321 """Unified interface for peer repositories.
322
322
323 All peer instances must conform to this interface.
323 All peer instances must conform to this interface.
324 """
324 """
325
325
326 class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
326 class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
327 """Unified peer interface for wire protocol version 2 peers."""
327 """Unified peer interface for wire protocol version 2 peers."""
328
328
329 apidescriptor = interfaceutil.Attribute(
329 apidescriptor = interfaceutil.Attribute(
330 """Data structure holding description of server API.""")
330 """Data structure holding description of server API.""")
331
331
332 @interfaceutil.implementer(ipeerbase)
332 @interfaceutil.implementer(ipeerbase)
333 class peer(object):
333 class peer(object):
334 """Base class for peer repositories."""
334 """Base class for peer repositories."""
335
335
336 limitedarguments = False
336 limitedarguments = False
337
337
338 def capable(self, name):
338 def capable(self, name):
339 caps = self.capabilities()
339 caps = self.capabilities()
340 if name in caps:
340 if name in caps:
341 return True
341 return True
342
342
343 name = '%s=' % name
343 name = '%s=' % name
344 for cap in caps:
344 for cap in caps:
345 if cap.startswith(name):
345 if cap.startswith(name):
346 return cap[len(name):]
346 return cap[len(name):]
347
347
348 return False
348 return False
349
349
350 def requirecap(self, name, purpose):
350 def requirecap(self, name, purpose):
351 if self.capable(name):
351 if self.capable(name):
352 return
352 return
353
353
354 raise error.CapabilityError(
354 raise error.CapabilityError(
355 _('cannot %s; remote repository does not support the '
355 _('cannot %s; remote repository does not support the '
356 '\'%s\' capability') % (purpose, name))
356 '\'%s\' capability') % (purpose, name))
357
357
358 class iverifyproblem(interfaceutil.Interface):
358 class iverifyproblem(interfaceutil.Interface):
359 """Represents a problem with the integrity of the repository.
359 """Represents a problem with the integrity of the repository.
360
360
361 Instances of this interface are emitted to describe an integrity issue
361 Instances of this interface are emitted to describe an integrity issue
362 with a repository (e.g. corrupt storage, missing data, etc).
362 with a repository (e.g. corrupt storage, missing data, etc).
363
363
364 Instances are essentially messages associated with severity.
364 Instances are essentially messages associated with severity.
365 """
365 """
366 warning = interfaceutil.Attribute(
366 warning = interfaceutil.Attribute(
367 """Message indicating a non-fatal problem.""")
367 """Message indicating a non-fatal problem.""")
368
368
369 error = interfaceutil.Attribute(
369 error = interfaceutil.Attribute(
370 """Message indicating a fatal problem.""")
370 """Message indicating a fatal problem.""")
371
371
372 node = interfaceutil.Attribute(
372 node = interfaceutil.Attribute(
373 """Revision encountering the problem.
373 """Revision encountering the problem.
374
374
375 ``None`` means the problem doesn't apply to a single revision.
375 ``None`` means the problem doesn't apply to a single revision.
376 """)
376 """)
377
377
378 class irevisiondelta(interfaceutil.Interface):
378 class irevisiondelta(interfaceutil.Interface):
379 """Represents a delta between one revision and another.
379 """Represents a delta between one revision and another.
380
380
381 Instances convey enough information to allow a revision to be exchanged
381 Instances convey enough information to allow a revision to be exchanged
382 with another repository.
382 with another repository.
383
383
384 Instances represent the fulltext revision data or a delta against
384 Instances represent the fulltext revision data or a delta against
385 another revision. Therefore the ``revision`` and ``delta`` attributes
385 another revision. Therefore the ``revision`` and ``delta`` attributes
386 are mutually exclusive.
386 are mutually exclusive.
387
387
388 Typically used for changegroup generation.
388 Typically used for changegroup generation.
389 """
389 """
390
390
391 node = interfaceutil.Attribute(
391 node = interfaceutil.Attribute(
392 """20 byte node of this revision.""")
392 """20 byte node of this revision.""")
393
393
394 p1node = interfaceutil.Attribute(
394 p1node = interfaceutil.Attribute(
395 """20 byte node of 1st parent of this revision.""")
395 """20 byte node of 1st parent of this revision.""")
396
396
397 p2node = interfaceutil.Attribute(
397 p2node = interfaceutil.Attribute(
398 """20 byte node of 2nd parent of this revision.""")
398 """20 byte node of 2nd parent of this revision.""")
399
399
400 linknode = interfaceutil.Attribute(
400 linknode = interfaceutil.Attribute(
401 """20 byte node of the changelog revision this node is linked to.""")
401 """20 byte node of the changelog revision this node is linked to.""")
402
402
403 flags = interfaceutil.Attribute(
403 flags = interfaceutil.Attribute(
404 """2 bytes of integer flags that apply to this revision.
404 """2 bytes of integer flags that apply to this revision.
405
405
406 This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
406 This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
407 """)
407 """)
408
408
409 basenode = interfaceutil.Attribute(
409 basenode = interfaceutil.Attribute(
410 """20 byte node of the revision this data is a delta against.
410 """20 byte node of the revision this data is a delta against.
411
411
412 ``nullid`` indicates that the revision is a full revision and not
412 ``nullid`` indicates that the revision is a full revision and not
413 a delta.
413 a delta.
414 """)
414 """)
415
415
416 baserevisionsize = interfaceutil.Attribute(
416 baserevisionsize = interfaceutil.Attribute(
417 """Size of base revision this delta is against.
417 """Size of base revision this delta is against.
418
418
419 May be ``None`` if ``basenode`` is ``nullid``.
419 May be ``None`` if ``basenode`` is ``nullid``.
420 """)
420 """)
421
421
422 revision = interfaceutil.Attribute(
422 revision = interfaceutil.Attribute(
423 """Raw fulltext of revision data for this node.""")
423 """Raw fulltext of revision data for this node.""")
424
424
425 delta = interfaceutil.Attribute(
425 delta = interfaceutil.Attribute(
426 """Delta between ``basenode`` and ``node``.
426 """Delta between ``basenode`` and ``node``.
427
427
428 Stored in the bdiff delta format.
428 Stored in the bdiff delta format.
429 """)
429 """)
430
430
431 class ifilerevisionssequence(interfaceutil.Interface):
431 class ifilerevisionssequence(interfaceutil.Interface):
432 """Contains index data for all revisions of a file.
432 """Contains index data for all revisions of a file.
433
433
434 Types implementing this behave like lists of tuples. The index
434 Types implementing this behave like lists of tuples. The index
435 in the list corresponds to the revision number. The values contain
435 in the list corresponds to the revision number. The values contain
436 index metadata.
436 index metadata.
437
437
438 The *null* revision (revision number -1) is always the last item
438 The *null* revision (revision number -1) is always the last item
439 in the index.
439 in the index.
440 """
440 """
441
441
442 def __len__():
442 def __len__():
443 """The total number of revisions."""
443 """The total number of revisions."""
444
444
445 def __getitem__(rev):
445 def __getitem__(rev):
446 """Returns the object having a specific revision number.
446 """Returns the object having a specific revision number.
447
447
448 Returns an 8-tuple with the following fields:
448 Returns an 8-tuple with the following fields:
449
449
450 offset+flags
450 offset+flags
451 Contains the offset and flags for the revision. 64-bit unsigned
451 Contains the offset and flags for the revision. 64-bit unsigned
452 integer where first 6 bytes are the offset and the next 2 bytes
452 integer where first 6 bytes are the offset and the next 2 bytes
453 are flags. The offset can be 0 if it is not used by the store.
453 are flags. The offset can be 0 if it is not used by the store.
454 compressed size
454 compressed size
455 Size of the revision data in the store. It can be 0 if it isn't
455 Size of the revision data in the store. It can be 0 if it isn't
456 needed by the store.
456 needed by the store.
457 uncompressed size
457 uncompressed size
458 Fulltext size. It can be 0 if it isn't needed by the store.
458 Fulltext size. It can be 0 if it isn't needed by the store.
459 base revision
459 base revision
460 Revision number of revision the delta for storage is encoded
460 Revision number of revision the delta for storage is encoded
461 against. -1 indicates not encoded against a base revision.
461 against. -1 indicates not encoded against a base revision.
462 link revision
462 link revision
463 Revision number of changelog revision this entry is related to.
463 Revision number of changelog revision this entry is related to.
464 p1 revision
464 p1 revision
465 Revision number of 1st parent. -1 if no 1st parent.
465 Revision number of 1st parent. -1 if no 1st parent.
466 p2 revision
466 p2 revision
467 Revision number of 2nd parent. -1 if no 1st parent.
467 Revision number of 2nd parent. -1 if no 1st parent.
468 node
468 node
469 Binary node value for this revision number.
469 Binary node value for this revision number.
470
470
471 Negative values should index off the end of the sequence. ``-1``
471 Negative values should index off the end of the sequence. ``-1``
472 should return the null revision. ``-2`` should return the most
472 should return the null revision. ``-2`` should return the most
473 recent revision.
473 recent revision.
474 """
474 """
475
475
476 def __contains__(rev):
476 def __contains__(rev):
477 """Whether a revision number exists."""
477 """Whether a revision number exists."""
478
478
479 def insert(self, i, entry):
479 def insert(self, i, entry):
480 """Add an item to the index at specific revision."""
480 """Add an item to the index at specific revision."""
481
481
482 class ifileindex(interfaceutil.Interface):
482 class ifileindex(interfaceutil.Interface):
483 """Storage interface for index data of a single file.
483 """Storage interface for index data of a single file.
484
484
485 File storage data is divided into index metadata and data storage.
485 File storage data is divided into index metadata and data storage.
486 This interface defines the index portion of the interface.
486 This interface defines the index portion of the interface.
487
487
488 The index logically consists of:
488 The index logically consists of:
489
489
490 * A mapping between revision numbers and nodes.
490 * A mapping between revision numbers and nodes.
491 * DAG data (storing and querying the relationship between nodes).
491 * DAG data (storing and querying the relationship between nodes).
492 * Metadata to facilitate storage.
492 * Metadata to facilitate storage.
493 """
493 """
494 def __len__():
494 def __len__():
495 """Obtain the number of revisions stored for this file."""
495 """Obtain the number of revisions stored for this file."""
496
496
497 def __iter__():
497 def __iter__():
498 """Iterate over revision numbers for this file."""
498 """Iterate over revision numbers for this file."""
499
499
500 def hasnode(node):
500 def hasnode(node):
501 """Returns a bool indicating if a node is known to this store.
501 """Returns a bool indicating if a node is known to this store.
502
502
503 Implementations must only return True for full, binary node values:
503 Implementations must only return True for full, binary node values:
504 hex nodes, revision numbers, and partial node matches must be
504 hex nodes, revision numbers, and partial node matches must be
505 rejected.
505 rejected.
506
506
507 The null node is never present.
507 The null node is never present.
508 """
508 """
509
509
510 def revs(start=0, stop=None):
510 def revs(start=0, stop=None):
511 """Iterate over revision numbers for this file, with control."""
511 """Iterate over revision numbers for this file, with control."""
512
512
513 def parents(node):
513 def parents(node):
514 """Returns a 2-tuple of parent nodes for a revision.
514 """Returns a 2-tuple of parent nodes for a revision.
515
515
516 Values will be ``nullid`` if the parent is empty.
516 Values will be ``nullid`` if the parent is empty.
517 """
517 """
518
518
519 def parentrevs(rev):
519 def parentrevs(rev):
520 """Like parents() but operates on revision numbers."""
520 """Like parents() but operates on revision numbers."""
521
521
522 def rev(node):
522 def rev(node):
523 """Obtain the revision number given a node.
523 """Obtain the revision number given a node.
524
524
525 Raises ``error.LookupError`` if the node is not known.
525 Raises ``error.LookupError`` if the node is not known.
526 """
526 """
527
527
528 def node(rev):
528 def node(rev):
529 """Obtain the node value given a revision number.
529 """Obtain the node value given a revision number.
530
530
531 Raises ``IndexError`` if the node is not known.
531 Raises ``IndexError`` if the node is not known.
532 """
532 """
533
533
534 def lookup(node):
534 def lookup(node):
535 """Attempt to resolve a value to a node.
535 """Attempt to resolve a value to a node.
536
536
537 Value can be a binary node, hex node, revision number, or a string
537 Value can be a binary node, hex node, revision number, or a string
538 that can be converted to an integer.
538 that can be converted to an integer.
539
539
540 Raises ``error.LookupError`` if a node could not be resolved.
540 Raises ``error.LookupError`` if a node could not be resolved.
541 """
541 """
542
542
543 def linkrev(rev):
543 def linkrev(rev):
544 """Obtain the changeset revision number a revision is linked to."""
544 """Obtain the changeset revision number a revision is linked to."""
545
545
546 def iscensored(rev):
546 def iscensored(rev):
547 """Return whether a revision's content has been censored."""
547 """Return whether a revision's content has been censored."""
548
548
549 def commonancestorsheads(node1, node2):
549 def commonancestorsheads(node1, node2):
550 """Obtain an iterable of nodes containing heads of common ancestors.
550 """Obtain an iterable of nodes containing heads of common ancestors.
551
551
552 See ``ancestor.commonancestorsheads()``.
552 See ``ancestor.commonancestorsheads()``.
553 """
553 """
554
554
555 def descendants(revs):
555 def descendants(revs):
556 """Obtain descendant revision numbers for a set of revision numbers.
556 """Obtain descendant revision numbers for a set of revision numbers.
557
557
558 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
558 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
559 """
559 """
560
560
561 def heads(start=None, stop=None):
561 def heads(start=None, stop=None):
562 """Obtain a list of nodes that are DAG heads, with control.
562 """Obtain a list of nodes that are DAG heads, with control.
563
563
564 The set of revisions examined can be limited by specifying
564 The set of revisions examined can be limited by specifying
565 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
565 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
566 iterable of nodes. DAG traversal starts at earlier revision
566 iterable of nodes. DAG traversal starts at earlier revision
567 ``start`` and iterates forward until any node in ``stop`` is
567 ``start`` and iterates forward until any node in ``stop`` is
568 encountered.
568 encountered.
569 """
569 """
570
570
571 def children(node):
571 def children(node):
572 """Obtain nodes that are children of a node.
572 """Obtain nodes that are children of a node.
573
573
574 Returns a list of nodes.
574 Returns a list of nodes.
575 """
575 """
576
576
577 class ifiledata(interfaceutil.Interface):
577 class ifiledata(interfaceutil.Interface):
578 """Storage interface for data storage of a specific file.
578 """Storage interface for data storage of a specific file.
579
579
580 This complements ``ifileindex`` and provides an interface for accessing
580 This complements ``ifileindex`` and provides an interface for accessing
581 data for a tracked file.
581 data for a tracked file.
582 """
582 """
583 def size(rev):
583 def size(rev):
584 """Obtain the fulltext size of file data.
584 """Obtain the fulltext size of file data.
585
585
586 Any metadata is excluded from size measurements.
586 Any metadata is excluded from size measurements.
587 """
587 """
588
588
589 def revision(node, raw=False):
589 def revision(node, raw=False):
590 """"Obtain fulltext data for a node.
590 """"Obtain fulltext data for a node.
591
591
592 By default, any storage transformations are applied before the data
592 By default, any storage transformations are applied before the data
593 is returned. If ``raw`` is True, non-raw storage transformations
593 is returned. If ``raw`` is True, non-raw storage transformations
594 are not applied.
594 are not applied.
595
595
596 The fulltext data may contain a header containing metadata. Most
596 The fulltext data may contain a header containing metadata. Most
597 consumers should use ``read()`` to obtain the actual file data.
597 consumers should use ``read()`` to obtain the actual file data.
598 """
598 """
599
599
600 def read(node):
600 def read(node):
601 """Resolve file fulltext data.
601 """Resolve file fulltext data.
602
602
603 This is similar to ``revision()`` except any metadata in the data
603 This is similar to ``revision()`` except any metadata in the data
604 headers is stripped.
604 headers is stripped.
605 """
605 """
606
606
607 def renamed(node):
607 def renamed(node):
608 """Obtain copy metadata for a node.
608 """Obtain copy metadata for a node.
609
609
610 Returns ``False`` if no copy metadata is stored or a 2-tuple of
610 Returns ``False`` if no copy metadata is stored or a 2-tuple of
611 (path, node) from which this revision was copied.
611 (path, node) from which this revision was copied.
612 """
612 """
613
613
614 def cmp(node, fulltext):
614 def cmp(node, fulltext):
615 """Compare fulltext to another revision.
615 """Compare fulltext to another revision.
616
616
617 Returns True if the fulltext is different from what is stored.
617 Returns True if the fulltext is different from what is stored.
618
618
619 This takes copy metadata into account.
619 This takes copy metadata into account.
620
620
621 TODO better document the copy metadata and censoring logic.
621 TODO better document the copy metadata and censoring logic.
622 """
622 """
623
623
624 def emitrevisions(nodes,
624 def emitrevisions(nodes,
625 nodesorder=None,
625 nodesorder=None,
626 revisiondata=False,
626 revisiondata=False,
627 assumehaveparentrevisions=False,
627 assumehaveparentrevisions=False,
628 deltamode=CG_DELTAMODE_STD):
628 deltamode=CG_DELTAMODE_STD):
629 """Produce ``irevisiondelta`` for revisions.
629 """Produce ``irevisiondelta`` for revisions.
630
630
631 Given an iterable of nodes, emits objects conforming to the
631 Given an iterable of nodes, emits objects conforming to the
632 ``irevisiondelta`` interface that describe revisions in storage.
632 ``irevisiondelta`` interface that describe revisions in storage.
633
633
634 This method is a generator.
634 This method is a generator.
635
635
636 The input nodes may be unordered. Implementations must ensure that a
636 The input nodes may be unordered. Implementations must ensure that a
637 node's parents are emitted before the node itself. Transitively, this
637 node's parents are emitted before the node itself. Transitively, this
638 means that a node may only be emitted once all its ancestors in
638 means that a node may only be emitted once all its ancestors in
639 ``nodes`` have also been emitted.
639 ``nodes`` have also been emitted.
640
640
641 By default, emits "index" data (the ``node``, ``p1node``, and
641 By default, emits "index" data (the ``node``, ``p1node``, and
642 ``p2node`` attributes). If ``revisiondata`` is set, revision data
642 ``p2node`` attributes). If ``revisiondata`` is set, revision data
643 will also be present on the emitted objects.
643 will also be present on the emitted objects.
644
644
645 With default argument values, implementations can choose to emit
645 With default argument values, implementations can choose to emit
646 either fulltext revision data or a delta. When emitting deltas,
646 either fulltext revision data or a delta. When emitting deltas,
647 implementations must consider whether the delta's base revision
647 implementations must consider whether the delta's base revision
648 fulltext is available to the receiver.
648 fulltext is available to the receiver.
649
649
650 The base revision fulltext is guaranteed to be available if any of
650 The base revision fulltext is guaranteed to be available if any of
651 the following are met:
651 the following are met:
652
652
653 * Its fulltext revision was emitted by this method call.
653 * Its fulltext revision was emitted by this method call.
654 * A delta for that revision was emitted by this method call.
654 * A delta for that revision was emitted by this method call.
655 * ``assumehaveparentrevisions`` is True and the base revision is a
655 * ``assumehaveparentrevisions`` is True and the base revision is a
656 parent of the node.
656 parent of the node.
657
657
658 ``nodesorder`` can be used to control the order that revisions are
658 ``nodesorder`` can be used to control the order that revisions are
659 emitted. By default, revisions can be reordered as long as they are
659 emitted. By default, revisions can be reordered as long as they are
660 in DAG topological order (see above). If the value is ``nodes``,
660 in DAG topological order (see above). If the value is ``nodes``,
661 the iteration order from ``nodes`` should be used. If the value is
661 the iteration order from ``nodes`` should be used. If the value is
662 ``storage``, then the native order from the backing storage layer
662 ``storage``, then the native order from the backing storage layer
663 is used. (Not all storage layers will have strong ordering and behavior
663 is used. (Not all storage layers will have strong ordering and behavior
664 of this mode is storage-dependent.) ``nodes`` ordering can force
664 of this mode is storage-dependent.) ``nodes`` ordering can force
665 revisions to be emitted before their ancestors, so consumers should
665 revisions to be emitted before their ancestors, so consumers should
666 use it with care.
666 use it with care.
667
667
668 The ``linknode`` attribute on the returned ``irevisiondelta`` may not
668 The ``linknode`` attribute on the returned ``irevisiondelta`` may not
669 be set and it is the caller's responsibility to resolve it, if needed.
669 be set and it is the caller's responsibility to resolve it, if needed.
670
670
671 If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
671 If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
672 all revision data should be emitted as deltas against the revision
672 all revision data should be emitted as deltas against the revision
673 emitted just prior. The initial revision should be a delta against its
673 emitted just prior. The initial revision should be a delta against its
674 1st parent.
674 1st parent.
675 """
675 """
676
676
677 class ifilemutation(interfaceutil.Interface):
677 class ifilemutation(interfaceutil.Interface):
678 """Storage interface for mutation events of a tracked file."""
678 """Storage interface for mutation events of a tracked file."""
679
679
680 def add(filedata, meta, transaction, linkrev, p1, p2):
680 def add(filedata, meta, transaction, linkrev, p1, p2):
681 """Add a new revision to the store.
681 """Add a new revision to the store.
682
682
683 Takes file data, dictionary of metadata, a transaction, linkrev,
683 Takes file data, dictionary of metadata, a transaction, linkrev,
684 and parent nodes.
684 and parent nodes.
685
685
686 Returns the node that was added.
686 Returns the node that was added.
687
687
688 May no-op if a revision matching the supplied data is already stored.
688 May no-op if a revision matching the supplied data is already stored.
689 """
689 """
690
690
691 def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
691 def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
692 flags=0, cachedelta=None):
692 flags=0, cachedelta=None):
693 """Add a new revision to the store.
693 """Add a new revision to the store.
694
694
695 This is similar to ``add()`` except it operates at a lower level.
695 This is similar to ``add()`` except it operates at a lower level.
696
696
697 The data passed in already contains a metadata header, if any.
697 The data passed in already contains a metadata header, if any.
698
698
699 ``node`` and ``flags`` can be used to define the expected node and
699 ``node`` and ``flags`` can be used to define the expected node and
700 the flags to use with storage. ``flags`` is a bitwise value composed
700 the flags to use with storage. ``flags`` is a bitwise value composed
701 of the various ``REVISION_FLAG_*`` constants.
701 of the various ``REVISION_FLAG_*`` constants.
702
702
703 ``add()`` is usually called when adding files from e.g. the working
703 ``add()`` is usually called when adding files from e.g. the working
704 directory. ``addrevision()`` is often called by ``add()`` and for
704 directory. ``addrevision()`` is often called by ``add()`` and for
705 scenarios where revision data has already been computed, such as when
705 scenarios where revision data has already been computed, such as when
706 applying raw data from a peer repo.
706 applying raw data from a peer repo.
707 """
707 """
708
708
709 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None,
709 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None,
710 maybemissingparents=False):
710 maybemissingparents=False):
711 """Process a series of deltas for storage.
711 """Process a series of deltas for storage.
712
712
713 ``deltas`` is an iterable of 7-tuples of
713 ``deltas`` is an iterable of 7-tuples of
714 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
714 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
715 to add.
715 to add.
716
716
717 The ``delta`` field contains ``mpatch`` data to apply to a base
717 The ``delta`` field contains ``mpatch`` data to apply to a base
718 revision, identified by ``deltabase``. The base node can be
718 revision, identified by ``deltabase``. The base node can be
719 ``nullid``, in which case the header from the delta can be ignored
719 ``nullid``, in which case the header from the delta can be ignored
720 and the delta used as the fulltext.
720 and the delta used as the fulltext.
721
721
722 ``addrevisioncb`` should be called for each node as it is committed.
722 ``addrevisioncb`` should be called for each node as it is committed.
723
723
724 ``maybemissingparents`` is a bool indicating whether the incoming
724 ``maybemissingparents`` is a bool indicating whether the incoming
725 data may reference parents/ancestor revisions that aren't present.
725 data may reference parents/ancestor revisions that aren't present.
726 This flag is set when receiving data into a "shallow" store that
726 This flag is set when receiving data into a "shallow" store that
727 doesn't hold all history.
727 doesn't hold all history.
728
728
729 Returns a list of nodes that were processed. A node will be in the list
729 Returns a list of nodes that were processed. A node will be in the list
730 even if it existed in the store previously.
730 even if it existed in the store previously.
731 """
731 """
732
732
733 def censorrevision(tr, node, tombstone=b''):
733 def censorrevision(tr, node, tombstone=b''):
734 """Remove the content of a single revision.
734 """Remove the content of a single revision.
735
735
736 The specified ``node`` will have its content purged from storage.
736 The specified ``node`` will have its content purged from storage.
737 Future attempts to access the revision data for this node will
737 Future attempts to access the revision data for this node will
738 result in failure.
738 result in failure.
739
739
740 A ``tombstone`` message can optionally be stored. This message may be
740 A ``tombstone`` message can optionally be stored. This message may be
741 displayed to users when they attempt to access the missing revision
741 displayed to users when they attempt to access the missing revision
742 data.
742 data.
743
743
744 Storage backends may have stored deltas against the previous content
744 Storage backends may have stored deltas against the previous content
745 in this revision. As part of censoring a revision, these storage
745 in this revision. As part of censoring a revision, these storage
746 backends are expected to rewrite any internally stored deltas such
746 backends are expected to rewrite any internally stored deltas such
747 that they no longer reference the deleted content.
747 that they no longer reference the deleted content.
748 """
748 """
749
749
750 def getstrippoint(minlink):
750 def getstrippoint(minlink):
751 """Find the minimum revision that must be stripped to strip a linkrev.
751 """Find the minimum revision that must be stripped to strip a linkrev.
752
752
753 Returns a 2-tuple containing the minimum revision number and a set
753 Returns a 2-tuple containing the minimum revision number and a set
754 of all revisions numbers that would be broken by this strip.
754 of all revisions numbers that would be broken by this strip.
755
755
756 TODO this is highly revlog centric and should be abstracted into
756 TODO this is highly revlog centric and should be abstracted into
757 a higher-level deletion API. ``repair.strip()`` relies on this.
757 a higher-level deletion API. ``repair.strip()`` relies on this.
758 """
758 """
759
759
760 def strip(minlink, transaction):
760 def strip(minlink, transaction):
761 """Remove storage of items starting at a linkrev.
761 """Remove storage of items starting at a linkrev.
762
762
763 This uses ``getstrippoint()`` to determine the first node to remove.
763 This uses ``getstrippoint()`` to determine the first node to remove.
764 Then it effectively truncates storage for all revisions after that.
764 Then it effectively truncates storage for all revisions after that.
765
765
766 TODO this is highly revlog centric and should be abstracted into a
766 TODO this is highly revlog centric and should be abstracted into a
767 higher-level deletion API.
767 higher-level deletion API.
768 """
768 """
769
769
770 class ifilestorage(ifileindex, ifiledata, ifilemutation):
770 class ifilestorage(ifileindex, ifiledata, ifilemutation):
771 """Complete storage interface for a single tracked file."""
771 """Complete storage interface for a single tracked file."""
772
772
773 def files():
773 def files():
774 """Obtain paths that are backing storage for this file.
774 """Obtain paths that are backing storage for this file.
775
775
776 TODO this is used heavily by verify code and there should probably
776 TODO this is used heavily by verify code and there should probably
777 be a better API for that.
777 be a better API for that.
778 """
778 """
779
779
780 def storageinfo(exclusivefiles=False, sharedfiles=False,
780 def storageinfo(exclusivefiles=False, sharedfiles=False,
781 revisionscount=False, trackedsize=False,
781 revisionscount=False, trackedsize=False,
782 storedsize=False):
782 storedsize=False):
783 """Obtain information about storage for this file's data.
783 """Obtain information about storage for this file's data.
784
784
785 Returns a dict describing storage for this tracked path. The keys
785 Returns a dict describing storage for this tracked path. The keys
786 in the dict map to arguments of the same. The arguments are bools
786 in the dict map to arguments of the same. The arguments are bools
787 indicating whether to calculate and obtain that data.
787 indicating whether to calculate and obtain that data.
788
788
789 exclusivefiles
789 exclusivefiles
790 Iterable of (vfs, path) describing files that are exclusively
790 Iterable of (vfs, path) describing files that are exclusively
791 used to back storage for this tracked path.
791 used to back storage for this tracked path.
792
792
793 sharedfiles
793 sharedfiles
794 Iterable of (vfs, path) describing files that are used to back
794 Iterable of (vfs, path) describing files that are used to back
795 storage for this tracked path. Those files may also provide storage
795 storage for this tracked path. Those files may also provide storage
796 for other stored entities.
796 for other stored entities.
797
797
798 revisionscount
798 revisionscount
799 Number of revisions available for retrieval.
799 Number of revisions available for retrieval.
800
800
801 trackedsize
801 trackedsize
802 Total size in bytes of all tracked revisions. This is a sum of the
802 Total size in bytes of all tracked revisions. This is a sum of the
803 length of the fulltext of all revisions.
803 length of the fulltext of all revisions.
804
804
805 storedsize
805 storedsize
806 Total size in bytes used to store data for all tracked revisions.
806 Total size in bytes used to store data for all tracked revisions.
807 This is commonly less than ``trackedsize`` due to internal usage
807 This is commonly less than ``trackedsize`` due to internal usage
808 of deltas rather than fulltext revisions.
808 of deltas rather than fulltext revisions.
809
809
810 Not all storage backends may support all queries are have a reasonable
810 Not all storage backends may support all queries are have a reasonable
811 value to use. In that case, the value should be set to ``None`` and
811 value to use. In that case, the value should be set to ``None`` and
812 callers are expected to handle this special value.
812 callers are expected to handle this special value.
813 """
813 """
814
814
815 def verifyintegrity(state):
815 def verifyintegrity(state):
816 """Verifies the integrity of file storage.
816 """Verifies the integrity of file storage.
817
817
818 ``state`` is a dict holding state of the verifier process. It can be
818 ``state`` is a dict holding state of the verifier process. It can be
819 used to communicate data between invocations of multiple storage
819 used to communicate data between invocations of multiple storage
820 primitives.
820 primitives.
821
821
822 If individual revisions cannot have their revision content resolved,
822 If individual revisions cannot have their revision content resolved,
823 the method is expected to set the ``skipread`` key to a set of nodes
823 the method is expected to set the ``skipread`` key to a set of nodes
824 that encountered problems.
824 that encountered problems.
825
825
826 The method yields objects conforming to the ``iverifyproblem``
826 The method yields objects conforming to the ``iverifyproblem``
827 interface.
827 interface.
828 """
828 """
829
829
830 class idirs(interfaceutil.Interface):
830 class idirs(interfaceutil.Interface):
831 """Interface representing a collection of directories from paths.
831 """Interface representing a collection of directories from paths.
832
832
833 This interface is essentially a derived data structure representing
833 This interface is essentially a derived data structure representing
834 directories from a collection of paths.
834 directories from a collection of paths.
835 """
835 """
836
836
837 def addpath(path):
837 def addpath(path):
838 """Add a path to the collection.
838 """Add a path to the collection.
839
839
840 All directories in the path will be added to the collection.
840 All directories in the path will be added to the collection.
841 """
841 """
842
842
843 def delpath(path):
843 def delpath(path):
844 """Remove a path from the collection.
844 """Remove a path from the collection.
845
845
846 If the removal was the last path in a particular directory, the
846 If the removal was the last path in a particular directory, the
847 directory is removed from the collection.
847 directory is removed from the collection.
848 """
848 """
849
849
850 def __iter__():
850 def __iter__():
851 """Iterate over the directories in this collection of paths."""
851 """Iterate over the directories in this collection of paths."""
852
852
853 def __contains__(path):
853 def __contains__(path):
854 """Whether a specific directory is in this collection."""
854 """Whether a specific directory is in this collection."""
855
855
856 class imanifestdict(interfaceutil.Interface):
856 class imanifestdict(interfaceutil.Interface):
857 """Interface representing a manifest data structure.
857 """Interface representing a manifest data structure.
858
858
859 A manifest is effectively a dict mapping paths to entries. Each entry
859 A manifest is effectively a dict mapping paths to entries. Each entry
860 consists of a binary node and extra flags affecting that entry.
860 consists of a binary node and extra flags affecting that entry.
861 """
861 """
862
862
863 def __getitem__(path):
863 def __getitem__(path):
864 """Returns the binary node value for a path in the manifest.
864 """Returns the binary node value for a path in the manifest.
865
865
866 Raises ``KeyError`` if the path does not exist in the manifest.
866 Raises ``KeyError`` if the path does not exist in the manifest.
867
867
868 Equivalent to ``self.find(path)[0]``.
868 Equivalent to ``self.find(path)[0]``.
869 """
869 """
870
870
871 def find(path):
871 def find(path):
872 """Returns the entry for a path in the manifest.
872 """Returns the entry for a path in the manifest.
873
873
874 Returns a 2-tuple of (node, flags).
874 Returns a 2-tuple of (node, flags).
875
875
876 Raises ``KeyError`` if the path does not exist in the manifest.
876 Raises ``KeyError`` if the path does not exist in the manifest.
877 """
877 """
878
878
879 def __len__():
879 def __len__():
880 """Return the number of entries in the manifest."""
880 """Return the number of entries in the manifest."""
881
881
882 def __nonzero__():
882 def __nonzero__():
883 """Returns True if the manifest has entries, False otherwise."""
883 """Returns True if the manifest has entries, False otherwise."""
884
884
885 __bool__ = __nonzero__
885 __bool__ = __nonzero__
886
886
887 def __setitem__(path, node):
887 def __setitem__(path, node):
888 """Define the node value for a path in the manifest.
888 """Define the node value for a path in the manifest.
889
889
890 If the path is already in the manifest, its flags will be copied to
890 If the path is already in the manifest, its flags will be copied to
891 the new entry.
891 the new entry.
892 """
892 """
893
893
894 def __contains__(path):
894 def __contains__(path):
895 """Whether a path exists in the manifest."""
895 """Whether a path exists in the manifest."""
896
896
897 def __delitem__(path):
897 def __delitem__(path):
898 """Remove a path from the manifest.
898 """Remove a path from the manifest.
899
899
900 Raises ``KeyError`` if the path is not in the manifest.
900 Raises ``KeyError`` if the path is not in the manifest.
901 """
901 """
902
902
903 def __iter__():
903 def __iter__():
904 """Iterate over paths in the manifest."""
904 """Iterate over paths in the manifest."""
905
905
906 def iterkeys():
906 def iterkeys():
907 """Iterate over paths in the manifest."""
907 """Iterate over paths in the manifest."""
908
908
909 def keys():
909 def keys():
910 """Obtain a list of paths in the manifest."""
910 """Obtain a list of paths in the manifest."""
911
911
912 def filesnotin(other, match=None):
912 def filesnotin(other, match=None):
913 """Obtain the set of paths in this manifest but not in another.
913 """Obtain the set of paths in this manifest but not in another.
914
914
915 ``match`` is an optional matcher function to be applied to both
915 ``match`` is an optional matcher function to be applied to both
916 manifests.
916 manifests.
917
917
918 Returns a set of paths.
918 Returns a set of paths.
919 """
919 """
920
920
921 def dirs():
921 def dirs():
922 """Returns an object implementing the ``idirs`` interface."""
922 """Returns an object implementing the ``idirs`` interface."""
923
923
924 def hasdir(dir):
924 def hasdir(dir):
925 """Returns a bool indicating if a directory is in this manifest."""
925 """Returns a bool indicating if a directory is in this manifest."""
926
926
927 def matches(match):
927 def matches(match):
928 """Generate a new manifest filtered through a matcher.
928 """Generate a new manifest filtered through a matcher.
929
929
930 Returns an object conforming to the ``imanifestdict`` interface.
930 Returns an object conforming to the ``imanifestdict`` interface.
931 """
931 """
932
932
933 def walk(match):
933 def walk(match):
934 """Generator of paths in manifest satisfying a matcher.
934 """Generator of paths in manifest satisfying a matcher.
935
935
936 This is equivalent to ``self.matches(match).iterkeys()`` except a new
936 This is equivalent to ``self.matches(match).iterkeys()`` except a new
937 manifest object is not created.
937 manifest object is not created.
938
938
939 If the matcher has explicit files listed and they don't exist in
939 If the matcher has explicit files listed and they don't exist in
940 the manifest, ``match.bad()`` is called for each missing file.
940 the manifest, ``match.bad()`` is called for each missing file.
941 """
941 """
942
942
943 def diff(other, match=None, clean=False):
943 def diff(other, match=None, clean=False):
944 """Find differences between this manifest and another.
944 """Find differences between this manifest and another.
945
945
946 This manifest is compared to ``other``.
946 This manifest is compared to ``other``.
947
947
948 If ``match`` is provided, the two manifests are filtered against this
948 If ``match`` is provided, the two manifests are filtered against this
949 matcher and only entries satisfying the matcher are compared.
949 matcher and only entries satisfying the matcher are compared.
950
950
951 If ``clean`` is True, unchanged files are included in the returned
951 If ``clean`` is True, unchanged files are included in the returned
952 object.
952 object.
953
953
954 Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
954 Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
955 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
955 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
956 represents the node and flags for this manifest and ``(node2, flag2)``
956 represents the node and flags for this manifest and ``(node2, flag2)``
957 are the same for the other manifest.
957 are the same for the other manifest.
958 """
958 """
959
959
960 def setflag(path, flag):
960 def setflag(path, flag):
961 """Set the flag value for a given path.
961 """Set the flag value for a given path.
962
962
963 Raises ``KeyError`` if the path is not already in the manifest.
963 Raises ``KeyError`` if the path is not already in the manifest.
964 """
964 """
965
965
966 def get(path, default=None):
966 def get(path, default=None):
967 """Obtain the node value for a path or a default value if missing."""
967 """Obtain the node value for a path or a default value if missing."""
968
968
969 def flags(path, default=''):
969 def flags(path, default=''):
970 """Return the flags value for a path or a default value if missing."""
970 """Return the flags value for a path or a default value if missing."""
971
971
972 def copy():
972 def copy():
973 """Return a copy of this manifest."""
973 """Return a copy of this manifest."""
974
974
975 def items():
975 def items():
976 """Returns an iterable of (path, node) for items in this manifest."""
976 """Returns an iterable of (path, node) for items in this manifest."""
977
977
978 def iteritems():
978 def iteritems():
979 """Identical to items()."""
979 """Identical to items()."""
980
980
981 def iterentries():
981 def iterentries():
982 """Returns an iterable of (path, node, flags) for this manifest.
982 """Returns an iterable of (path, node, flags) for this manifest.
983
983
984 Similar to ``iteritems()`` except items are a 3-tuple and include
984 Similar to ``iteritems()`` except items are a 3-tuple and include
985 flags.
985 flags.
986 """
986 """
987
987
988 def text():
988 def text():
989 """Obtain the raw data representation for this manifest.
989 """Obtain the raw data representation for this manifest.
990
990
991 Result is used to create a manifest revision.
991 Result is used to create a manifest revision.
992 """
992 """
993
993
994 def fastdelta(base, changes):
994 def fastdelta(base, changes):
995 """Obtain a delta between this manifest and another given changes.
995 """Obtain a delta between this manifest and another given changes.
996
996
997 ``base`` in the raw data representation for another manifest.
997 ``base`` in the raw data representation for another manifest.
998
998
999 ``changes`` is an iterable of ``(path, to_delete)``.
999 ``changes`` is an iterable of ``(path, to_delete)``.
1000
1000
1001 Returns a 2-tuple containing ``bytearray(self.text())`` and the
1001 Returns a 2-tuple containing ``bytearray(self.text())`` and the
1002 delta between ``base`` and this manifest.
1002 delta between ``base`` and this manifest.
1003 """
1003 """
1004
1004
class imanifestrevisionbase(interfaceutil.Interface):
    """Base interface representing a single revision of a manifest.

    Should not be used as a primary interface: should always be inherited
    as part of a larger interface.
    """

    def new():
        """Obtain a new manifest instance.

        Returns an object conforming to the ``imanifestrevisionwritable``
        interface. The instance will be associated with the same
        ``imanifestlog`` collection as this instance.
        """

    def copy():
        """Obtain a copy of this manifest instance.

        Returns an object conforming to the ``imanifestrevisionwritable``
        interface. The instance will be associated with the same
        ``imanifestlog`` collection as this instance.
        """

    def read():
        """Obtain the parsed manifest data structure.

        The returned object conforms to the ``imanifestdict`` interface.
        """

class imanifestrevisionstored(imanifestrevisionbase):
    """Interface representing a manifest revision committed to storage."""

    def node():
        """The binary node for this manifest."""

    parents = interfaceutil.Attribute(
        """List of binary nodes that are parents for this manifest revision."""
    )

    def readdelta(shallow=False):
        """Obtain the manifest data structure representing changes from parent.

        This manifest is compared to its 1st parent. A new manifest representing
        those differences is constructed.

        The returned object conforms to the ``imanifestdict`` interface.
        """

    def readfast(shallow=False):
        """Calls either ``read()`` or ``readdelta()``.

        The faster of the two options is called.
        """

    def find(key):
        """Calls ``self.read().find(key)``.

        Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
        """

class imanifestrevisionwritable(imanifestrevisionbase):
    """Interface representing a manifest revision that can be committed."""

    def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
        """Add this revision to storage.

        Takes a transaction object, the changeset revision number it will
        be associated with, its parent nodes, and lists of added and
        removed paths.

        If match is provided, storage can choose not to inspect or write out
        items that do not match. Storage is still required to be able to provide
        the full manifest in the future for any directories written (these
        manifests should not be "narrowed on disk").

        Returns the binary node of the created revision.
        """

class imanifeststorage(interfaceutil.Interface):
    """Storage interface for manifest data."""

    tree = interfaceutil.Attribute(
        """The path to the directory this manifest tracks.

        The empty bytestring represents the root manifest.
        """)

    index = interfaceutil.Attribute(
        """An ``ifilerevisionssequence`` instance.""")

    indexfile = interfaceutil.Attribute(
        """Path of revlog index file.

        TODO this is revlog specific and should not be exposed.
        """)

    opener = interfaceutil.Attribute(
        """VFS opener to use to access underlying files used for storage.

        TODO this is revlog specific and should not be exposed.
        """)

    version = interfaceutil.Attribute(
        """Revlog version number.

        TODO this is revlog specific and should not be exposed.
        """)

    _generaldelta = interfaceutil.Attribute(
        """Whether generaldelta storage is being used.

        TODO this is revlog specific and should not be exposed.
        """)

    fulltextcache = interfaceutil.Attribute(
        """Dict with cache of fulltexts.

        TODO this doesn't feel appropriate for the storage interface.
        """)

    def __len__():
        """Obtain the number of revisions stored for this manifest."""

    def __iter__():
        """Iterate over revision numbers for this manifest."""

    def rev(node):
        """Obtain the revision number given a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``error.LookupError`` if the revision is not known.
        """

    def lookup(value):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a bytes
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def parents(node):
        """Returns a 2-tuple of parent nodes for a node.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def revision(node, _df=None, raw=False):
        """Obtain fulltext data for a node."""

    def rawdata(node, _df=None):
        """Obtain raw data for a node."""

    def revdiff(rev1, rev2):
        """Obtain a delta between two revision numbers.

        The returned data is the result of ``bdiff.bdiff()`` on the raw
        revision data.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.
        """

    def emitrevisions(nodes,
                      nodesorder=None,
                      revisiondata=False,
                      assumehaveparentrevisions=False):
        """Produce ``irevisiondelta`` describing revisions.

        See the documentation for ``ifiledata`` for more.
        """

    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
        """Process a series of deltas for storage.

        See the documentation in ``ifilemutation`` for more.
        """

    def rawsize(rev):
        """Obtain the size of tracked data.

        Is equivalent to ``len(m.revision(node, raw=True))``.

        TODO this method is only used by upgrade code and may be removed.
        """

    def getstrippoint(minlink):
        """Find minimum revision that must be stripped to strip a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def checksize():
        """Obtain the expected sizes of backing files.

        TODO this is used by verify and it should not be part of the interface.
        """

    def files():
        """Obtain paths that are backing storage for this manifest.

        TODO this is used by verify and there should probably be a better API
        for this functionality.
        """

    def deltaparent(rev):
        """Obtain the revision that a revision is delta'd against.

        TODO delta encoding is an implementation detail of storage and should
        not be exposed to the storage interface.
        """

    def clone(tr, dest, **kwargs):
        """Clone this instance to another."""

    def clearcaches(clear_persisted_data=False):
        """Clear any caches associated with this instance."""

    def dirlog(d):
        """Obtain a manifest storage instance for a tree."""

    def add(m, transaction, link, p1, p2, added, removed, readtree=None,
            match=None):
        """Add a revision to storage.

        ``m`` is an object conforming to ``imanifestdict``.

        ``link`` is the linkrev revision number.

        ``p1`` and ``p2`` are the parent revision numbers.

        ``added`` and ``removed`` are iterables of added and removed paths,
        respectively.

        ``readtree`` is a function that can be used to read the child tree(s)
        when recursively writing the full tree structure when using
        treemanifests.

        ``match`` is a matcher that can be used to hint to storage that not all
        paths must be inspected; this is an optimization and can be safely
        ignored. Note that the storage must still be able to reproduce a full
        manifest including files that did not match.
        """

    def storageinfo(exclusivefiles=False, sharedfiles=False,
                    revisionscount=False, trackedsize=False,
                    storedsize=False):
        """Obtain information about storage for this manifest's data.

        See ``ifilestorage.storageinfo()`` for a description of this method.
        This one behaves the same way, except for manifest data.
        """

class imanifestlog(interfaceutil.Interface):
    """Interface representing a collection of manifest snapshots.

    Represents the root manifest in a repository.

    Also serves as a means to access nested tree manifests and to cache
    tree manifests.
    """

    def __getitem__(node):
        """Obtain a manifest instance for a given binary node.

        Equivalent to calling ``self.get('', node)``.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def get(tree, node, verify=True):
        """Retrieve the manifest instance for a given directory and binary node.

        ``node`` always refers to the node of the root manifest (which will be
        the only manifest if flat manifests are being used).

        If ``tree`` is the empty string, the root manifest is returned.
        Otherwise the manifest for the specified directory will be returned
        (requires tree manifests).

        If ``verify`` is True, ``LookupError`` is raised if the node is not
        known.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def getstorage(tree):
        """Retrieve an interface to storage for a particular tree.

        If ``tree`` is the empty bytestring, storage for the root manifest will
        be returned. Otherwise storage for a tree manifest is returned.

        TODO formalize interface for returned object.
        """

    def clearcaches():
        """Clear caches associated with this collection."""

    def rev(node):
        """Obtain the revision number for a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

class ilocalrepositoryfilestorage(interfaceutil.Interface):
    """Local repository sub-interface providing access to tracked file storage.

    This interface defines how a repository accesses storage for a single
    tracked file path.
    """

    def file(f):
        """Obtain a filelog for a tracked path.

        The returned type conforms to the ``ifilestorage`` interface.
        """

1342 class ilocalrepositorymain(interfaceutil.Interface):
1345 class ilocalrepositorymain(interfaceutil.Interface):
1343 """Main interface for local repositories.
1346 """Main interface for local repositories.
1344
1347
1345 This currently captures the reality of things - not how things should be.
1348 This currently captures the reality of things - not how things should be.
1346 """
1349 """
1347
1350
1348 supportedformats = interfaceutil.Attribute(
1351 supportedformats = interfaceutil.Attribute(
1349 """Set of requirements that apply to stream clone.
1352 """Set of requirements that apply to stream clone.
1350
1353
1351 This is actually a class attribute and is shared among all instances.
1354 This is actually a class attribute and is shared among all instances.
1352 """)
1355 """)
1353
1356
1354 supported = interfaceutil.Attribute(
1357 supported = interfaceutil.Attribute(
1355 """Set of requirements that this repo is capable of opening.""")
1358 """Set of requirements that this repo is capable of opening.""")
1356
1359
1357 requirements = interfaceutil.Attribute(
1360 requirements = interfaceutil.Attribute(
1358 """Set of requirements this repo uses.""")
1361 """Set of requirements this repo uses.""")
1359
1362
1360 features = interfaceutil.Attribute(
1363 features = interfaceutil.Attribute(
1361 """Set of "features" this repository supports.
1364 """Set of "features" this repository supports.
1362
1365
1363 A "feature" is a loosely-defined term. It can refer to a feature
1366 A "feature" is a loosely-defined term. It can refer to a feature
1364 in the classical sense or can describe an implementation detail
1367 in the classical sense or can describe an implementation detail
1365 of the repository. For example, a ``readonly`` feature may denote
1368 of the repository. For example, a ``readonly`` feature may denote
1366 the repository as read-only. Or a ``revlogfilestore`` feature may
1369 the repository as read-only. Or a ``revlogfilestore`` feature may
1367 denote that the repository is using revlogs for file storage.
1370 denote that the repository is using revlogs for file storage.
1368
1371
1369 The intent of features is to provide a machine-queryable mechanism
1372 The intent of features is to provide a machine-queryable mechanism
1370 for repo consumers to test for various repository characteristics.
1373 for repo consumers to test for various repository characteristics.
1371
1374
1372 Features are similar to ``requirements``. The main difference is that
1375 Features are similar to ``requirements``. The main difference is that
1373 requirements are stored on-disk and represent requirements to open the
1376 requirements are stored on-disk and represent requirements to open the
1374 repository. Features are more run-time capabilities of the repository
1377 repository. Features are more run-time capabilities of the repository
1375 and more granular capabilities (which may be derived from requirements).
1378 and more granular capabilities (which may be derived from requirements).
1376 """)
1379 """)
1377
1380
1378 filtername = interfaceutil.Attribute(
1381 filtername = interfaceutil.Attribute(
1379 """Name of the repoview that is active on this repo.""")
1382 """Name of the repoview that is active on this repo.""")
1380
1383
1381 wvfs = interfaceutil.Attribute(
1384 wvfs = interfaceutil.Attribute(
1382 """VFS used to access the working directory.""")
1385 """VFS used to access the working directory.""")
1383
1386
1384 vfs = interfaceutil.Attribute(
1387 vfs = interfaceutil.Attribute(
1385 """VFS rooted at the .hg directory.
1388 """VFS rooted at the .hg directory.
1386
1389
1387 Used to access repository data not in the store.
1390 Used to access repository data not in the store.
1388 """)
1391 """)
1389
1392
1390 svfs = interfaceutil.Attribute(
1393 svfs = interfaceutil.Attribute(
1391 """VFS rooted at the store.
1394 """VFS rooted at the store.
1392
1395
1393 Used to access repository data in the store. Typically .hg/store.
1396 Used to access repository data in the store. Typically .hg/store.
1394 But can point elsewhere if the store is shared.
1397 But can point elsewhere if the store is shared.
1395 """)
1398 """)
1396
1399
1397 root = interfaceutil.Attribute(
1400 root = interfaceutil.Attribute(
1398 """Path to the root of the working directory.""")
1401 """Path to the root of the working directory.""")
1399
1402
1400 path = interfaceutil.Attribute(
1403 path = interfaceutil.Attribute(
1401 """Path to the .hg directory.""")
1404 """Path to the .hg directory.""")
1402
1405
1403 origroot = interfaceutil.Attribute(
1406 origroot = interfaceutil.Attribute(
1404 """The filesystem path that was used to construct the repo.""")
1407 """The filesystem path that was used to construct the repo.""")
1405
1408
1406 auditor = interfaceutil.Attribute(
1409 auditor = interfaceutil.Attribute(
1407 """A pathauditor for the working directory.
1410 """A pathauditor for the working directory.
1408
1411
1409 This checks if a path refers to a nested repository.
1412 This checks if a path refers to a nested repository.
1410
1413
1411 Operates on the filesystem.
1414 Operates on the filesystem.
1412 """)
1415 """)
1413
1416
1414 nofsauditor = interfaceutil.Attribute(
1417 nofsauditor = interfaceutil.Attribute(
1415 """A pathauditor for the working directory.
1418 """A pathauditor for the working directory.
1416
1419
1417 This is like ``auditor`` except it doesn't do filesystem checks.
1420 This is like ``auditor`` except it doesn't do filesystem checks.
1418 """)
1421 """)
1419
1422
1420 baseui = interfaceutil.Attribute(
1423 baseui = interfaceutil.Attribute(
1421 """Original ui instance passed into constructor.""")
1424 """Original ui instance passed into constructor.""")
1422
1425
1423 ui = interfaceutil.Attribute(
1426 ui = interfaceutil.Attribute(
1424 """Main ui instance for this instance.""")
1427 """Main ui instance for this instance.""")
1425
1428
1426 sharedpath = interfaceutil.Attribute(
1429 sharedpath = interfaceutil.Attribute(
1427 """Path to the .hg directory of the repo this repo was shared from.""")
1430 """Path to the .hg directory of the repo this repo was shared from.""")
1428
1431
1429 store = interfaceutil.Attribute(
1432 store = interfaceutil.Attribute(
1430 """A store instance.""")
1433 """A store instance.""")
1431
1434
1432 spath = interfaceutil.Attribute(
1435 spath = interfaceutil.Attribute(
1433 """Path to the store.""")
1436 """Path to the store.""")
1434
1437
1435 sjoin = interfaceutil.Attribute(
1438 sjoin = interfaceutil.Attribute(
1436 """Alias to self.store.join.""")
1439 """Alias to self.store.join.""")
1437
1440
1438 cachevfs = interfaceutil.Attribute(
1441 cachevfs = interfaceutil.Attribute(
1439 """A VFS used to access the cache directory.
1442 """A VFS used to access the cache directory.
1440
1443
1441 Typically .hg/cache.
1444 Typically .hg/cache.
1442 """)
1445 """)
1443
1446
1444 wcachevfs = interfaceutil.Attribute(
1447 wcachevfs = interfaceutil.Attribute(
1445 """A VFS used to access the cache directory dedicated to working copy
1448 """A VFS used to access the cache directory dedicated to working copy
1446
1449
1447 Typically .hg/wcache.
1450 Typically .hg/wcache.
1448 """)
1451 """)
1449
1452
1450 filteredrevcache = interfaceutil.Attribute(
1453 filteredrevcache = interfaceutil.Attribute(
1451 """Holds sets of revisions to be filtered.""")
1454 """Holds sets of revisions to be filtered.""")
1452
1455
1453 names = interfaceutil.Attribute(
1456 names = interfaceutil.Attribute(
1454 """A ``namespaces`` instance.""")
1457 """A ``namespaces`` instance.""")
1455
1458
1456 def close():
1459 def close():
1457 """Close the handle on this repository."""
1460 """Close the handle on this repository."""
1458
1461
1459 def peer():
1462 def peer():
1460 """Obtain an object conforming to the ``peer`` interface."""
1463 """Obtain an object conforming to the ``peer`` interface."""
1461
1464
1462 def unfiltered():
1465 def unfiltered():
1463 """Obtain an unfiltered/raw view of this repo."""
1466 """Obtain an unfiltered/raw view of this repo."""
1464
1467
1465 def filtered(name, visibilityexceptions=None):
1468 def filtered(name, visibilityexceptions=None):
1466 """Obtain a named view of this repository."""
1469 """Obtain a named view of this repository."""
1467
1470
1468 obsstore = interfaceutil.Attribute(
1471 obsstore = interfaceutil.Attribute(
1469 """A store of obsolescence data.""")
1472 """A store of obsolescence data.""")
1470
1473
1471 changelog = interfaceutil.Attribute(
1474 changelog = interfaceutil.Attribute(
1472 """A handle on the changelog revlog.""")
1475 """A handle on the changelog revlog.""")
1473
1476
1474 manifestlog = interfaceutil.Attribute(
1477 manifestlog = interfaceutil.Attribute(
1475 """An instance conforming to the ``imanifestlog`` interface.
1478 """An instance conforming to the ``imanifestlog`` interface.
1476
1479
1477 Provides access to manifests for the repository.
1480 Provides access to manifests for the repository.
1478 """)
1481 """)
1479
1482
1480 dirstate = interfaceutil.Attribute(
1483 dirstate = interfaceutil.Attribute(
1481 """Working directory state.""")
1484 """Working directory state.""")
1482
1485
1483 narrowpats = interfaceutil.Attribute(
1486 narrowpats = interfaceutil.Attribute(
1484 """Matcher patterns for this repository's narrowspec.""")
1487 """Matcher patterns for this repository's narrowspec.""")
1485
1488
1486 def narrowmatch(match=None, includeexact=False):
1489 def narrowmatch(match=None, includeexact=False):
1487 """Obtain a matcher for the narrowspec."""
1490 """Obtain a matcher for the narrowspec."""
1488
1491
1489 def setnarrowpats(newincludes, newexcludes):
1492 def setnarrowpats(newincludes, newexcludes):
1490 """Define the narrowspec for this repository."""
1493 """Define the narrowspec for this repository."""
1491
1494
1492 def __getitem__(changeid):
1495 def __getitem__(changeid):
1493 """Try to resolve a changectx."""
1496 """Try to resolve a changectx."""
1494
1497
1495 def __contains__(changeid):
1498 def __contains__(changeid):
1496 """Whether a changeset exists."""
1499 """Whether a changeset exists."""
1497
1500
1498 def __nonzero__():
1501 def __nonzero__():
1499 """Always returns True."""
1502 """Always returns True."""
1500 return True
1503 return True
1501
1504
1502 __bool__ = __nonzero__
1505 __bool__ = __nonzero__
1503
1506
1504 def __len__():
1507 def __len__():
1505 """Returns the number of changesets in the repo."""
1508 """Returns the number of changesets in the repo."""
1506
1509
1507 def __iter__():
1510 def __iter__():
1508 """Iterate over revisions in the changelog."""
1511 """Iterate over revisions in the changelog."""
1509
1512
1510 def revs(expr, *args):
1513 def revs(expr, *args):
1511 """Evaluate a revset.
1514 """Evaluate a revset.
1512
1515
1513 Emits revisions.
1516 Emits revisions.
1514 """
1517 """
1515
1518
1516 def set(expr, *args):
1519 def set(expr, *args):
1517 """Evaluate a revset.
1520 """Evaluate a revset.
1518
1521
1519 Emits changectx instances.
1522 Emits changectx instances.
1520 """
1523 """
1521
1524
1522 def anyrevs(specs, user=False, localalias=None):
1525 def anyrevs(specs, user=False, localalias=None):
1523 """Find revisions matching one of the given revsets."""
1526 """Find revisions matching one of the given revsets."""
1524
1527
1525 def url():
1528 def url():
1526 """Returns a string representing the location of this repo."""
1529 """Returns a string representing the location of this repo."""
1527
1530
1528 def hook(name, throw=False, **args):
1531 def hook(name, throw=False, **args):
1529 """Call a hook."""
1532 """Call a hook."""
1530
1533
1531 def tags():
1534 def tags():
1532 """Return a mapping of tag to node."""
1535 """Return a mapping of tag to node."""
1533
1536
1534 def tagtype(tagname):
1537 def tagtype(tagname):
1535 """Return the type of a given tag."""
1538 """Return the type of a given tag."""
1536
1539
1537 def tagslist():
1540 def tagslist():
1538 """Return a list of tags ordered by revision."""
1541 """Return a list of tags ordered by revision."""
1539
1542
1540 def nodetags(node):
1543 def nodetags(node):
1541 """Return the tags associated with a node."""
1544 """Return the tags associated with a node."""
1542
1545
1543 def nodebookmarks(node):
1546 def nodebookmarks(node):
1544 """Return the list of bookmarks pointing to the specified node."""
1547 """Return the list of bookmarks pointing to the specified node."""
1545
1548
1546 def branchmap():
1549 def branchmap():
1547 """Return a mapping of branch to heads in that branch."""
1550 """Return a mapping of branch to heads in that branch."""
1548
1551
1549 def revbranchcache():
1552 def revbranchcache():
1550 pass
1553 pass
1551
1554
1552 def branchtip(branchtip, ignoremissing=False):
1555 def branchtip(branchtip, ignoremissing=False):
1553 """Return the tip node for a given branch."""
1556 """Return the tip node for a given branch."""
1554
1557
1555 def lookup(key):
1558 def lookup(key):
1556 """Resolve the node for a revision."""
1559 """Resolve the node for a revision."""
1557
1560
1558 def lookupbranch(key):
1561 def lookupbranch(key):
1559 """Look up the branch name of the given revision or branch name."""
1562 """Look up the branch name of the given revision or branch name."""
1560
1563
1561 def known(nodes):
1564 def known(nodes):
1562 """Determine whether a series of nodes is known.
1565 """Determine whether a series of nodes is known.
1563
1566
1564 Returns a list of bools.
1567 Returns a list of bools.
1565 """
1568 """
1566
1569
1567 def local():
1570 def local():
1568 """Whether the repository is local."""
1571 """Whether the repository is local."""
1569 return True
1572 return True
1570
1573
1571 def publishing():
1574 def publishing():
1572 """Whether the repository is a publishing repository."""
1575 """Whether the repository is a publishing repository."""
1573
1576
1574 def cancopy():
1577 def cancopy():
1575 pass
1578 pass
1576
1579
1577 def shared():
1580 def shared():
1578 """The type of shared repository or None."""
1581 """The type of shared repository or None."""
1579
1582
1580 def wjoin(f, *insidef):
1583 def wjoin(f, *insidef):
1581 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1584 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1582
1585
1583 def setparents(p1, p2):
1586 def setparents(p1, p2):
1584 """Set the parent nodes of the working directory."""
1587 """Set the parent nodes of the working directory."""
1585
1588
1586 def filectx(path, changeid=None, fileid=None):
1589 def filectx(path, changeid=None, fileid=None):
1587 """Obtain a filectx for the given file revision."""
1590 """Obtain a filectx for the given file revision."""
1588
1591
1589 def getcwd():
1592 def getcwd():
1590 """Obtain the current working directory from the dirstate."""
1593 """Obtain the current working directory from the dirstate."""
1591
1594
1592 def pathto(f, cwd=None):
1595 def pathto(f, cwd=None):
1593 """Obtain the relative path to a file."""
1596 """Obtain the relative path to a file."""
1594
1597
1595 def adddatafilter(name, fltr):
1598 def adddatafilter(name, fltr):
1596 pass
1599 pass
1597
1600
1598 def wread(filename):
1601 def wread(filename):
1599 """Read a file from wvfs, using data filters."""
1602 """Read a file from wvfs, using data filters."""
1600
1603
1601 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1604 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1602 """Write data to a file in the wvfs, using data filters."""
1605 """Write data to a file in the wvfs, using data filters."""
1603
1606
1604 def wwritedata(filename, data):
1607 def wwritedata(filename, data):
1605 """Resolve data for writing to the wvfs, using data filters."""
1608 """Resolve data for writing to the wvfs, using data filters."""
1606
1609
1607 def currenttransaction():
1610 def currenttransaction():
1608 """Obtain the current transaction instance or None."""
1611 """Obtain the current transaction instance or None."""
1609
1612
1610 def transaction(desc, report=None):
1613 def transaction(desc, report=None):
1611 """Open a new transaction to write to the repository."""
1614 """Open a new transaction to write to the repository."""
1612
1615
1613 def undofiles():
1616 def undofiles():
1614 """Returns a list of (vfs, path) for files to undo transactions."""
1617 """Returns a list of (vfs, path) for files to undo transactions."""
1615
1618
1616 def recover():
1619 def recover():
1617 """Roll back an interrupted transaction."""
1620 """Roll back an interrupted transaction."""
1618
1621
1619 def rollback(dryrun=False, force=False):
1622 def rollback(dryrun=False, force=False):
1620 """Undo the last transaction.
1623 """Undo the last transaction.
1621
1624
1622 DANGEROUS.
1625 DANGEROUS.
1623 """
1626 """
1624
1627
1625 def updatecaches(tr=None, full=False):
1628 def updatecaches(tr=None, full=False):
1626 """Warm repo caches."""
1629 """Warm repo caches."""
1627
1630
1628 def invalidatecaches():
1631 def invalidatecaches():
1629 """Invalidate cached data due to the repository mutating."""
1632 """Invalidate cached data due to the repository mutating."""
1630
1633
1631 def invalidatevolatilesets():
1634 def invalidatevolatilesets():
1632 pass
1635 pass
1633
1636
1634 def invalidatedirstate():
1637 def invalidatedirstate():
1635 """Invalidate the dirstate."""
1638 """Invalidate the dirstate."""
1636
1639
1637 def invalidate(clearfilecache=False):
1640 def invalidate(clearfilecache=False):
1638 pass
1641 pass
1639
1642
1640 def invalidateall():
1643 def invalidateall():
1641 pass
1644 pass
1642
1645
1643 def lock(wait=True):
1646 def lock(wait=True):
1644 """Lock the repository store and return a lock instance."""
1647 """Lock the repository store and return a lock instance."""
1645
1648
1646 def wlock(wait=True):
1649 def wlock(wait=True):
1647 """Lock the non-store parts of the repository."""
1650 """Lock the non-store parts of the repository."""
1648
1651
1649 def currentwlock():
1652 def currentwlock():
1650 """Return the wlock if it's held or None."""
1653 """Return the wlock if it's held or None."""
1651
1654
1652 def checkcommitpatterns(wctx, vdirs, match, status, fail):
1655 def checkcommitpatterns(wctx, vdirs, match, status, fail):
1653 pass
1656 pass
1654
1657
1655 def commit(text='', user=None, date=None, match=None, force=False,
1658 def commit(text='', user=None, date=None, match=None, force=False,
1656 editor=False, extra=None):
1659 editor=False, extra=None):
1657 """Add a new revision to the repository."""
1660 """Add a new revision to the repository."""
1658
1661
1659 def commitctx(ctx, error=False, origctx=None):
1662 def commitctx(ctx, error=False, origctx=None):
1660 """Commit a commitctx instance to the repository."""
1663 """Commit a commitctx instance to the repository."""
1661
1664
1662 def destroying():
1665 def destroying():
1663 """Inform the repository that nodes are about to be destroyed."""
1666 """Inform the repository that nodes are about to be destroyed."""
1664
1667
1665 def destroyed():
1668 def destroyed():
1666 """Inform the repository that nodes have been destroyed."""
1669 """Inform the repository that nodes have been destroyed."""
1667
1670
1668 def status(node1='.', node2=None, match=None, ignored=False,
1671 def status(node1='.', node2=None, match=None, ignored=False,
1669 clean=False, unknown=False, listsubrepos=False):
1672 clean=False, unknown=False, listsubrepos=False):
1670 """Convenience method to call repo[x].status()."""
1673 """Convenience method to call repo[x].status()."""
1671
1674
1672 def addpostdsstatus(ps):
1675 def addpostdsstatus(ps):
1673 pass
1676 pass
1674
1677
1675 def postdsstatus():
1678 def postdsstatus():
1676 pass
1679 pass
1677
1680
1678 def clearpostdsstatus():
1681 def clearpostdsstatus():
1679 pass
1682 pass
1680
1683
1681 def heads(start=None):
1684 def heads(start=None):
1682 """Obtain list of nodes that are DAG heads."""
1685 """Obtain list of nodes that are DAG heads."""
1683
1686
1684 def branchheads(branch=None, start=None, closed=False):
1687 def branchheads(branch=None, start=None, closed=False):
1685 pass
1688 pass
1686
1689
1687 def branches(nodes):
1690 def branches(nodes):
1688 pass
1691 pass
1689
1692
1690 def between(pairs):
1693 def between(pairs):
1691 pass
1694 pass
1692
1695
1693 def checkpush(pushop):
1696 def checkpush(pushop):
1694 pass
1697 pass
1695
1698
1696 prepushoutgoinghooks = interfaceutil.Attribute(
1699 prepushoutgoinghooks = interfaceutil.Attribute(
1697 """util.hooks instance.""")
1700 """util.hooks instance.""")
1698
1701
1699 def pushkey(namespace, key, old, new):
1702 def pushkey(namespace, key, old, new):
1700 pass
1703 pass
1701
1704
1702 def listkeys(namespace):
1705 def listkeys(namespace):
1703 pass
1706 pass
1704
1707
1705 def debugwireargs(one, two, three=None, four=None, five=None):
1708 def debugwireargs(one, two, three=None, four=None, five=None):
1706 pass
1709 pass
1707
1710
1708 def savecommitmessage(text):
1711 def savecommitmessage(text):
1709 pass
1712 pass
1710
1713
1711 class completelocalrepository(ilocalrepositorymain,
1714 class completelocalrepository(ilocalrepositorymain,
1712 ilocalrepositoryfilestorage):
1715 ilocalrepositoryfilestorage):
1713 """Complete interface for a local repository."""
1716 """Complete interface for a local repository."""
1714
1717
1715 class iwireprotocolcommandcacher(interfaceutil.Interface):
1718 class iwireprotocolcommandcacher(interfaceutil.Interface):
1716 """Represents a caching backend for wire protocol commands.
1719 """Represents a caching backend for wire protocol commands.
1717
1720
1718 Wire protocol version 2 supports transparent caching of many commands.
1721 Wire protocol version 2 supports transparent caching of many commands.
1719 To leverage this caching, servers can activate objects that cache
1722 To leverage this caching, servers can activate objects that cache
1720 command responses. Objects handle both cache writing and reading.
1723 command responses. Objects handle both cache writing and reading.
1721 This interface defines how that response caching mechanism works.
1724 This interface defines how that response caching mechanism works.
1722
1725
1723 Wire protocol version 2 commands emit a series of objects that are
1726 Wire protocol version 2 commands emit a series of objects that are
1724 serialized and sent to the client. The caching layer exists between
1727 serialized and sent to the client. The caching layer exists between
1725 the invocation of the command function and the sending of its output
1728 the invocation of the command function and the sending of its output
1726 objects to an output layer.
1729 objects to an output layer.
1727
1730
1728 Instances of this interface represent a binding to a cache that
1731 Instances of this interface represent a binding to a cache that
1729 can serve a response (in place of calling a command function) and/or
1732 can serve a response (in place of calling a command function) and/or
1730 write responses to a cache for subsequent use.
1733 write responses to a cache for subsequent use.
1731
1734
1732 When a command request arrives, the following happens with regards
1735 When a command request arrives, the following happens with regards
1733 to this interface:
1736 to this interface:
1734
1737
1735 1. The server determines whether the command request is cacheable.
1738 1. The server determines whether the command request is cacheable.
1736 2. If it is, an instance of this interface is spawned.
1739 2. If it is, an instance of this interface is spawned.
1737 3. The cacher is activated in a context manager (``__enter__`` is called).
1740 3. The cacher is activated in a context manager (``__enter__`` is called).
1738 4. A cache *key* for that request is derived. This will call the
1741 4. A cache *key* for that request is derived. This will call the
1739 instance's ``adjustcachekeystate()`` method so the derivation
1742 instance's ``adjustcachekeystate()`` method so the derivation
1740 can be influenced.
1743 can be influenced.
1741 5. The cacher is informed of the derived cache key via a call to
1744 5. The cacher is informed of the derived cache key via a call to
1742 ``setcachekey()``.
1745 ``setcachekey()``.
1743 6. The cacher's ``lookup()`` method is called to test for presence of
1746 6. The cacher's ``lookup()`` method is called to test for presence of
1744 the derived key in the cache.
1747 the derived key in the cache.
1745 7. If ``lookup()`` returns a hit, that cached result is used in place
1748 7. If ``lookup()`` returns a hit, that cached result is used in place
1746 of invoking the command function. ``__exit__`` is called and the instance
1749 of invoking the command function. ``__exit__`` is called and the instance
1747 is discarded.
1750 is discarded.
1748 8. The command function is invoked.
1751 8. The command function is invoked.
1749 9. ``onobject()`` is called for each object emitted by the command
1752 9. ``onobject()`` is called for each object emitted by the command
1750 function.
1753 function.
1751 10. After the final object is seen, ``onfinished()`` is called.
1754 10. After the final object is seen, ``onfinished()`` is called.
1752 11. ``__exit__`` is called to signal the end of use of the instance.
1755 11. ``__exit__`` is called to signal the end of use of the instance.
1753
1756
1754 Cache *key* derivation can be influenced by the instance.
1757 Cache *key* derivation can be influenced by the instance.
1755
1758
1756 Cache keys are initially derived by a deterministic representation of
1759 Cache keys are initially derived by a deterministic representation of
1757 the command request. This includes the command name, arguments, protocol
1760 the command request. This includes the command name, arguments, protocol
1758 version, etc. This initial key derivation is performed by CBOR-encoding a
1761 version, etc. This initial key derivation is performed by CBOR-encoding a
1759 data structure and feeding that output into a hasher.
1762 data structure and feeding that output into a hasher.
1760
1763
1761 Instances of this interface can influence this initial key derivation
1764 Instances of this interface can influence this initial key derivation
1762 via ``adjustcachekeystate()``.
1765 via ``adjustcachekeystate()``.
1763
1766
1764 The instance is informed of the derived cache key via a call to
1767 The instance is informed of the derived cache key via a call to
1765 ``setcachekey()``. The instance must store the key locally so it can
1768 ``setcachekey()``. The instance must store the key locally so it can
1766 be consulted on subsequent operations that may require it.
1769 be consulted on subsequent operations that may require it.
1767
1770
1768 When constructed, the instance has access to a callable that can be used
1771 When constructed, the instance has access to a callable that can be used
1769 for encoding response objects. This callable receives as its single
1772 for encoding response objects. This callable receives as its single
1770 argument an object emitted by a command function. It returns an iterable
1773 argument an object emitted by a command function. It returns an iterable
1771 of bytes chunks representing the encoded object. Unless the cacher is
1774 of bytes chunks representing the encoded object. Unless the cacher is
1772 caching native Python objects in memory or has a way of reconstructing
1775 caching native Python objects in memory or has a way of reconstructing
1773 the original Python objects, implementations typically call this function
1776 the original Python objects, implementations typically call this function
1774 to produce bytes from the output objects and then store those bytes in
1777 to produce bytes from the output objects and then store those bytes in
1775 the cache. When it comes time to re-emit those bytes, they are wrapped
1778 the cache. When it comes time to re-emit those bytes, they are wrapped
1776 in a ``wireprototypes.encodedresponse`` instance to tell the output
1779 in a ``wireprototypes.encodedresponse`` instance to tell the output
1777 layer that they are pre-encoded.
1780 layer that they are pre-encoded.
1778
1781
1779 When receiving the objects emitted by the command function, instances
1782 When receiving the objects emitted by the command function, instances
1780 can choose what to do with those objects. The simplest thing to do is
1783 can choose what to do with those objects. The simplest thing to do is
1781 re-emit the original objects. They will be forwarded to the output
1784 re-emit the original objects. They will be forwarded to the output
1782 layer and will be processed as if the cacher did not exist.
1785 layer and will be processed as if the cacher did not exist.
1783
1786
1784 Implementations could also choose to not emit objects - instead locally
1787 Implementations could also choose to not emit objects - instead locally
1785 buffering objects or their encoded representation. They could then emit
1788 buffering objects or their encoded representation. They could then emit
1786 a single "coalesced" object when ``onfinished()`` is called. In
1789 a single "coalesced" object when ``onfinished()`` is called. In
1787 this way, the implementation would function as a filtering layer of
1790 this way, the implementation would function as a filtering layer of
1788 sorts.
1791 sorts.
1789
1792
1790 When caching objects, typically the encoded form of the object will
1793 When caching objects, typically the encoded form of the object will
1791 be stored. Keep in mind that if the original object is forwarded to
1794 be stored. Keep in mind that if the original object is forwarded to
1792 the output layer, it will need to be encoded there as well. For large
1795 the output layer, it will need to be encoded there as well. For large
1793 output, this redundant encoding could add overhead. Implementations
1796 output, this redundant encoding could add overhead. Implementations
1794 could wrap the encoded object data in ``wireprototypes.encodedresponse``
1797 could wrap the encoded object data in ``wireprototypes.encodedresponse``
1795 instances to avoid this overhead.
1798 instances to avoid this overhead.
1796 """
1799 """
1797 def __enter__():
1800 def __enter__():
1798 """Marks the instance as active.
1801 """Marks the instance as active.
1799
1802
1800 Should return self.
1803 Should return self.
1801 """
1804 """
1802
1805
1803 def __exit__(exctype, excvalue, exctb):
1806 def __exit__(exctype, excvalue, exctb):
1804 """Called when cacher is no longer used.
1807 """Called when cacher is no longer used.
1805
1808
1806 This can be used by implementations to perform cleanup actions (e.g.
1809 This can be used by implementations to perform cleanup actions (e.g.
1807 disconnecting network sockets, aborting a partially cached response.
1810 disconnecting network sockets, aborting a partially cached response.
1808 """
1811 """
1809
1812
1810 def adjustcachekeystate(state):
1813 def adjustcachekeystate(state):
1811 """Influences cache key derivation by adjusting state to derive key.
1814 """Influences cache key derivation by adjusting state to derive key.
1812
1815
1813 A dict defining the state used to derive the cache key is passed.
1816 A dict defining the state used to derive the cache key is passed.
1814
1817
1815 Implementations can modify this dict to record additional state that
1818 Implementations can modify this dict to record additional state that
1816 is wanted to influence key derivation.
1819 is wanted to influence key derivation.
1817
1820
1818 Implementations are *highly* encouraged to not modify or delete
1821 Implementations are *highly* encouraged to not modify or delete
1819 existing keys.
1822 existing keys.
1820 """
1823 """
1821
1824
1822 def setcachekey(key):
1825 def setcachekey(key):
1823 """Record the derived cache key for this request.
1826 """Record the derived cache key for this request.
1824
1827
1825 Instances may mutate the key for internal usage, as desired. e.g.
1828 Instances may mutate the key for internal usage, as desired. e.g.
1826 instances may wish to prepend the repo name, introduce path
1829 instances may wish to prepend the repo name, introduce path
1827 components for filesystem or URL addressing, etc. Behavior is up to
1830 components for filesystem or URL addressing, etc. Behavior is up to
1828 the cache.
1831 the cache.
1829
1832
1830 Returns a bool indicating if the request is cacheable by this
1833 Returns a bool indicating if the request is cacheable by this
1831 instance.
1834 instance.
1832 """
1835 """
1833
1836
1834 def lookup():
1837 def lookup():
1835 """Attempt to resolve an entry in the cache.
1838 """Attempt to resolve an entry in the cache.
1836
1839
1837 The instance is instructed to look for the cache key that it was
1840 The instance is instructed to look for the cache key that it was
1838 informed about via the call to ``setcachekey()``.
1841 informed about via the call to ``setcachekey()``.
1839
1842
1840 If there's no cache hit or the cacher doesn't wish to use the cached
1843 If there's no cache hit or the cacher doesn't wish to use the cached
1841 entry, ``None`` should be returned.
1844 entry, ``None`` should be returned.
1842
1845
1843 Else, a dict defining the cached result should be returned. The
1846 Else, a dict defining the cached result should be returned. The
1844 dict may have the following keys:
1847 dict may have the following keys:
1845
1848
1846 objs
1849 objs
1847 An iterable of objects that should be sent to the client. That
1850 An iterable of objects that should be sent to the client. That
1848 iterable of objects is expected to be what the command function
1851 iterable of objects is expected to be what the command function
1849 would return if invoked or an equivalent representation thereof.
1852 would return if invoked or an equivalent representation thereof.
1850 """
1853 """
1851
1854
1852 def onobject(obj):
1855 def onobject(obj):
1853 """Called when a new object is emitted from the command function.
1856 """Called when a new object is emitted from the command function.
1854
1857
1855 Receives as its argument the object that was emitted from the
1858 Receives as its argument the object that was emitted from the
1856 command function.
1859 command function.
1857
1860
1858 This method returns an iterator of objects to forward to the output
1861 This method returns an iterator of objects to forward to the output
1859 layer. The easiest implementation is a generator that just
1862 layer. The easiest implementation is a generator that just
1860 ``yield obj``.
1863 ``yield obj``.
1861 """
1864 """
1862
1865
1863 def onfinished():
1866 def onfinished():
1864 """Called after all objects have been emitted from the command function.
1867 """Called after all objects have been emitted from the command function.
1865
1868
1866 Implementations should return an iterator of objects to forward to
1869 Implementations should return an iterator of objects to forward to
1867 the output layer.
1870 the output layer.
1868
1871
1869 This method can be a generator.
1872 This method can be a generator.
1870 """
1873 """
General Comments 0
You need to be logged in to leave comments. Login now