##// END OF EJS Templates
manifest: add rawsize() proxy (API)...
Gregory Szorc -
r39894:9534fe1e default
parent child Browse files
Show More
@@ -1,2017 +1,2020 b''
1 # manifest.py - manifest revision class for mercurial
1 # manifest.py - manifest revision class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import heapq
10 import heapq
11 import itertools
11 import itertools
12 import struct
12 import struct
13 import weakref
13 import weakref
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 bin,
17 bin,
18 hex,
18 hex,
19 nullid,
19 nullid,
20 nullrev,
20 nullrev,
21 )
21 )
22 from . import (
22 from . import (
23 error,
23 error,
24 mdiff,
24 mdiff,
25 policy,
25 policy,
26 pycompat,
26 pycompat,
27 repository,
27 repository,
28 revlog,
28 revlog,
29 util,
29 util,
30 )
30 )
31 from .utils import (
31 from .utils import (
32 interfaceutil,
32 interfaceutil,
33 )
33 )
34
34
35 parsers = policy.importmod(r'parsers')
35 parsers = policy.importmod(r'parsers')
36 propertycache = util.propertycache
36 propertycache = util.propertycache
37
37
38 def _parse(data):
38 def _parse(data):
39 # This method does a little bit of excessive-looking
39 # This method does a little bit of excessive-looking
40 # precondition checking. This is so that the behavior of this
40 # precondition checking. This is so that the behavior of this
41 # class exactly matches its C counterpart to try and help
41 # class exactly matches its C counterpart to try and help
42 # prevent surprise breakage for anyone that develops against
42 # prevent surprise breakage for anyone that develops against
43 # the pure version.
43 # the pure version.
44 if data and data[-1:] != '\n':
44 if data and data[-1:] != '\n':
45 raise ValueError('Manifest did not end in a newline.')
45 raise ValueError('Manifest did not end in a newline.')
46 prev = None
46 prev = None
47 for l in data.splitlines():
47 for l in data.splitlines():
48 if prev is not None and prev > l:
48 if prev is not None and prev > l:
49 raise ValueError('Manifest lines not in sorted order.')
49 raise ValueError('Manifest lines not in sorted order.')
50 prev = l
50 prev = l
51 f, n = l.split('\0')
51 f, n = l.split('\0')
52 if len(n) > 40:
52 if len(n) > 40:
53 yield f, bin(n[:40]), n[40:]
53 yield f, bin(n[:40]), n[40:]
54 else:
54 else:
55 yield f, bin(n), ''
55 yield f, bin(n), ''
56
56
def _text(it):
    """Render an iterable of (path, node, flags) triples as manifest text.

    File names are validated with _checkforbidden() before the text is
    assembled.  If this is ever changed to support newlines in
    filenames, be sure to check the templates/ dir again (especially
    *-raw.tmpl).
    """
    paths = []
    lines = []
    for path, node, flags in it:
        paths.append(path)
        lines.append("%s\0%s%s\n" % (path, hex(node), flags))

    _checkforbidden(paths)
    return ''.join(lines)
68
68
class lazymanifestiter(object):
    """Iterator over the file names of a _lazymanifest.

    Entries that live in the raw manifest text are sliced out of it;
    entries that only exist as pending (name, node, flags) tuples in
    extradata are returned from the tuple directly.
    """

    def __init__(self, lm):
        self.pos = 0
        self.lm = lm

    def __iter__(self):
        return self

    def next(self):
        try:
            data, offset = self.lm._get(self.pos)
        except IndexError:
            # walked past the last entry
            raise StopIteration
        self.pos += 1
        if offset == -1:
            # pending entry: data is a (name, node, flags) tuple
            return data[0]
        # name runs from the line start up to the NUL separator
        nul = data.find('\x00', offset)
        return data[offset:nul]

    __next__ = next
90
90
class lazymanifestiterentries(object):
    """Iterator over the (name, node, flags) entries of a _lazymanifest."""

    def __init__(self, lm):
        self.lm = lm
        self.pos = 0

    def __iter__(self):
        return self

    def next(self):
        try:
            data, offset = self.lm._get(self.pos)
        except IndexError:
            # walked past the last entry
            raise StopIteration
        if offset == -1:
            # pending entry: already a (name, node, flags) tuple
            self.pos += 1
            return data
        # name ends at the NUL; the 40 hex node chars follow it, then
        # the (possibly empty) flags up to the newline
        nul = data.find('\x00', offset)
        hashval = unhexlify(data, self.lm.extrainfo[self.pos],
                            nul + 1, 40)
        flags = self.lm._getflags(data, self.pos, nul)
        self.pos += 1
        return (data[offset:nul], hashval, flags)

    __next__ = next
115
115
def unhexlify(data, extra, pos, length):
    """Decode the hex node stored at data[pos:pos + length].

    When ``extra`` is nonzero it carries a 21st node byte (stashed in
    the manifest's extrainfo array) which is appended to the result.
    """
    node = bin(data[pos:pos + length])
    if extra:
        node += chr(extra & 0xff)
    return node
121
121
122 def _cmp(a, b):
122 def _cmp(a, b):
123 return (a > b) - (a < b)
123 return (a > b) - (a < b)
124
124
125 class _lazymanifest(object):
125 class _lazymanifest(object):
126 def __init__(self, data, positions=None, extrainfo=None, extradata=None):
126 def __init__(self, data, positions=None, extrainfo=None, extradata=None):
127 if positions is None:
127 if positions is None:
128 self.positions = self.findlines(data)
128 self.positions = self.findlines(data)
129 self.extrainfo = [0] * len(self.positions)
129 self.extrainfo = [0] * len(self.positions)
130 self.data = data
130 self.data = data
131 self.extradata = []
131 self.extradata = []
132 else:
132 else:
133 self.positions = positions[:]
133 self.positions = positions[:]
134 self.extrainfo = extrainfo[:]
134 self.extrainfo = extrainfo[:]
135 self.extradata = extradata[:]
135 self.extradata = extradata[:]
136 self.data = data
136 self.data = data
137
137
138 def findlines(self, data):
138 def findlines(self, data):
139 if not data:
139 if not data:
140 return []
140 return []
141 pos = data.find("\n")
141 pos = data.find("\n")
142 if pos == -1 or data[-1:] != '\n':
142 if pos == -1 or data[-1:] != '\n':
143 raise ValueError("Manifest did not end in a newline.")
143 raise ValueError("Manifest did not end in a newline.")
144 positions = [0]
144 positions = [0]
145 prev = data[:data.find('\x00')]
145 prev = data[:data.find('\x00')]
146 while pos < len(data) - 1 and pos != -1:
146 while pos < len(data) - 1 and pos != -1:
147 positions.append(pos + 1)
147 positions.append(pos + 1)
148 nexts = data[pos + 1:data.find('\x00', pos + 1)]
148 nexts = data[pos + 1:data.find('\x00', pos + 1)]
149 if nexts < prev:
149 if nexts < prev:
150 raise ValueError("Manifest lines not in sorted order.")
150 raise ValueError("Manifest lines not in sorted order.")
151 prev = nexts
151 prev = nexts
152 pos = data.find("\n", pos + 1)
152 pos = data.find("\n", pos + 1)
153 return positions
153 return positions
154
154
155 def _get(self, index):
155 def _get(self, index):
156 # get the position encoded in pos:
156 # get the position encoded in pos:
157 # positive number is an index in 'data'
157 # positive number is an index in 'data'
158 # negative number is in extrapieces
158 # negative number is in extrapieces
159 pos = self.positions[index]
159 pos = self.positions[index]
160 if pos >= 0:
160 if pos >= 0:
161 return self.data, pos
161 return self.data, pos
162 return self.extradata[-pos - 1], -1
162 return self.extradata[-pos - 1], -1
163
163
164 def _getkey(self, pos):
164 def _getkey(self, pos):
165 if pos >= 0:
165 if pos >= 0:
166 return self.data[pos:self.data.find('\x00', pos + 1)]
166 return self.data[pos:self.data.find('\x00', pos + 1)]
167 return self.extradata[-pos - 1][0]
167 return self.extradata[-pos - 1][0]
168
168
169 def bsearch(self, key):
169 def bsearch(self, key):
170 first = 0
170 first = 0
171 last = len(self.positions) - 1
171 last = len(self.positions) - 1
172
172
173 while first <= last:
173 while first <= last:
174 midpoint = (first + last)//2
174 midpoint = (first + last)//2
175 nextpos = self.positions[midpoint]
175 nextpos = self.positions[midpoint]
176 candidate = self._getkey(nextpos)
176 candidate = self._getkey(nextpos)
177 r = _cmp(key, candidate)
177 r = _cmp(key, candidate)
178 if r == 0:
178 if r == 0:
179 return midpoint
179 return midpoint
180 else:
180 else:
181 if r < 0:
181 if r < 0:
182 last = midpoint - 1
182 last = midpoint - 1
183 else:
183 else:
184 first = midpoint + 1
184 first = midpoint + 1
185 return -1
185 return -1
186
186
187 def bsearch2(self, key):
187 def bsearch2(self, key):
188 # same as the above, but will always return the position
188 # same as the above, but will always return the position
189 # done for performance reasons
189 # done for performance reasons
190 first = 0
190 first = 0
191 last = len(self.positions) - 1
191 last = len(self.positions) - 1
192
192
193 while first <= last:
193 while first <= last:
194 midpoint = (first + last)//2
194 midpoint = (first + last)//2
195 nextpos = self.positions[midpoint]
195 nextpos = self.positions[midpoint]
196 candidate = self._getkey(nextpos)
196 candidate = self._getkey(nextpos)
197 r = _cmp(key, candidate)
197 r = _cmp(key, candidate)
198 if r == 0:
198 if r == 0:
199 return (midpoint, True)
199 return (midpoint, True)
200 else:
200 else:
201 if r < 0:
201 if r < 0:
202 last = midpoint - 1
202 last = midpoint - 1
203 else:
203 else:
204 first = midpoint + 1
204 first = midpoint + 1
205 return (first, False)
205 return (first, False)
206
206
207 def __contains__(self, key):
207 def __contains__(self, key):
208 return self.bsearch(key) != -1
208 return self.bsearch(key) != -1
209
209
210 def _getflags(self, data, needle, pos):
210 def _getflags(self, data, needle, pos):
211 start = pos + 41
211 start = pos + 41
212 end = data.find("\n", start)
212 end = data.find("\n", start)
213 if end == -1:
213 if end == -1:
214 end = len(data) - 1
214 end = len(data) - 1
215 if start == end:
215 if start == end:
216 return ''
216 return ''
217 return self.data[start:end]
217 return self.data[start:end]
218
218
219 def __getitem__(self, key):
219 def __getitem__(self, key):
220 if not isinstance(key, bytes):
220 if not isinstance(key, bytes):
221 raise TypeError("getitem: manifest keys must be a bytes.")
221 raise TypeError("getitem: manifest keys must be a bytes.")
222 needle = self.bsearch(key)
222 needle = self.bsearch(key)
223 if needle == -1:
223 if needle == -1:
224 raise KeyError
224 raise KeyError
225 data, pos = self._get(needle)
225 data, pos = self._get(needle)
226 if pos == -1:
226 if pos == -1:
227 return (data[1], data[2])
227 return (data[1], data[2])
228 zeropos = data.find('\x00', pos)
228 zeropos = data.find('\x00', pos)
229 assert 0 <= needle <= len(self.positions)
229 assert 0 <= needle <= len(self.positions)
230 assert len(self.extrainfo) == len(self.positions)
230 assert len(self.extrainfo) == len(self.positions)
231 hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, 40)
231 hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, 40)
232 flags = self._getflags(data, needle, zeropos)
232 flags = self._getflags(data, needle, zeropos)
233 return (hashval, flags)
233 return (hashval, flags)
234
234
235 def __delitem__(self, key):
235 def __delitem__(self, key):
236 needle, found = self.bsearch2(key)
236 needle, found = self.bsearch2(key)
237 if not found:
237 if not found:
238 raise KeyError
238 raise KeyError
239 cur = self.positions[needle]
239 cur = self.positions[needle]
240 self.positions = self.positions[:needle] + self.positions[needle + 1:]
240 self.positions = self.positions[:needle] + self.positions[needle + 1:]
241 self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1:]
241 self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1:]
242 if cur >= 0:
242 if cur >= 0:
243 self.data = self.data[:cur] + '\x00' + self.data[cur + 1:]
243 self.data = self.data[:cur] + '\x00' + self.data[cur + 1:]
244
244
245 def __setitem__(self, key, value):
245 def __setitem__(self, key, value):
246 if not isinstance(key, bytes):
246 if not isinstance(key, bytes):
247 raise TypeError("setitem: manifest keys must be a byte string.")
247 raise TypeError("setitem: manifest keys must be a byte string.")
248 if not isinstance(value, tuple) or len(value) != 2:
248 if not isinstance(value, tuple) or len(value) != 2:
249 raise TypeError("Manifest values must be a tuple of (node, flags).")
249 raise TypeError("Manifest values must be a tuple of (node, flags).")
250 hashval = value[0]
250 hashval = value[0]
251 if not isinstance(hashval, bytes) or not 20 <= len(hashval) <= 22:
251 if not isinstance(hashval, bytes) or not 20 <= len(hashval) <= 22:
252 raise TypeError("node must be a 20-byte byte string")
252 raise TypeError("node must be a 20-byte byte string")
253 flags = value[1]
253 flags = value[1]
254 if len(hashval) == 22:
254 if len(hashval) == 22:
255 hashval = hashval[:-1]
255 hashval = hashval[:-1]
256 if not isinstance(flags, bytes) or len(flags) > 1:
256 if not isinstance(flags, bytes) or len(flags) > 1:
257 raise TypeError("flags must a 0 or 1 byte string, got %r", flags)
257 raise TypeError("flags must a 0 or 1 byte string, got %r", flags)
258 needle, found = self.bsearch2(key)
258 needle, found = self.bsearch2(key)
259 if found:
259 if found:
260 # put the item
260 # put the item
261 pos = self.positions[needle]
261 pos = self.positions[needle]
262 if pos < 0:
262 if pos < 0:
263 self.extradata[-pos - 1] = (key, hashval, value[1])
263 self.extradata[-pos - 1] = (key, hashval, value[1])
264 else:
264 else:
265 # just don't bother
265 # just don't bother
266 self.extradata.append((key, hashval, value[1]))
266 self.extradata.append((key, hashval, value[1]))
267 self.positions[needle] = -len(self.extradata)
267 self.positions[needle] = -len(self.extradata)
268 else:
268 else:
269 # not found, put it in with extra positions
269 # not found, put it in with extra positions
270 self.extradata.append((key, hashval, value[1]))
270 self.extradata.append((key, hashval, value[1]))
271 self.positions = (self.positions[:needle] + [-len(self.extradata)]
271 self.positions = (self.positions[:needle] + [-len(self.extradata)]
272 + self.positions[needle:])
272 + self.positions[needle:])
273 self.extrainfo = (self.extrainfo[:needle] + [0] +
273 self.extrainfo = (self.extrainfo[:needle] + [0] +
274 self.extrainfo[needle:])
274 self.extrainfo[needle:])
275
275
276 def copy(self):
276 def copy(self):
277 # XXX call _compact like in C?
277 # XXX call _compact like in C?
278 return _lazymanifest(self.data, self.positions, self.extrainfo,
278 return _lazymanifest(self.data, self.positions, self.extrainfo,
279 self.extradata)
279 self.extradata)
280
280
281 def _compact(self):
281 def _compact(self):
282 # hopefully not called TOO often
282 # hopefully not called TOO often
283 if len(self.extradata) == 0:
283 if len(self.extradata) == 0:
284 return
284 return
285 l = []
285 l = []
286 last_cut = 0
286 last_cut = 0
287 i = 0
287 i = 0
288 offset = 0
288 offset = 0
289 self.extrainfo = [0] * len(self.positions)
289 self.extrainfo = [0] * len(self.positions)
290 while i < len(self.positions):
290 while i < len(self.positions):
291 if self.positions[i] >= 0:
291 if self.positions[i] >= 0:
292 cur = self.positions[i]
292 cur = self.positions[i]
293 last_cut = cur
293 last_cut = cur
294 while True:
294 while True:
295 self.positions[i] = offset
295 self.positions[i] = offset
296 i += 1
296 i += 1
297 if i == len(self.positions) or self.positions[i] < 0:
297 if i == len(self.positions) or self.positions[i] < 0:
298 break
298 break
299 offset += self.positions[i] - cur
299 offset += self.positions[i] - cur
300 cur = self.positions[i]
300 cur = self.positions[i]
301 end_cut = self.data.find('\n', cur)
301 end_cut = self.data.find('\n', cur)
302 if end_cut != -1:
302 if end_cut != -1:
303 end_cut += 1
303 end_cut += 1
304 offset += end_cut - cur
304 offset += end_cut - cur
305 l.append(self.data[last_cut:end_cut])
305 l.append(self.data[last_cut:end_cut])
306 else:
306 else:
307 while i < len(self.positions) and self.positions[i] < 0:
307 while i < len(self.positions) and self.positions[i] < 0:
308 cur = self.positions[i]
308 cur = self.positions[i]
309 t = self.extradata[-cur - 1]
309 t = self.extradata[-cur - 1]
310 l.append(self._pack(t))
310 l.append(self._pack(t))
311 self.positions[i] = offset
311 self.positions[i] = offset
312 if len(t[1]) > 20:
312 if len(t[1]) > 20:
313 self.extrainfo[i] = ord(t[1][21])
313 self.extrainfo[i] = ord(t[1][21])
314 offset += len(l[-1])
314 offset += len(l[-1])
315 i += 1
315 i += 1
316 self.data = ''.join(l)
316 self.data = ''.join(l)
317 self.extradata = []
317 self.extradata = []
318
318
319 def _pack(self, d):
319 def _pack(self, d):
320 return d[0] + '\x00' + hex(d[1][:20]) + d[2] + '\n'
320 return d[0] + '\x00' + hex(d[1][:20]) + d[2] + '\n'
321
321
322 def text(self):
322 def text(self):
323 self._compact()
323 self._compact()
324 return self.data
324 return self.data
325
325
326 def diff(self, m2, clean=False):
326 def diff(self, m2, clean=False):
327 '''Finds changes between the current manifest and m2.'''
327 '''Finds changes between the current manifest and m2.'''
328 # XXX think whether efficiency matters here
328 # XXX think whether efficiency matters here
329 diff = {}
329 diff = {}
330
330
331 for fn, e1, flags in self.iterentries():
331 for fn, e1, flags in self.iterentries():
332 if fn not in m2:
332 if fn not in m2:
333 diff[fn] = (e1, flags), (None, '')
333 diff[fn] = (e1, flags), (None, '')
334 else:
334 else:
335 e2 = m2[fn]
335 e2 = m2[fn]
336 if (e1, flags) != e2:
336 if (e1, flags) != e2:
337 diff[fn] = (e1, flags), e2
337 diff[fn] = (e1, flags), e2
338 elif clean:
338 elif clean:
339 diff[fn] = None
339 diff[fn] = None
340
340
341 for fn, e2, flags in m2.iterentries():
341 for fn, e2, flags in m2.iterentries():
342 if fn not in self:
342 if fn not in self:
343 diff[fn] = (None, ''), (e2, flags)
343 diff[fn] = (None, ''), (e2, flags)
344
344
345 return diff
345 return diff
346
346
347 def iterentries(self):
347 def iterentries(self):
348 return lazymanifestiterentries(self)
348 return lazymanifestiterentries(self)
349
349
350 def iterkeys(self):
350 def iterkeys(self):
351 return lazymanifestiter(self)
351 return lazymanifestiter(self)
352
352
353 def __iter__(self):
353 def __iter__(self):
354 return lazymanifestiter(self)
354 return lazymanifestiter(self)
355
355
356 def __len__(self):
356 def __len__(self):
357 return len(self.positions)
357 return len(self.positions)
358
358
359 def filtercopy(self, filterfn):
359 def filtercopy(self, filterfn):
360 # XXX should be optimized
360 # XXX should be optimized
361 c = _lazymanifest('')
361 c = _lazymanifest('')
362 for f, n, fl in self.iterentries():
362 for f, n, fl in self.iterentries():
363 if filterfn(f):
363 if filterfn(f):
364 c[f] = n, fl
364 c[f] = n, fl
365 return c
365 return c
366
366
# Prefer the C implementation of lazymanifest when the parsers
# extension module provides one; otherwise keep the pure-Python
# fallback defined above.
_lazymanifest = getattr(parsers, 'lazymanifest', _lazymanifest)
371
371
@interfaceutil.implementer(repository.imanifestdict)
class manifestdict(object):
    """Dict-like view of a flat manifest, backed by a _lazymanifest.

    Maps file path -> node, with per-file flags available through
    flags()/setflag() and find().
    """

    def __init__(self, data=''):
        self._lm = _lazymanifest(data)

    def __getitem__(self, key):
        # node only; use find() to also get the flags
        return self._lm[key][0]

    def find(self, key):
        """Return the (node, flags) pair for key."""
        return self._lm[key]

    def __len__(self):
        return len(self._lm)

    def __nonzero__(self):
        # nonzero is covered by the __len__ function, but implementing it here
        # makes it easier for extensions to override.
        return len(self._lm) != 0

    __bool__ = __nonzero__

    def __setitem__(self, key, node):
        # preserve any existing flags for the file
        self._lm[key] = node, self.flags(key, '')

    def __contains__(self, key):
        if key is None:
            return False
        return key in self._lm

    def __delitem__(self, key):
        del self._lm[key]

    def __iter__(self):
        return self._lm.__iter__()

    def iterkeys(self):
        return self._lm.iterkeys()

    def keys(self):
        return list(self.iterkeys())

    def filesnotin(self, m2, match=None):
        '''Set of files in this manifest that are not in the other'''
        if match:
            m1 = self.matches(match)
            m2 = m2.matches(match)
            return m1.filesnotin(m2)
        diff = self.diff(m2)
        # a file is "not in m2" when the m2 side of the diff has no node
        files = set(filepath
                    for filepath, hashflags in diff.iteritems()
                    if hashflags[1][0] is None)
        return files

    @propertycache
    def _dirs(self):
        return util.dirs(self)

    def dirs(self):
        return self._dirs

    def hasdir(self, dir):
        return dir in self._dirs

    def _filesfastpath(self, match):
        '''Checks whether we can correctly and quickly iterate over matcher
        files instead of over manifest files.'''
        files = match.files()
        return (len(files) < 100 and (match.isexact() or
            (match.prefix() and all(fn in self for fn in files))))

    def walk(self, match):
        '''Generates matching file names.

        Equivalent to manifest.matches(match).iterkeys(), but without creating
        an entirely new manifest.

        It also reports nonexistent files by marking them bad with match.bad().
        '''
        if match.always():
            for f in iter(self):
                yield f
            return

        fset = set(match.files())

        # avoid the entire walk if we're only looking for specific files
        if self._filesfastpath(match):
            for fn in sorted(fset):
                yield fn
            return

        for fn in self:
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            if match(fn):
                yield fn

        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fset.discard('.')

        for fn in sorted(fset):
            if not self.hasdir(fn):
                match.bad(fn, None)

    def matches(self, match):
        '''generate a new manifest filtered by the match argument'''
        if match.always():
            return self.copy()

        if self._filesfastpath(match):
            m = manifestdict()
            lm = self._lm
            for fn in match.files():
                if fn in lm:
                    m._lm[fn] = lm[fn]
            return m

        m = manifestdict()
        m._lm = self._lm.filtercopy(match)
        return m

    def diff(self, m2, match=None, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
        if match:
            m1 = self.matches(match)
            m2 = m2.matches(match)
            return m1.diff(m2, clean=clean)
        return self._lm.diff(m2._lm, clean)

    def setflag(self, key, flag):
        """Replace the flags for key, keeping its current node."""
        self._lm[key] = self[key], flag

    def get(self, key, default=None):
        try:
            return self._lm[key][0]
        except KeyError:
            return default

    def flags(self, key, default=''):
        try:
            return self._lm[key][1]
        except KeyError:
            return default

    def copy(self):
        c = manifestdict()
        c._lm = self._lm.copy()
        return c

    def items(self):
        # (path, node) pairs; flags are dropped
        return (x[:2] for x in self._lm.iterentries())

    def iteritems(self):
        return (x[:2] for x in self._lm.iterentries())

    def iterentries(self):
        return self._lm.iterentries()

    def text(self):
        # most likely uses native version
        return self._lm.text()

    def fastdelta(self, base, changes):
        """Given a base manifest text as a bytearray and a list of changes
        relative to that text, compute a delta that can be used by revlog.
        """
        delta = []
        dstart = None
        dend = None
        dline = [""]
        start = 0
        # zero copy representation of base as a buffer
        addbuf = util.buffer(base)

        changes = list(changes)
        if len(changes) < 1000:
            # start with a readonly loop that finds the offset of
            # each line and creates the deltas
            for f, todelete in changes:
                # bs will either be the index of the item or the insert point
                start, end = _msearch(addbuf, f, start)
                if not todelete:
                    h, fl = self._lm[f]
                    l = "%s\0%s%s\n" % (f, hex(h), fl)
                else:
                    if start == end:
                        # item we want to delete was not found, error out
                        raise AssertionError(
                            _("failed to remove %s from manifest") % f)
                    l = ""
                if dstart is not None and dstart <= start and dend >= start:
                    # this change is adjacent to (or overlaps) the last
                    # one: grow the current delta hunk
                    if dend < end:
                        dend = end
                    if l:
                        dline.append(l)
                else:
                    # flush the previous hunk and start a new one
                    if dstart is not None:
                        delta.append([dstart, dend, "".join(dline)])
                    dstart = start
                    dend = end
                    dline = [l]

            if dstart is not None:
                delta.append([dstart, dend, "".join(dline)])
            # apply the delta to the base, and get a delta for addrevision
            deltatext, arraytext = _addlistdelta(base, delta)
        else:
            # For large changes, it's much cheaper to just build the text and
            # diff it.
            arraytext = bytearray(self.text())
            deltatext = mdiff.textdiff(
                util.buffer(base), util.buffer(arraytext))

        return arraytext, deltatext
601
601
def _msearch(m, s, lo=0, hi=None):
    '''return a tuple (start, end) that says where to find s within m.

    If the string is found m[start:end] are the line containing
    that string. If start == end the string was not found and
    they indicate the proper sorted insertion point.

    m should be a buffer, a memoryview or a byte string.
    s is a byte string'''
    # Manifest lines look like "<filename>\0<40 hex chars>[flags]\n", sorted
    # by filename, so a binary search over raw bytes is possible.
    def advance(i, c):
        # scan forward from i to the next occurrence of byte c (or end of m)
        while i < lenm and m[i:i + 1] != c:
            i += 1
        return i
    if not s:
        return (lo, lo)
    lenm = len(m)
    if not hi:
        hi = lenm
    while lo < hi:
        mid = (lo + hi) // 2
        # back up from mid to the start of the current line
        start = mid
        while start > 0 and m[start - 1:start] != '\n':
            start -= 1
        # the filename ends at the NUL separator
        end = advance(start, '\0')
        if bytes(m[start:end]) < s:
            # we know that after the null there are 40 bytes of sha1
            # this translates to the bisect lo = mid + 1
            lo = advance(end + 40, '\n') + 1
        else:
            # this translates to the bisect hi = mid
            hi = start
    # lo now points at the first line whose filename is >= s
    end = advance(lo, '\0')
    found = m[lo:end]
    if s == found:
        # we know that after the null there are 40 bytes of sha1
        end = advance(end + 40, '\n')
        return (lo, end + 1)
    else:
        return (lo, lo)
641
641
642 def _checkforbidden(l):
642 def _checkforbidden(l):
643 """Check filenames for illegal characters."""
643 """Check filenames for illegal characters."""
644 for f in l:
644 for f in l:
645 if '\n' in f or '\r' in f:
645 if '\n' in f or '\r' in f:
646 raise error.StorageError(
646 raise error.StorageError(
647 _("'\\n' and '\\r' disallowed in filenames: %r")
647 _("'\\n' and '\\r' disallowed in filenames: %r")
648 % pycompat.bytestr(f))
648 % pycompat.bytestr(f))
649
649
650
650
651 # apply the changes collected during the bisect loop to our addlist
651 # apply the changes collected during the bisect loop to our addlist
652 # return a delta suitable for addrevision
652 # return a delta suitable for addrevision
653 def _addlistdelta(addlist, x):
653 def _addlistdelta(addlist, x):
654 # for large addlist arrays, building a new array is cheaper
654 # for large addlist arrays, building a new array is cheaper
655 # than repeatedly modifying the existing one
655 # than repeatedly modifying the existing one
656 currentposition = 0
656 currentposition = 0
657 newaddlist = bytearray()
657 newaddlist = bytearray()
658
658
659 for start, end, content in x:
659 for start, end, content in x:
660 newaddlist += addlist[currentposition:start]
660 newaddlist += addlist[currentposition:start]
661 if content:
661 if content:
662 newaddlist += bytearray(content)
662 newaddlist += bytearray(content)
663
663
664 currentposition = end
664 currentposition = end
665
665
666 newaddlist += addlist[currentposition:]
666 newaddlist += addlist[currentposition:]
667
667
668 deltatext = "".join(struct.pack(">lll", start, end, len(content))
668 deltatext = "".join(struct.pack(">lll", start, end, len(content))
669 + content for start, end, content in x)
669 + content for start, end, content in x)
670 return deltatext, newaddlist
670 return deltatext, newaddlist
671
671
672 def _splittopdir(f):
672 def _splittopdir(f):
673 if '/' in f:
673 if '/' in f:
674 dir, subpath = f.split('/', 1)
674 dir, subpath = f.split('/', 1)
675 return dir + '/', subpath
675 return dir + '/', subpath
676 else:
676 else:
677 return '', f
677 return '', f
678
678
679 _noop = lambda s: None
679 _noop = lambda s: None
680
680
681 class treemanifest(object):
681 class treemanifest(object):
    def __init__(self, dir='', text=''):
        """Create a tree manifest for directory `dir` ('' = repo root).

        If `text` is given it must be a flat (non-tree) serialized
        manifest; it is parsed in and the instance is marked dirty.
        """
        # directory covered by this node, with trailing '/' ('' for root)
        self._dir = dir
        # storage node; nullid until read from / written to a revlog
        self._node = nullid
        # deferred population hooks; _noop means nothing is pending
        self._loadfunc = _noop
        self._copyfunc = _noop
        self._dirty = False
        # realized submanifests: {'subdir/': treemanifest}
        self._dirs = {}
        # unrealized submanifests: {'subdir/': (path, node, readsubtree)}
        self._lazydirs = {}
        # Using _lazymanifest here is a little slower than plain old dicts
        self._files = {}
        self._flags = {}
        if text:
            def readsubtree(subdir, subm):
                raise AssertionError('treemanifest constructor only accepts '
                                     'flat manifests')
            self.parse(text, readsubtree)
            self._dirty = True # Mark flat manifest dirty after parsing
699
699
    def _subpath(self, path):
        # prefix path with this tree's directory (self._dir ends with '/')
        return self._dir + path
702
702
703 def _loadalllazy(self):
703 def _loadalllazy(self):
704 for k, (path, node, readsubtree) in self._lazydirs.iteritems():
704 for k, (path, node, readsubtree) in self._lazydirs.iteritems():
705 self._dirs[k] = readsubtree(path, node)
705 self._dirs[k] = readsubtree(path, node)
706 self._lazydirs = {}
706 self._lazydirs = {}
707
707
708 def _loadlazy(self, d):
708 def _loadlazy(self, d):
709 path, node, readsubtree = self._lazydirs[d]
709 path, node, readsubtree = self._lazydirs[d]
710 self._dirs[d] = readsubtree(path, node)
710 self._dirs[d] = readsubtree(path, node)
711 del self._lazydirs[d]
711 del self._lazydirs[d]
712
712
713 def _loadchildrensetlazy(self, visit):
713 def _loadchildrensetlazy(self, visit):
714 if not visit:
714 if not visit:
715 return None
715 return None
716 if visit == 'all' or visit == 'this':
716 if visit == 'all' or visit == 'this':
717 self._loadalllazy()
717 self._loadalllazy()
718 return None
718 return None
719
719
720 todel = []
720 todel = []
721 for k in visit:
721 for k in visit:
722 kslash = k + '/'
722 kslash = k + '/'
723 ld = self._lazydirs.get(kslash)
723 ld = self._lazydirs.get(kslash)
724 if ld:
724 if ld:
725 path, node, readsubtree = ld
725 path, node, readsubtree = ld
726 self._dirs[kslash] = readsubtree(path, node)
726 self._dirs[kslash] = readsubtree(path, node)
727 todel.append(kslash)
727 todel.append(kslash)
728 for kslash in todel:
728 for kslash in todel:
729 del self._lazydirs[kslash]
729 del self._lazydirs[kslash]
730 return visit
730 return visit
731
731
732 def __len__(self):
732 def __len__(self):
733 self._load()
733 self._load()
734 size = len(self._files)
734 size = len(self._files)
735 self._loadalllazy()
735 self._loadalllazy()
736 for m in self._dirs.values():
736 for m in self._dirs.values():
737 size += m.__len__()
737 size += m.__len__()
738 return size
738 return size
739
739
740 def __nonzero__(self):
740 def __nonzero__(self):
741 # Faster than "__len() != 0" since it avoids loading sub-manifests
741 # Faster than "__len() != 0" since it avoids loading sub-manifests
742 return not self._isempty()
742 return not self._isempty()
743
743
744 __bool__ = __nonzero__
744 __bool__ = __nonzero__
745
745
    def _isempty(self):
        """True when this tree contains no files, recursively.

        Checks the already-loaded state first so lazy subtrees are only
        realized when the loaded portion looks empty.
        """
        self._load() # for consistency; already loaded by all callers
        # See if we can skip loading everything.
        if self._files or (self._dirs and
                           any(not m._isempty() for m in self._dirs.values())):
            return False
        self._loadalllazy()
        return (not self._dirs or
                all(m._isempty() for m in self._dirs.values()))
755
755
    def __repr__(self):
        # loaded=True when no deferred _loadfunc is pending
        return ('<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>' %
                (self._dir, hex(self._node),
                 bool(self._loadfunc is _noop),
                 self._dirty, id(self)))
761
761
    def dir(self):
        '''The directory that this tree manifest represents, including a
        trailing '/'. Empty string for the repo root directory.'''
        return self._dir
766
766
    def node(self):
        '''This node of this instance. nullid for unsaved instances. Should
        be updated when the instance is read or written from a revlog.
        '''
        # a dirty tree has no valid stored node yet; callers must write first
        assert not self._dirty
        return self._node
773
773
    def setnode(self, node):
        """Record the storage node for this tree and mark it clean."""
        self._node = node
        self._dirty = False
777
777
778 def iterentries(self):
778 def iterentries(self):
779 self._load()
779 self._load()
780 self._loadalllazy()
780 self._loadalllazy()
781 for p, n in sorted(itertools.chain(self._dirs.items(),
781 for p, n in sorted(itertools.chain(self._dirs.items(),
782 self._files.items())):
782 self._files.items())):
783 if p in self._files:
783 if p in self._files:
784 yield self._subpath(p), n, self._flags.get(p, '')
784 yield self._subpath(p), n, self._flags.get(p, '')
785 else:
785 else:
786 for x in n.iterentries():
786 for x in n.iterentries():
787 yield x
787 yield x
788
788
789 def items(self):
789 def items(self):
790 self._load()
790 self._load()
791 self._loadalllazy()
791 self._loadalllazy()
792 for p, n in sorted(itertools.chain(self._dirs.items(),
792 for p, n in sorted(itertools.chain(self._dirs.items(),
793 self._files.items())):
793 self._files.items())):
794 if p in self._files:
794 if p in self._files:
795 yield self._subpath(p), n
795 yield self._subpath(p), n
796 else:
796 else:
797 for f, sn in n.iteritems():
797 for f, sn in n.iteritems():
798 yield f, sn
798 yield f, sn
799
799
800 iteritems = items
800 iteritems = items
801
801
802 def iterkeys(self):
802 def iterkeys(self):
803 self._load()
803 self._load()
804 self._loadalllazy()
804 self._loadalllazy()
805 for p in sorted(itertools.chain(self._dirs, self._files)):
805 for p in sorted(itertools.chain(self._dirs, self._files)):
806 if p in self._files:
806 if p in self._files:
807 yield self._subpath(p)
807 yield self._subpath(p)
808 else:
808 else:
809 for f in self._dirs[p]:
809 for f in self._dirs[p]:
810 yield f
810 yield f
811
811
    def keys(self):
        """Return all file paths as a list (same order as iterkeys())."""
        return list(self.iterkeys())
814
814
    def __iter__(self):
        # iterating a treemanifest yields full file paths
        return self.iterkeys()
817
817
818 def __contains__(self, f):
818 def __contains__(self, f):
819 if f is None:
819 if f is None:
820 return False
820 return False
821 self._load()
821 self._load()
822 dir, subpath = _splittopdir(f)
822 dir, subpath = _splittopdir(f)
823 if dir:
823 if dir:
824 if dir in self._lazydirs:
824 if dir in self._lazydirs:
825 self._loadlazy(dir)
825 self._loadlazy(dir)
826
826
827 if dir not in self._dirs:
827 if dir not in self._dirs:
828 return False
828 return False
829
829
830 return self._dirs[dir].__contains__(subpath)
830 return self._dirs[dir].__contains__(subpath)
831 else:
831 else:
832 return f in self._files
832 return f in self._files
833
833
834 def get(self, f, default=None):
834 def get(self, f, default=None):
835 self._load()
835 self._load()
836 dir, subpath = _splittopdir(f)
836 dir, subpath = _splittopdir(f)
837 if dir:
837 if dir:
838 if dir in self._lazydirs:
838 if dir in self._lazydirs:
839 self._loadlazy(dir)
839 self._loadlazy(dir)
840
840
841 if dir not in self._dirs:
841 if dir not in self._dirs:
842 return default
842 return default
843 return self._dirs[dir].get(subpath, default)
843 return self._dirs[dir].get(subpath, default)
844 else:
844 else:
845 return self._files.get(f, default)
845 return self._files.get(f, default)
846
846
847 def __getitem__(self, f):
847 def __getitem__(self, f):
848 self._load()
848 self._load()
849 dir, subpath = _splittopdir(f)
849 dir, subpath = _splittopdir(f)
850 if dir:
850 if dir:
851 if dir in self._lazydirs:
851 if dir in self._lazydirs:
852 self._loadlazy(dir)
852 self._loadlazy(dir)
853
853
854 return self._dirs[dir].__getitem__(subpath)
854 return self._dirs[dir].__getitem__(subpath)
855 else:
855 else:
856 return self._files[f]
856 return self._files[f]
857
857
858 def flags(self, f):
858 def flags(self, f):
859 self._load()
859 self._load()
860 dir, subpath = _splittopdir(f)
860 dir, subpath = _splittopdir(f)
861 if dir:
861 if dir:
862 if dir in self._lazydirs:
862 if dir in self._lazydirs:
863 self._loadlazy(dir)
863 self._loadlazy(dir)
864
864
865 if dir not in self._dirs:
865 if dir not in self._dirs:
866 return ''
866 return ''
867 return self._dirs[dir].flags(subpath)
867 return self._dirs[dir].flags(subpath)
868 else:
868 else:
869 if f in self._lazydirs or f in self._dirs:
869 if f in self._lazydirs or f in self._dirs:
870 return ''
870 return ''
871 return self._flags.get(f, '')
871 return self._flags.get(f, '')
872
872
873 def find(self, f):
873 def find(self, f):
874 self._load()
874 self._load()
875 dir, subpath = _splittopdir(f)
875 dir, subpath = _splittopdir(f)
876 if dir:
876 if dir:
877 if dir in self._lazydirs:
877 if dir in self._lazydirs:
878 self._loadlazy(dir)
878 self._loadlazy(dir)
879
879
880 return self._dirs[dir].find(subpath)
880 return self._dirs[dir].find(subpath)
881 else:
881 else:
882 return self._files[f], self._flags.get(f, '')
882 return self._files[f], self._flags.get(f, '')
883
883
884 def __delitem__(self, f):
884 def __delitem__(self, f):
885 self._load()
885 self._load()
886 dir, subpath = _splittopdir(f)
886 dir, subpath = _splittopdir(f)
887 if dir:
887 if dir:
888 if dir in self._lazydirs:
888 if dir in self._lazydirs:
889 self._loadlazy(dir)
889 self._loadlazy(dir)
890
890
891 self._dirs[dir].__delitem__(subpath)
891 self._dirs[dir].__delitem__(subpath)
892 # If the directory is now empty, remove it
892 # If the directory is now empty, remove it
893 if self._dirs[dir]._isempty():
893 if self._dirs[dir]._isempty():
894 del self._dirs[dir]
894 del self._dirs[dir]
895 else:
895 else:
896 del self._files[f]
896 del self._files[f]
897 if f in self._flags:
897 if f in self._flags:
898 del self._flags[f]
898 del self._flags[f]
899 self._dirty = True
899 self._dirty = True
900
900
901 def __setitem__(self, f, n):
901 def __setitem__(self, f, n):
902 assert n is not None
902 assert n is not None
903 self._load()
903 self._load()
904 dir, subpath = _splittopdir(f)
904 dir, subpath = _splittopdir(f)
905 if dir:
905 if dir:
906 if dir in self._lazydirs:
906 if dir in self._lazydirs:
907 self._loadlazy(dir)
907 self._loadlazy(dir)
908 if dir not in self._dirs:
908 if dir not in self._dirs:
909 self._dirs[dir] = treemanifest(self._subpath(dir))
909 self._dirs[dir] = treemanifest(self._subpath(dir))
910 self._dirs[dir].__setitem__(subpath, n)
910 self._dirs[dir].__setitem__(subpath, n)
911 else:
911 else:
912 self._files[f] = n[:21] # to match manifestdict's behavior
912 self._files[f] = n[:21] # to match manifestdict's behavior
913 self._dirty = True
913 self._dirty = True
914
914
    def _load(self):
        """Run the pending lazy load or copy hook, at most once.

        Each hook is reset to _noop before it runs, so a reentrant
        _load() during the hook is a no-op.
        """
        if self._loadfunc is not _noop:
            lf, self._loadfunc = self._loadfunc, _noop
            lf(self)
        elif self._copyfunc is not _noop:
            cf, self._copyfunc = self._copyfunc, _noop
            cf(self)
922
922
923 def setflag(self, f, flags):
923 def setflag(self, f, flags):
924 """Set the flags (symlink, executable) for path f."""
924 """Set the flags (symlink, executable) for path f."""
925 self._load()
925 self._load()
926 dir, subpath = _splittopdir(f)
926 dir, subpath = _splittopdir(f)
927 if dir:
927 if dir:
928 if dir in self._lazydirs:
928 if dir in self._lazydirs:
929 self._loadlazy(dir)
929 self._loadlazy(dir)
930 if dir not in self._dirs:
930 if dir not in self._dirs:
931 self._dirs[dir] = treemanifest(self._subpath(dir))
931 self._dirs[dir] = treemanifest(self._subpath(dir))
932 self._dirs[dir].setflag(subpath, flags)
932 self._dirs[dir].setflag(subpath, flags)
933 else:
933 else:
934 self._flags[f] = flags
934 self._flags[f] = flags
935 self._dirty = True
935 self._dirty = True
936
936
    def copy(self):
        """Return a copy of this manifest.

        The actual duplication of _dirs/_files/_flags is deferred through
        _copyfunc when self itself still has a pending _loadfunc;
        otherwise it happens eagerly here.
        """
        copy = treemanifest(self._dir)
        copy._node = self._node
        copy._dirty = self._dirty
        if self._copyfunc is _noop:
            def _copyfunc(s):
                self._load()
                # OPT: it'd be nice to not load everything here. Unfortunately
                # this makes a mess of the "dirty" state tracking if we don't.
                self._loadalllazy()
                sdirs = s._dirs
                for d, v in self._dirs.iteritems():
                    sdirs[d] = v.copy()
                s._files = dict.copy(self._files)
                s._flags = dict.copy(self._flags)
            if self._loadfunc is _noop:
                # self is fully loaded: populate the copy now
                _copyfunc(copy)
            else:
                # defer until the copy is first loaded
                copy._copyfunc = _copyfunc
        else:
            # self is itself an unrealized copy; reuse the same recipe
            copy._copyfunc = self._copyfunc
        return copy
959
959
    def filesnotin(self, m2, match=None):
        '''Set of files in this manifest that are not in the other'''
        if match and not match.always():
            # restrict both sides to the matched subset first
            m1 = self.matches(match)
            m2 = m2.matches(match)
            return m1.filesnotin(m2)

        files = set()
        def _filesnotin(t1, t2):
            # identical, unmodified subtrees cannot contribute differences
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                return
            t1._load()
            t2._load()
            t1._loadalllazy()
            t2._loadalllazy()
            for d, m1 in t1._dirs.iteritems():
                if d in t2._dirs:
                    m2 = t2._dirs[d]
                    _filesnotin(m1, m2)
                else:
                    # whole subtree is missing from t2
                    files.update(m1.iterkeys())

            for fn in t1._files:
                if fn not in t2._files:
                    files.add(t1._subpath(fn))

        _filesnotin(self, m2)
        return files
988
988
    @propertycache
    def _alldirs(self):
        # util.dirs over this manifest; cached after the first access
        return util.dirs(self)
992
992
    def dirs(self):
        """Return the cached util.dirs object for this manifest."""
        return self._alldirs
995
995
996 def hasdir(self, dir):
996 def hasdir(self, dir):
997 self._load()
997 self._load()
998 topdir, subdir = _splittopdir(dir)
998 topdir, subdir = _splittopdir(dir)
999 if topdir:
999 if topdir:
1000 if topdir in self._lazydirs:
1000 if topdir in self._lazydirs:
1001 self._loadlazy(topdir)
1001 self._loadlazy(topdir)
1002 if topdir in self._dirs:
1002 if topdir in self._dirs:
1003 return self._dirs[topdir].hasdir(subdir)
1003 return self._dirs[topdir].hasdir(subdir)
1004 return False
1004 return False
1005 dirslash = dir + '/'
1005 dirslash = dir + '/'
1006 return dirslash in self._dirs or dirslash in self._lazydirs
1006 return dirslash in self._dirs or dirslash in self._lazydirs
1007
1007
1008 def walk(self, match):
1008 def walk(self, match):
1009 '''Generates matching file names.
1009 '''Generates matching file names.
1010
1010
1011 Equivalent to manifest.matches(match).iterkeys(), but without creating
1011 Equivalent to manifest.matches(match).iterkeys(), but without creating
1012 an entirely new manifest.
1012 an entirely new manifest.
1013
1013
1014 It also reports nonexistent files by marking them bad with match.bad().
1014 It also reports nonexistent files by marking them bad with match.bad().
1015 '''
1015 '''
1016 if match.always():
1016 if match.always():
1017 for f in iter(self):
1017 for f in iter(self):
1018 yield f
1018 yield f
1019 return
1019 return
1020
1020
1021 fset = set(match.files())
1021 fset = set(match.files())
1022
1022
1023 for fn in self._walk(match):
1023 for fn in self._walk(match):
1024 if fn in fset:
1024 if fn in fset:
1025 # specified pattern is the exact name
1025 # specified pattern is the exact name
1026 fset.remove(fn)
1026 fset.remove(fn)
1027 yield fn
1027 yield fn
1028
1028
1029 # for dirstate.walk, files=['.'] means "walk the whole tree".
1029 # for dirstate.walk, files=['.'] means "walk the whole tree".
1030 # follow that here, too
1030 # follow that here, too
1031 fset.discard('.')
1031 fset.discard('.')
1032
1032
1033 for fn in sorted(fset):
1033 for fn in sorted(fset):
1034 if not self.hasdir(fn):
1034 if not self.hasdir(fn):
1035 match.bad(fn, None)
1035 match.bad(fn, None)
1036
1036
    def _walk(self, match):
        '''Recursively generates matching file names for walk().'''
        # ask the matcher which children can possibly match below here
        visit = match.visitchildrenset(self._dir[:-1] or '.')
        if not visit:
            return

        # yield this dir's files and walk its submanifests
        self._load()
        visit = self._loadchildrensetlazy(visit)
        for p in sorted(list(self._dirs) + list(self._files)):
            if p in self._files:
                fullp = self._subpath(p)
                if match(fullp):
                    yield fullp
            else:
                # p carries a trailing '/'; visit holds bare child names
                if not visit or p[:-1] in visit:
                    for f in self._dirs[p]._walk(match):
                        yield f
1055
1055
1056 def matches(self, match):
1056 def matches(self, match):
1057 '''generate a new manifest filtered by the match argument'''
1057 '''generate a new manifest filtered by the match argument'''
1058 if match.always():
1058 if match.always():
1059 return self.copy()
1059 return self.copy()
1060
1060
1061 return self._matches(match)
1061 return self._matches(match)
1062
1062
    def _matches(self, match):
        '''recursively generate a new manifest filtered by the match argument.
        '''
        visit = match.visitchildrenset(self._dir[:-1] or '.')
        if visit == 'all':
            # everything below here matches; a copy is cheaper than filtering
            return self.copy()
        ret = treemanifest(self._dir)
        if not visit:
            return ret

        self._load()
        for fn in self._files:
            # While visitchildrenset *usually* lists only subdirs, this is
            # actually up to the matcher and may have some files in the set().
            # If visit == 'this', we should obviously look at the files in this
            # directory; if visit is a set, and fn is in it, we should inspect
            # fn (but no need to inspect things not in the set).
            if visit != 'this' and fn not in visit:
                continue
            fullp = self._subpath(fn)
            # visitchildrenset isn't perfect, we still need to call the regular
            # matcher code to further filter results.
            if not match(fullp):
                continue
            ret._files[fn] = self._files[fn]
            if fn in self._flags:
                ret._flags[fn] = self._flags[fn]

        visit = self._loadchildrensetlazy(visit)
        for dir, subm in self._dirs.iteritems():
            # dir has a trailing '/'; visit holds bare child names
            if visit and dir[:-1] not in visit:
                continue
            m = subm._matches(match)
            if not m._isempty():
                ret._dirs[dir] = m

        if not ret._isempty():
            ret._dirty = True
        return ret
1103
1103
    def diff(self, m2, match=None, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
        if match and not match.always():
            # restrict both sides to the matched subset first
            m1 = self.matches(match)
            m2 = m2.matches(match)
            return m1.diff(m2, clean=clean)
        result = {}
        emptytree = treemanifest()
        def _diff(t1, t2):
            # identical, unmodified subtrees can contain no differences
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                return
            t1._load()
            t2._load()
            # OPT: do we need to load everything?
            t1._loadalllazy()
            t2._loadalllazy()
            for d, m1 in t1._dirs.iteritems():
                m2 = t2._dirs.get(d, emptytree)
                _diff(m1, m2)

            for d, m2 in t2._dirs.iteritems():
                if d not in t1._dirs:
                    # subtree present only on the other side
                    _diff(emptytree, m2)

            for fn, n1 in t1._files.iteritems():
                fl1 = t1._flags.get(fn, '')
                n2 = t2._files.get(fn, None)
                fl2 = t2._flags.get(fn, '')
                if n1 != n2 or fl1 != fl2:
                    result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
                elif clean:
                    result[t1._subpath(fn)] = None

            for fn, n2 in t2._files.iteritems():
                if fn not in t1._files:
                    fl2 = t2._flags.get(fn, '')
                    result[t2._subpath(fn)] = ((None, ''), (n2, fl2))

        _diff(self, m2)
        return result
1157
1157
1158 def unmodifiedsince(self, m2):
1158 def unmodifiedsince(self, m2):
1159 return not self._dirty and not m2._dirty and self._node == m2._node
1159 return not self._dirty and not m2._dirty and self._node == m2._node
1160
1160
    def parse(self, text, readsubtree):
        """Populate this node from serialized manifest `text`.

        Entries flagged 't' are subdirectories: they are recorded lazily
        in _lazydirs, and readsubtree(path, node) realizes them later.
        """
        selflazy = self._lazydirs
        subpath = self._subpath
        for f, n, fl in _parse(text):
            if fl == 't':
                f = f + '/'
                selflazy[f] = (subpath(f), n, readsubtree)
            elif '/' in f:
                # This is a flat manifest, so use __setitem__ and setflag rather
                # than assigning directly to _files and _flags, so we can
                # assign a path in a subdirectory, and to mark dirty (compared
                # to nullid).
                self[f] = n
                if fl:
                    self.setflag(f, fl)
            else:
                # Assigning to _files and _flags avoids marking as dirty,
                # and should be a little faster.
                self._files[f] = n
                if fl:
                    self._flags[f] = fl
1182
1182
def text(self):
    """Get the full data of this manifest as a bytestring."""
    # Resolve any pending lazy load before serializing every entry.
    self._load()
    return _text(self.iterentries())
1187
1187
def dirtext(self):
    """Get the full data of this directory as a bytestring. Make sure that
    any submanifests have been written first, so their nodeids are correct.
    """
    self._load()
    getflag = self.flags
    entries = []
    # Lazily-recorded subdirectories already carry their node.
    for d, (path, node, readsubtree) in self._lazydirs.iteritems():
        entries.append((d[:-1], node, 't'))
    # Loaded subdirectories use the node stored on the subtree object.
    for d in self._dirs:
        entries.append((d[:-1], self._dirs[d]._node, 't'))
    for f in self._files:
        entries.append((f, self._files[f], getflag(f)))
    return _text(sorted(entries))
1199
1199
def read(self, gettext, readsubtree):
    """Arrange for this manifest to be parsed lazily.

    ``gettext`` returns the raw manifest text and ``readsubtree`` loads a
    submanifest; neither is invoked until the manifest is first loaded.
    """
    def loader(target):
        target.parse(gettext(), readsubtree)
        # Freshly-parsed content matches storage, so it is not dirty.
        target._dirty = False

    self._loadfunc = loader
1205
1205
def writesubtrees(self, m1, m2, writesubtree, match):
    """Invoke ``writesubtree(subm, p1node, p2node, match)`` for each loaded
    subdirectory of this manifest, using ``m1``/``m2`` as the parent
    manifests to derive each subtree's parent nodes.
    """
    self._load() # for consistency; should never have any effect here
    m1._load()
    m2._load()
    emptytree = treemanifest()
    def getnode(m, d):
        # Prefer the lazily-recorded node; fall back to the loaded
        # subtree's node (or the empty tree's if the dir is absent).
        ld = m._lazydirs.get(d)
        if ld:
            return ld[1]
        return m._dirs.get(d, emptytree)._node

    # we should have always loaded everything by the time we get here for
    # `self`, but possibly not in `m1` or `m2`.
    assert not self._lazydirs
    # let's skip investigating things that `match` says we do not need.
    visit = match.visitchildrenset(self._dir[:-1] or '.')
    if visit == 'this' or visit == 'all':
        # everything below is wanted; no per-child filtering needed
        visit = None
    for d, subm in self._dirs.iteritems():
        if visit and d[:-1] not in visit:
            continue
        subp1 = getnode(m1, d)
        subp2 = getnode(m2, d)
        if subp1 == nullid:
            # make the non-null parent (if any) the first parent
            subp1, subp2 = subp2, subp1
        writesubtree(subm, subp1, subp2, match)
1232
1232
def walksubtrees(self, matcher=None):
    """Returns an iterator of the subtrees of this manifest, including this
    manifest itself.

    If `matcher` is provided, it only returns subtrees that match.
    """
    # If the matcher rules out this whole directory, prune the walk here.
    if matcher and not matcher.visitdir(self._dir[:-1] or '.'):
        return
    if not matcher or matcher(self._dir[:-1]):
        yield self

    self._load()
    # OPT: use visitchildrenset to avoid loading everything.
    self._loadalllazy()
    for d, subm in self._dirs.iteritems():
        # recurse depth-first into every loaded subdirectory
        for subtree in subm.walksubtrees(matcher=matcher):
            yield subtree
1250
1250
class manifestfulltextcache(util.lrucachedict):
    """File-backed LRU cache for the manifest cache

    File consists of entries, up to EOF:

    - 20 bytes node, 4 bytes length, <length> manifest data

    These are written in reverse cache order (oldest to newest).

    """
    def __init__(self, max):
        super(manifestfulltextcache, self).__init__(max)
        # True when the in-memory state diverges from the on-disk file
        self._dirty = False
        # True once the on-disk file has been loaded (lazy, at most once)
        self._read = False
        # vfs opener used to read/write the cache file; None disables I/O
        self._opener = None

    def read(self):
        """Load the on-disk cache file into memory, once, best-effort."""
        if self._read or self._opener is None:
            return

        try:
            with self._opener('manifestfulltextcache') as fp:
                # bypass our own __setitem__ so loading doesn't mark dirty
                set = super(manifestfulltextcache, self).__setitem__
                # ignore trailing data, this is a cache, corruption is skipped
                while True:
                    node = fp.read(20)
                    if len(node) < 20:
                        break
                    try:
                        size = struct.unpack('>L', fp.read(4))[0]
                    except struct.error:
                        break
                    value = bytearray(fp.read(size))
                    if len(value) != size:
                        break
                    set(node, value)
        except IOError:
            # the file is allowed to be missing
            pass

        self._read = True
        self._dirty = False

    def write(self):
        """Persist the cache to disk, oldest entry first, if dirty."""
        if not self._dirty or self._opener is None:
            return
        # rotate backwards to the first used node
        with self._opener(
            'manifestfulltextcache', 'w', atomictemp=True, checkambig=True
        ) as fp:
            node = self._head.prev
            while True:
                if node.key in self._cache:
                    fp.write(node.key)
                    fp.write(struct.pack('>L', len(node.value)))
                    fp.write(node.value)
                if node is self._head:
                    break
                node = node.prev

    def __len__(self):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).__len__()

    def __contains__(self, k):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).__contains__(k)

    def __iter__(self):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).__iter__()

    def __getitem__(self, k):
        if not self._read:
            self.read()
        # the cache lru order can change on read
        setdirty = self._cache.get(k) is not self._head
        value = super(manifestfulltextcache, self).__getitem__(k)
        if setdirty:
            self._dirty = True
        return value

    def __setitem__(self, k, v):
        if not self._read:
            self.read()
        super(manifestfulltextcache, self).__setitem__(k, v)
        self._dirty = True

    def __delitem__(self, k):
        if not self._read:
            self.read()
        super(manifestfulltextcache, self).__delitem__(k)
        self._dirty = True

    def get(self, k, default=None):
        if not self._read:
            self.read()
        return super(manifestfulltextcache, self).get(k, default=default)

    def clear(self, clear_persisted_data=False):
        """Drop in-memory entries; optionally truncate the on-disk file too."""
        super(manifestfulltextcache, self).clear()
        if clear_persisted_data:
            self._dirty = True
            self.write()
        self._read = False
1359
1359
@interfaceutil.implementer(repository.imanifeststorage)
class manifestrevlog(object):
    '''A revlog that stores manifest texts. This is responsible for caching the
    full-text manifest contents.
    '''
    def __init__(self, opener, tree='', dirlogcache=None, indexfile=None,
                 treemanifest=False):
        """Constructs a new manifest revlog

        `indexfile` - used by extensions to have two manifests at once, like
        when transitioning between flatmanifeset and treemanifests.

        `treemanifest` - used to indicate this is a tree manifest revlog. Opener
        options can also be used to make this a tree manifest revlog. The opener
        option takes precedence, so if it is set to True, we ignore whatever
        value is passed in to the constructor.
        """
        # During normal operations, we expect to deal with not more than four
        # revs at a time (such as during commit --amend). When rebasing large
        # stacks of commits, the number can go up, hence the config knob below.
        cachesize = 4
        optiontreemanifest = False
        opts = getattr(opener, 'options', None)
        if opts is not None:
            cachesize = opts.get('manifestcachesize', cachesize)
            optiontreemanifest = opts.get('treemanifest', False)

        # the opener option wins over the constructor argument
        self._treeondisk = optiontreemanifest or treemanifest

        self._fulltextcache = manifestfulltextcache(cachesize)

        if tree:
            assert self._treeondisk, 'opts is %r' % opts

        if indexfile is None:
            indexfile = '00manifest.i'
            if tree:
                # per-directory tree manifests live under meta/<dir>/
                indexfile = "meta/" + tree + indexfile

        self.tree = tree

        # The dirlogcache is kept on the root manifest log
        if tree:
            self._dirlogcache = dirlogcache
        else:
            self._dirlogcache = {'': self}

        self._revlog = revlog.revlog(opener, indexfile,
                                     # only root indexfile is cached
                                     checkambig=not bool(tree),
                                     mmaplargeindex=True)

        self.index = self._revlog.index
        self.version = self._revlog.version
        self._generaldelta = self._revlog._generaldelta

    def _setupmanifestcachehooks(self, repo):
        """Persist the manifestfulltextcache on lock release"""
        if not util.safehasattr(repo, '_lockref'):
            return

        self._fulltextcache._opener = repo.cachevfs
        # weakrefs so the hook doesn't keep repo/self alive
        reporef = weakref.ref(repo)
        manifestrevlogref = weakref.ref(self)

        def persistmanifestcache():
            repo = reporef()
            self = manifestrevlogref()
            if repo is None or self is None:
                return
            if repo.manifestlog.getstorage(b'') is not self:
                # there's a different manifest in play now, abort
                return
            self._fulltextcache.write()

        if repo._currentlock(repo._lockref) is not None:
            repo._afterlock(persistmanifestcache)

    @property
    def fulltextcache(self):
        # read-only access to the full-text LRU cache
        return self._fulltextcache

    def clearcaches(self, clear_persisted_data=False):
        self._revlog.clearcaches()
        self._fulltextcache.clear(clear_persisted_data=clear_persisted_data)
        self._dirlogcache = {self.tree: self}

    def dirlog(self, d):
        """Return the manifestrevlog for directory ``d`` (cached)."""
        if d:
            assert self._treeondisk
        if d not in self._dirlogcache:
            mfrevlog = manifestrevlog(self.opener, d,
                                      self._dirlogcache,
                                      treemanifest=self._treeondisk)
            self._dirlogcache[d] = mfrevlog
        return self._dirlogcache[d]

    def add(self, m, transaction, link, p1, p2, added, removed, readtree=None,
            match=None):
        """Add manifest ``m`` as a new revision and return its node."""
        if p1 in self.fulltextcache and util.safehasattr(m, 'fastdelta'):
            # If our first parent is in the manifest cache, we can
            # compute a delta here using properties we know about the
            # manifest up-front, which may save time later for the
            # revlog layer.

            _checkforbidden(added)
            # combine the changed lists into one sorted iterator
            work = heapq.merge([(x, False) for x in added],
                               [(x, True) for x in removed])

            arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
            cachedelta = self._revlog.rev(p1), deltatext
            text = util.buffer(arraytext)
            n = self._revlog.addrevision(text, transaction, link, p1, p2,
                                         cachedelta)
        else:
            # The first parent manifest isn't already loaded, so we'll
            # just encode a fulltext of the manifest and pass that
            # through to the revlog layer, and let it handle the delta
            # process.
            if self._treeondisk:
                assert readtree, "readtree must be set for treemanifest writes"
                assert match, "match must be specified for treemanifest writes"
                m1 = readtree(self.tree, p1)
                m2 = readtree(self.tree, p2)
                n = self._addtree(m, transaction, link, m1, m2, readtree,
                                  match=match)
                arraytext = None
            else:
                text = m.text()
                n = self._revlog.addrevision(text, transaction, link, p1, p2)
                arraytext = bytearray(text)

        if arraytext is not None:
            self.fulltextcache[n] = arraytext

        return n

    def _addtree(self, m, transaction, link, m1, m2, readtree, match):
        """Write tree manifest ``m`` (and its subtrees) and return its node."""
        # If the manifest is unchanged compared to one parent,
        # don't write a new revision
        if self.tree != '' and (m.unmodifiedsince(m1) or m.unmodifiedsince(
            m2)):
            return m.node()
        def writesubtree(subm, subp1, subp2, match):
            sublog = self.dirlog(subm.dir())
            sublog.add(subm, transaction, link, subp1, subp2, None, None,
                       readtree=readtree, match=match)
        # children must be written first so dirtext() sees final nodeids
        m.writesubtrees(m1, m2, writesubtree, match)
        text = m.dirtext()
        n = None
        if self.tree != '':
            # Double-check whether contents are unchanged to one parent
            if text == m1.dirtext():
                n = m1.node()
            elif text == m2.dirtext():
                n = m2.node()

        if not n:
            n = self._revlog.addrevision(text, transaction, link, m1.node(),
                                         m2.node())

        # Save nodeid so parent manifest can calculate its nodeid
        m.setnode(n)
        return n

    # The methods below forward directly to the underlying revlog so this
    # class satisfies the imanifeststorage interface without exposing the
    # revlog object itself.

    def __len__(self):
        return len(self._revlog)

    def __iter__(self):
        return self._revlog.__iter__()

    def rev(self, node):
        return self._revlog.rev(node)

    def node(self, rev):
        return self._revlog.node(rev)

    def lookup(self, value):
        return self._revlog.lookup(value)

    def parentrevs(self, rev):
        return self._revlog.parentrevs(rev)

    def parents(self, node):
        return self._revlog.parents(node)

    def linkrev(self, rev):
        return self._revlog.linkrev(rev)

    def checksize(self):
        return self._revlog.checksize()

    def revision(self, node, _df=None, raw=False):
        return self._revlog.revision(node, _df=_df, raw=raw)

    def revdiff(self, rev1, rev2):
        return self._revlog.revdiff(rev1, rev2)

    def cmp(self, node, text):
        return self._revlog.cmp(node, text)

    def deltaparent(self, rev):
        return self._revlog.deltaparent(rev)

    def emitrevisiondeltas(self, requests):
        return self._revlog.emitrevisiondeltas(requests)

    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        return self._revlog.addgroup(deltas, linkmapper, transaction,
                                     addrevisioncb=addrevisioncb)

    def rawsize(self, rev):
        # proxy for the raw (undecoded) stored size of a revision
        return self._revlog.rawsize(rev)

    def getstrippoint(self, minlink):
        return self._revlog.getstrippoint(minlink)

    def strip(self, minlink, transaction):
        return self._revlog.strip(minlink, transaction)

    def files(self):
        return self._revlog.files()

    def clone(self, tr, destrevlog, **kwargs):
        if not isinstance(destrevlog, manifestrevlog):
            raise error.ProgrammingError('expected manifestrevlog to clone()')

        return self._revlog.clone(tr, destrevlog._revlog, **kwargs)

    @property
    def indexfile(self):
        return self._revlog.indexfile

    @indexfile.setter
    def indexfile(self, value):
        self._revlog.indexfile = value

    @property
    def opener(self):
        return self._revlog.opener

    @opener.setter
    def opener(self, value):
        self._revlog.opener = value
1602
1605
@interfaceutil.implementer(repository.imanifestlog)
class manifestlog(object):
    """A collection class representing the collection of manifest snapshots
    referenced by commits in the repository.

    In this situation, 'manifest' refers to the abstract concept of a snapshot
    of the list of files in the given commit. Consumers of the output of this
    class do not care about the implementation details of the actual manifests
    they receive (i.e. tree or flat or lazily loaded, etc)."""
    def __init__(self, opener, repo, rootstore):
        usetreemanifest = False
        cachesize = 4

        opts = getattr(opener, 'options', None)
        if opts is not None:
            usetreemanifest = opts.get('treemanifest', usetreemanifest)
            cachesize = opts.get('manifestcachesize', cachesize)

        self._treemanifests = usetreemanifest

        self._rootstore = rootstore
        self._rootstore._setupmanifestcachehooks(repo)
        self._narrowmatch = repo.narrowmatch()

        # A cache of the manifestctx or treemanifestctx for each directory
        self._dirmancache = {}
        self._dirmancache[''] = util.lrucachedict(cachesize)

        self._cachesize = cachesize

    def __getitem__(self, node):
        """Retrieves the manifest instance for the given node. Throws a
        LookupError if not found.
        """
        return self.get('', node)

    def get(self, tree, node, verify=True):
        """Retrieves the manifest instance for the given node. Throws a
        LookupError if not found.

        `verify` - if True an exception will be thrown if the node is not in
        the revlog
        """
        if node in self._dirmancache.get(tree, ()):
            return self._dirmancache[tree][node]

        if not self._narrowmatch.always():
            if not self._narrowmatch.visitdir(tree[:-1] or '.'):
                # directory lies outside the narrow spec; hand back a stub
                return excludeddirmanifestctx(tree, node)
        if tree:
            if self._rootstore._treeondisk:
                if verify:
                    # Side-effect is LookupError is raised if node doesn't
                    # exist.
                    self.getstorage(tree).rev(node)

                m = treemanifestctx(self, tree, node)
            else:
                raise error.Abort(
                        _("cannot ask for manifest directory '%s' in a flat "
                          "manifest") % tree)
        else:
            if verify:
                # Side-effect is LookupError is raised if node doesn't exist.
                self._rootstore.rev(node)

            if self._treemanifests:
                m = treemanifestctx(self, '', node)
            else:
                m = manifestctx(self, node)

        # cache everything except the null manifest
        if node != nullid:
            mancache = self._dirmancache.get(tree)
            if not mancache:
                mancache = util.lrucachedict(self._cachesize)
                self._dirmancache[tree] = mancache
            mancache[node] = m
        return m

    def getstorage(self, tree):
        # per-directory storage comes from the root store's dirlog cache
        return self._rootstore.dirlog(tree)

    def clearcaches(self, clear_persisted_data=False):
        self._dirmancache.clear()
        self._rootstore.clearcaches(clear_persisted_data=clear_persisted_data)

    def rev(self, node):
        return self._rootstore.rev(node)
1691
1694
@interfaceutil.implementer(repository.imanifestrevisionwritable)
class memmanifestctx(object):
    """An in-memory, mutable flat manifest revision awaiting a write."""

    def __init__(self, manifestlog):
        self._manifestlog = manifestlog
        self._manifestdict = manifestdict()

    def _storage(self):
        """Return the root manifest revlog."""
        return self._manifestlog.getstorage(b'')

    def new(self):
        """Create a fresh, empty writable context on the same log."""
        return memmanifestctx(self._manifestlog)

    def copy(self):
        """Return an independent writable copy of this context."""
        clone = memmanifestctx(self._manifestlog)
        clone._manifestdict = self.read().copy()
        return clone

    def read(self):
        return self._manifestdict

    def write(self, transaction, link, p1, p2, added, removed, match=None):
        """Persist the accumulated manifest; returns the new node."""
        return self._storage().add(self._manifestdict, transaction, link,
                                   p1, p2, added, removed, match=match)
1715
1718
@interfaceutil.implementer(repository.imanifestrevisionstored)
class manifestctx(object):
    """A single revision of a flat manifest: its contents plus, lazily,
    its parents (and eventually its linkrev).
    """
    def __init__(self, manifestlog, node):
        self._manifestlog = manifestlog
        self._data = None
        self._node = node

        # TODO: We eventually want p1, p2, and linkrev exposed on this class,
        # but let's add it later when something needs it and we can load it
        # lazily.
        #self.p1, self.p2 = store.parents(node)
        #rev = store.rev(node)
        #self.linkrev = store.linkrev(rev)

    def _storage(self):
        """Return the root manifest revlog."""
        return self._manifestlog.getstorage(b'')

    def node(self):
        """Binary node id of this manifest revision."""
        return self._node

    def new(self):
        """Create an empty, writable manifest context on the same log."""
        return memmanifestctx(self._manifestlog)

    def copy(self):
        """Return a writable context seeded with this revision's contents."""
        clone = memmanifestctx(self._manifestlog)
        clone._manifestdict = self.read().copy()
        return clone

    @propertycache
    def parents(self):
        return self._storage().parents(self._node)

    def read(self):
        """Parse this revision into a manifestdict, caching the result."""
        if self._data is not None:
            return self._data

        if self._node == nullid:
            self._data = manifestdict()
            return self._data

        store = self._storage()
        cache = store.fulltextcache
        if self._node in cache:
            text = pycompat.bytestr(cache[self._node])
        else:
            text = store.revision(self._node)
            # Keep the fulltext around for future readers.
            cache[self._node] = bytearray(text)
        self._data = manifestdict(text)
        return self._data

    def readfast(self, shallow=False):
        '''Calls either readdelta or read, based on which would be less work.
        readdelta is called if the delta is against the p1, and therefore can
        be read quickly.

        If `shallow` is True, nothing changes since this is a flat manifest.
        '''
        store = self._storage()
        rev = store.rev(self._node)
        dp = store.deltaparent(rev)
        if dp != nullrev and dp in store.parentrevs(rev):
            return self.readdelta()
        return self.read()

    def readdelta(self, shallow=False):
        '''Returns a manifest containing just the entries that are present
        in this manifest, but not in its p1 manifest. This is efficient to
        read if the revlog delta is already p1.

        Changing the value of `shallow` has no effect on flat manifests.
        '''
        store = self._storage()
        rev = store.rev(self._node)
        return manifestdict(
            mdiff.patchtext(store.revdiff(store.deltaparent(rev), rev)))

    def find(self, key):
        return self.read().find(key)
1795
1798
@interfaceutil.implementer(repository.imanifestrevisionwritable)
class memtreemanifestctx(object):
    """An in-memory, mutable tree manifest revision awaiting a write."""

    def __init__(self, manifestlog, dir=''):
        self._manifestlog = manifestlog
        self._dir = dir
        self._treemanifest = treemanifest()

    def _storage(self):
        """Return the root manifest revlog."""
        return self._manifestlog.getstorage(b'')

    def new(self, dir=''):
        """Create a fresh, empty writable context for ``dir``."""
        return memtreemanifestctx(self._manifestlog, dir=dir)

    def copy(self):
        """Return an independent writable copy of this context."""
        clone = memtreemanifestctx(self._manifestlog, dir=self._dir)
        clone._treemanifest = self._treemanifest.copy()
        return clone

    def read(self):
        return self._treemanifest

    def write(self, transaction, link, p1, p2, added, removed, match=None):
        """Persist the accumulated tree; returns the new root node."""
        def readtree(dir, node):
            # Subtree fulltexts are needed to compute deltas on write.
            return self._manifestlog.get(dir, node).read()
        return self._storage().add(self._treemanifest, transaction, link,
                                   p1, p2, added, removed, readtree=readtree,
                                   match=match)
1823
1826
@interfaceutil.implementer(repository.imanifestrevisionstored)
class treemanifestctx(object):
    """A single revision of a tree manifest rooted at ``dir``."""

    def __init__(self, manifestlog, dir, node):
        self._manifestlog = manifestlog
        self._dir = dir
        self._data = None
        self._node = node

        # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so
        # that we can instantiate treemanifestctx objects for directories we
        # don't have on disk.
        #self.p1, self.p2 = store.parents(node)
        #rev = store.rev(node)
        #self.linkrev = store.linkrev(rev)

    def _storage(self):
        """Return the dirlog for this directory, or a stand-in when the
        directory lies outside the narrowspec."""
        narrowmatch = self._manifestlog._narrowmatch
        if not narrowmatch.always():
            if not narrowmatch.visitdir(self._dir[:-1] or '.'):
                return excludedmanifestrevlog(self._dir)
        return self._manifestlog.getstorage(self._dir)

    def read(self):
        """Parse this revision into a treemanifest, caching the result."""
        if self._data is not None:
            return self._data

        store = self._storage()
        if self._node == nullid:
            self._data = treemanifest()
        # TODO accessing non-public API
        elif store._treeondisk:
            m = treemanifest(dir=self._dir)

            def gettext():
                return store.revision(self._node)

            def readsubtree(dir, subm):
                # Set verify to False since we need to be able to create
                # subtrees for trees that don't exist on disk.
                return self._manifestlog.get(dir, subm, verify=False).read()

            m.read(gettext, readsubtree)
            m.setnode(self._node)
            self._data = m
        else:
            cache = store.fulltextcache
            if self._node in cache:
                text = pycompat.bytestr(cache[self._node])
            else:
                text = store.revision(self._node)
                # Keep the fulltext around for future readers.
                cache[self._node] = bytearray(text)
            self._data = treemanifest(dir=self._dir, text=text)

        return self._data

    def node(self):
        """Binary node id of this manifest revision."""
        return self._node

    def new(self, dir=''):
        """Create an empty, writable tree context for ``dir``."""
        return memtreemanifestctx(self._manifestlog, dir=dir)

    def copy(self):
        """Return a writable context seeded with this revision's contents."""
        clone = memtreemanifestctx(self._manifestlog, dir=self._dir)
        clone._treemanifest = self.read().copy()
        return clone

    @propertycache
    def parents(self):
        return self._storage().parents(self._node)

    def readdelta(self, shallow=False):
        '''Returns a manifest containing just the entries that are present
        in this manifest, but not in its p1 manifest. This is efficient to
        read if the revlog delta is already p1.

        If `shallow` is True, this will read the delta for this directory,
        without recursively reading subdirectory manifests. Instead, any
        subdirectory entry will be reported as it appears in the manifest,
        i.e. the subdirectory will be reported among files and distinguished
        only by its 't' flag.
        '''
        store = self._storage()
        if shallow:
            r = store.rev(self._node)
            return manifestdict(
                mdiff.patchtext(store.revdiff(store.deltaparent(r), r)))

        # Need to perform a slow delta
        r0 = store.deltaparent(store.rev(self._node))
        m0 = self._manifestlog.get(self._dir, store.node(r0)).read()
        m1 = self.read()
        md = treemanifest(dir=self._dir)
        for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
            if n1:
                md[f] = n1
                if fl1:
                    md.setflag(f, fl1)
        return md

    def readfast(self, shallow=False):
        '''Calls either readdelta or read, based on which would be less work.
        readdelta is called if the delta is against the p1, and therefore can
        be read quickly.

        If `shallow` is True, it only returns the entries from this manifest,
        and not any submanifests.
        '''
        store = self._storage()
        r = store.rev(self._node)
        dp = store.deltaparent(r)
        if dp != nullrev and dp in store.parentrevs(r):
            return self.readdelta(shallow=shallow)

        if shallow:
            return manifestdict(store.revision(self._node))
        return self.read()

    def find(self, key):
        return self.read().find(key)
1941
1944
class excludeddir(treemanifest):
    """Stand-in for a directory that is excluded from the repository.

    With narrowing active on a repository that uses treemanifests, some
    directory revlogs are excluded from the clone. That is a large storage
    win for clients, but internals still need a pseudo-manifest so that,
    for example, a merge conflict outside the narrowspec can be detected.
    This class is that pseudo-manifest: it stands in for a directory whose
    node is known but whose contents are unknown.
    """
    def __init__(self, dir, node):
        super(excludeddir, self).__init__(dir)
        self._node = node
        # Add an empty file, which will be included by iterators and such,
        # appearing as the directory itself (i.e. something like "dir/")
        self._files[''] = node
        self._flags[''] = 't'

    # Manifests outside the narrowspec should never be modified, so avoid
    # copying. This makes a noticeable difference when there are very many
    # directories outside the narrowspec. Also, it makes sense for the copy
    # to be of the same type as the original, which would not happen with
    # the super type's copy().
    def copy(self):
        return self
1968
1971
class excludeddirmanifestctx(treemanifestctx):
    """context wrapper for excludeddir - see that docstring for rationale"""

    def __init__(self, dir, node):
        # Deliberately does NOT call the superclass __init__: there is no
        # backing storage for an excluded directory.
        self._dir = dir
        self._node = node

    def read(self):
        """Return the opaque stand-in manifest for this directory."""
        return excludeddir(self._dir, self._node)

    def write(self, *args):
        # Writing outside the narrowspec is always a programming error.
        raise error.ProgrammingError(
            'attempt to write manifest from excluded dir %s' % self._dir)
1981
1984
class excludedmanifestrevlog(manifestrevlog):
    """Stand-in for excluded treemanifest revlogs.

    When narrowing is active on a treemanifest repository, we'll have
    references to directories we can't see because their revlogs were
    skipped. This class conforms to the manifestrevlog interface for those
    directories and proactively prevents writes outside the narrowspec.
    """

    def __init__(self, dir):
        # Deliberately does NOT call the superclass __init__: there is no
        # backing revlog for an excluded directory.
        self._dir = dir

    def _refuse(self, what):
        """Raise a ProgrammingError for an operation on excluded storage."""
        raise error.ProgrammingError(
            'attempt to %s excluded dir %s' % (what, self._dir))

    def __len__(self):
        self._refuse('get length of')

    def rev(self, node):
        self._refuse('get rev from')

    def linkrev(self, node):
        self._refuse('get linkrev from')

    def node(self, rev):
        self._refuse('get node from')

    def add(self, *args, **kwargs):
        # We should never write entries in dirlogs outside the narrow clone.
        # However, the method still gets called from writesubtree() in
        # _addtree(), so we need to handle it. We should possibly make that
        # avoid calling add() with a clean manifest (_dirty is always False
        # in excludeddir instances).
        pass
@@ -1,1634 +1,1642 b''
1 # repository.py - Interfaces and base classes for repositories and peers.
1 # repository.py - Interfaces and base classes for repositories and peers.
2 #
2 #
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from .i18n import _
10 from .i18n import _
11 from . import (
11 from . import (
12 error,
12 error,
13 )
13 )
14 from .utils import (
14 from .utils import (
15 interfaceutil,
15 interfaceutil,
16 )
16 )
17
17
# When narrowing is finalized and no longer subject to format changes,
# we should move this to just "narrow" or similar.
NARROW_REQUIREMENT = 'narrowhg-experimental'

# Local repository feature strings follow.

# Revlogs are being used for file storage.
REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
# The storage part of the repository is shared from an external source.
REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
# LFS supported for backing file storage.
REPO_FEATURE_LFS = b'lfs'
30
30
class ipeerconnection(interfaceutil.Interface):
    """Represents a "connection" to a repository.

    The base interface describing a connection to a repository. It carries
    the basic properties and methods common to every peer type.

    This is not a complete interface definition and should not be used
    outside of this module.
    """
    ui = interfaceutil.Attribute("""ui.ui instance""")

    def url():
        """Returns a URL string representing this peer.

        Currently, implementations expose the raw URL used to construct the
        instance. It may contain credentials as part of the URL. The
        expectations of the value aren't well-defined and this could lead to
        data leakage.

        TODO audit/clean consumers and more clearly define the contents of
        this value.
        """

    def local():
        """Returns a local repository instance.

        If the peer represents a local repository, returns an object that
        can be used to interface with it. Otherwise returns ``None``.
        """

    def peer():
        """Returns an object conforming to this interface.

        Most implementations will ``return self``.
        """

    def canpush():
        """Returns a boolean indicating if this peer can be pushed to."""

    def close():
        """Close the connection to this peer.

        Called when the peer will no longer be used; resources associated
        with the peer should be cleaned up.
        """
76
76
class ipeercapabilities(interfaceutil.Interface):
    """Peer sub-interface related to capabilities."""

    def capable(name):
        """Determine support for a named capability.

        Returns ``False`` if capability not supported.

        Returns ``True`` if boolean capability is supported. Returns a
        string if capability support is non-boolean.

        Capability strings may or may not map to wire protocol
        capabilities.
        """

    def requirecap(name, purpose):
        """Require a capability to be present.

        Raises a ``CapabilityError`` if the capability isn't present.
        """
96
96
97 class ipeercommands(interfaceutil.Interface):
97 class ipeercommands(interfaceutil.Interface):
98 """Client-side interface for communicating over the wire protocol.
98 """Client-side interface for communicating over the wire protocol.
99
99
100 This interface is used as a gateway to the Mercurial wire protocol.
100 This interface is used as a gateway to the Mercurial wire protocol.
101 methods commonly call wire protocol commands of the same name.
101 methods commonly call wire protocol commands of the same name.
102 """
102 """
103
103
104 def branchmap():
104 def branchmap():
105 """Obtain heads in named branches.
105 """Obtain heads in named branches.
106
106
107 Returns a dict mapping branch name to an iterable of nodes that are
107 Returns a dict mapping branch name to an iterable of nodes that are
108 heads on that branch.
108 heads on that branch.
109 """
109 """
110
110
111 def capabilities():
111 def capabilities():
112 """Obtain capabilities of the peer.
112 """Obtain capabilities of the peer.
113
113
114 Returns a set of string capabilities.
114 Returns a set of string capabilities.
115 """
115 """
116
116
117 def clonebundles():
117 def clonebundles():
118 """Obtains the clone bundles manifest for the repo.
118 """Obtains the clone bundles manifest for the repo.
119
119
120 Returns the manifest as unparsed bytes.
120 Returns the manifest as unparsed bytes.
121 """
121 """
122
122
123 def debugwireargs(one, two, three=None, four=None, five=None):
123 def debugwireargs(one, two, three=None, four=None, five=None):
124 """Used to facilitate debugging of arguments passed over the wire."""
124 """Used to facilitate debugging of arguments passed over the wire."""
125
125
126 def getbundle(source, **kwargs):
126 def getbundle(source, **kwargs):
127 """Obtain remote repository data as a bundle.
127 """Obtain remote repository data as a bundle.
128
128
129 This command is how the bulk of repository data is transferred from
129 This command is how the bulk of repository data is transferred from
130 the peer to the local repository
130 the peer to the local repository
131
131
132 Returns a generator of bundle data.
132 Returns a generator of bundle data.
133 """
133 """
134
134
135 def heads():
135 def heads():
136 """Determine all known head revisions in the peer.
136 """Determine all known head revisions in the peer.
137
137
138 Returns an iterable of binary nodes.
138 Returns an iterable of binary nodes.
139 """
139 """
140
140
141 def known(nodes):
141 def known(nodes):
142 """Determine whether multiple nodes are known.
142 """Determine whether multiple nodes are known.
143
143
144 Accepts an iterable of nodes whose presence to check for.
144 Accepts an iterable of nodes whose presence to check for.
145
145
146 Returns an iterable of booleans indicating of the corresponding node
146 Returns an iterable of booleans indicating of the corresponding node
147 at that index is known to the peer.
147 at that index is known to the peer.
148 """
148 """
149
149
150 def listkeys(namespace):
150 def listkeys(namespace):
151 """Obtain all keys in a pushkey namespace.
151 """Obtain all keys in a pushkey namespace.
152
152
153 Returns an iterable of key names.
153 Returns an iterable of key names.
154 """
154 """
155
155
156 def lookup(key):
156 def lookup(key):
157 """Resolve a value to a known revision.
157 """Resolve a value to a known revision.
158
158
159 Returns a binary node of the resolved revision on success.
159 Returns a binary node of the resolved revision on success.
160 """
160 """
161
161
162 def pushkey(namespace, key, old, new):
162 def pushkey(namespace, key, old, new):
163 """Set a value using the ``pushkey`` protocol.
163 """Set a value using the ``pushkey`` protocol.
164
164
165 Arguments correspond to the pushkey namespace and key to operate on and
165 Arguments correspond to the pushkey namespace and key to operate on and
166 the old and new values for that key.
166 the old and new values for that key.
167
167
168 Returns a string with the peer result. The value inside varies by the
168 Returns a string with the peer result. The value inside varies by the
169 namespace.
169 namespace.
170 """
170 """
171
171
172 def stream_out():
172 def stream_out():
173 """Obtain streaming clone data.
173 """Obtain streaming clone data.
174
174
175 Successful result should be a generator of data chunks.
175 Successful result should be a generator of data chunks.
176 """
176 """
177
177
178 def unbundle(bundle, heads, url):
178 def unbundle(bundle, heads, url):
179 """Transfer repository data to the peer.
179 """Transfer repository data to the peer.
180
180
181 This is how the bulk of data during a push is transferred.
181 This is how the bulk of data during a push is transferred.
182
182
183 Returns the integer number of heads added to the peer.
183 Returns the integer number of heads added to the peer.
184 """
184 """
185
185
186 class ipeerlegacycommands(interfaceutil.Interface):
186 class ipeerlegacycommands(interfaceutil.Interface):
187 """Interface for implementing support for legacy wire protocol commands.
187 """Interface for implementing support for legacy wire protocol commands.
188
188
189 Wire protocol commands transition to legacy status when they are no longer
189 Wire protocol commands transition to legacy status when they are no longer
190 used by modern clients. To facilitate identifying which commands are
190 used by modern clients. To facilitate identifying which commands are
191 legacy, the interfaces are split.
191 legacy, the interfaces are split.
192 """
192 """
193
193
194 def between(pairs):
194 def between(pairs):
195 """Obtain nodes between pairs of nodes.
195 """Obtain nodes between pairs of nodes.
196
196
197 ``pairs`` is an iterable of node pairs.
197 ``pairs`` is an iterable of node pairs.
198
198
199 Returns an iterable of iterables of nodes corresponding to each
199 Returns an iterable of iterables of nodes corresponding to each
200 requested pair.
200 requested pair.
201 """
201 """
202
202
203 def branches(nodes):
203 def branches(nodes):
204 """Obtain ancestor changesets of specific nodes back to a branch point.
204 """Obtain ancestor changesets of specific nodes back to a branch point.
205
205
206 For each requested node, the peer finds the first ancestor node that is
206 For each requested node, the peer finds the first ancestor node that is
207 a DAG root or is a merge.
207 a DAG root or is a merge.
208
208
209 Returns an iterable of iterables with the resolved values for each node.
209 Returns an iterable of iterables with the resolved values for each node.
210 """
210 """
211
211
212 def changegroup(nodes, source):
212 def changegroup(nodes, source):
213 """Obtain a changegroup with data for descendants of specified nodes."""
213 """Obtain a changegroup with data for descendants of specified nodes."""
214
214
215 def changegroupsubset(bases, heads, source):
215 def changegroupsubset(bases, heads, source):
216 pass
216 pass
217
217
218 class ipeercommandexecutor(interfaceutil.Interface):
218 class ipeercommandexecutor(interfaceutil.Interface):
219 """Represents a mechanism to execute remote commands.
219 """Represents a mechanism to execute remote commands.
220
220
221 This is the primary interface for requesting that wire protocol commands
221 This is the primary interface for requesting that wire protocol commands
222 be executed. Instances of this interface are active in a context manager
222 be executed. Instances of this interface are active in a context manager
223 and have a well-defined lifetime. When the context manager exits, all
223 and have a well-defined lifetime. When the context manager exits, all
224 outstanding requests are waited on.
224 outstanding requests are waited on.
225 """
225 """
226
226
227 def callcommand(name, args):
227 def callcommand(name, args):
228 """Request that a named command be executed.
228 """Request that a named command be executed.
229
229
230 Receives the command name and a dictionary of command arguments.
230 Receives the command name and a dictionary of command arguments.
231
231
232 Returns a ``concurrent.futures.Future`` that will resolve to the
232 Returns a ``concurrent.futures.Future`` that will resolve to the
233 result of that command request. That exact value is left up to
233 result of that command request. That exact value is left up to
234 the implementation and possibly varies by command.
234 the implementation and possibly varies by command.
235
235
236 Not all commands can coexist with other commands in an executor
236 Not all commands can coexist with other commands in an executor
237 instance: it depends on the underlying wire protocol transport being
237 instance: it depends on the underlying wire protocol transport being
238 used and the command itself.
238 used and the command itself.
239
239
240 Implementations MAY call ``sendcommands()`` automatically if the
240 Implementations MAY call ``sendcommands()`` automatically if the
241 requested command can not coexist with other commands in this executor.
241 requested command can not coexist with other commands in this executor.
242
242
243 Implementations MAY call ``sendcommands()`` automatically when the
243 Implementations MAY call ``sendcommands()`` automatically when the
244 future's ``result()`` is called. So, consumers using multiple
244 future's ``result()`` is called. So, consumers using multiple
245 commands with an executor MUST ensure that ``result()`` is not called
245 commands with an executor MUST ensure that ``result()`` is not called
246 until all command requests have been issued.
246 until all command requests have been issued.
247 """
247 """
248
248
249 def sendcommands():
249 def sendcommands():
250 """Trigger submission of queued command requests.
250 """Trigger submission of queued command requests.
251
251
252 Not all transports submit commands as soon as they are requested to
252 Not all transports submit commands as soon as they are requested to
253 run. When called, this method forces queued command requests to be
253 run. When called, this method forces queued command requests to be
254 issued. It will no-op if all commands have already been sent.
254 issued. It will no-op if all commands have already been sent.
255
255
256 When called, no more new commands may be issued with this executor.
256 When called, no more new commands may be issued with this executor.
257 """
257 """
258
258
259 def close():
259 def close():
260 """Signal that this command request is finished.
260 """Signal that this command request is finished.
261
261
262 When called, no more new commands may be issued. All outstanding
262 When called, no more new commands may be issued. All outstanding
263 commands that have previously been issued are waited on before
263 commands that have previously been issued are waited on before
264 returning. This not only includes waiting for the futures to resolve,
264 returning. This not only includes waiting for the futures to resolve,
265 but also waiting for all response data to arrive. In other words,
265 but also waiting for all response data to arrive. In other words,
266 calling this waits for all on-wire state for issued command requests
266 calling this waits for all on-wire state for issued command requests
267 to finish.
267 to finish.
268
268
269 When used as a context manager, this method is called when exiting the
269 When used as a context manager, this method is called when exiting the
270 context manager.
270 context manager.
271
271
272 This method may call ``sendcommands()`` if there are buffered commands.
272 This method may call ``sendcommands()`` if there are buffered commands.
273 """
273 """
274
274
275 class ipeerrequests(interfaceutil.Interface):
275 class ipeerrequests(interfaceutil.Interface):
276 """Interface for executing commands on a peer."""
276 """Interface for executing commands on a peer."""
277
277
278 def commandexecutor():
278 def commandexecutor():
279 """A context manager that resolves to an ipeercommandexecutor.
279 """A context manager that resolves to an ipeercommandexecutor.
280
280
281 The object this resolves to can be used to issue command requests
281 The object this resolves to can be used to issue command requests
282 to the peer.
282 to the peer.
283
283
284 Callers should call its ``callcommand`` method to issue command
284 Callers should call its ``callcommand`` method to issue command
285 requests.
285 requests.
286
286
287 A new executor should be obtained for each distinct set of commands
287 A new executor should be obtained for each distinct set of commands
288 (possibly just a single command) that the consumer wants to execute
288 (possibly just a single command) that the consumer wants to execute
289 as part of a single operation or round trip. This is because some
289 as part of a single operation or round trip. This is because some
290 peers are half-duplex and/or don't support persistent connections.
290 peers are half-duplex and/or don't support persistent connections.
291 e.g. in the case of HTTP peers, commands sent to an executor represent
291 e.g. in the case of HTTP peers, commands sent to an executor represent
292 a single HTTP request. While some peers may support multiple command
292 a single HTTP request. While some peers may support multiple command
293 sends over the wire per executor, consumers need to code to the least
293 sends over the wire per executor, consumers need to code to the least
294 capable peer. So it should be assumed that command executors buffer
294 capable peer. So it should be assumed that command executors buffer
295 called commands until they are told to send them and that each
295 called commands until they are told to send them and that each
296 command executor could result in a new connection or wire-level request
296 command executor could result in a new connection or wire-level request
297 being issued.
297 being issued.
298 """
298 """
299
299
300 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
300 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
301 """Unified interface for peer repositories.
301 """Unified interface for peer repositories.
302
302
303 All peer instances must conform to this interface.
303 All peer instances must conform to this interface.
304 """
304 """
305
305
306 @interfaceutil.implementer(ipeerbase)
306 @interfaceutil.implementer(ipeerbase)
307 class peer(object):
307 class peer(object):
308 """Base class for peer repositories."""
308 """Base class for peer repositories."""
309
309
310 def capable(self, name):
310 def capable(self, name):
311 caps = self.capabilities()
311 caps = self.capabilities()
312 if name in caps:
312 if name in caps:
313 return True
313 return True
314
314
315 name = '%s=' % name
315 name = '%s=' % name
316 for cap in caps:
316 for cap in caps:
317 if cap.startswith(name):
317 if cap.startswith(name):
318 return cap[len(name):]
318 return cap[len(name):]
319
319
320 return False
320 return False
321
321
322 def requirecap(self, name, purpose):
322 def requirecap(self, name, purpose):
323 if self.capable(name):
323 if self.capable(name):
324 return
324 return
325
325
326 raise error.CapabilityError(
326 raise error.CapabilityError(
327 _('cannot %s; remote repository does not support the %r '
327 _('cannot %s; remote repository does not support the %r '
328 'capability') % (purpose, name))
328 'capability') % (purpose, name))
329
329
330 class iverifyproblem(interfaceutil.Interface):
330 class iverifyproblem(interfaceutil.Interface):
331 """Represents a problem with the integrity of the repository.
331 """Represents a problem with the integrity of the repository.
332
332
333 Instances of this interface are emitted to describe an integrity issue
333 Instances of this interface are emitted to describe an integrity issue
334 with a repository (e.g. corrupt storage, missing data, etc).
334 with a repository (e.g. corrupt storage, missing data, etc).
335
335
336 Instances are essentially messages associated with severity.
336 Instances are essentially messages associated with severity.
337 """
337 """
338 warning = interfaceutil.Attribute(
338 warning = interfaceutil.Attribute(
339 """Message indicating a non-fatal problem.""")
339 """Message indicating a non-fatal problem.""")
340
340
341 error = interfaceutil.Attribute(
341 error = interfaceutil.Attribute(
342 """Message indicating a fatal problem.""")
342 """Message indicating a fatal problem.""")
343
343
344 class irevisiondelta(interfaceutil.Interface):
344 class irevisiondelta(interfaceutil.Interface):
345 """Represents a delta between one revision and another.
345 """Represents a delta between one revision and another.
346
346
347 Instances convey enough information to allow a revision to be exchanged
347 Instances convey enough information to allow a revision to be exchanged
348 with another repository.
348 with another repository.
349
349
350 Instances represent the fulltext revision data or a delta against
350 Instances represent the fulltext revision data or a delta against
351 another revision. Therefore the ``revision`` and ``delta`` attributes
351 another revision. Therefore the ``revision`` and ``delta`` attributes
352 are mutually exclusive.
352 are mutually exclusive.
353
353
354 Typically used for changegroup generation.
354 Typically used for changegroup generation.
355 """
355 """
356
356
357 node = interfaceutil.Attribute(
357 node = interfaceutil.Attribute(
358 """20 byte node of this revision.""")
358 """20 byte node of this revision.""")
359
359
360 p1node = interfaceutil.Attribute(
360 p1node = interfaceutil.Attribute(
361 """20 byte node of 1st parent of this revision.""")
361 """20 byte node of 1st parent of this revision.""")
362
362
363 p2node = interfaceutil.Attribute(
363 p2node = interfaceutil.Attribute(
364 """20 byte node of 2nd parent of this revision.""")
364 """20 byte node of 2nd parent of this revision.""")
365
365
366 linknode = interfaceutil.Attribute(
366 linknode = interfaceutil.Attribute(
367 """20 byte node of the changelog revision this node is linked to.""")
367 """20 byte node of the changelog revision this node is linked to.""")
368
368
369 flags = interfaceutil.Attribute(
369 flags = interfaceutil.Attribute(
370 """2 bytes of integer flags that apply to this revision.""")
370 """2 bytes of integer flags that apply to this revision.""")
371
371
372 basenode = interfaceutil.Attribute(
372 basenode = interfaceutil.Attribute(
373 """20 byte node of the revision this data is a delta against.
373 """20 byte node of the revision this data is a delta against.
374
374
375 ``nullid`` indicates that the revision is a full revision and not
375 ``nullid`` indicates that the revision is a full revision and not
376 a delta.
376 a delta.
377 """)
377 """)
378
378
379 baserevisionsize = interfaceutil.Attribute(
379 baserevisionsize = interfaceutil.Attribute(
380 """Size of base revision this delta is against.
380 """Size of base revision this delta is against.
381
381
382 May be ``None`` if ``basenode`` is ``nullid``.
382 May be ``None`` if ``basenode`` is ``nullid``.
383 """)
383 """)
384
384
385 revision = interfaceutil.Attribute(
385 revision = interfaceutil.Attribute(
386 """Raw fulltext of revision data for this node.""")
386 """Raw fulltext of revision data for this node.""")
387
387
388 delta = interfaceutil.Attribute(
388 delta = interfaceutil.Attribute(
389 """Delta between ``basenode`` and ``node``.
389 """Delta between ``basenode`` and ``node``.
390
390
391 Stored in the bdiff delta format.
391 Stored in the bdiff delta format.
392 """)
392 """)
393
393
394 class irevisiondeltarequest(interfaceutil.Interface):
394 class irevisiondeltarequest(interfaceutil.Interface):
395 """Represents a request to generate an ``irevisiondelta``."""
395 """Represents a request to generate an ``irevisiondelta``."""
396
396
397 node = interfaceutil.Attribute(
397 node = interfaceutil.Attribute(
398 """20 byte node of revision being requested.""")
398 """20 byte node of revision being requested.""")
399
399
400 p1node = interfaceutil.Attribute(
400 p1node = interfaceutil.Attribute(
401 """20 byte node of 1st parent of revision.""")
401 """20 byte node of 1st parent of revision.""")
402
402
403 p2node = interfaceutil.Attribute(
403 p2node = interfaceutil.Attribute(
404 """20 byte node of 2nd parent of revision.""")
404 """20 byte node of 2nd parent of revision.""")
405
405
406 linknode = interfaceutil.Attribute(
406 linknode = interfaceutil.Attribute(
407 """20 byte node to store in ``linknode`` attribute.""")
407 """20 byte node to store in ``linknode`` attribute.""")
408
408
409 basenode = interfaceutil.Attribute(
409 basenode = interfaceutil.Attribute(
410 """Base revision that delta should be generated against.
410 """Base revision that delta should be generated against.
411
411
412 If ``nullid``, the derived ``irevisiondelta`` should have its
412 If ``nullid``, the derived ``irevisiondelta`` should have its
413 ``revision`` field populated and no delta should be generated.
413 ``revision`` field populated and no delta should be generated.
414
414
415 If ``None``, the delta may be generated against any revision that
415 If ``None``, the delta may be generated against any revision that
416 is an ancestor of this revision. Or a full revision may be used.
416 is an ancestor of this revision. Or a full revision may be used.
417
417
418 If any other value, the delta should be produced against that
418 If any other value, the delta should be produced against that
419 revision.
419 revision.
420 """)
420 """)
421
421
422 ellipsis = interfaceutil.Attribute(
422 ellipsis = interfaceutil.Attribute(
423 """Boolean on whether the ellipsis flag should be set.""")
423 """Boolean on whether the ellipsis flag should be set.""")
424
424
425 class ifilerevisionssequence(interfaceutil.Interface):
425 class ifilerevisionssequence(interfaceutil.Interface):
426 """Contains index data for all revisions of a file.
426 """Contains index data for all revisions of a file.
427
427
428 Types implementing this behave like lists of tuples. The index
428 Types implementing this behave like lists of tuples. The index
429 in the list corresponds to the revision number. The values contain
429 in the list corresponds to the revision number. The values contain
430 index metadata.
430 index metadata.
431
431
432 The *null* revision (revision number -1) is always the last item
432 The *null* revision (revision number -1) is always the last item
433 in the index.
433 in the index.
434 """
434 """
435
435
436 def __len__():
436 def __len__():
437 """The total number of revisions."""
437 """The total number of revisions."""
438
438
439 def __getitem__(rev):
439 def __getitem__(rev):
440 """Returns the object having a specific revision number.
440 """Returns the object having a specific revision number.
441
441
442 Returns an 8-tuple with the following fields:
442 Returns an 8-tuple with the following fields:
443
443
444 offset+flags
444 offset+flags
445 Contains the offset and flags for the revision. 64-bit unsigned
445 Contains the offset and flags for the revision. 64-bit unsigned
446 integer where first 6 bytes are the offset and the next 2 bytes
446 integer where first 6 bytes are the offset and the next 2 bytes
447 are flags. The offset can be 0 if it is not used by the store.
447 are flags. The offset can be 0 if it is not used by the store.
448 compressed size
448 compressed size
449 Size of the revision data in the store. It can be 0 if it isn't
449 Size of the revision data in the store. It can be 0 if it isn't
450 needed by the store.
450 needed by the store.
451 uncompressed size
451 uncompressed size
452 Fulltext size. It can be 0 if it isn't needed by the store.
452 Fulltext size. It can be 0 if it isn't needed by the store.
453 base revision
453 base revision
454 Revision number of revision the delta for storage is encoded
454 Revision number of revision the delta for storage is encoded
455 against. -1 indicates not encoded against a base revision.
455 against. -1 indicates not encoded against a base revision.
456 link revision
456 link revision
457 Revision number of changelog revision this entry is related to.
457 Revision number of changelog revision this entry is related to.
458 p1 revision
458 p1 revision
459 Revision number of 1st parent. -1 if no 1st parent.
459 Revision number of 1st parent. -1 if no 1st parent.
460 p2 revision
460 p2 revision
461 Revision number of 2nd parent. -1 if no 1st parent.
461 Revision number of 2nd parent. -1 if no 1st parent.
462 node
462 node
463 Binary node value for this revision number.
463 Binary node value for this revision number.
464
464
465 Negative values should index off the end of the sequence. ``-1``
465 Negative values should index off the end of the sequence. ``-1``
466 should return the null revision. ``-2`` should return the most
466 should return the null revision. ``-2`` should return the most
467 recent revision.
467 recent revision.
468 """
468 """
469
469
470 def __contains__(rev):
470 def __contains__(rev):
471 """Whether a revision number exists."""
471 """Whether a revision number exists."""
472
472
473 def insert(self, i, entry):
473 def insert(self, i, entry):
474 """Add an item to the index at specific revision."""
474 """Add an item to the index at specific revision."""
475
475
476 class ifileindex(interfaceutil.Interface):
476 class ifileindex(interfaceutil.Interface):
477 """Storage interface for index data of a single file.
477 """Storage interface for index data of a single file.
478
478
479 File storage data is divided into index metadata and data storage.
479 File storage data is divided into index metadata and data storage.
480 This interface defines the index portion of the interface.
480 This interface defines the index portion of the interface.
481
481
482 The index logically consists of:
482 The index logically consists of:
483
483
484 * A mapping between revision numbers and nodes.
484 * A mapping between revision numbers and nodes.
485 * DAG data (storing and querying the relationship between nodes).
485 * DAG data (storing and querying the relationship between nodes).
486 * Metadata to facilitate storage.
486 * Metadata to facilitate storage.
487 """
487 """
488 index = interfaceutil.Attribute(
488 index = interfaceutil.Attribute(
489 """An ``ifilerevisionssequence`` instance.""")
489 """An ``ifilerevisionssequence`` instance.""")
490
490
491 def __len__():
491 def __len__():
492 """Obtain the number of revisions stored for this file."""
492 """Obtain the number of revisions stored for this file."""
493
493
494 def __iter__():
494 def __iter__():
495 """Iterate over revision numbers for this file."""
495 """Iterate over revision numbers for this file."""
496
496
497 def revs(start=0, stop=None):
497 def revs(start=0, stop=None):
498 """Iterate over revision numbers for this file, with control."""
498 """Iterate over revision numbers for this file, with control."""
499
499
500 def parents(node):
500 def parents(node):
501 """Returns a 2-tuple of parent nodes for a revision.
501 """Returns a 2-tuple of parent nodes for a revision.
502
502
503 Values will be ``nullid`` if the parent is empty.
503 Values will be ``nullid`` if the parent is empty.
504 """
504 """
505
505
506 def parentrevs(rev):
506 def parentrevs(rev):
507 """Like parents() but operates on revision numbers."""
507 """Like parents() but operates on revision numbers."""
508
508
509 def rev(node):
509 def rev(node):
510 """Obtain the revision number given a node.
510 """Obtain the revision number given a node.
511
511
512 Raises ``error.LookupError`` if the node is not known.
512 Raises ``error.LookupError`` if the node is not known.
513 """
513 """
514
514
515 def node(rev):
515 def node(rev):
516 """Obtain the node value given a revision number.
516 """Obtain the node value given a revision number.
517
517
518 Raises ``IndexError`` if the node is not known.
518 Raises ``IndexError`` if the node is not known.
519 """
519 """
520
520
521 def lookup(node):
521 def lookup(node):
522 """Attempt to resolve a value to a node.
522 """Attempt to resolve a value to a node.
523
523
524 Value can be a binary node, hex node, revision number, or a string
524 Value can be a binary node, hex node, revision number, or a string
525 that can be converted to an integer.
525 that can be converted to an integer.
526
526
527 Raises ``error.LookupError`` if a node could not be resolved.
527 Raises ``error.LookupError`` if a node could not be resolved.
528 """
528 """
529
529
530 def linkrev(rev):
530 def linkrev(rev):
531 """Obtain the changeset revision number a revision is linked to."""
531 """Obtain the changeset revision number a revision is linked to."""
532
532
533 def flags(rev):
533 def flags(rev):
534 """Obtain flags used to affect storage of a revision."""
534 """Obtain flags used to affect storage of a revision."""
535
535
536 def iscensored(rev):
536 def iscensored(rev):
537 """Return whether a revision's content has been censored."""
537 """Return whether a revision's content has been censored."""
538
538
539 def commonancestorsheads(node1, node2):
539 def commonancestorsheads(node1, node2):
540 """Obtain an iterable of nodes containing heads of common ancestors.
540 """Obtain an iterable of nodes containing heads of common ancestors.
541
541
542 See ``ancestor.commonancestorsheads()``.
542 See ``ancestor.commonancestorsheads()``.
543 """
543 """
544
544
545 def descendants(revs):
545 def descendants(revs):
546 """Obtain descendant revision numbers for a set of revision numbers.
546 """Obtain descendant revision numbers for a set of revision numbers.
547
547
548 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
548 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
549 """
549 """
550
550
551 def heads(start=None, stop=None):
551 def heads(start=None, stop=None):
552 """Obtain a list of nodes that are DAG heads, with control.
552 """Obtain a list of nodes that are DAG heads, with control.
553
553
554 The set of revisions examined can be limited by specifying
554 The set of revisions examined can be limited by specifying
555 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
555 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
556 iterable of nodes. DAG traversal starts at earlier revision
556 iterable of nodes. DAG traversal starts at earlier revision
557 ``start`` and iterates forward until any node in ``stop`` is
557 ``start`` and iterates forward until any node in ``stop`` is
558 encountered.
558 encountered.
559 """
559 """
560
560
561 def children(node):
561 def children(node):
562 """Obtain nodes that are children of a node.
562 """Obtain nodes that are children of a node.
563
563
564 Returns a list of nodes.
564 Returns a list of nodes.
565 """
565 """
566
566
567 def deltaparent(rev):
567 def deltaparent(rev):
568 """"Return the revision that is a suitable parent to delta against."""
568 """"Return the revision that is a suitable parent to delta against."""
569
569
570 class ifiledata(interfaceutil.Interface):
570 class ifiledata(interfaceutil.Interface):
571 """Storage interface for data storage of a specific file.
571 """Storage interface for data storage of a specific file.
572
572
573 This complements ``ifileindex`` and provides an interface for accessing
573 This complements ``ifileindex`` and provides an interface for accessing
574 data for a tracked file.
574 data for a tracked file.
575 """
575 """
576 def rawsize(rev):
576 def rawsize(rev):
577 """The size of the fulltext data for a revision as stored."""
577 """The size of the fulltext data for a revision as stored."""
578
578
579 def size(rev):
579 def size(rev):
580 """Obtain the fulltext size of file data.
580 """Obtain the fulltext size of file data.
581
581
582 Any metadata is excluded from size measurements. Use ``rawsize()`` if
582 Any metadata is excluded from size measurements. Use ``rawsize()`` if
583 metadata size is important.
583 metadata size is important.
584 """
584 """
585
585
586 def checkhash(fulltext, node, p1=None, p2=None, rev=None):
586 def checkhash(fulltext, node, p1=None, p2=None, rev=None):
587 """Validate the stored hash of a given fulltext and node.
587 """Validate the stored hash of a given fulltext and node.
588
588
589 Raises ``error.StorageError`` is hash validation fails.
589 Raises ``error.StorageError`` is hash validation fails.
590 """
590 """
591
591
592 def revision(node, raw=False):
592 def revision(node, raw=False):
593 """"Obtain fulltext data for a node.
593 """"Obtain fulltext data for a node.
594
594
595 By default, any storage transformations are applied before the data
595 By default, any storage transformations are applied before the data
596 is returned. If ``raw`` is True, non-raw storage transformations
596 is returned. If ``raw`` is True, non-raw storage transformations
597 are not applied.
597 are not applied.
598
598
599 The fulltext data may contain a header containing metadata. Most
599 The fulltext data may contain a header containing metadata. Most
600 consumers should use ``read()`` to obtain the actual file data.
600 consumers should use ``read()`` to obtain the actual file data.
601 """
601 """
602
602
603 def read(node):
603 def read(node):
604 """Resolve file fulltext data.
604 """Resolve file fulltext data.
605
605
606 This is similar to ``revision()`` except any metadata in the data
606 This is similar to ``revision()`` except any metadata in the data
607 headers is stripped.
607 headers is stripped.
608 """
608 """
609
609
610 def renamed(node):
610 def renamed(node):
611 """Obtain copy metadata for a node.
611 """Obtain copy metadata for a node.
612
612
613 Returns ``False`` if no copy metadata is stored or a 2-tuple of
613 Returns ``False`` if no copy metadata is stored or a 2-tuple of
614 (path, node) from which this revision was copied.
614 (path, node) from which this revision was copied.
615 """
615 """
616
616
617 def cmp(node, fulltext):
617 def cmp(node, fulltext):
618 """Compare fulltext to another revision.
618 """Compare fulltext to another revision.
619
619
620 Returns True if the fulltext is different from what is stored.
620 Returns True if the fulltext is different from what is stored.
621
621
622 This takes copy metadata into account.
622 This takes copy metadata into account.
623
623
624 TODO better document the copy metadata and censoring logic.
624 TODO better document the copy metadata and censoring logic.
625 """
625 """
626
626
627 def revdiff(rev1, rev2):
627 def revdiff(rev1, rev2):
628 """Obtain a delta between two revision numbers.
628 """Obtain a delta between two revision numbers.
629
629
630 Operates on raw data in the store (``revision(node, raw=True)``).
630 Operates on raw data in the store (``revision(node, raw=True)``).
631
631
632 The returned data is the result of ``bdiff.bdiff`` on the raw
632 The returned data is the result of ``bdiff.bdiff`` on the raw
633 revision data.
633 revision data.
634 """
634 """
635
635
636 def emitrevisiondeltas(requests):
636 def emitrevisiondeltas(requests):
637 """Produce ``irevisiondelta`` from ``irevisiondeltarequest``s.
637 """Produce ``irevisiondelta`` from ``irevisiondeltarequest``s.
638
638
639 Given an iterable of objects conforming to the ``irevisiondeltarequest``
639 Given an iterable of objects conforming to the ``irevisiondeltarequest``
640 interface, emits objects conforming to the ``irevisiondelta``
640 interface, emits objects conforming to the ``irevisiondelta``
641 interface.
641 interface.
642
642
643 This method is a generator.
643 This method is a generator.
644
644
645 ``irevisiondelta`` should be emitted in the same order of
645 ``irevisiondelta`` should be emitted in the same order of
646 ``irevisiondeltarequest`` that was passed in.
646 ``irevisiondeltarequest`` that was passed in.
647
647
648 The emitted objects MUST conform by the results of
648 The emitted objects MUST conform by the results of
649 ``irevisiondeltarequest``. Namely, they must respect any requests
649 ``irevisiondeltarequest``. Namely, they must respect any requests
650 for building a delta from a specific ``basenode`` if defined.
650 for building a delta from a specific ``basenode`` if defined.
651
651
652 When sending deltas, implementations must take into account whether
652 When sending deltas, implementations must take into account whether
653 the client has the base delta before encoding a delta against that
653 the client has the base delta before encoding a delta against that
654 revision. A revision encountered previously in ``requests`` is
654 revision. A revision encountered previously in ``requests`` is
655 always a suitable base revision. An example of a bad delta is a delta
655 always a suitable base revision. An example of a bad delta is a delta
656 against a non-ancestor revision. Another example of a bad delta is a
656 against a non-ancestor revision. Another example of a bad delta is a
657 delta against a censored revision.
657 delta against a censored revision.
658 """
658 """
659
659
660 class ifilemutation(interfaceutil.Interface):
660 class ifilemutation(interfaceutil.Interface):
661 """Storage interface for mutation events of a tracked file."""
661 """Storage interface for mutation events of a tracked file."""
662
662
663 def add(filedata, meta, transaction, linkrev, p1, p2):
663 def add(filedata, meta, transaction, linkrev, p1, p2):
664 """Add a new revision to the store.
664 """Add a new revision to the store.
665
665
666 Takes file data, dictionary of metadata, a transaction, linkrev,
666 Takes file data, dictionary of metadata, a transaction, linkrev,
667 and parent nodes.
667 and parent nodes.
668
668
669 Returns the node that was added.
669 Returns the node that was added.
670
670
671 May no-op if a revision matching the supplied data is already stored.
671 May no-op if a revision matching the supplied data is already stored.
672 """
672 """
673
673
674 def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
674 def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
675 flags=0, cachedelta=None):
675 flags=0, cachedelta=None):
676 """Add a new revision to the store.
676 """Add a new revision to the store.
677
677
678 This is similar to ``add()`` except it operates at a lower level.
678 This is similar to ``add()`` except it operates at a lower level.
679
679
680 The data passed in already contains a metadata header, if any.
680 The data passed in already contains a metadata header, if any.
681
681
682 ``node`` and ``flags`` can be used to define the expected node and
682 ``node`` and ``flags`` can be used to define the expected node and
683 the flags to use with storage.
683 the flags to use with storage.
684
684
685 ``add()`` is usually called when adding files from e.g. the working
685 ``add()`` is usually called when adding files from e.g. the working
686 directory. ``addrevision()`` is often called by ``add()`` and for
686 directory. ``addrevision()`` is often called by ``add()`` and for
687 scenarios where revision data has already been computed, such as when
687 scenarios where revision data has already been computed, such as when
688 applying raw data from a peer repo.
688 applying raw data from a peer repo.
689 """
689 """
690
690
691 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
691 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
692 """Process a series of deltas for storage.
692 """Process a series of deltas for storage.
693
693
694 ``deltas`` is an iterable of 7-tuples of
694 ``deltas`` is an iterable of 7-tuples of
695 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
695 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
696 to add.
696 to add.
697
697
698 The ``delta`` field contains ``mpatch`` data to apply to a base
698 The ``delta`` field contains ``mpatch`` data to apply to a base
699 revision, identified by ``deltabase``. The base node can be
699 revision, identified by ``deltabase``. The base node can be
700 ``nullid``, in which case the header from the delta can be ignored
700 ``nullid``, in which case the header from the delta can be ignored
701 and the delta used as the fulltext.
701 and the delta used as the fulltext.
702
702
703 ``addrevisioncb`` should be called for each node as it is committed.
703 ``addrevisioncb`` should be called for each node as it is committed.
704
704
705 Returns a list of nodes that were processed. A node will be in the list
705 Returns a list of nodes that were processed. A node will be in the list
706 even if it existed in the store previously.
706 even if it existed in the store previously.
707 """
707 """
708
708
709 def censorrevision(tr, node, tombstone=b''):
709 def censorrevision(tr, node, tombstone=b''):
710 """Remove the content of a single revision.
710 """Remove the content of a single revision.
711
711
712 The specified ``node`` will have its content purged from storage.
712 The specified ``node`` will have its content purged from storage.
713 Future attempts to access the revision data for this node will
713 Future attempts to access the revision data for this node will
714 result in failure.
714 result in failure.
715
715
716 A ``tombstone`` message can optionally be stored. This message may be
716 A ``tombstone`` message can optionally be stored. This message may be
717 displayed to users when they attempt to access the missing revision
717 displayed to users when they attempt to access the missing revision
718 data.
718 data.
719
719
720 Storage backends may have stored deltas against the previous content
720 Storage backends may have stored deltas against the previous content
721 in this revision. As part of censoring a revision, these storage
721 in this revision. As part of censoring a revision, these storage
722 backends are expected to rewrite any internally stored deltas such
722 backends are expected to rewrite any internally stored deltas such
723 that they no longer reference the deleted content.
723 that they no longer reference the deleted content.
724 """
724 """
725
725
726 def getstrippoint(minlink):
726 def getstrippoint(minlink):
727 """Find the minimum revision that must be stripped to strip a linkrev.
727 """Find the minimum revision that must be stripped to strip a linkrev.
728
728
729 Returns a 2-tuple containing the minimum revision number and a set
729 Returns a 2-tuple containing the minimum revision number and a set
730 of all revisions numbers that would be broken by this strip.
730 of all revisions numbers that would be broken by this strip.
731
731
732 TODO this is highly revlog centric and should be abstracted into
732 TODO this is highly revlog centric and should be abstracted into
733 a higher-level deletion API. ``repair.strip()`` relies on this.
733 a higher-level deletion API. ``repair.strip()`` relies on this.
734 """
734 """
735
735
736 def strip(minlink, transaction):
736 def strip(minlink, transaction):
737 """Remove storage of items starting at a linkrev.
737 """Remove storage of items starting at a linkrev.
738
738
739 This uses ``getstrippoint()`` to determine the first node to remove.
739 This uses ``getstrippoint()`` to determine the first node to remove.
740 Then it effectively truncates storage for all revisions after that.
740 Then it effectively truncates storage for all revisions after that.
741
741
742 TODO this is highly revlog centric and should be abstracted into a
742 TODO this is highly revlog centric and should be abstracted into a
743 higher-level deletion API.
743 higher-level deletion API.
744 """
744 """
745
745
746 class ifilestorage(ifileindex, ifiledata, ifilemutation):
746 class ifilestorage(ifileindex, ifiledata, ifilemutation):
747 """Complete storage interface for a single tracked file."""
747 """Complete storage interface for a single tracked file."""
748
748
749 _generaldelta = interfaceutil.Attribute(
749 _generaldelta = interfaceutil.Attribute(
750 """Whether deltas can be against any parent revision.
750 """Whether deltas can be against any parent revision.
751
751
752 TODO this is used by changegroup code and it could probably be
752 TODO this is used by changegroup code and it could probably be
753 folded into another API.
753 folded into another API.
754 """)
754 """)
755
755
756 def files():
756 def files():
757 """Obtain paths that are backing storage for this file.
757 """Obtain paths that are backing storage for this file.
758
758
759 TODO this is used heavily by verify code and there should probably
759 TODO this is used heavily by verify code and there should probably
760 be a better API for that.
760 be a better API for that.
761 """
761 """
762
762
763 def verifyintegrity(state):
763 def verifyintegrity(state):
764 """Verifies the integrity of file storage.
764 """Verifies the integrity of file storage.
765
765
766 ``state`` is a dict holding state of the verifier process. It can be
766 ``state`` is a dict holding state of the verifier process. It can be
767 used to communicate data between invocations of multiple storage
767 used to communicate data between invocations of multiple storage
768 primitives.
768 primitives.
769
769
770 The method yields objects conforming to the ``iverifyproblem``
770 The method yields objects conforming to the ``iverifyproblem``
771 interface.
771 interface.
772 """
772 """
773
773
774 class idirs(interfaceutil.Interface):
774 class idirs(interfaceutil.Interface):
775 """Interface representing a collection of directories from paths.
775 """Interface representing a collection of directories from paths.
776
776
777 This interface is essentially a derived data structure representing
777 This interface is essentially a derived data structure representing
778 directories from a collection of paths.
778 directories from a collection of paths.
779 """
779 """
780
780
781 def addpath(path):
781 def addpath(path):
782 """Add a path to the collection.
782 """Add a path to the collection.
783
783
784 All directories in the path will be added to the collection.
784 All directories in the path will be added to the collection.
785 """
785 """
786
786
787 def delpath(path):
787 def delpath(path):
788 """Remove a path from the collection.
788 """Remove a path from the collection.
789
789
790 If the removal was the last path in a particular directory, the
790 If the removal was the last path in a particular directory, the
791 directory is removed from the collection.
791 directory is removed from the collection.
792 """
792 """
793
793
794 def __iter__():
794 def __iter__():
795 """Iterate over the directories in this collection of paths."""
795 """Iterate over the directories in this collection of paths."""
796
796
797 def __contains__(path):
797 def __contains__(path):
798 """Whether a specific directory is in this collection."""
798 """Whether a specific directory is in this collection."""
799
799
800 class imanifestdict(interfaceutil.Interface):
800 class imanifestdict(interfaceutil.Interface):
801 """Interface representing a manifest data structure.
801 """Interface representing a manifest data structure.
802
802
803 A manifest is effectively a dict mapping paths to entries. Each entry
803 A manifest is effectively a dict mapping paths to entries. Each entry
804 consists of a binary node and extra flags affecting that entry.
804 consists of a binary node and extra flags affecting that entry.
805 """
805 """
806
806
807 def __getitem__(path):
807 def __getitem__(path):
808 """Returns the binary node value for a path in the manifest.
808 """Returns the binary node value for a path in the manifest.
809
809
810 Raises ``KeyError`` if the path does not exist in the manifest.
810 Raises ``KeyError`` if the path does not exist in the manifest.
811
811
812 Equivalent to ``self.find(path)[0]``.
812 Equivalent to ``self.find(path)[0]``.
813 """
813 """
814
814
815 def find(path):
815 def find(path):
816 """Returns the entry for a path in the manifest.
816 """Returns the entry for a path in the manifest.
817
817
818 Returns a 2-tuple of (node, flags).
818 Returns a 2-tuple of (node, flags).
819
819
820 Raises ``KeyError`` if the path does not exist in the manifest.
820 Raises ``KeyError`` if the path does not exist in the manifest.
821 """
821 """
822
822
823 def __len__():
823 def __len__():
824 """Return the number of entries in the manifest."""
824 """Return the number of entries in the manifest."""
825
825
826 def __nonzero__():
826 def __nonzero__():
827 """Returns True if the manifest has entries, False otherwise."""
827 """Returns True if the manifest has entries, False otherwise."""
828
828
829 __bool__ = __nonzero__
829 __bool__ = __nonzero__
830
830
831 def __setitem__(path, node):
831 def __setitem__(path, node):
832 """Define the node value for a path in the manifest.
832 """Define the node value for a path in the manifest.
833
833
834 If the path is already in the manifest, its flags will be copied to
834 If the path is already in the manifest, its flags will be copied to
835 the new entry.
835 the new entry.
836 """
836 """
837
837
838 def __contains__(path):
838 def __contains__(path):
839 """Whether a path exists in the manifest."""
839 """Whether a path exists in the manifest."""
840
840
841 def __delitem__(path):
841 def __delitem__(path):
842 """Remove a path from the manifest.
842 """Remove a path from the manifest.
843
843
844 Raises ``KeyError`` if the path is not in the manifest.
844 Raises ``KeyError`` if the path is not in the manifest.
845 """
845 """
846
846
847 def __iter__():
847 def __iter__():
848 """Iterate over paths in the manifest."""
848 """Iterate over paths in the manifest."""
849
849
850 def iterkeys():
850 def iterkeys():
851 """Iterate over paths in the manifest."""
851 """Iterate over paths in the manifest."""
852
852
853 def keys():
853 def keys():
854 """Obtain a list of paths in the manifest."""
854 """Obtain a list of paths in the manifest."""
855
855
856 def filesnotin(other, match=None):
856 def filesnotin(other, match=None):
857 """Obtain the set of paths in this manifest but not in another.
857 """Obtain the set of paths in this manifest but not in another.
858
858
859 ``match`` is an optional matcher function to be applied to both
859 ``match`` is an optional matcher function to be applied to both
860 manifests.
860 manifests.
861
861
862 Returns a set of paths.
862 Returns a set of paths.
863 """
863 """
864
864
865 def dirs():
865 def dirs():
866 """Returns an object implementing the ``idirs`` interface."""
866 """Returns an object implementing the ``idirs`` interface."""
867
867
868 def hasdir(dir):
868 def hasdir(dir):
869 """Returns a bool indicating if a directory is in this manifest."""
869 """Returns a bool indicating if a directory is in this manifest."""
870
870
871 def matches(match):
871 def matches(match):
872 """Generate a new manifest filtered through a matcher.
872 """Generate a new manifest filtered through a matcher.
873
873
874 Returns an object conforming to the ``imanifestdict`` interface.
874 Returns an object conforming to the ``imanifestdict`` interface.
875 """
875 """
876
876
877 def walk(match):
877 def walk(match):
878 """Generator of paths in manifest satisfying a matcher.
878 """Generator of paths in manifest satisfying a matcher.
879
879
880 This is equivalent to ``self.matches(match).iterkeys()`` except a new
880 This is equivalent to ``self.matches(match).iterkeys()`` except a new
881 manifest object is not created.
881 manifest object is not created.
882
882
883 If the matcher has explicit files listed and they don't exist in
883 If the matcher has explicit files listed and they don't exist in
884 the manifest, ``match.bad()`` is called for each missing file.
884 the manifest, ``match.bad()`` is called for each missing file.
885 """
885 """
886
886
887 def diff(other, match=None, clean=False):
887 def diff(other, match=None, clean=False):
888 """Find differences between this manifest and another.
888 """Find differences between this manifest and another.
889
889
890 This manifest is compared to ``other``.
890 This manifest is compared to ``other``.
891
891
892 If ``match`` is provided, the two manifests are filtered against this
892 If ``match`` is provided, the two manifests are filtered against this
893 matcher and only entries satisfying the matcher are compared.
893 matcher and only entries satisfying the matcher are compared.
894
894
895 If ``clean`` is True, unchanged files are included in the returned
895 If ``clean`` is True, unchanged files are included in the returned
896 object.
896 object.
897
897
898 Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
898 Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
899 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
899 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
900 represents the node and flags for this manifest and ``(node2, flag2)``
900 represents the node and flags for this manifest and ``(node2, flag2)``
901 are the same for the other manifest.
901 are the same for the other manifest.
902 """
902 """
903
903
904 def setflag(path, flag):
904 def setflag(path, flag):
905 """Set the flag value for a given path.
905 """Set the flag value for a given path.
906
906
907 Raises ``KeyError`` if the path is not already in the manifest.
907 Raises ``KeyError`` if the path is not already in the manifest.
908 """
908 """
909
909
910 def get(path, default=None):
910 def get(path, default=None):
911 """Obtain the node value for a path or a default value if missing."""
911 """Obtain the node value for a path or a default value if missing."""
912
912
913 def flags(path, default=''):
913 def flags(path, default=''):
914 """Return the flags value for a path or a default value if missing."""
914 """Return the flags value for a path or a default value if missing."""
915
915
916 def copy():
916 def copy():
917 """Return a copy of this manifest."""
917 """Return a copy of this manifest."""
918
918
919 def items():
919 def items():
920 """Returns an iterable of (path, node) for items in this manifest."""
920 """Returns an iterable of (path, node) for items in this manifest."""
921
921
922 def iteritems():
922 def iteritems():
923 """Identical to items()."""
923 """Identical to items()."""
924
924
925 def iterentries():
925 def iterentries():
926 """Returns an iterable of (path, node, flags) for this manifest.
926 """Returns an iterable of (path, node, flags) for this manifest.
927
927
928 Similar to ``iteritems()`` except items are a 3-tuple and include
928 Similar to ``iteritems()`` except items are a 3-tuple and include
929 flags.
929 flags.
930 """
930 """
931
931
932 def text():
932 def text():
933 """Obtain the raw data representation for this manifest.
933 """Obtain the raw data representation for this manifest.
934
934
935 Result is used to create a manifest revision.
935 Result is used to create a manifest revision.
936 """
936 """
937
937
938 def fastdelta(base, changes):
938 def fastdelta(base, changes):
939 """Obtain a delta between this manifest and another given changes.
939 """Obtain a delta between this manifest and another given changes.
940
940
941 ``base`` in the raw data representation for another manifest.
941 ``base`` in the raw data representation for another manifest.
942
942
943 ``changes`` is an iterable of ``(path, to_delete)``.
943 ``changes`` is an iterable of ``(path, to_delete)``.
944
944
945 Returns a 2-tuple containing ``bytearray(self.text())`` and the
945 Returns a 2-tuple containing ``bytearray(self.text())`` and the
946 delta between ``base`` and this manifest.
946 delta between ``base`` and this manifest.
947 """
947 """
948
948
949 class imanifestrevisionbase(interfaceutil.Interface):
949 class imanifestrevisionbase(interfaceutil.Interface):
950 """Base interface representing a single revision of a manifest.
950 """Base interface representing a single revision of a manifest.
951
951
952 Should not be used as a primary interface: should always be inherited
952 Should not be used as a primary interface: should always be inherited
953 as part of a larger interface.
953 as part of a larger interface.
954 """
954 """
955
955
956 def new():
956 def new():
957 """Obtain a new manifest instance.
957 """Obtain a new manifest instance.
958
958
959 Returns an object conforming to the ``imanifestrevisionwritable``
959 Returns an object conforming to the ``imanifestrevisionwritable``
960 interface. The instance will be associated with the same
960 interface. The instance will be associated with the same
961 ``imanifestlog`` collection as this instance.
961 ``imanifestlog`` collection as this instance.
962 """
962 """
963
963
964 def copy():
964 def copy():
965 """Obtain a copy of this manifest instance.
965 """Obtain a copy of this manifest instance.
966
966
967 Returns an object conforming to the ``imanifestrevisionwritable``
967 Returns an object conforming to the ``imanifestrevisionwritable``
968 interface. The instance will be associated with the same
968 interface. The instance will be associated with the same
969 ``imanifestlog`` collection as this instance.
969 ``imanifestlog`` collection as this instance.
970 """
970 """
971
971
972 def read():
972 def read():
973 """Obtain the parsed manifest data structure.
973 """Obtain the parsed manifest data structure.
974
974
975 The returned object conforms to the ``imanifestdict`` interface.
975 The returned object conforms to the ``imanifestdict`` interface.
976 """
976 """
977
977
978 class imanifestrevisionstored(imanifestrevisionbase):
978 class imanifestrevisionstored(imanifestrevisionbase):
979 """Interface representing a manifest revision committed to storage."""
979 """Interface representing a manifest revision committed to storage."""
980
980
981 def node():
981 def node():
982 """The binary node for this manifest."""
982 """The binary node for this manifest."""
983
983
984 parents = interfaceutil.Attribute(
984 parents = interfaceutil.Attribute(
985 """List of binary nodes that are parents for this manifest revision."""
985 """List of binary nodes that are parents for this manifest revision."""
986 )
986 )
987
987
988 def readdelta(shallow=False):
988 def readdelta(shallow=False):
989 """Obtain the manifest data structure representing changes from parent.
989 """Obtain the manifest data structure representing changes from parent.
990
990
991 This manifest is compared to its 1st parent. A new manifest representing
991 This manifest is compared to its 1st parent. A new manifest representing
992 those differences is constructed.
992 those differences is constructed.
993
993
994 The returned object conforms to the ``imanifestdict`` interface.
994 The returned object conforms to the ``imanifestdict`` interface.
995 """
995 """
996
996
997 def readfast(shallow=False):
997 def readfast(shallow=False):
998 """Calls either ``read()`` or ``readdelta()``.
998 """Calls either ``read()`` or ``readdelta()``.
999
999
1000 The faster of the two options is called.
1000 The faster of the two options is called.
1001 """
1001 """
1002
1002
1003 def find(key):
1003 def find(key):
1004 """Calls self.read().find(key)``.
1004 """Calls self.read().find(key)``.
1005
1005
1006 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1006 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1007 """
1007 """
1008
1008
1009 class imanifestrevisionwritable(imanifestrevisionbase):
1009 class imanifestrevisionwritable(imanifestrevisionbase):
1010 """Interface representing a manifest revision that can be committed."""
1010 """Interface representing a manifest revision that can be committed."""
1011
1011
1012 def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
1012 def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
1013 """Add this revision to storage.
1013 """Add this revision to storage.
1014
1014
1015 Takes a transaction object, the changeset revision number it will
1015 Takes a transaction object, the changeset revision number it will
1016 be associated with, its parent nodes, and lists of added and
1016 be associated with, its parent nodes, and lists of added and
1017 removed paths.
1017 removed paths.
1018
1018
1019 If match is provided, storage can choose not to inspect or write out
1019 If match is provided, storage can choose not to inspect or write out
1020 items that do not match. Storage is still required to be able to provide
1020 items that do not match. Storage is still required to be able to provide
1021 the full manifest in the future for any directories written (these
1021 the full manifest in the future for any directories written (these
1022 manifests should not be "narrowed on disk").
1022 manifests should not be "narrowed on disk").
1023
1023
1024 Returns the binary node of the created revision.
1024 Returns the binary node of the created revision.
1025 """
1025 """
1026
1026
1027 class imanifeststorage(interfaceutil.Interface):
1027 class imanifeststorage(interfaceutil.Interface):
1028 """Storage interface for manifest data."""
1028 """Storage interface for manifest data."""
1029
1029
1030 tree = interfaceutil.Attribute(
1030 tree = interfaceutil.Attribute(
1031 """The path to the directory this manifest tracks.
1031 """The path to the directory this manifest tracks.
1032
1032
1033 The empty bytestring represents the root manifest.
1033 The empty bytestring represents the root manifest.
1034 """)
1034 """)
1035
1035
1036 index = interfaceutil.Attribute(
1036 index = interfaceutil.Attribute(
1037 """An ``ifilerevisionssequence`` instance.""")
1037 """An ``ifilerevisionssequence`` instance.""")
1038
1038
1039 indexfile = interfaceutil.Attribute(
1039 indexfile = interfaceutil.Attribute(
1040 """Path of revlog index file.
1040 """Path of revlog index file.
1041
1041
1042 TODO this is revlog specific and should not be exposed.
1042 TODO this is revlog specific and should not be exposed.
1043 """)
1043 """)
1044
1044
1045 opener = interfaceutil.Attribute(
1045 opener = interfaceutil.Attribute(
1046 """VFS opener to use to access underlying files used for storage.
1046 """VFS opener to use to access underlying files used for storage.
1047
1047
1048 TODO this is revlog specific and should not be exposed.
1048 TODO this is revlog specific and should not be exposed.
1049 """)
1049 """)
1050
1050
1051 version = interfaceutil.Attribute(
1051 version = interfaceutil.Attribute(
1052 """Revlog version number.
1052 """Revlog version number.
1053
1053
1054 TODO this is revlog specific and should not be exposed.
1054 TODO this is revlog specific and should not be exposed.
1055 """)
1055 """)
1056
1056
1057 _generaldelta = interfaceutil.Attribute(
1057 _generaldelta = interfaceutil.Attribute(
1058 """Whether generaldelta storage is being used.
1058 """Whether generaldelta storage is being used.
1059
1059
1060 TODO this is revlog specific and should not be exposed.
1060 TODO this is revlog specific and should not be exposed.
1061 """)
1061 """)
1062
1062
1063 fulltextcache = interfaceutil.Attribute(
1063 fulltextcache = interfaceutil.Attribute(
1064 """Dict with cache of fulltexts.
1064 """Dict with cache of fulltexts.
1065
1065
1066 TODO this doesn't feel appropriate for the storage interface.
1066 TODO this doesn't feel appropriate for the storage interface.
1067 """)
1067 """)
1068
1068
1069 def __len__():
1069 def __len__():
1070 """Obtain the number of revisions stored for this manifest."""
1070 """Obtain the number of revisions stored for this manifest."""
1071
1071
1072 def __iter__():
1072 def __iter__():
1073 """Iterate over revision numbers for this manifest."""
1073 """Iterate over revision numbers for this manifest."""
1074
1074
1075 def rev(node):
1075 def rev(node):
1076 """Obtain the revision number given a binary node.
1076 """Obtain the revision number given a binary node.
1077
1077
1078 Raises ``error.LookupError`` if the node is not known.
1078 Raises ``error.LookupError`` if the node is not known.
1079 """
1079 """
1080
1080
1081 def node(rev):
1081 def node(rev):
1082 """Obtain the node value given a revision number.
1082 """Obtain the node value given a revision number.
1083
1083
1084 Raises ``error.LookupError`` if the revision is not known.
1084 Raises ``error.LookupError`` if the revision is not known.
1085 """
1085 """
1086
1086
1087 def lookup(value):
1087 def lookup(value):
1088 """Attempt to resolve a value to a node.
1088 """Attempt to resolve a value to a node.
1089
1089
1090 Value can be a binary node, hex node, revision number, or a bytes
1090 Value can be a binary node, hex node, revision number, or a bytes
1091 that can be converted to an integer.
1091 that can be converted to an integer.
1092
1092
1093 Raises ``error.LookupError`` if a ndoe could not be resolved.
1093 Raises ``error.LookupError`` if a ndoe could not be resolved.
1094
1094
1095 TODO this is only used by debug* commands and can probably be deleted
1095 TODO this is only used by debug* commands and can probably be deleted
1096 easily.
1096 easily.
1097 """
1097 """
1098
1098
1099 def parents(node):
1099 def parents(node):
1100 """Returns a 2-tuple of parent nodes for a node.
1100 """Returns a 2-tuple of parent nodes for a node.
1101
1101
1102 Values will be ``nullid`` if the parent is empty.
1102 Values will be ``nullid`` if the parent is empty.
1103 """
1103 """
1104
1104
1105 def parentrevs(rev):
1105 def parentrevs(rev):
1106 """Like parents() but operates on revision numbers."""
1106 """Like parents() but operates on revision numbers."""
1107
1107
1108 def linkrev(rev):
1108 def linkrev(rev):
1109 """Obtain the changeset revision number a revision is linked to."""
1109 """Obtain the changeset revision number a revision is linked to."""
1110
1110
1111 def revision(node, _df=None, raw=False):
1111 def revision(node, _df=None, raw=False):
1112 """Obtain fulltext data for a node."""
1112 """Obtain fulltext data for a node."""
1113
1113
1114 def revdiff(rev1, rev2):
1114 def revdiff(rev1, rev2):
1115 """Obtain a delta between two revision numbers.
1115 """Obtain a delta between two revision numbers.
1116
1116
1117 The returned data is the result of ``bdiff.bdiff()`` on the raw
1117 The returned data is the result of ``bdiff.bdiff()`` on the raw
1118 revision data.
1118 revision data.
1119 """
1119 """
1120
1120
1121 def cmp(node, fulltext):
1121 def cmp(node, fulltext):
1122 """Compare fulltext to another revision.
1122 """Compare fulltext to another revision.
1123
1123
1124 Returns True if the fulltext is different from what is stored.
1124 Returns True if the fulltext is different from what is stored.
1125 """
1125 """
1126
1126
1127 def emitrevisiondeltas(requests):
1127 def emitrevisiondeltas(requests):
1128 """Produce ``irevisiondelta`` from ``irevisiondeltarequest``s.
1128 """Produce ``irevisiondelta`` from ``irevisiondeltarequest``s.
1129
1129
1130 See the documentation for ``ifiledata`` for more.
1130 See the documentation for ``ifiledata`` for more.
1131 """
1131 """
1132
1132
1133 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
1133 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
1134 """Process a series of deltas for storage.
1134 """Process a series of deltas for storage.
1135
1135
1136 See the documentation in ``ifilemutation`` for more.
1136 See the documentation in ``ifilemutation`` for more.
1137 """
1137 """
1138
1138
1139 def rawsize(rev):
1140 """Obtain the size of tracked data.
1141
1142 Is equivalent to ``len(m.revision(node, raw=True))``.
1143
1144 TODO this method is only used by upgrade code and may be removed.
1145 """
1146
1139 def getstrippoint(minlink):
1147 def getstrippoint(minlink):
1140 """Find minimum revision that must be stripped to strip a linkrev.
1148 """Find minimum revision that must be stripped to strip a linkrev.
1141
1149
1142 See the documentation in ``ifilemutation`` for more.
1150 See the documentation in ``ifilemutation`` for more.
1143 """
1151 """
1144
1152
1145 def strip(minlink, transaction):
1153 def strip(minlink, transaction):
1146 """Remove storage of items starting at a linkrev.
1154 """Remove storage of items starting at a linkrev.
1147
1155
1148 See the documentation in ``ifilemutation`` for more.
1156 See the documentation in ``ifilemutation`` for more.
1149 """
1157 """
1150
1158
1151 def checksize():
1159 def checksize():
1152 """Obtain the expected sizes of backing files.
1160 """Obtain the expected sizes of backing files.
1153
1161
1154 TODO this is used by verify and it should not be part of the interface.
1162 TODO this is used by verify and it should not be part of the interface.
1155 """
1163 """
1156
1164
1157 def files():
1165 def files():
1158 """Obtain paths that are backing storage for this manifest.
1166 """Obtain paths that are backing storage for this manifest.
1159
1167
1160 TODO this is used by verify and there should probably be a better API
1168 TODO this is used by verify and there should probably be a better API
1161 for this functionality.
1169 for this functionality.
1162 """
1170 """
1163
1171
1164 def deltaparent(rev):
1172 def deltaparent(rev):
1165 """Obtain the revision that a revision is delta'd against.
1173 """Obtain the revision that a revision is delta'd against.
1166
1174
1167 TODO delta encoding is an implementation detail of storage and should
1175 TODO delta encoding is an implementation detail of storage and should
1168 not be exposed to the storage interface.
1176 not be exposed to the storage interface.
1169 """
1177 """
1170
1178
1171 def clone(tr, dest, **kwargs):
1179 def clone(tr, dest, **kwargs):
1172 """Clone this instance to another."""
1180 """Clone this instance to another."""
1173
1181
1174 def clearcaches(clear_persisted_data=False):
1182 def clearcaches(clear_persisted_data=False):
1175 """Clear any caches associated with this instance."""
1183 """Clear any caches associated with this instance."""
1176
1184
1177 def dirlog(d):
1185 def dirlog(d):
1178 """Obtain a manifest storage instance for a tree."""
1186 """Obtain a manifest storage instance for a tree."""
1179
1187
1180 def add(m, transaction, link, p1, p2, added, removed, readtree=None,
1188 def add(m, transaction, link, p1, p2, added, removed, readtree=None,
1181 match=None):
1189 match=None):
1182 """Add a revision to storage.
1190 """Add a revision to storage.
1183
1191
1184 ``m`` is an object conforming to ``imanifestdict``.
1192 ``m`` is an object conforming to ``imanifestdict``.
1185
1193
1186 ``link`` is the linkrev revision number.
1194 ``link`` is the linkrev revision number.
1187
1195
1188 ``p1`` and ``p2`` are the parent revision numbers.
1196 ``p1`` and ``p2`` are the parent revision numbers.
1189
1197
1190 ``added`` and ``removed`` are iterables of added and removed paths,
1198 ``added`` and ``removed`` are iterables of added and removed paths,
1191 respectively.
1199 respectively.
1192
1200
1193 ``readtree`` is a function that can be used to read the child tree(s)
1201 ``readtree`` is a function that can be used to read the child tree(s)
1194 when recursively writing the full tree structure when using
1202 when recursively writing the full tree structure when using
1195 treemanifets.
1203 treemanifets.
1196
1204
1197 ``match`` is a matcher that can be used to hint to storage that not all
1205 ``match`` is a matcher that can be used to hint to storage that not all
1198 paths must be inspected; this is an optimization and can be safely
1206 paths must be inspected; this is an optimization and can be safely
1199 ignored. Note that the storage must still be able to reproduce a full
1207 ignored. Note that the storage must still be able to reproduce a full
1200 manifest including files that did not match.
1208 manifest including files that did not match.
1201 """
1209 """
1202
1210
1203 class imanifestlog(interfaceutil.Interface):
1211 class imanifestlog(interfaceutil.Interface):
1204 """Interface representing a collection of manifest snapshots.
1212 """Interface representing a collection of manifest snapshots.
1205
1213
1206 Represents the root manifest in a repository.
1214 Represents the root manifest in a repository.
1207
1215
1208 Also serves as a means to access nested tree manifests and to cache
1216 Also serves as a means to access nested tree manifests and to cache
1209 tree manifests.
1217 tree manifests.
1210 """
1218 """
1211
1219
1212 def __getitem__(node):
1220 def __getitem__(node):
1213 """Obtain a manifest instance for a given binary node.
1221 """Obtain a manifest instance for a given binary node.
1214
1222
1215 Equivalent to calling ``self.get('', node)``.
1223 Equivalent to calling ``self.get('', node)``.
1216
1224
1217 The returned object conforms to the ``imanifestrevisionstored``
1225 The returned object conforms to the ``imanifestrevisionstored``
1218 interface.
1226 interface.
1219 """
1227 """
1220
1228
1221 def get(tree, node, verify=True):
1229 def get(tree, node, verify=True):
1222 """Retrieve the manifest instance for a given directory and binary node.
1230 """Retrieve the manifest instance for a given directory and binary node.
1223
1231
1224 ``node`` always refers to the node of the root manifest (which will be
1232 ``node`` always refers to the node of the root manifest (which will be
1225 the only manifest if flat manifests are being used).
1233 the only manifest if flat manifests are being used).
1226
1234
1227 If ``tree`` is the empty string, the root manifest is returned.
1235 If ``tree`` is the empty string, the root manifest is returned.
1228 Otherwise the manifest for the specified directory will be returned
1236 Otherwise the manifest for the specified directory will be returned
1229 (requires tree manifests).
1237 (requires tree manifests).
1230
1238
1231 If ``verify`` is True, ``LookupError`` is raised if the node is not
1239 If ``verify`` is True, ``LookupError`` is raised if the node is not
1232 known.
1240 known.
1233
1241
1234 The returned object conforms to the ``imanifestrevisionstored``
1242 The returned object conforms to the ``imanifestrevisionstored``
1235 interface.
1243 interface.
1236 """
1244 """
1237
1245
1238 def getstorage(tree):
1246 def getstorage(tree):
1239 """Retrieve an interface to storage for a particular tree.
1247 """Retrieve an interface to storage for a particular tree.
1240
1248
1241 If ``tree`` is the empty bytestring, storage for the root manifest will
1249 If ``tree`` is the empty bytestring, storage for the root manifest will
1242 be returned. Otherwise storage for a tree manifest is returned.
1250 be returned. Otherwise storage for a tree manifest is returned.
1243
1251
1244 TODO formalize interface for returned object.
1252 TODO formalize interface for returned object.
1245 """
1253 """
1246
1254
1247 def clearcaches():
1255 def clearcaches():
1248 """Clear caches associated with this collection."""
1256 """Clear caches associated with this collection."""
1249
1257
1250 def rev(node):
1258 def rev(node):
1251 """Obtain the revision number for a binary node.
1259 """Obtain the revision number for a binary node.
1252
1260
1253 Raises ``error.LookupError`` if the node is not known.
1261 Raises ``error.LookupError`` if the node is not known.
1254 """
1262 """
1255
1263
1256 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1264 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1257 """Local repository sub-interface providing access to tracked file storage.
1265 """Local repository sub-interface providing access to tracked file storage.
1258
1266
1259 This interface defines how a repository accesses storage for a single
1267 This interface defines how a repository accesses storage for a single
1260 tracked file path.
1268 tracked file path.
1261 """
1269 """
1262
1270
1263 def file(f):
1271 def file(f):
1264 """Obtain a filelog for a tracked path.
1272 """Obtain a filelog for a tracked path.
1265
1273
1266 The returned type conforms to the ``ifilestorage`` interface.
1274 The returned type conforms to the ``ifilestorage`` interface.
1267 """
1275 """
1268
1276
1269 class ilocalrepositorymain(interfaceutil.Interface):
1277 class ilocalrepositorymain(interfaceutil.Interface):
1270 """Main interface for local repositories.
1278 """Main interface for local repositories.
1271
1279
1272 This currently captures the reality of things - not how things should be.
1280 This currently captures the reality of things - not how things should be.
1273 """
1281 """
1274
1282
1275 supportedformats = interfaceutil.Attribute(
1283 supportedformats = interfaceutil.Attribute(
1276 """Set of requirements that apply to stream clone.
1284 """Set of requirements that apply to stream clone.
1277
1285
1278 This is actually a class attribute and is shared among all instances.
1286 This is actually a class attribute and is shared among all instances.
1279 """)
1287 """)
1280
1288
1281 supported = interfaceutil.Attribute(
1289 supported = interfaceutil.Attribute(
1282 """Set of requirements that this repo is capable of opening.""")
1290 """Set of requirements that this repo is capable of opening.""")
1283
1291
1284 requirements = interfaceutil.Attribute(
1292 requirements = interfaceutil.Attribute(
1285 """Set of requirements this repo uses.""")
1293 """Set of requirements this repo uses.""")
1286
1294
1287 features = interfaceutil.Attribute(
1295 features = interfaceutil.Attribute(
1288 """Set of "features" this repository supports.
1296 """Set of "features" this repository supports.
1289
1297
1290 A "feature" is a loosely-defined term. It can refer to a feature
1298 A "feature" is a loosely-defined term. It can refer to a feature
1291 in the classical sense or can describe an implementation detail
1299 in the classical sense or can describe an implementation detail
1292 of the repository. For example, a ``readonly`` feature may denote
1300 of the repository. For example, a ``readonly`` feature may denote
1293 the repository as read-only. Or a ``revlogfilestore`` feature may
1301 the repository as read-only. Or a ``revlogfilestore`` feature may
1294 denote that the repository is using revlogs for file storage.
1302 denote that the repository is using revlogs for file storage.
1295
1303
1296 The intent of features is to provide a machine-queryable mechanism
1304 The intent of features is to provide a machine-queryable mechanism
1297 for repo consumers to test for various repository characteristics.
1305 for repo consumers to test for various repository characteristics.
1298
1306
1299 Features are similar to ``requirements``. The main difference is that
1307 Features are similar to ``requirements``. The main difference is that
1300 requirements are stored on-disk and represent requirements to open the
1308 requirements are stored on-disk and represent requirements to open the
1301 repository. Features are more run-time capabilities of the repository
1309 repository. Features are more run-time capabilities of the repository
1302 and more granular capabilities (which may be derived from requirements).
1310 and more granular capabilities (which may be derived from requirements).
1303 """)
1311 """)
1304
1312
1305 filtername = interfaceutil.Attribute(
1313 filtername = interfaceutil.Attribute(
1306 """Name of the repoview that is active on this repo.""")
1314 """Name of the repoview that is active on this repo.""")
1307
1315
1308 wvfs = interfaceutil.Attribute(
1316 wvfs = interfaceutil.Attribute(
1309 """VFS used to access the working directory.""")
1317 """VFS used to access the working directory.""")
1310
1318
1311 vfs = interfaceutil.Attribute(
1319 vfs = interfaceutil.Attribute(
1312 """VFS rooted at the .hg directory.
1320 """VFS rooted at the .hg directory.
1313
1321
1314 Used to access repository data not in the store.
1322 Used to access repository data not in the store.
1315 """)
1323 """)
1316
1324
1317 svfs = interfaceutil.Attribute(
1325 svfs = interfaceutil.Attribute(
1318 """VFS rooted at the store.
1326 """VFS rooted at the store.
1319
1327
1320 Used to access repository data in the store. Typically .hg/store.
1328 Used to access repository data in the store. Typically .hg/store.
1321 But can point elsewhere if the store is shared.
1329 But can point elsewhere if the store is shared.
1322 """)
1330 """)
1323
1331
1324 root = interfaceutil.Attribute(
1332 root = interfaceutil.Attribute(
1325 """Path to the root of the working directory.""")
1333 """Path to the root of the working directory.""")
1326
1334
1327 path = interfaceutil.Attribute(
1335 path = interfaceutil.Attribute(
1328 """Path to the .hg directory.""")
1336 """Path to the .hg directory.""")
1329
1337
1330 origroot = interfaceutil.Attribute(
1338 origroot = interfaceutil.Attribute(
1331 """The filesystem path that was used to construct the repo.""")
1339 """The filesystem path that was used to construct the repo.""")
1332
1340
1333 auditor = interfaceutil.Attribute(
1341 auditor = interfaceutil.Attribute(
1334 """A pathauditor for the working directory.
1342 """A pathauditor for the working directory.
1335
1343
1336 This checks if a path refers to a nested repository.
1344 This checks if a path refers to a nested repository.
1337
1345
1338 Operates on the filesystem.
1346 Operates on the filesystem.
1339 """)
1347 """)
1340
1348
1341 nofsauditor = interfaceutil.Attribute(
1349 nofsauditor = interfaceutil.Attribute(
1342 """A pathauditor for the working directory.
1350 """A pathauditor for the working directory.
1343
1351
1344 This is like ``auditor`` except it doesn't do filesystem checks.
1352 This is like ``auditor`` except it doesn't do filesystem checks.
1345 """)
1353 """)
1346
1354
1347 baseui = interfaceutil.Attribute(
1355 baseui = interfaceutil.Attribute(
1348 """Original ui instance passed into constructor.""")
1356 """Original ui instance passed into constructor.""")
1349
1357
1350 ui = interfaceutil.Attribute(
1358 ui = interfaceutil.Attribute(
1351 """Main ui instance for this instance.""")
1359 """Main ui instance for this instance.""")
1352
1360
1353 sharedpath = interfaceutil.Attribute(
1361 sharedpath = interfaceutil.Attribute(
1354 """Path to the .hg directory of the repo this repo was shared from.""")
1362 """Path to the .hg directory of the repo this repo was shared from.""")
1355
1363
1356 store = interfaceutil.Attribute(
1364 store = interfaceutil.Attribute(
1357 """A store instance.""")
1365 """A store instance.""")
1358
1366
1359 spath = interfaceutil.Attribute(
1367 spath = interfaceutil.Attribute(
1360 """Path to the store.""")
1368 """Path to the store.""")
1361
1369
1362 sjoin = interfaceutil.Attribute(
1370 sjoin = interfaceutil.Attribute(
1363 """Alias to self.store.join.""")
1371 """Alias to self.store.join.""")
1364
1372
1365 cachevfs = interfaceutil.Attribute(
1373 cachevfs = interfaceutil.Attribute(
1366 """A VFS used to access the cache directory.
1374 """A VFS used to access the cache directory.
1367
1375
1368 Typically .hg/cache.
1376 Typically .hg/cache.
1369 """)
1377 """)
1370
1378
1371 filteredrevcache = interfaceutil.Attribute(
1379 filteredrevcache = interfaceutil.Attribute(
1372 """Holds sets of revisions to be filtered.""")
1380 """Holds sets of revisions to be filtered.""")
1373
1381
1374 names = interfaceutil.Attribute(
1382 names = interfaceutil.Attribute(
1375 """A ``namespaces`` instance.""")
1383 """A ``namespaces`` instance.""")
1376
1384
1377 def close():
1385 def close():
1378 """Close the handle on this repository."""
1386 """Close the handle on this repository."""
1379
1387
1380 def peer():
1388 def peer():
1381 """Obtain an object conforming to the ``peer`` interface."""
1389 """Obtain an object conforming to the ``peer`` interface."""
1382
1390
1383 def unfiltered():
1391 def unfiltered():
1384 """Obtain an unfiltered/raw view of this repo."""
1392 """Obtain an unfiltered/raw view of this repo."""
1385
1393
1386 def filtered(name, visibilityexceptions=None):
1394 def filtered(name, visibilityexceptions=None):
1387 """Obtain a named view of this repository."""
1395 """Obtain a named view of this repository."""
1388
1396
1389 obsstore = interfaceutil.Attribute(
1397 obsstore = interfaceutil.Attribute(
1390 """A store of obsolescence data.""")
1398 """A store of obsolescence data.""")
1391
1399
1392 changelog = interfaceutil.Attribute(
1400 changelog = interfaceutil.Attribute(
1393 """A handle on the changelog revlog.""")
1401 """A handle on the changelog revlog.""")
1394
1402
1395 manifestlog = interfaceutil.Attribute(
1403 manifestlog = interfaceutil.Attribute(
1396 """An instance conforming to the ``imanifestlog`` interface.
1404 """An instance conforming to the ``imanifestlog`` interface.
1397
1405
1398 Provides access to manifests for the repository.
1406 Provides access to manifests for the repository.
1399 """)
1407 """)
1400
1408
1401 dirstate = interfaceutil.Attribute(
1409 dirstate = interfaceutil.Attribute(
1402 """Working directory state.""")
1410 """Working directory state.""")
1403
1411
1404 narrowpats = interfaceutil.Attribute(
1412 narrowpats = interfaceutil.Attribute(
1405 """Matcher patterns for this repository's narrowspec.""")
1413 """Matcher patterns for this repository's narrowspec.""")
1406
1414
1407 def narrowmatch():
1415 def narrowmatch():
1408 """Obtain a matcher for the narrowspec."""
1416 """Obtain a matcher for the narrowspec."""
1409
1417
1410 def setnarrowpats(newincludes, newexcludes):
1418 def setnarrowpats(newincludes, newexcludes):
1411 """Define the narrowspec for this repository."""
1419 """Define the narrowspec for this repository."""
1412
1420
1413 def __getitem__(changeid):
1421 def __getitem__(changeid):
1414 """Try to resolve a changectx."""
1422 """Try to resolve a changectx."""
1415
1423
1416 def __contains__(changeid):
1424 def __contains__(changeid):
1417 """Whether a changeset exists."""
1425 """Whether a changeset exists."""
1418
1426
1419 def __nonzero__():
1427 def __nonzero__():
1420 """Always returns True."""
1428 """Always returns True."""
1421 return True
1429 return True
1422
1430
1423 __bool__ = __nonzero__
1431 __bool__ = __nonzero__
1424
1432
1425 def __len__():
1433 def __len__():
1426 """Returns the number of changesets in the repo."""
1434 """Returns the number of changesets in the repo."""
1427
1435
1428 def __iter__():
1436 def __iter__():
1429 """Iterate over revisions in the changelog."""
1437 """Iterate over revisions in the changelog."""
1430
1438
1431 def revs(expr, *args):
1439 def revs(expr, *args):
1432 """Evaluate a revset.
1440 """Evaluate a revset.
1433
1441
1434 Emits revisions.
1442 Emits revisions.
1435 """
1443 """
1436
1444
1437 def set(expr, *args):
1445 def set(expr, *args):
1438 """Evaluate a revset.
1446 """Evaluate a revset.
1439
1447
1440 Emits changectx instances.
1448 Emits changectx instances.
1441 """
1449 """
1442
1450
1443 def anyrevs(specs, user=False, localalias=None):
1451 def anyrevs(specs, user=False, localalias=None):
1444 """Find revisions matching one of the given revsets."""
1452 """Find revisions matching one of the given revsets."""
1445
1453
1446 def url():
1454 def url():
1447 """Returns a string representing the location of this repo."""
1455 """Returns a string representing the location of this repo."""
1448
1456
1449 def hook(name, throw=False, **args):
1457 def hook(name, throw=False, **args):
1450 """Call a hook."""
1458 """Call a hook."""
1451
1459
1452 def tags():
1460 def tags():
1453 """Return a mapping of tag to node."""
1461 """Return a mapping of tag to node."""
1454
1462
1455 def tagtype(tagname):
1463 def tagtype(tagname):
1456 """Return the type of a given tag."""
1464 """Return the type of a given tag."""
1457
1465
1458 def tagslist():
1466 def tagslist():
1459 """Return a list of tags ordered by revision."""
1467 """Return a list of tags ordered by revision."""
1460
1468
1461 def nodetags(node):
1469 def nodetags(node):
1462 """Return the tags associated with a node."""
1470 """Return the tags associated with a node."""
1463
1471
1464 def nodebookmarks(node):
1472 def nodebookmarks(node):
1465 """Return the list of bookmarks pointing to the specified node."""
1473 """Return the list of bookmarks pointing to the specified node."""
1466
1474
1467 def branchmap():
1475 def branchmap():
1468 """Return a mapping of branch to heads in that branch."""
1476 """Return a mapping of branch to heads in that branch."""
1469
1477
1470 def revbranchcache():
1478 def revbranchcache():
1471 pass
1479 pass
1472
1480
1473 def branchtip(branchtip, ignoremissing=False):
1481 def branchtip(branchtip, ignoremissing=False):
1474 """Return the tip node for a given branch."""
1482 """Return the tip node for a given branch."""
1475
1483
1476 def lookup(key):
1484 def lookup(key):
1477 """Resolve the node for a revision."""
1485 """Resolve the node for a revision."""
1478
1486
1479 def lookupbranch(key):
1487 def lookupbranch(key):
1480 """Look up the branch name of the given revision or branch name."""
1488 """Look up the branch name of the given revision or branch name."""
1481
1489
1482 def known(nodes):
1490 def known(nodes):
1483 """Determine whether a series of nodes is known.
1491 """Determine whether a series of nodes is known.
1484
1492
1485 Returns a list of bools.
1493 Returns a list of bools.
1486 """
1494 """
1487
1495
1488 def local():
1496 def local():
1489 """Whether the repository is local."""
1497 """Whether the repository is local."""
1490 return True
1498 return True
1491
1499
1492 def publishing():
1500 def publishing():
1493 """Whether the repository is a publishing repository."""
1501 """Whether the repository is a publishing repository."""
1494
1502
1495 def cancopy():
1503 def cancopy():
1496 pass
1504 pass
1497
1505
1498 def shared():
1506 def shared():
1499 """The type of shared repository or None."""
1507 """The type of shared repository or None."""
1500
1508
1501 def wjoin(f, *insidef):
1509 def wjoin(f, *insidef):
1502 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1510 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1503
1511
1504 def setparents(p1, p2):
1512 def setparents(p1, p2):
1505 """Set the parent nodes of the working directory."""
1513 """Set the parent nodes of the working directory."""
1506
1514
1507 def filectx(path, changeid=None, fileid=None):
1515 def filectx(path, changeid=None, fileid=None):
1508 """Obtain a filectx for the given file revision."""
1516 """Obtain a filectx for the given file revision."""
1509
1517
1510 def getcwd():
1518 def getcwd():
1511 """Obtain the current working directory from the dirstate."""
1519 """Obtain the current working directory from the dirstate."""
1512
1520
1513 def pathto(f, cwd=None):
1521 def pathto(f, cwd=None):
1514 """Obtain the relative path to a file."""
1522 """Obtain the relative path to a file."""
1515
1523
1516 def adddatafilter(name, fltr):
1524 def adddatafilter(name, fltr):
1517 pass
1525 pass
1518
1526
1519 def wread(filename):
1527 def wread(filename):
1520 """Read a file from wvfs, using data filters."""
1528 """Read a file from wvfs, using data filters."""
1521
1529
1522 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1530 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1523 """Write data to a file in the wvfs, using data filters."""
1531 """Write data to a file in the wvfs, using data filters."""
1524
1532
1525 def wwritedata(filename, data):
1533 def wwritedata(filename, data):
1526 """Resolve data for writing to the wvfs, using data filters."""
1534 """Resolve data for writing to the wvfs, using data filters."""
1527
1535
1528 def currenttransaction():
1536 def currenttransaction():
1529 """Obtain the current transaction instance or None."""
1537 """Obtain the current transaction instance or None."""
1530
1538
1531 def transaction(desc, report=None):
1539 def transaction(desc, report=None):
1532 """Open a new transaction to write to the repository."""
1540 """Open a new transaction to write to the repository."""
1533
1541
1534 def undofiles():
1542 def undofiles():
1535 """Returns a list of (vfs, path) for files to undo transactions."""
1543 """Returns a list of (vfs, path) for files to undo transactions."""
1536
1544
1537 def recover():
1545 def recover():
1538 """Roll back an interrupted transaction."""
1546 """Roll back an interrupted transaction."""
1539
1547
1540 def rollback(dryrun=False, force=False):
1548 def rollback(dryrun=False, force=False):
1541 """Undo the last transaction.
1549 """Undo the last transaction.
1542
1550
1543 DANGEROUS.
1551 DANGEROUS.
1544 """
1552 """
1545
1553
1546 def updatecaches(tr=None, full=False):
1554 def updatecaches(tr=None, full=False):
1547 """Warm repo caches."""
1555 """Warm repo caches."""
1548
1556
1549 def invalidatecaches():
1557 def invalidatecaches():
1550 """Invalidate cached data due to the repository mutating."""
1558 """Invalidate cached data due to the repository mutating."""
1551
1559
1552 def invalidatevolatilesets():
1560 def invalidatevolatilesets():
1553 pass
1561 pass
1554
1562
1555 def invalidatedirstate():
1563 def invalidatedirstate():
1556 """Invalidate the dirstate."""
1564 """Invalidate the dirstate."""
1557
1565
1558 def invalidate(clearfilecache=False):
1566 def invalidate(clearfilecache=False):
1559 pass
1567 pass
1560
1568
1561 def invalidateall():
1569 def invalidateall():
1562 pass
1570 pass
1563
1571
1564 def lock(wait=True):
1572 def lock(wait=True):
1565 """Lock the repository store and return a lock instance."""
1573 """Lock the repository store and return a lock instance."""
1566
1574
1567 def wlock(wait=True):
1575 def wlock(wait=True):
1568 """Lock the non-store parts of the repository."""
1576 """Lock the non-store parts of the repository."""
1569
1577
1570 def currentwlock():
1578 def currentwlock():
1571 """Return the wlock if it's held or None."""
1579 """Return the wlock if it's held or None."""
1572
1580
1573 def checkcommitpatterns(wctx, vdirs, match, status, fail):
1581 def checkcommitpatterns(wctx, vdirs, match, status, fail):
1574 pass
1582 pass
1575
1583
1576 def commit(text='', user=None, date=None, match=None, force=False,
1584 def commit(text='', user=None, date=None, match=None, force=False,
1577 editor=False, extra=None):
1585 editor=False, extra=None):
1578 """Add a new revision to the repository."""
1586 """Add a new revision to the repository."""
1579
1587
1580 def commitctx(ctx, error=False):
1588 def commitctx(ctx, error=False):
1581 """Commit a commitctx instance to the repository."""
1589 """Commit a commitctx instance to the repository."""
1582
1590
1583 def destroying():
1591 def destroying():
1584 """Inform the repository that nodes are about to be destroyed."""
1592 """Inform the repository that nodes are about to be destroyed."""
1585
1593
1586 def destroyed():
1594 def destroyed():
1587 """Inform the repository that nodes have been destroyed."""
1595 """Inform the repository that nodes have been destroyed."""
1588
1596
1589 def status(node1='.', node2=None, match=None, ignored=False,
1597 def status(node1='.', node2=None, match=None, ignored=False,
1590 clean=False, unknown=False, listsubrepos=False):
1598 clean=False, unknown=False, listsubrepos=False):
1591 """Convenience method to call repo[x].status()."""
1599 """Convenience method to call repo[x].status()."""
1592
1600
1593 def addpostdsstatus(ps):
1601 def addpostdsstatus(ps):
1594 pass
1602 pass
1595
1603
1596 def postdsstatus():
1604 def postdsstatus():
1597 pass
1605 pass
1598
1606
1599 def clearpostdsstatus():
1607 def clearpostdsstatus():
1600 pass
1608 pass
1601
1609
1602 def heads(start=None):
1610 def heads(start=None):
1603 """Obtain list of nodes that are DAG heads."""
1611 """Obtain list of nodes that are DAG heads."""
1604
1612
1605 def branchheads(branch=None, start=None, closed=False):
1613 def branchheads(branch=None, start=None, closed=False):
1606 pass
1614 pass
1607
1615
1608 def branches(nodes):
1616 def branches(nodes):
1609 pass
1617 pass
1610
1618
1611 def between(pairs):
1619 def between(pairs):
1612 pass
1620 pass
1613
1621
1614 def checkpush(pushop):
1622 def checkpush(pushop):
1615 pass
1623 pass
1616
1624
1617 prepushoutgoinghooks = interfaceutil.Attribute(
1625 prepushoutgoinghooks = interfaceutil.Attribute(
1618 """util.hooks instance.""")
1626 """util.hooks instance.""")
1619
1627
1620 def pushkey(namespace, key, old, new):
1628 def pushkey(namespace, key, old, new):
1621 pass
1629 pass
1622
1630
1623 def listkeys(namespace):
1631 def listkeys(namespace):
1624 pass
1632 pass
1625
1633
1626 def debugwireargs(one, two, three=None, four=None, five=None):
1634 def debugwireargs(one, two, three=None, four=None, five=None):
1627 pass
1635 pass
1628
1636
1629 def savecommitmessage(text):
1637 def savecommitmessage(text):
1630 pass
1638 pass
1631
1639
1632 class completelocalrepository(ilocalrepositorymain,
1640 class completelocalrepository(ilocalrepositorymain,
1633 ilocalrepositoryfilestorage):
1641 ilocalrepositoryfilestorage):
1634 """Complete interface for a local repository."""
1642 """Complete interface for a local repository."""
General Comments 0
You need to be logged in to leave comments. Login now