nodemap: also use persistent nodemap for manifest...
marmoute, r45290:640d5b3b (default)
@@ -1,2298 +1,2301 @@
# manifest.py - manifest revision class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import heapq
import itertools
import struct
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
)
from .pycompat import getattr
from . import (
    encoding,
    error,
    match as matchmod,
    mdiff,
    pathutil,
    policy,
    pycompat,
    revlog,
    util,
)
from .interfaces import (
    repository,
    util as interfaceutil,
)

parsers = policy.importmod('parsers')
propertycache = util.propertycache

# Allow tests to more easily test the alternate path in manifestdict.fastdelta()
FASTDELTA_TEXTDIFF_THRESHOLD = 1000


def _parse(data):
    # This method does a little bit of excessive-looking
    # precondition checking. This is so that the behavior of this
    # class exactly matches its C counterpart to try and help
    # prevent surprise breakage for anyone that develops against
    # the pure version.
    if data and data[-1:] != b'\n':
        raise ValueError(b'Manifest did not end in a newline.')
    prev = None
    for l in data.splitlines():
        if prev is not None and prev > l:
            raise ValueError(b'Manifest lines not in sorted order.')
        prev = l
        f, n = l.split(b'\0')
        nl = len(n)
        if 64 < nl:
            # modern hash, full width
            yield f, bin(n[:64]), n[64:]
        elif 40 < nl < 45:
            # legacy hash, always sha1
            yield f, bin(n[:40]), n[40:]
        else:
            yield f, bin(n), b''

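
# Illustrative sketch (not part of the original module): the manifest text
# format consumed by _parse() is one "<file>\0<hexnode><flags>\n" entry per
# file, sorted by filename; the hex node is 40 characters for sha1 (64 for
# the newer hash) and the optional flag is a single character such as 'x'
# or 'l'. The helper name below is hypothetical and exists only to show the
# format.
def _example_parse_format():
    data = (
        b"bar/baz.py\x00" + b"b" * 40 + b"l\n"  # symlink entry
        + b"foo.py\x00" + b"a" * 40 + b"\n"     # plain file entry
    )
    for path, node, flags in _parse(data):
        # node is the 20-byte binary form of the 40 hex characters
        assert len(node) == 20
        print(path, flags)
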
def _text(it):
    files = []
    lines = []
    for f, n, fl in it:
        files.append(f)
        # if this is changed to support newlines in filenames,
        # be sure to check the templates/ dir again (especially *-raw.tmpl)
        lines.append(b"%s\0%s%s\n" % (f, hex(n), fl))

    _checkforbidden(files)
    return b''.join(lines)


class lazymanifestiter(object):
    def __init__(self, lm):
        self.pos = 0
        self.lm = lm

    def __iter__(self):
        return self

    def next(self):
        try:
            data, pos = self.lm._get(self.pos)
        except IndexError:
            raise StopIteration
        if pos == -1:
            self.pos += 1
            return data[0]
        self.pos += 1
        zeropos = data.find(b'\x00', pos)
        return data[pos:zeropos]

    __next__ = next


class lazymanifestiterentries(object):
    def __init__(self, lm):
        self.lm = lm
        self.pos = 0

    def __iter__(self):
        return self

    def next(self):
        try:
            data, pos = self.lm._get(self.pos)
        except IndexError:
            raise StopIteration
        if pos == -1:
            self.pos += 1
            return data
        zeropos = data.find(b'\x00', pos)
        hashval = unhexlify(data, self.lm.extrainfo[self.pos], zeropos + 1, 40)
        flags = self.lm._getflags(data, self.pos, zeropos)
        self.pos += 1
        return (data[pos:zeropos], hashval, flags)

    __next__ = next


def unhexlify(data, extra, pos, length):
    s = bin(data[pos : pos + length])
    if extra:
        s += chr(extra & 0xFF)
    return s


def _cmp(a, b):
    return (a > b) - (a < b)


class _lazymanifest(object):
    """A pure python manifest backed by a byte string. It is supplemented with
    internal lists as it is modified, until it is compacted back to a pure byte
    string.

    ``data`` is the initial manifest data.

    ``positions`` is a list of offsets, one per manifest entry. Positive
    values are offsets into ``data``, negative values are offsets into the
    ``extradata`` list. When an entry is removed, its entry is dropped from
    ``positions``. The values are encoded such that when walking the list and
    indexing into ``data`` or ``extradata`` as appropriate, the entries are
    sorted by filename.

    ``extradata`` is a list of (key, hash, flags) for entries that were added or
    modified since the manifest was created or compacted.
    """

    def __init__(
        self,
        data,
        positions=None,
        extrainfo=None,
        extradata=None,
        hasremovals=False,
    ):
        if positions is None:
            self.positions = self.findlines(data)
            self.extrainfo = [0] * len(self.positions)
            self.data = data
            self.extradata = []
            self.hasremovals = False
        else:
            self.positions = positions[:]
            self.extrainfo = extrainfo[:]
            self.extradata = extradata[:]
            self.data = data
            self.hasremovals = hasremovals

    def findlines(self, data):
        if not data:
            return []
        pos = data.find(b"\n")
        if pos == -1 or data[-1:] != b'\n':
            raise ValueError(b"Manifest did not end in a newline.")
        positions = [0]
        prev = data[: data.find(b'\x00')]
        while pos < len(data) - 1 and pos != -1:
            positions.append(pos + 1)
            nexts = data[pos + 1 : data.find(b'\x00', pos + 1)]
            if nexts < prev:
                raise ValueError(b"Manifest lines not in sorted order.")
            prev = nexts
            pos = data.find(b"\n", pos + 1)
        return positions

    def _get(self, index):
        # get the position encoded in pos:
        #   positive number is an index in 'data'
        #   negative number is in extrapieces
        pos = self.positions[index]
        if pos >= 0:
            return self.data, pos
        return self.extradata[-pos - 1], -1

    def _getkey(self, pos):
        if pos >= 0:
            return self.data[pos : self.data.find(b'\x00', pos + 1)]
        return self.extradata[-pos - 1][0]

    def bsearch(self, key):
        first = 0
        last = len(self.positions) - 1

        while first <= last:
            midpoint = (first + last) // 2
            nextpos = self.positions[midpoint]
            candidate = self._getkey(nextpos)
            r = _cmp(key, candidate)
            if r == 0:
                return midpoint
            else:
                if r < 0:
                    last = midpoint - 1
                else:
                    first = midpoint + 1
        return -1

    def bsearch2(self, key):
        # same as the above, but will always return the position
        # done for performance reasons
        first = 0
        last = len(self.positions) - 1

        while first <= last:
            midpoint = (first + last) // 2
            nextpos = self.positions[midpoint]
            candidate = self._getkey(nextpos)
            r = _cmp(key, candidate)
            if r == 0:
                return (midpoint, True)
            else:
                if r < 0:
                    last = midpoint - 1
                else:
                    first = midpoint + 1
        return (first, False)

    def __contains__(self, key):
        return self.bsearch(key) != -1

    def _getflags(self, data, needle, pos):
        start = pos + 41
        end = data.find(b"\n", start)
        if end == -1:
            end = len(data) - 1
        if start == end:
            return b''
        return self.data[start:end]

    def __getitem__(self, key):
        if not isinstance(key, bytes):
            raise TypeError(b"getitem: manifest keys must be a bytes.")
        needle = self.bsearch(key)
        if needle == -1:
            raise KeyError
        data, pos = self._get(needle)
        if pos == -1:
            return (data[1], data[2])
        zeropos = data.find(b'\x00', pos)
        nlpos = data.find(b'\n', zeropos)
        assert 0 <= needle <= len(self.positions)
        assert len(self.extrainfo) == len(self.positions)
        hlen = nlpos - zeropos - 1
        # Hashes sometimes have an extra byte tucked on the end, so
        # detect that.
        if hlen % 2:
            hlen -= 1
        hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, hlen)
        flags = self._getflags(data, needle, zeropos)
        return (hashval, flags)

    def __delitem__(self, key):
        needle, found = self.bsearch2(key)
        if not found:
            raise KeyError
        cur = self.positions[needle]
        self.positions = self.positions[:needle] + self.positions[needle + 1 :]
        self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1 :]
        if cur >= 0:
            # This does NOT unsort the list as far as the search functions are
            # concerned, as they only examine lines mapped by self.positions.
            self.data = self.data[:cur] + b'\x00' + self.data[cur + 1 :]
            self.hasremovals = True

    def __setitem__(self, key, value):
        if not isinstance(key, bytes):
            raise TypeError(b"setitem: manifest keys must be a byte string.")
        if not isinstance(value, tuple) or len(value) != 2:
            raise TypeError(
                b"Manifest values must be a tuple of (node, flags)."
            )
        hashval = value[0]
        # hashes are either 20 or 32 bytes (sha1 or its replacement),
        # and allow one extra byte that won't be persisted to disk but
        # is sometimes used in memory.
        if not isinstance(hashval, bytes) or not (
            20 <= len(hashval) <= 22 or 32 <= len(hashval) <= 34
        ):
            raise TypeError(b"node must be a 20-byte or 32-byte byte string")
        flags = value[1]
        if len(hashval) == 22:
            hashval = hashval[:-1]
        if not isinstance(flags, bytes) or len(flags) > 1:
            raise TypeError(b"flags must be a 0 or 1 byte string, got %r", flags)
        needle, found = self.bsearch2(key)
        if found:
            # put the item
            pos = self.positions[needle]
            if pos < 0:
                self.extradata[-pos - 1] = (key, hashval, value[1])
            else:
                # just don't bother
                self.extradata.append((key, hashval, value[1]))
                self.positions[needle] = -len(self.extradata)
        else:
            # not found, put it in with extra positions
            self.extradata.append((key, hashval, value[1]))
            self.positions = (
                self.positions[:needle]
                + [-len(self.extradata)]
                + self.positions[needle:]
            )
            self.extrainfo = (
                self.extrainfo[:needle] + [0] + self.extrainfo[needle:]
            )

    def copy(self):
        # XXX call _compact like in C?
        return _lazymanifest(
            self.data,
            self.positions,
            self.extrainfo,
            self.extradata,
            self.hasremovals,
        )

    def _compact(self):
        # hopefully not called TOO often
        if len(self.extradata) == 0 and not self.hasremovals:
            return
        l = []
        i = 0
        offset = 0
        self.extrainfo = [0] * len(self.positions)
        while i < len(self.positions):
            if self.positions[i] >= 0:
                cur = self.positions[i]
                last_cut = cur

                # Collect all contiguous entries in the buffer at the current
                # offset, breaking out only for added/modified items held in
                # extradata, or a deleted line prior to the next position.
                while True:
                    self.positions[i] = offset
                    i += 1
                    if i == len(self.positions) or self.positions[i] < 0:
                        break

                    # A removed file has no positions[] entry, but does have an
                    # overwritten first byte. Break out and find the end of the
                    # current good entry/entries if there is a removed file
                    # before the next position.
                    if (
                        self.hasremovals
                        and self.data.find(b'\n\x00', cur, self.positions[i])
                        != -1
                    ):
                        break

                    offset += self.positions[i] - cur
                    cur = self.positions[i]
                end_cut = self.data.find(b'\n', cur)
                if end_cut != -1:
                    end_cut += 1
                offset += end_cut - cur
                l.append(self.data[last_cut:end_cut])
            else:
                while i < len(self.positions) and self.positions[i] < 0:
                    cur = self.positions[i]
                    t = self.extradata[-cur - 1]
                    l.append(self._pack(t))
                    self.positions[i] = offset
                    # Hashes are either 20 bytes (old sha1s) or 32
                    # bytes (new non-sha1).
                    hlen = 20
                    if len(t[1]) > 25:
                        hlen = 32
                    if len(t[1]) > hlen:
                        self.extrainfo[i] = ord(t[1][hlen + 1])
                    offset += len(l[-1])
                    i += 1
        self.data = b''.join(l)
        self.hasremovals = False
        self.extradata = []

    def _pack(self, d):
        n = d[1]
        if len(n) == 21 or len(n) == 33:
            n = n[:-1]
        assert len(n) == 20 or len(n) == 32
        return d[0] + b'\x00' + hex(n) + d[2] + b'\n'

    def text(self):
        self._compact()
        return self.data

    def diff(self, m2, clean=False):
        '''Finds changes between the current manifest and m2.'''
        # XXX think whether efficiency matters here
        diff = {}

        for fn, e1, flags in self.iterentries():
            if fn not in m2:
                diff[fn] = (e1, flags), (None, b'')
            else:
                e2 = m2[fn]
                if (e1, flags) != e2:
                    diff[fn] = (e1, flags), e2
                elif clean:
                    diff[fn] = None

        for fn, e2, flags in m2.iterentries():
            if fn not in self:
                diff[fn] = (None, b''), (e2, flags)

        return diff

    def iterentries(self):
        return lazymanifestiterentries(self)

    def iterkeys(self):
        return lazymanifestiter(self)

    def __iter__(self):
        return lazymanifestiter(self)

    def __len__(self):
        return len(self.positions)

    def filtercopy(self, filterfn):
        # XXX should be optimized
        c = _lazymanifest(b'')
        for f, n, fl in self.iterentries():
            if filterfn(f):
                c[f] = n, fl
        return c


try:
    _lazymanifest = parsers.lazymanifest
except AttributeError:
    pass

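
# Illustrative sketch (not part of the original module): whichever backend is
# in use (the pure-Python class above or parsers.lazymanifest from the C
# extension, swapped in by the try/except just above), a _lazymanifest maps
# filename -> (node, flags) and keeps additions in ``extradata`` until
# text()/_compact() folds them back into the byte string. The helper name is
# hypothetical.
def _example_lazymanifest():
    lm = _lazymanifest(b"a.txt\x00" + b"1" * 40 + b"\n")
    lm[b"b.txt"] = (b"\x22" * 20, b"x")      # new executable entry
    node, flags = lm[b"b.txt"]
    assert (node, flags) == (b"\x22" * 20, b"x")
    assert list(lm) == [b"a.txt", b"b.txt"]  # iteration stays filename-sorted
    return lm.text()                         # re-serialized, compacted text
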
@interfaceutil.implementer(repository.imanifestdict)
class manifestdict(object):
    def __init__(self, data=b''):
        self._lm = _lazymanifest(data)

    def __getitem__(self, key):
        return self._lm[key][0]

    def find(self, key):
        return self._lm[key]

    def __len__(self):
        return len(self._lm)

    def __nonzero__(self):
        # nonzero is covered by the __len__ function, but implementing it here
        # makes it easier for extensions to override.
        return len(self._lm) != 0

    __bool__ = __nonzero__

    def __setitem__(self, key, node):
        self._lm[key] = node, self.flags(key)

    def __contains__(self, key):
        if key is None:
            return False
        return key in self._lm

    def __delitem__(self, key):
        del self._lm[key]

    def __iter__(self):
        return self._lm.__iter__()

    def iterkeys(self):
        return self._lm.iterkeys()

    def keys(self):
        return list(self.iterkeys())

    def filesnotin(self, m2, match=None):
        '''Set of files in this manifest that are not in the other'''
        if match is not None:
            match = matchmod.badmatch(match, lambda path, msg: None)
            sm2 = set(m2.walk(match))
            return {f for f in self.walk(match) if f not in sm2}
        return {f for f in self if f not in m2}

    @propertycache
    def _dirs(self):
        return pathutil.dirs(self)

    def dirs(self):
        return self._dirs

    def hasdir(self, dir):
        return dir in self._dirs

    def _filesfastpath(self, match):
        '''Checks whether we can correctly and quickly iterate over matcher
        files instead of over manifest files.'''
        files = match.files()
        return len(files) < 100 and (
            match.isexact()
            or (match.prefix() and all(fn in self for fn in files))
        )

    def walk(self, match):
        '''Generates matching file names.

        Equivalent to manifest.matches(match).iterkeys(), but without creating
        an entirely new manifest.

        It also reports nonexistent files by marking them bad with match.bad().
        '''
        if match.always():
            for f in iter(self):
                yield f
            return

        fset = set(match.files())

        # avoid the entire walk if we're only looking for specific files
        if self._filesfastpath(match):
            for fn in sorted(fset):
                if fn in self:
                    yield fn
            return

        for fn in self:
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            if match(fn):
                yield fn

        # for dirstate.walk, files=[''] means "walk the whole tree".
        # follow that here, too
        fset.discard(b'')

        for fn in sorted(fset):
            if not self.hasdir(fn):
                match.bad(fn, None)

    def _matches(self, match):
        '''generate a new manifest filtered by the match argument'''
        if match.always():
            return self.copy()

        if self._filesfastpath(match):
            m = manifestdict()
            lm = self._lm
            for fn in match.files():
                if fn in lm:
                    m._lm[fn] = lm[fn]
            return m

        m = manifestdict()
        m._lm = self._lm.filtercopy(match)
        return m

    def diff(self, m2, match=None, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
        if match:
            m1 = self._matches(match)
            m2 = m2._matches(match)
            return m1.diff(m2, clean=clean)
        return self._lm.diff(m2._lm, clean)

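    # Illustrative sketch (not part of the original module): the shape of the
    # dict returned by diff(). Filenames map to a pair of (node, flags)
    # tuples, with (None, b'') standing in for the side where the file is
    # absent. The helper name is hypothetical.
    def _example_diff(self):
        m1 = manifestdict()
        m2 = manifestdict()
        m1[b"common"] = b"\x11" * 20
        m2[b"common"] = b"\x11" * 20
        m1[b"removed"] = b"\x22" * 20
        m2[b"added"] = b"\x33" * 20
        d = m1.diff(m2)
        assert d[b"removed"] == ((b"\x22" * 20, b""), (None, b""))
        assert d[b"added"] == ((None, b""), (b"\x33" * 20, b""))
        assert b"common" not in d  # unchanged files are omitted...
        assert m1.diff(m2, clean=True)[b"common"] is None  # ...unless clean=True
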
    def setflag(self, key, flag):
        self._lm[key] = self[key], flag

    def get(self, key, default=None):
        try:
            return self._lm[key][0]
        except KeyError:
            return default

    def flags(self, key):
        try:
            return self._lm[key][1]
        except KeyError:
            return b''

    def copy(self):
        c = manifestdict()
        c._lm = self._lm.copy()
        return c

    def items(self):
        return (x[:2] for x in self._lm.iterentries())

    def iteritems(self):
        return (x[:2] for x in self._lm.iterentries())

    def iterentries(self):
        return self._lm.iterentries()

    def text(self):
        # most likely uses native version
        return self._lm.text()

    def fastdelta(self, base, changes):
        """Given a base manifest text as a bytearray and a list of changes
        relative to that text, compute a delta that can be used by revlog.
        """
        delta = []
        dstart = None
        dend = None
        dline = [b""]
        start = 0
        # zero copy representation of base as a buffer
        addbuf = util.buffer(base)

        changes = list(changes)
        if len(changes) < FASTDELTA_TEXTDIFF_THRESHOLD:
            # start with a readonly loop that finds the offset of
            # each line and creates the deltas
            for f, todelete in changes:
                # bs will either be the index of the item or the insert point
                start, end = _msearch(addbuf, f, start)
                if not todelete:
                    h, fl = self._lm[f]
                    l = b"%s\0%s%s\n" % (f, hex(h), fl)
                else:
                    if start == end:
                        # item we want to delete was not found, error out
                        raise AssertionError(
                            _(b"failed to remove %s from manifest") % f
                        )
                    l = b""
                if dstart is not None and dstart <= start and dend >= start:
                    if dend < end:
                        dend = end
                    if l:
                        dline.append(l)
                else:
                    if dstart is not None:
                        delta.append([dstart, dend, b"".join(dline)])
                    dstart = start
                    dend = end
                    dline = [l]

            if dstart is not None:
                delta.append([dstart, dend, b"".join(dline)])
            # apply the delta to the base, and get a delta for addrevision
            deltatext, arraytext = _addlistdelta(base, delta)
        else:
            # For large changes, it's much cheaper to just build the text and
            # diff it.
            arraytext = bytearray(self.text())
            deltatext = mdiff.textdiff(
                util.buffer(base), util.buffer(arraytext)
            )

        return arraytext, deltatext

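
# Illustrative sketch (not part of the original module): fastdelta() takes the
# *old* manifest text as a bytearray plus (filename, todelete) pairs and
# returns the new full text together with a delta suitable for
# revlog.addrevision(). The helper name is hypothetical.
def _example_fastdelta():
    base = manifestdict()
    base[b"a.txt"] = b"\x11" * 20
    new = base.copy()
    new[b"b.txt"] = b"\x22" * 20           # the only change vs. base
    basetext = bytearray(base.text())
    arraytext, deltatext = new.fastdelta(basetext, [(b"b.txt", False)])
    assert bytes(arraytext) == new.text()  # delta applied to base == new text
    return deltatext
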
def _msearch(m, s, lo=0, hi=None):
    '''return a tuple (start, end) that says where to find s within m.

    If the string is found m[start:end] is the line containing
    that string. If start == end the string was not found and
    they indicate the proper sorted insertion point.

    m should be a buffer, a memoryview or a byte string.
    s is a byte string'''

    def advance(i, c):
        while i < lenm and m[i : i + 1] != c:
            i += 1
        return i

    if not s:
        return (lo, lo)
    lenm = len(m)
    if not hi:
        hi = lenm
    while lo < hi:
        mid = (lo + hi) // 2
        start = mid
        while start > 0 and m[start - 1 : start] != b'\n':
            start -= 1
        end = advance(start, b'\0')
        if bytes(m[start:end]) < s:
            # we know that after the null there are 40 bytes of sha1
            # this translates to the bisect lo = mid + 1
            lo = advance(end + 40, b'\n') + 1
        else:
            # this translates to the bisect hi = mid
            hi = start
    end = advance(lo, b'\0')
    found = m[lo:end]
    if s == found:
        # we know that after the null there are 40 bytes of sha1
        end = advance(end + 40, b'\n')
        return (lo, end + 1)
    else:
        return (lo, lo)

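
# Illustrative sketch (not part of the original module): _msearch() does a
# line-oriented binary search over raw manifest text, returning the
# [start:end) span of the matching line, or an empty span at the sorted
# insertion point when the file is absent. The helper name is hypothetical.
def _example_msearch():
    text = (
        b"a.txt\x00" + b"1" * 40 + b"\n"
        + b"c.txt\x00" + b"3" * 40 + b"\n"
    )
    start, end = _msearch(text, b"a.txt")
    assert text[start:end] == b"a.txt\x00" + b"1" * 40 + b"\n"
    start, end = _msearch(text, b"b.txt")
    assert start == end == 47  # insertion point between the two lines
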
def _checkforbidden(l):
    """Check filenames for illegal characters."""
    for f in l:
        if b'\n' in f or b'\r' in f:
            raise error.StorageError(
                _(b"'\\n' and '\\r' disallowed in filenames: %r")
                % pycompat.bytestr(f)
            )


# apply the changes collected during the bisect loop to our addlist
# return a delta suitable for addrevision
def _addlistdelta(addlist, x):
    # for large addlist arrays, building a new array is cheaper
    # than repeatedly modifying the existing one
    currentposition = 0
    newaddlist = bytearray()

    for start, end, content in x:
        newaddlist += addlist[currentposition:start]
        if content:
            newaddlist += bytearray(content)

        currentposition = end

    newaddlist += addlist[currentposition:]

    deltatext = b"".join(
        struct.pack(b">lll", start, end, len(content)) + content
        for start, end, content in x
    )
    return deltatext, newaddlist

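
# Illustrative sketch (not part of the original module): the delta text built
# by _addlistdelta() is a sequence of chunks, each a big-endian
# (start, end, length) header followed by the replacement bytes. Decoding it
# back into chunks looks roughly like this hypothetical helper:
def _example_decode_delta(deltatext):
    offset = 0
    while offset < len(deltatext):
        start, end, length = struct.unpack(
            b">lll", deltatext[offset : offset + 12]
        )
        content = deltatext[offset + 12 : offset + 12 + length]
        yield start, end, content
        offset += 12 + length
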
def _splittopdir(f):
    if b'/' in f:
        dir, subpath = f.split(b'/', 1)
        return dir + b'/', subpath
    else:
        return b'', f

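
# Illustrative example (not part of the original module): _splittopdir() peels
# off the first path component with its trailing slash, which is how
# treemanifest below routes operations to per-directory sub-manifests:
#   _splittopdir(b'dir/sub/f.txt') == (b'dir/', b'sub/f.txt')
#   _splittopdir(b'f.txt') == (b'', b'f.txt')
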
_noop = lambda s: None


@interfaceutil.implementer(repository.imanifestdict)
class treemanifest(object):
    def __init__(self, dir=b'', text=b''):
        self._dir = dir
        self._node = nullid
        self._loadfunc = _noop
        self._copyfunc = _noop
        self._dirty = False
        self._dirs = {}
        self._lazydirs = {}
        # Using _lazymanifest here is a little slower than plain old dicts
        self._files = {}
        self._flags = {}
        if text:

            def readsubtree(subdir, subm):
                raise AssertionError(
                    b'treemanifest constructor only accepts flat manifests'
                )

            self.parse(text, readsubtree)
            self._dirty = True  # Mark flat manifest dirty after parsing

    def _subpath(self, path):
        return self._dir + path

    def _loadalllazy(self):
        selfdirs = self._dirs
        for d, (path, node, readsubtree, docopy) in pycompat.iteritems(
            self._lazydirs
        ):
            if docopy:
                selfdirs[d] = readsubtree(path, node).copy()
            else:
                selfdirs[d] = readsubtree(path, node)
        self._lazydirs = {}

    def _loadlazy(self, d):
        v = self._lazydirs.get(d)
        if v:
            path, node, readsubtree, docopy = v
            if docopy:
                self._dirs[d] = readsubtree(path, node).copy()
            else:
                self._dirs[d] = readsubtree(path, node)
            del self._lazydirs[d]

    def _loadchildrensetlazy(self, visit):
        if not visit:
            return None
        if visit == b'all' or visit == b'this':
            self._loadalllazy()
            return None

        loadlazy = self._loadlazy
        for k in visit:
            loadlazy(k + b'/')
        return visit

    def _loaddifflazy(self, t1, t2):
        """load items in t1 and t2 if they're needed for diffing.

        The criteria currently are:
        - if it's not present in _lazydirs in either t1 or t2, load it in the
          other (it may already be loaded or it may not exist, doesn't matter)
        - if it's present in _lazydirs in both, compare the nodeid; if it
          differs, load it in both
        """
        toloadlazy = []
        for d, v1 in pycompat.iteritems(t1._lazydirs):
            v2 = t2._lazydirs.get(d)
            if not v2 or v2[1] != v1[1]:
                toloadlazy.append(d)
        for d, v1 in pycompat.iteritems(t2._lazydirs):
            if d not in t1._lazydirs:
                toloadlazy.append(d)

        for d in toloadlazy:
            t1._loadlazy(d)
            t2._loadlazy(d)

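    # Illustrative sketch (not part of the original module): each _lazydirs
    # entry is a (path, node, readsubtree, docopy) tuple; _loadlazy() calls
    # readsubtree(path, node) and moves the result into _dirs. Real entries
    # come from manifest revlog data; the stub readsubtree lambda and the
    # helper name below are hypothetical.
    def _example_loadlazy(self):
        tm = treemanifest(b'')
        sub = treemanifest(b'dir/')
        sub[b'f.txt'] = b'\x11' * 20
        tm._lazydirs = {b'dir/': (b'dir/', nullid, lambda p, n: sub, False)}
        tm._loadlazy(b'dir/')       # materializes the sub-manifest...
        assert b'dir/' in tm._dirs  # ...into the loaded _dirs mapping
        assert b'dir/f.txt' in tm
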
    def __len__(self):
        self._load()
        size = len(self._files)
        self._loadalllazy()
        for m in self._dirs.values():
            size += m.__len__()
        return size

    def __nonzero__(self):
        # Faster than "__len__() != 0" since it avoids loading sub-manifests
        return not self._isempty()

    __bool__ = __nonzero__

    def _isempty(self):
        self._load()  # for consistency; already loaded by all callers
        # See if we can skip loading everything.
        if self._files or (
            self._dirs and any(not m._isempty() for m in self._dirs.values())
        ):
            return False
        self._loadalllazy()
        return not self._dirs or all(m._isempty() for m in self._dirs.values())

    @encoding.strmethod
    def __repr__(self):
        return (
            b'<treemanifest dir=%s, node=%s, loaded=%r, dirty=%r at 0x%x>'
            % (
                self._dir,
                hex(self._node),
                bool(self._loadfunc is _noop),
                self._dirty,
                id(self),
            )
        )

    def dir(self):
        '''The directory that this tree manifest represents, including a
        trailing '/'. Empty string for the repo root directory.'''
        return self._dir

    def node(self):
        '''The node of this instance. nullid for unsaved instances. Should
        be updated when the instance is read or written from a revlog.
        '''
        assert not self._dirty
        return self._node

    def setnode(self, node):
        self._node = node
        self._dirty = False

    def iterentries(self):
        self._load()
        self._loadalllazy()
        for p, n in sorted(
            itertools.chain(self._dirs.items(), self._files.items())
        ):
            if p in self._files:
                yield self._subpath(p), n, self._flags.get(p, b'')
            else:
                for x in n.iterentries():
                    yield x

    def items(self):
        self._load()
        self._loadalllazy()
        for p, n in sorted(
            itertools.chain(self._dirs.items(), self._files.items())
        ):
            if p in self._files:
                yield self._subpath(p), n
            else:
                for f, sn in pycompat.iteritems(n):
                    yield f, sn

    iteritems = items

    def iterkeys(self):
        self._load()
        self._loadalllazy()
        for p in sorted(itertools.chain(self._dirs, self._files)):
            if p in self._files:
                yield self._subpath(p)
            else:
                for f in self._dirs[p]:
                    yield f

    def keys(self):
        return list(self.iterkeys())

    def __iter__(self):
        return self.iterkeys()

    def __contains__(self, f):
        if f is None:
            return False
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)

            if dir not in self._dirs:
                return False

            return self._dirs[dir].__contains__(subpath)
        else:
            return f in self._files

    def get(self, f, default=None):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)

            if dir not in self._dirs:
                return default
            return self._dirs[dir].get(subpath, default)
        else:
            return self._files.get(f, default)

    def __getitem__(self, f):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)

            return self._dirs[dir].__getitem__(subpath)
        else:
            return self._files[f]

    def flags(self, f):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)

            if dir not in self._dirs:
                return b''
            return self._dirs[dir].flags(subpath)
        else:
            if f in self._lazydirs or f in self._dirs:
                return b''
            return self._flags.get(f, b'')

    def find(self, f):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)

            return self._dirs[dir].find(subpath)
        else:
            return self._files[f], self._flags.get(f, b'')

    def __delitem__(self, f):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)

            self._dirs[dir].__delitem__(subpath)
            # If the directory is now empty, remove it
            if self._dirs[dir]._isempty():
                del self._dirs[dir]
        else:
            del self._files[f]
            if f in self._flags:
                del self._flags[f]
        self._dirty = True

    def __setitem__(self, f, n):
        assert n is not None
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)
            if dir not in self._dirs:
                self._dirs[dir] = treemanifest(self._subpath(dir))
            self._dirs[dir].__setitem__(subpath, n)
        else:
            # manifest nodes are either 20 bytes or 32 bytes,
            # depending on the hash in use. An extra byte is
            # occasionally used by hg, but won't ever be
            # persisted. Trim to 21 or 33 bytes as appropriate.
            trim = 21 if len(n) < 25 else 33
            self._files[f] = n[:trim]  # to match manifestdict's behavior
        self._dirty = True

    def _load(self):
        if self._loadfunc is not _noop:
            lf, self._loadfunc = self._loadfunc, _noop
            lf(self)
        elif self._copyfunc is not _noop:
            cf, self._copyfunc = self._copyfunc, _noop
            cf(self)

    def setflag(self, f, flags):
        """Set the flags (symlink, executable) for path f."""
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._loadlazy(dir)
            if dir not in self._dirs:
                self._dirs[dir] = treemanifest(self._subpath(dir))
            self._dirs[dir].setflag(subpath, flags)
        else:
            self._flags[f] = flags
        self._dirty = True

    def copy(self):
        copy = treemanifest(self._dir)
        copy._node = self._node
        copy._dirty = self._dirty
        if self._copyfunc is _noop:

            def _copyfunc(s):
                self._load()
                s._lazydirs = {
                    d: (p, n, r, True)
                    for d, (p, n, r, c) in pycompat.iteritems(self._lazydirs)
                }
                sdirs = s._dirs
                for d, v in pycompat.iteritems(self._dirs):
                    sdirs[d] = v.copy()
                s._files = dict.copy(self._files)
                s._flags = dict.copy(self._flags)

            if self._loadfunc is _noop:
                _copyfunc(copy)
            else:
                copy._copyfunc = _copyfunc
        else:
            copy._copyfunc = self._copyfunc
        return copy

    def filesnotin(self, m2, match=None):
        '''Set of files in this manifest that are not in the other'''
        if match and not match.always():
            m1 = self._matches(match)
            m2 = m2._matches(match)
            return m1.filesnotin(m2)

        files = set()

        def _filesnotin(t1, t2):
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                return
            t1._load()
            t2._load()
            self._loaddifflazy(t1, t2)
            for d, m1 in pycompat.iteritems(t1._dirs):
                if d in t2._dirs:
                    m2 = t2._dirs[d]
                    _filesnotin(m1, m2)
                else:
                    files.update(m1.iterkeys())

            for fn in t1._files:
1129 if fn not in t2._files:
1129 if fn not in t2._files:
1130 files.add(t1._subpath(fn))
1130 files.add(t1._subpath(fn))
1131
1131
1132 _filesnotin(self, m2)
1132 _filesnotin(self, m2)
1133 return files
1133 return files
1134
1134
1135 @propertycache
1135 @propertycache
1136 def _alldirs(self):
1136 def _alldirs(self):
1137 return pathutil.dirs(self)
1137 return pathutil.dirs(self)
1138
1138
1139 def dirs(self):
1139 def dirs(self):
1140 return self._alldirs
1140 return self._alldirs
1141
1141
1142 def hasdir(self, dir):
1142 def hasdir(self, dir):
1143 self._load()
1143 self._load()
1144 topdir, subdir = _splittopdir(dir)
1144 topdir, subdir = _splittopdir(dir)
1145 if topdir:
1145 if topdir:
1146 self._loadlazy(topdir)
1146 self._loadlazy(topdir)
1147 if topdir in self._dirs:
1147 if topdir in self._dirs:
1148 return self._dirs[topdir].hasdir(subdir)
1148 return self._dirs[topdir].hasdir(subdir)
1149 return False
1149 return False
1150 dirslash = dir + b'/'
1150 dirslash = dir + b'/'
1151 return dirslash in self._dirs or dirslash in self._lazydirs
1151 return dirslash in self._dirs or dirslash in self._lazydirs
1152
1152
1153 def walk(self, match):
1153 def walk(self, match):
1154 '''Generates matching file names.
1154 '''Generates matching file names.
1155
1155
1156 It also reports nonexistent files by marking them bad with match.bad().
1156 It also reports nonexistent files by marking them bad with match.bad().
1157 '''
1157 '''
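# Editorial example (assumption, not in the original source): walking with
# a matcher built from the explicit pattern b'does-not-exist' yields no
# names and ends with a match.bad(b'does-not-exist', None) call, as
# described in the docstring above.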
1158 if match.always():
1158 if match.always():
1159 for f in iter(self):
1159 for f in iter(self):
1160 yield f
1160 yield f
1161 return
1161 return
1162
1162
1163 fset = set(match.files())
1163 fset = set(match.files())
1164
1164
1165 for fn in self._walk(match):
1165 for fn in self._walk(match):
1166 if fn in fset:
1166 if fn in fset:
1167 # specified pattern is the exact name
1167 # specified pattern is the exact name
1168 fset.remove(fn)
1168 fset.remove(fn)
1169 yield fn
1169 yield fn
1170
1170
1171 # for dirstate.walk, files=[''] means "walk the whole tree".
1171 # for dirstate.walk, files=[''] means "walk the whole tree".
1172 # follow that here, too
1172 # follow that here, too
1173 fset.discard(b'')
1173 fset.discard(b'')
1174
1174
1175 for fn in sorted(fset):
1175 for fn in sorted(fset):
1176 if not self.hasdir(fn):
1176 if not self.hasdir(fn):
1177 match.bad(fn, None)
1177 match.bad(fn, None)
1178
1178
1179 def _walk(self, match):
1179 def _walk(self, match):
1180 '''Recursively generates matching file names for walk().'''
1180 '''Recursively generates matching file names for walk().'''
1181 visit = match.visitchildrenset(self._dir[:-1])
1181 visit = match.visitchildrenset(self._dir[:-1])
1182 if not visit:
1182 if not visit:
1183 return
1183 return
1184
1184
1185 # yield this dir's files and walk its submanifests
1185 # yield this dir's files and walk its submanifests
1186 self._load()
1186 self._load()
1187 visit = self._loadchildrensetlazy(visit)
1187 visit = self._loadchildrensetlazy(visit)
1188 for p in sorted(list(self._dirs) + list(self._files)):
1188 for p in sorted(list(self._dirs) + list(self._files)):
1189 if p in self._files:
1189 if p in self._files:
1190 fullp = self._subpath(p)
1190 fullp = self._subpath(p)
1191 if match(fullp):
1191 if match(fullp):
1192 yield fullp
1192 yield fullp
1193 else:
1193 else:
1194 if not visit or p[:-1] in visit:
1194 if not visit or p[:-1] in visit:
1195 for f in self._dirs[p]._walk(match):
1195 for f in self._dirs[p]._walk(match):
1196 yield f
1196 yield f
1197
1197
1198 def _matches(self, match):
1198 def _matches(self, match):
1199 '''recursively generate a new manifest filtered by the match argument.
1199 '''recursively generate a new manifest filtered by the match argument.
1200 '''
1200 '''
1201 if match.always():
1201 if match.always():
1202 return self.copy()
1202 return self.copy()
1203 return self._matches_inner(match)
1203 return self._matches_inner(match)
1204
1204
1205 def _matches_inner(self, match):
1205 def _matches_inner(self, match):
1206 if match.always():
1206 if match.always():
1207 return self.copy()
1207 return self.copy()
1208
1208
1209 visit = match.visitchildrenset(self._dir[:-1])
1209 visit = match.visitchildrenset(self._dir[:-1])
1210 if visit == b'all':
1210 if visit == b'all':
1211 return self.copy()
1211 return self.copy()
1212 ret = treemanifest(self._dir)
1212 ret = treemanifest(self._dir)
1213 if not visit:
1213 if not visit:
1214 return ret
1214 return ret
1215
1215
1216 self._load()
1216 self._load()
1217 for fn in self._files:
1217 for fn in self._files:
1218 # While visitchildrenset *usually* lists only subdirs, this is
1218 # While visitchildrenset *usually* lists only subdirs, this is
1219 # actually up to the matcher and may have some files in the set().
1219 # actually up to the matcher and may have some files in the set().
1220 # If visit == 'this', we should obviously look at the files in this
1220 # If visit == 'this', we should obviously look at the files in this
1221 # directory; if visit is a set, and fn is in it, we should inspect
1221 # directory; if visit is a set, and fn is in it, we should inspect
1222 # fn (but no need to inspect things not in the set).
1222 # fn (but no need to inspect things not in the set).
1223 if visit != b'this' and fn not in visit:
1223 if visit != b'this' and fn not in visit:
1224 continue
1224 continue
1225 fullp = self._subpath(fn)
1225 fullp = self._subpath(fn)
1226 # visitchildrenset isn't perfect; we still need to call the regular
1226 # visitchildrenset isn't perfect; we still need to call the regular
1227 # matcher code to further filter results.
1227 # matcher code to further filter results.
1228 if not match(fullp):
1228 if not match(fullp):
1229 continue
1229 continue
1230 ret._files[fn] = self._files[fn]
1230 ret._files[fn] = self._files[fn]
1231 if fn in self._flags:
1231 if fn in self._flags:
1232 ret._flags[fn] = self._flags[fn]
1232 ret._flags[fn] = self._flags[fn]
1233
1233
1234 visit = self._loadchildrensetlazy(visit)
1234 visit = self._loadchildrensetlazy(visit)
1235 for dir, subm in pycompat.iteritems(self._dirs):
1235 for dir, subm in pycompat.iteritems(self._dirs):
1236 if visit and dir[:-1] not in visit:
1236 if visit and dir[:-1] not in visit:
1237 continue
1237 continue
1238 m = subm._matches_inner(match)
1238 m = subm._matches_inner(match)
1239 if not m._isempty():
1239 if not m._isempty():
1240 ret._dirs[dir] = m
1240 ret._dirs[dir] = m
1241
1241
1242 if not ret._isempty():
1242 if not ret._isempty():
1243 ret._dirty = True
1243 ret._dirty = True
1244 return ret
1244 return ret
1245
1245
1246 def fastdelta(self, base, changes):
1246 def fastdelta(self, base, changes):
1247 raise FastdeltaUnavailable()
1247 raise FastdeltaUnavailable()
1248
1248
1249 def diff(self, m2, match=None, clean=False):
1249 def diff(self, m2, match=None, clean=False):
1250 '''Finds changes between the current manifest and m2.
1250 '''Finds changes between the current manifest and m2.
1251
1251
1252 Args:
1252 Args:
1253 m2: the manifest to which this manifest should be compared.
1253 m2: the manifest to which this manifest should be compared.
1254 clean: if true, include files unchanged between these manifests
1254 clean: if true, include files unchanged between these manifests
1255 with a None value in the returned dictionary.
1255 with a None value in the returned dictionary.
1256
1256
1257 The result is returned as a dict with filename as key and
1257 The result is returned as a dict with filename as key and
1258 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
1258 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
1259 nodeid in the current/other manifest and fl1/fl2 is the flag
1259 nodeid in the current/other manifest and fl1/fl2 is the flag
1260 in the current/other manifest. Where the file does not exist,
1260 in the current/other manifest. Where the file does not exist,
1261 the nodeid will be None and the flags will be the empty
1261 the nodeid will be None and the flags will be the empty
1262 string.
1262 string.
1263 '''
1263 '''
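# Editorial sketch (assumption, not in the original source): for a file
# b'a' whose node changed between the two manifests and a file b'b'
# present only in self, the result described above would look like
#   {b'a': ((n1, b''), (n2, b'')), b'b': ((nb, b'x'), (None, b''))}
# where n1/n2/nb are binary nodeids and b'x' marks an executable file.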
1264 if match and not match.always():
1264 if match and not match.always():
1265 m1 = self._matches(match)
1265 m1 = self._matches(match)
1266 m2 = m2._matches(match)
1266 m2 = m2._matches(match)
1267 return m1.diff(m2, clean=clean)
1267 return m1.diff(m2, clean=clean)
1268 result = {}
1268 result = {}
1269 emptytree = treemanifest()
1269 emptytree = treemanifest()
1270
1270
1271 def _iterativediff(t1, t2, stack):
1271 def _iterativediff(t1, t2, stack):
1272 """compares two tree manifests and append new tree-manifests which
1272 """compares two tree manifests and append new tree-manifests which
1273 needs to be compared to stack"""
1273 needs to be compared to stack"""
1274 if t1._node == t2._node and not t1._dirty and not t2._dirty:
1274 if t1._node == t2._node and not t1._dirty and not t2._dirty:
1275 return
1275 return
1276 t1._load()
1276 t1._load()
1277 t2._load()
1277 t2._load()
1278 self._loaddifflazy(t1, t2)
1278 self._loaddifflazy(t1, t2)
1279
1279
1280 for d, m1 in pycompat.iteritems(t1._dirs):
1280 for d, m1 in pycompat.iteritems(t1._dirs):
1281 m2 = t2._dirs.get(d, emptytree)
1281 m2 = t2._dirs.get(d, emptytree)
1282 stack.append((m1, m2))
1282 stack.append((m1, m2))
1283
1283
1284 for d, m2 in pycompat.iteritems(t2._dirs):
1284 for d, m2 in pycompat.iteritems(t2._dirs):
1285 if d not in t1._dirs:
1285 if d not in t1._dirs:
1286 stack.append((emptytree, m2))
1286 stack.append((emptytree, m2))
1287
1287
1288 for fn, n1 in pycompat.iteritems(t1._files):
1288 for fn, n1 in pycompat.iteritems(t1._files):
1289 fl1 = t1._flags.get(fn, b'')
1289 fl1 = t1._flags.get(fn, b'')
1290 n2 = t2._files.get(fn, None)
1290 n2 = t2._files.get(fn, None)
1291 fl2 = t2._flags.get(fn, b'')
1291 fl2 = t2._flags.get(fn, b'')
1292 if n1 != n2 or fl1 != fl2:
1292 if n1 != n2 or fl1 != fl2:
1293 result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
1293 result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
1294 elif clean:
1294 elif clean:
1295 result[t1._subpath(fn)] = None
1295 result[t1._subpath(fn)] = None
1296
1296
1297 for fn, n2 in pycompat.iteritems(t2._files):
1297 for fn, n2 in pycompat.iteritems(t2._files):
1298 if fn not in t1._files:
1298 if fn not in t1._files:
1299 fl2 = t2._flags.get(fn, b'')
1299 fl2 = t2._flags.get(fn, b'')
1300 result[t2._subpath(fn)] = ((None, b''), (n2, fl2))
1300 result[t2._subpath(fn)] = ((None, b''), (n2, fl2))
1301
1301
1302 stackls = []
1302 stackls = []
1303 _iterativediff(self, m2, stackls)
1303 _iterativediff(self, m2, stackls)
1304 while stackls:
1304 while stackls:
1305 t1, t2 = stackls.pop()
1305 t1, t2 = stackls.pop()
1306 # stackls is populated in the function call
1306 # stackls is populated in the function call
1307 _iterativediff(t1, t2, stackls)
1307 _iterativediff(t1, t2, stackls)
1308 return result
1308 return result
1309
1309
1310 def unmodifiedsince(self, m2):
1310 def unmodifiedsince(self, m2):
1311 return not self._dirty and not m2._dirty and self._node == m2._node
1311 return not self._dirty and not m2._dirty and self._node == m2._node
1312
1312
1313 def parse(self, text, readsubtree):
1313 def parse(self, text, readsubtree):
1314 selflazy = self._lazydirs
1314 selflazy = self._lazydirs
1315 subpath = self._subpath
1315 subpath = self._subpath
1316 for f, n, fl in _parse(text):
1316 for f, n, fl in _parse(text):
1317 if fl == b't':
1317 if fl == b't':
1318 f = f + b'/'
1318 f = f + b'/'
1319 # False below means "doesn't need to be copied" and can use the
1319 # False below means "doesn't need to be copied" and can use the
1320 # cached value from readsubtree directly.
1320 # cached value from readsubtree directly.
1321 selflazy[f] = (subpath(f), n, readsubtree, False)
1321 selflazy[f] = (subpath(f), n, readsubtree, False)
1322 elif b'/' in f:
1322 elif b'/' in f:
1323 # This is a flat manifest, so use __setitem__ and setflag rather
1323 # This is a flat manifest, so use __setitem__ and setflag rather
1324 # than assigning directly to _files and _flags, so we can
1324 # than assigning directly to _files and _flags, so we can
1325 # assign a path in a subdirectory, and to mark dirty (compared
1325 # assign a path in a subdirectory, and to mark dirty (compared
1326 # to nullid).
1326 # to nullid).
1327 self[f] = n
1327 self[f] = n
1328 if fl:
1328 if fl:
1329 self.setflag(f, fl)
1329 self.setflag(f, fl)
1330 else:
1330 else:
1331 # Assigning to _files and _flags avoids marking as dirty,
1331 # Assigning to _files and _flags avoids marking as dirty,
1332 # and should be a little faster.
1332 # and should be a little faster.
1333 self._files[f] = n
1333 self._files[f] = n
1334 if fl:
1334 if fl:
1335 self._flags[f] = fl
1335 self._flags[f] = fl
1336
1336
1337 def text(self):
1337 def text(self):
1338 """Get the full data of this manifest as a bytestring."""
1338 """Get the full data of this manifest as a bytestring."""
1339 self._load()
1339 self._load()
1340 return _text(self.iterentries())
1340 return _text(self.iterentries())
1341
1341
1342 def dirtext(self):
1342 def dirtext(self):
1343 """Get the full data of this directory as a bytestring. Make sure that
1343 """Get the full data of this directory as a bytestring. Make sure that
1344 any submanifests have been written first, so their nodeids are correct.
1344 any submanifests have been written first, so their nodeids are correct.
1345 """
1345 """
1346 self._load()
1346 self._load()
1347 flags = self.flags
1347 flags = self.flags
1348 lazydirs = [
1348 lazydirs = [
1349 (d[:-1], v[1], b't') for d, v in pycompat.iteritems(self._lazydirs)
1349 (d[:-1], v[1], b't') for d, v in pycompat.iteritems(self._lazydirs)
1350 ]
1350 ]
1351 dirs = [(d[:-1], self._dirs[d]._node, b't') for d in self._dirs]
1351 dirs = [(d[:-1], self._dirs[d]._node, b't') for d in self._dirs]
1352 files = [(f, self._files[f], flags(f)) for f in self._files]
1352 files = [(f, self._files[f], flags(f)) for f in self._files]
1353 return _text(sorted(dirs + files + lazydirs))
1353 return _text(sorted(dirs + files + lazydirs))
1354
1354
1355 def read(self, gettext, readsubtree):
1355 def read(self, gettext, readsubtree):
1356 def _load_for_read(s):
1356 def _load_for_read(s):
1357 s.parse(gettext(), readsubtree)
1357 s.parse(gettext(), readsubtree)
1358 s._dirty = False
1358 s._dirty = False
1359
1359
1360 self._loadfunc = _load_for_read
1360 self._loadfunc = _load_for_read
1361
1361
1362 def writesubtrees(self, m1, m2, writesubtree, match):
1362 def writesubtrees(self, m1, m2, writesubtree, match):
1363 self._load() # for consistency; should never have any effect here
1363 self._load() # for consistency; should never have any effect here
1364 m1._load()
1364 m1._load()
1365 m2._load()
1365 m2._load()
1366 emptytree = treemanifest()
1366 emptytree = treemanifest()
1367
1367
1368 def getnode(m, d):
1368 def getnode(m, d):
1369 ld = m._lazydirs.get(d)
1369 ld = m._lazydirs.get(d)
1370 if ld:
1370 if ld:
1371 return ld[1]
1371 return ld[1]
1372 return m._dirs.get(d, emptytree)._node
1372 return m._dirs.get(d, emptytree)._node
1373
1373
1374 # let's skip investigating things that `match` says we do not need.
1374 # let's skip investigating things that `match` says we do not need.
1375 visit = match.visitchildrenset(self._dir[:-1])
1375 visit = match.visitchildrenset(self._dir[:-1])
1376 visit = self._loadchildrensetlazy(visit)
1376 visit = self._loadchildrensetlazy(visit)
1377 if visit == b'this' or visit == b'all':
1377 if visit == b'this' or visit == b'all':
1378 visit = None
1378 visit = None
1379 for d, subm in pycompat.iteritems(self._dirs):
1379 for d, subm in pycompat.iteritems(self._dirs):
1380 if visit and d[:-1] not in visit:
1380 if visit and d[:-1] not in visit:
1381 continue
1381 continue
1382 subp1 = getnode(m1, d)
1382 subp1 = getnode(m1, d)
1383 subp2 = getnode(m2, d)
1383 subp2 = getnode(m2, d)
1384 if subp1 == nullid:
1384 if subp1 == nullid:
1385 subp1, subp2 = subp2, subp1
1385 subp1, subp2 = subp2, subp1
1386 writesubtree(subm, subp1, subp2, match)
1386 writesubtree(subm, subp1, subp2, match)
1387
1387
1388 def walksubtrees(self, matcher=None):
1388 def walksubtrees(self, matcher=None):
1389 """Returns an iterator of the subtrees of this manifest, including this
1389 """Returns an iterator of the subtrees of this manifest, including this
1390 manifest itself.
1390 manifest itself.
1391
1391
1392 If `matcher` is provided, it only returns subtrees that match.
1392 If `matcher` is provided, it only returns subtrees that match.
1393 """
1393 """
1394 if matcher and not matcher.visitdir(self._dir[:-1]):
1394 if matcher and not matcher.visitdir(self._dir[:-1]):
1395 return
1395 return
1396 if not matcher or matcher(self._dir[:-1]):
1396 if not matcher or matcher(self._dir[:-1]):
1397 yield self
1397 yield self
1398
1398
1399 self._load()
1399 self._load()
1400 # OPT: use visitchildrenset to avoid loading everything.
1400 # OPT: use visitchildrenset to avoid loading everything.
1401 self._loadalllazy()
1401 self._loadalllazy()
1402 for d, subm in pycompat.iteritems(self._dirs):
1402 for d, subm in pycompat.iteritems(self._dirs):
1403 for subtree in subm.walksubtrees(matcher=matcher):
1403 for subtree in subm.walksubtrees(matcher=matcher):
1404 yield subtree
1404 yield subtree
1405
1405
1406
1406
1407 class manifestfulltextcache(util.lrucachedict):
1407 class manifestfulltextcache(util.lrucachedict):
1408 """File-backed LRU cache for the manifest cache
1408 """File-backed LRU cache for the manifest cache
1409
1409
1410 File consists of entries, up to EOF:
1410 File consists of entries, up to EOF:
1411
1411
1412 - 20 bytes node, 4 bytes length, <length> manifest data
1412 - 20 bytes node, 4 bytes length, <length> manifest data
1413
1413
1414 These are written in reverse cache order (oldest to newest).
1414 These are written in reverse cache order (oldest to newest).
1415
1415
1416 """
1416 """
1417
1417
1418 _file = b'manifestfulltextcache'
1418 _file = b'manifestfulltextcache'
1419
1419
1420 def __init__(self, max):
1420 def __init__(self, max):
1421 super(manifestfulltextcache, self).__init__(max)
1421 super(manifestfulltextcache, self).__init__(max)
1422 self._dirty = False
1422 self._dirty = False
1423 self._read = False
1423 self._read = False
1424 self._opener = None
1424 self._opener = None
1425
1425
1426 def read(self):
1426 def read(self):
1427 if self._read or self._opener is None:
1427 if self._read or self._opener is None:
1428 return
1428 return
1429
1429
1430 try:
1430 try:
1431 with self._opener(self._file) as fp:
1431 with self._opener(self._file) as fp:
1432 set = super(manifestfulltextcache, self).__setitem__
1432 set = super(manifestfulltextcache, self).__setitem__
1433 # ignore trailing data; this is a cache, so corruption is skipped
1433 # ignore trailing data; this is a cache, so corruption is skipped
1434 while True:
1434 while True:
1435 # TODO do we need to do work here for sha1 portability?
1435 # TODO do we need to do work here for sha1 portability?
1436 node = fp.read(20)
1436 node = fp.read(20)
1437 if len(node) < 20:
1437 if len(node) < 20:
1438 break
1438 break
1439 try:
1439 try:
1440 size = struct.unpack(b'>L', fp.read(4))[0]
1440 size = struct.unpack(b'>L', fp.read(4))[0]
1441 except struct.error:
1441 except struct.error:
1442 break
1442 break
1443 value = bytearray(fp.read(size))
1443 value = bytearray(fp.read(size))
1444 if len(value) != size:
1444 if len(value) != size:
1445 break
1445 break
1446 set(node, value)
1446 set(node, value)
1447 except IOError:
1447 except IOError:
1448 # the file is allowed to be missing
1448 # the file is allowed to be missing
1449 pass
1449 pass
1450
1450
1451 self._read = True
1451 self._read = True
1452 self._dirty = False
1452 self._dirty = False
1453
1453
1454 def write(self):
1454 def write(self):
1455 if not self._dirty or self._opener is None:
1455 if not self._dirty or self._opener is None:
1456 return
1456 return
1457 # rotate backwards to the first used node
1457 # rotate backwards to the first used node
1458 with self._opener(
1458 with self._opener(
1459 self._file, b'w', atomictemp=True, checkambig=True
1459 self._file, b'w', atomictemp=True, checkambig=True
1460 ) as fp:
1460 ) as fp:
1461 node = self._head.prev
1461 node = self._head.prev
1462 while True:
1462 while True:
1463 if node.key in self._cache:
1463 if node.key in self._cache:
1464 fp.write(node.key)
1464 fp.write(node.key)
1465 fp.write(struct.pack(b'>L', len(node.value)))
1465 fp.write(struct.pack(b'>L', len(node.value)))
1466 fp.write(node.value)
1466 fp.write(node.value)
1467 if node is self._head:
1467 if node is self._head:
1468 break
1468 break
1469 node = node.prev
1469 node = node.prev
1470
1470
1471 def __len__(self):
1471 def __len__(self):
1472 if not self._read:
1472 if not self._read:
1473 self.read()
1473 self.read()
1474 return super(manifestfulltextcache, self).__len__()
1474 return super(manifestfulltextcache, self).__len__()
1475
1475
1476 def __contains__(self, k):
1476 def __contains__(self, k):
1477 if not self._read:
1477 if not self._read:
1478 self.read()
1478 self.read()
1479 return super(manifestfulltextcache, self).__contains__(k)
1479 return super(manifestfulltextcache, self).__contains__(k)
1480
1480
1481 def __iter__(self):
1481 def __iter__(self):
1482 if not self._read:
1482 if not self._read:
1483 self.read()
1483 self.read()
1484 return super(manifestfulltextcache, self).__iter__()
1484 return super(manifestfulltextcache, self).__iter__()
1485
1485
1486 def __getitem__(self, k):
1486 def __getitem__(self, k):
1487 if not self._read:
1487 if not self._read:
1488 self.read()
1488 self.read()
1489 # the cache lru order can change on read
1489 # the cache lru order can change on read
1490 setdirty = self._cache.get(k) is not self._head
1490 setdirty = self._cache.get(k) is not self._head
1491 value = super(manifestfulltextcache, self).__getitem__(k)
1491 value = super(manifestfulltextcache, self).__getitem__(k)
1492 if setdirty:
1492 if setdirty:
1493 self._dirty = True
1493 self._dirty = True
1494 return value
1494 return value
1495
1495
1496 def __setitem__(self, k, v):
1496 def __setitem__(self, k, v):
1497 if not self._read:
1497 if not self._read:
1498 self.read()
1498 self.read()
1499 super(manifestfulltextcache, self).__setitem__(k, v)
1499 super(manifestfulltextcache, self).__setitem__(k, v)
1500 self._dirty = True
1500 self._dirty = True
1501
1501
1502 def __delitem__(self, k):
1502 def __delitem__(self, k):
1503 if not self._read:
1503 if not self._read:
1504 self.read()
1504 self.read()
1505 super(manifestfulltextcache, self).__delitem__(k)
1505 super(manifestfulltextcache, self).__delitem__(k)
1506 self._dirty = True
1506 self._dirty = True
1507
1507
1508 def get(self, k, default=None):
1508 def get(self, k, default=None):
1509 if not self._read:
1509 if not self._read:
1510 self.read()
1510 self.read()
1511 return super(manifestfulltextcache, self).get(k, default=default)
1511 return super(manifestfulltextcache, self).get(k, default=default)
1512
1512
1513 def clear(self, clear_persisted_data=False):
1513 def clear(self, clear_persisted_data=False):
1514 super(manifestfulltextcache, self).clear()
1514 super(manifestfulltextcache, self).clear()
1515 if clear_persisted_data:
1515 if clear_persisted_data:
1516 self._dirty = True
1516 self._dirty = True
1517 self.write()
1517 self.write()
1518 self._read = False
1518 self._read = False
1519
1519
1520
1520
1521 # an upper bound of what we expect from compression
1521 # an upper bound of what we expect from compression
1522 # (the real-life value seems to be "3")
1522 # (the real-life value seems to be "3")
1523 MAXCOMPRESSION = 3
1523 MAXCOMPRESSION = 3
1524
1524
1525
1525
1526 class FastdeltaUnavailable(Exception):
1526 class FastdeltaUnavailable(Exception):
1527 """Exception raised when fastdelta isn't usable on a manifest."""
1527 """Exception raised when fastdelta isn't usable on a manifest."""
1528
1528
1529
1529
1530 @interfaceutil.implementer(repository.imanifeststorage)
1530 @interfaceutil.implementer(repository.imanifeststorage)
1531 class manifestrevlog(object):
1531 class manifestrevlog(object):
1532 '''A revlog that stores manifest texts. This is responsible for caching the
1532 '''A revlog that stores manifest texts. This is responsible for caching the
1533 full-text manifest contents.
1533 full-text manifest contents.
1534 '''
1534 '''
1535
1535
1536 def __init__(
1536 def __init__(
1537 self,
1537 self,
1538 opener,
1538 opener,
1539 tree=b'',
1539 tree=b'',
1540 dirlogcache=None,
1540 dirlogcache=None,
1541 indexfile=None,
1541 indexfile=None,
1542 treemanifest=False,
1542 treemanifest=False,
1543 ):
1543 ):
1544 """Constructs a new manifest revlog
1544 """Constructs a new manifest revlog
1545
1545
1546 `indexfile` - used by extensions to have two manifests at once, like
1546 `indexfile` - used by extensions to have two manifests at once, like
1547 when transitioning between flat manifests and treemanifests.
1547 when transitioning between flat manifests and treemanifests.
1548
1548
1549 `treemanifest` - used to indicate this is a tree manifest revlog. Opener
1549 `treemanifest` - used to indicate this is a tree manifest revlog. Opener
1550 options can also be used to make this a tree manifest revlog. The opener
1550 options can also be used to make this a tree manifest revlog. The opener
1551 option takes precedence, so if it is set to True, we ignore whatever
1551 option takes precedence, so if it is set to True, we ignore whatever
1552 value is passed in to the constructor.
1552 value is passed in to the constructor.
1553 """
1553 """
1554 # During normal operations, we expect to deal with not more than four
1554 # During normal operations, we expect to deal with not more than four
1555 # revs at a time (such as during commit --amend). When rebasing large
1555 # revs at a time (such as during commit --amend). When rebasing large
1556 # stacks of commits, the number can go up, hence the config knob below.
1556 # stacks of commits, the number can go up, hence the config knob below.
1557 cachesize = 4
1557 cachesize = 4
1558 optiontreemanifest = False
1558 optiontreemanifest = False
1559 opts = getattr(opener, 'options', None)
1559 opts = getattr(opener, 'options', None)
1560 if opts is not None:
1560 if opts is not None:
1561 cachesize = opts.get(b'manifestcachesize', cachesize)
1561 cachesize = opts.get(b'manifestcachesize', cachesize)
1562 optiontreemanifest = opts.get(b'treemanifest', False)
1562 optiontreemanifest = opts.get(b'treemanifest', False)
1563
1563
1564 self._treeondisk = optiontreemanifest or treemanifest
1564 self._treeondisk = optiontreemanifest or treemanifest
1565
1565
1566 self._fulltextcache = manifestfulltextcache(cachesize)
1566 self._fulltextcache = manifestfulltextcache(cachesize)
1567
1567
1568 if tree:
1568 if tree:
1569 assert self._treeondisk, b'opts is %r' % opts
1569 assert self._treeondisk, b'opts is %r' % opts
1570
1570
1571 if indexfile is None:
1571 if indexfile is None:
1572 indexfile = b'00manifest.i'
1572 indexfile = b'00manifest.i'
1573 if tree:
1573 if tree:
1574 indexfile = b"meta/" + tree + indexfile
1574 indexfile = b"meta/" + tree + indexfile
1575
1575
1576 self.tree = tree
1576 self.tree = tree
1577
1577
1578 # The dirlogcache is kept on the root manifest log
1578 # The dirlogcache is kept on the root manifest log
1579 if tree:
1579 if tree:
1580 self._dirlogcache = dirlogcache
1580 self._dirlogcache = dirlogcache
1581 else:
1581 else:
1582 self._dirlogcache = {b'': self}
1582 self._dirlogcache = {b'': self}
1583
1583
1584 self._revlog = revlog.revlog(
1584 self._revlog = revlog.revlog(
1585 opener,
1585 opener,
1586 indexfile,
1586 indexfile,
1587 # only root indexfile is cached
1587 # only root indexfile is cached
1588 checkambig=not bool(tree),
1588 checkambig=not bool(tree),
1589 mmaplargeindex=True,
1589 mmaplargeindex=True,
1590 upperboundcomp=MAXCOMPRESSION,
1590 upperboundcomp=MAXCOMPRESSION,
1591 persistentnodemap=opener.options.get(
1592 b'exp-persistent-nodemap', False
1593 ),
1591 )
1594 )
1592
1595
1593 self.index = self._revlog.index
1596 self.index = self._revlog.index
1594 self.version = self._revlog.version
1597 self.version = self._revlog.version
1595 self._generaldelta = self._revlog._generaldelta
1598 self._generaldelta = self._revlog._generaldelta
1596
1599
1597 def _setupmanifestcachehooks(self, repo):
1600 def _setupmanifestcachehooks(self, repo):
1598 """Persist the manifestfulltextcache on lock release"""
1601 """Persist the manifestfulltextcache on lock release"""
1599 if not util.safehasattr(repo, b'_wlockref'):
1602 if not util.safehasattr(repo, b'_wlockref'):
1600 return
1603 return
1601
1604
1602 self._fulltextcache._opener = repo.wcachevfs
1605 self._fulltextcache._opener = repo.wcachevfs
1603 if repo._currentlock(repo._wlockref) is None:
1606 if repo._currentlock(repo._wlockref) is None:
1604 return
1607 return
1605
1608
1606 reporef = weakref.ref(repo)
1609 reporef = weakref.ref(repo)
1607 manifestrevlogref = weakref.ref(self)
1610 manifestrevlogref = weakref.ref(self)
1608
1611
1609 def persistmanifestcache(success):
1612 def persistmanifestcache(success):
1610 # Repo is in an unknown state, do not persist.
1613 # Repo is in an unknown state, do not persist.
1611 if not success:
1614 if not success:
1612 return
1615 return
1613
1616
1614 repo = reporef()
1617 repo = reporef()
1615 self = manifestrevlogref()
1618 self = manifestrevlogref()
1616 if repo is None or self is None:
1619 if repo is None or self is None:
1617 return
1620 return
1618 if repo.manifestlog.getstorage(b'') is not self:
1621 if repo.manifestlog.getstorage(b'') is not self:
1619 # there's a different manifest in play now, abort
1622 # there's a different manifest in play now, abort
1620 return
1623 return
1621 self._fulltextcache.write()
1624 self._fulltextcache.write()
1622
1625
1623 repo._afterlock(persistmanifestcache)
1626 repo._afterlock(persistmanifestcache)
1624
1627
1625 @property
1628 @property
1626 def fulltextcache(self):
1629 def fulltextcache(self):
1627 return self._fulltextcache
1630 return self._fulltextcache
1628
1631
1629 def clearcaches(self, clear_persisted_data=False):
1632 def clearcaches(self, clear_persisted_data=False):
1630 self._revlog.clearcaches()
1633 self._revlog.clearcaches()
1631 self._fulltextcache.clear(clear_persisted_data=clear_persisted_data)
1634 self._fulltextcache.clear(clear_persisted_data=clear_persisted_data)
1632 self._dirlogcache = {self.tree: self}
1635 self._dirlogcache = {self.tree: self}
1633
1636
1634 def dirlog(self, d):
1637 def dirlog(self, d):
1635 if d:
1638 if d:
1636 assert self._treeondisk
1639 assert self._treeondisk
1637 if d not in self._dirlogcache:
1640 if d not in self._dirlogcache:
1638 mfrevlog = manifestrevlog(
1641 mfrevlog = manifestrevlog(
1639 self.opener, d, self._dirlogcache, treemanifest=self._treeondisk
1642 self.opener, d, self._dirlogcache, treemanifest=self._treeondisk
1640 )
1643 )
1641 self._dirlogcache[d] = mfrevlog
1644 self._dirlogcache[d] = mfrevlog
1642 return self._dirlogcache[d]
1645 return self._dirlogcache[d]
1643
1646
1644 def add(
1647 def add(
1645 self,
1648 self,
1646 m,
1649 m,
1647 transaction,
1650 transaction,
1648 link,
1651 link,
1649 p1,
1652 p1,
1650 p2,
1653 p2,
1651 added,
1654 added,
1652 removed,
1655 removed,
1653 readtree=None,
1656 readtree=None,
1654 match=None,
1657 match=None,
1655 ):
1658 ):
1656 try:
1659 try:
1657 if p1 not in self.fulltextcache:
1660 if p1 not in self.fulltextcache:
1658 raise FastdeltaUnavailable()
1661 raise FastdeltaUnavailable()
1659 # If our first parent is in the manifest cache, we can
1662 # If our first parent is in the manifest cache, we can
1660 # compute a delta here using properties we know about the
1663 # compute a delta here using properties we know about the
1661 # manifest up-front, which may save time later for the
1664 # manifest up-front, which may save time later for the
1662 # revlog layer.
1665 # revlog layer.
1663
1666
1664 _checkforbidden(added)
1667 _checkforbidden(added)
1665 # combine the changed lists into one sorted iterator
1668 # combine the changed lists into one sorted iterator
1666 work = heapq.merge(
1669 work = heapq.merge(
1667 [(x, False) for x in sorted(added)],
1670 [(x, False) for x in sorted(added)],
1668 [(x, True) for x in sorted(removed)],
1671 [(x, True) for x in sorted(removed)],
1669 )
1672 )
1670
1673
1671 arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
1674 arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
1672 cachedelta = self._revlog.rev(p1), deltatext
1675 cachedelta = self._revlog.rev(p1), deltatext
1673 text = util.buffer(arraytext)
1676 text = util.buffer(arraytext)
1674 n = self._revlog.addrevision(
1677 n = self._revlog.addrevision(
1675 text, transaction, link, p1, p2, cachedelta
1678 text, transaction, link, p1, p2, cachedelta
1676 )
1679 )
1677 except FastdeltaUnavailable:
1680 except FastdeltaUnavailable:
1678 # The first parent manifest isn't already loaded or the
1681 # The first parent manifest isn't already loaded or the
1679 # manifest implementation doesn't support fastdelta, so
1682 # manifest implementation doesn't support fastdelta, so
1680 # we'll just encode a fulltext of the manifest and pass
1683 # we'll just encode a fulltext of the manifest and pass
1681 # that through to the revlog layer, and let it handle the
1684 # that through to the revlog layer, and let it handle the
1682 # delta process.
1685 # delta process.
1683 if self._treeondisk:
1686 if self._treeondisk:
1684 assert readtree, b"readtree must be set for treemanifest writes"
1687 assert readtree, b"readtree must be set for treemanifest writes"
1685 assert match, b"match must be specified for treemanifest writes"
1688 assert match, b"match must be specified for treemanifest writes"
1686 m1 = readtree(self.tree, p1)
1689 m1 = readtree(self.tree, p1)
1687 m2 = readtree(self.tree, p2)
1690 m2 = readtree(self.tree, p2)
1688 n = self._addtree(
1691 n = self._addtree(
1689 m, transaction, link, m1, m2, readtree, match=match
1692 m, transaction, link, m1, m2, readtree, match=match
1690 )
1693 )
1691 arraytext = None
1694 arraytext = None
1692 else:
1695 else:
1693 text = m.text()
1696 text = m.text()
1694 n = self._revlog.addrevision(text, transaction, link, p1, p2)
1697 n = self._revlog.addrevision(text, transaction, link, p1, p2)
1695 arraytext = bytearray(text)
1698 arraytext = bytearray(text)
1696
1699
1697 if arraytext is not None:
1700 if arraytext is not None:
1698 self.fulltextcache[n] = arraytext
1701 self.fulltextcache[n] = arraytext
1699
1702
1700 return n
1703 return n
1701
1704
1702 def _addtree(self, m, transaction, link, m1, m2, readtree, match):
1705 def _addtree(self, m, transaction, link, m1, m2, readtree, match):
1703 # If the manifest is unchanged compared to one parent,
1706 # If the manifest is unchanged compared to one parent,
1704 # don't write a new revision
1707 # don't write a new revision
1705 if self.tree != b'' and (
1708 if self.tree != b'' and (
1706 m.unmodifiedsince(m1) or m.unmodifiedsince(m2)
1709 m.unmodifiedsince(m1) or m.unmodifiedsince(m2)
1707 ):
1710 ):
1708 return m.node()
1711 return m.node()
1709
1712
1710 def writesubtree(subm, subp1, subp2, match):
1713 def writesubtree(subm, subp1, subp2, match):
1711 sublog = self.dirlog(subm.dir())
1714 sublog = self.dirlog(subm.dir())
1712 sublog.add(
1715 sublog.add(
1713 subm,
1716 subm,
1714 transaction,
1717 transaction,
1715 link,
1718 link,
1716 subp1,
1719 subp1,
1717 subp2,
1720 subp2,
1718 None,
1721 None,
1719 None,
1722 None,
1720 readtree=readtree,
1723 readtree=readtree,
1721 match=match,
1724 match=match,
1722 )
1725 )
1723
1726
1724 m.writesubtrees(m1, m2, writesubtree, match)
1727 m.writesubtrees(m1, m2, writesubtree, match)
1725 text = m.dirtext()
1728 text = m.dirtext()
1726 n = None
1729 n = None
1727 if self.tree != b'':
1730 if self.tree != b'':
1728 # Double-check whether contents are unchanged compared to one parent
1731 # Double-check whether contents are unchanged compared to one parent
1729 if text == m1.dirtext():
1732 if text == m1.dirtext():
1730 n = m1.node()
1733 n = m1.node()
1731 elif text == m2.dirtext():
1734 elif text == m2.dirtext():
1732 n = m2.node()
1735 n = m2.node()
1733
1736
1734 if not n:
1737 if not n:
1735 n = self._revlog.addrevision(
1738 n = self._revlog.addrevision(
1736 text, transaction, link, m1.node(), m2.node()
1739 text, transaction, link, m1.node(), m2.node()
1737 )
1740 )
1738
1741
1739 # Save nodeid so parent manifest can calculate its nodeid
1742 # Save nodeid so parent manifest can calculate its nodeid
1740 m.setnode(n)
1743 m.setnode(n)
1741 return n
1744 return n
1742
1745
1743 def __len__(self):
1746 def __len__(self):
1744 return len(self._revlog)
1747 return len(self._revlog)
1745
1748
1746 def __iter__(self):
1749 def __iter__(self):
1747 return self._revlog.__iter__()
1750 return self._revlog.__iter__()
1748
1751
1749 def rev(self, node):
1752 def rev(self, node):
1750 return self._revlog.rev(node)
1753 return self._revlog.rev(node)
1751
1754
1752 def node(self, rev):
1755 def node(self, rev):
1753 return self._revlog.node(rev)
1756 return self._revlog.node(rev)
1754
1757
1755 def lookup(self, value):
1758 def lookup(self, value):
1756 return self._revlog.lookup(value)
1759 return self._revlog.lookup(value)
1757
1760
1758 def parentrevs(self, rev):
1761 def parentrevs(self, rev):
1759 return self._revlog.parentrevs(rev)
1762 return self._revlog.parentrevs(rev)
1760
1763
1761 def parents(self, node):
1764 def parents(self, node):
1762 return self._revlog.parents(node)
1765 return self._revlog.parents(node)
1763
1766
1764 def linkrev(self, rev):
1767 def linkrev(self, rev):
1765 return self._revlog.linkrev(rev)
1768 return self._revlog.linkrev(rev)
1766
1769
1767 def checksize(self):
1770 def checksize(self):
1768 return self._revlog.checksize()
1771 return self._revlog.checksize()
1769
1772
1770 def revision(self, node, _df=None, raw=False):
1773 def revision(self, node, _df=None, raw=False):
1771 return self._revlog.revision(node, _df=_df, raw=raw)
1774 return self._revlog.revision(node, _df=_df, raw=raw)
1772
1775
1773 def rawdata(self, node, _df=None):
1776 def rawdata(self, node, _df=None):
1774 return self._revlog.rawdata(node, _df=_df)
1777 return self._revlog.rawdata(node, _df=_df)
1775
1778
1776 def revdiff(self, rev1, rev2):
1779 def revdiff(self, rev1, rev2):
1777 return self._revlog.revdiff(rev1, rev2)
1780 return self._revlog.revdiff(rev1, rev2)
1778
1781
1779 def cmp(self, node, text):
1782 def cmp(self, node, text):
1780 return self._revlog.cmp(node, text)
1783 return self._revlog.cmp(node, text)
1781
1784
1782 def deltaparent(self, rev):
1785 def deltaparent(self, rev):
1783 return self._revlog.deltaparent(rev)
1786 return self._revlog.deltaparent(rev)
1784
1787
1785 def emitrevisions(
1788 def emitrevisions(
1786 self,
1789 self,
1787 nodes,
1790 nodes,
1788 nodesorder=None,
1791 nodesorder=None,
1789 revisiondata=False,
1792 revisiondata=False,
1790 assumehaveparentrevisions=False,
1793 assumehaveparentrevisions=False,
1791 deltamode=repository.CG_DELTAMODE_STD,
1794 deltamode=repository.CG_DELTAMODE_STD,
1792 ):
1795 ):
1793 return self._revlog.emitrevisions(
1796 return self._revlog.emitrevisions(
1794 nodes,
1797 nodes,
1795 nodesorder=nodesorder,
1798 nodesorder=nodesorder,
1796 revisiondata=revisiondata,
1799 revisiondata=revisiondata,
1797 assumehaveparentrevisions=assumehaveparentrevisions,
1800 assumehaveparentrevisions=assumehaveparentrevisions,
1798 deltamode=deltamode,
1801 deltamode=deltamode,
1799 )
1802 )
1800
1803
1801 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
1804 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
1802 return self._revlog.addgroup(
1805 return self._revlog.addgroup(
1803 deltas, linkmapper, transaction, addrevisioncb=addrevisioncb
1806 deltas, linkmapper, transaction, addrevisioncb=addrevisioncb
1804 )
1807 )
1805
1808
1806 def rawsize(self, rev):
1809 def rawsize(self, rev):
1807 return self._revlog.rawsize(rev)
1810 return self._revlog.rawsize(rev)
1808
1811
1809 def getstrippoint(self, minlink):
1812 def getstrippoint(self, minlink):
1810 return self._revlog.getstrippoint(minlink)
1813 return self._revlog.getstrippoint(minlink)
1811
1814
1812 def strip(self, minlink, transaction):
1815 def strip(self, minlink, transaction):
1813 return self._revlog.strip(minlink, transaction)
1816 return self._revlog.strip(minlink, transaction)
1814
1817
1815 def files(self):
1818 def files(self):
1816 return self._revlog.files()
1819 return self._revlog.files()
1817
1820
1818 def clone(self, tr, destrevlog, **kwargs):
1821 def clone(self, tr, destrevlog, **kwargs):
1819 if not isinstance(destrevlog, manifestrevlog):
1822 if not isinstance(destrevlog, manifestrevlog):
1820 raise error.ProgrammingError(b'expected manifestrevlog to clone()')
1823 raise error.ProgrammingError(b'expected manifestrevlog to clone()')
1821
1824
1822 return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
1825 return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
1823
1826
1824 def storageinfo(
1827 def storageinfo(
1825 self,
1828 self,
1826 exclusivefiles=False,
1829 exclusivefiles=False,
1827 sharedfiles=False,
1830 sharedfiles=False,
1828 revisionscount=False,
1831 revisionscount=False,
1829 trackedsize=False,
1832 trackedsize=False,
1830 storedsize=False,
1833 storedsize=False,
1831 ):
1834 ):
1832 return self._revlog.storageinfo(
1835 return self._revlog.storageinfo(
1833 exclusivefiles=exclusivefiles,
1836 exclusivefiles=exclusivefiles,
1834 sharedfiles=sharedfiles,
1837 sharedfiles=sharedfiles,
1835 revisionscount=revisionscount,
1838 revisionscount=revisionscount,
1836 trackedsize=trackedsize,
1839 trackedsize=trackedsize,
1837 storedsize=storedsize,
1840 storedsize=storedsize,
1838 )
1841 )
1839
1842
1840 @property
1843 @property
1841 def indexfile(self):
1844 def indexfile(self):
1842 return self._revlog.indexfile
1845 return self._revlog.indexfile
1843
1846
1844 @indexfile.setter
1847 @indexfile.setter
1845 def indexfile(self, value):
1848 def indexfile(self, value):
1846 self._revlog.indexfile = value
1849 self._revlog.indexfile = value
1847
1850
1848 @property
1851 @property
1849 def opener(self):
1852 def opener(self):
1850 return self._revlog.opener
1853 return self._revlog.opener
1851
1854
1852 @opener.setter
1855 @opener.setter
1853 def opener(self, value):
1856 def opener(self, value):
1854 self._revlog.opener = value
1857 self._revlog.opener = value
1855
1858
1856
1859
1857 @interfaceutil.implementer(repository.imanifestlog)
1860 @interfaceutil.implementer(repository.imanifestlog)
1858 class manifestlog(object):
1861 class manifestlog(object):
1859 """A collection class representing the collection of manifest snapshots
1862 """A collection class representing the collection of manifest snapshots
1860 referenced by commits in the repository.
1863 referenced by commits in the repository.
1861
1864
1862 In this situation, 'manifest' refers to the abstract concept of a snapshot
1865 In this situation, 'manifest' refers to the abstract concept of a snapshot
1863 of the list of files in the given commit. Consumers of the output of this
1866 of the list of files in the given commit. Consumers of the output of this
1864 class do not care about the implementation details of the actual manifests
1867 class do not care about the implementation details of the actual manifests
1865 they receive (i.e. tree or flat or lazily loaded, etc)."""
1868 they receive (i.e. tree or flat or lazily loaded, etc)."""
1866
1869
1867 def __init__(self, opener, repo, rootstore, narrowmatch):
1870 def __init__(self, opener, repo, rootstore, narrowmatch):
1868 usetreemanifest = False
1871 usetreemanifest = False
1869 cachesize = 4
1872 cachesize = 4
1870
1873
1871 opts = getattr(opener, 'options', None)
1874 opts = getattr(opener, 'options', None)
1872 if opts is not None:
1875 if opts is not None:
1873 usetreemanifest = opts.get(b'treemanifest', usetreemanifest)
1876 usetreemanifest = opts.get(b'treemanifest', usetreemanifest)
1874 cachesize = opts.get(b'manifestcachesize', cachesize)
1877 cachesize = opts.get(b'manifestcachesize', cachesize)
1875
1878
1876 self._treemanifests = usetreemanifest
1879 self._treemanifests = usetreemanifest
1877
1880
1878 self._rootstore = rootstore
1881 self._rootstore = rootstore
1879 self._rootstore._setupmanifestcachehooks(repo)
1882 self._rootstore._setupmanifestcachehooks(repo)
1880 self._narrowmatch = narrowmatch
1883 self._narrowmatch = narrowmatch
1881
1884
1882 # A cache of the manifestctx or treemanifestctx for each directory
1885 # A cache of the manifestctx or treemanifestctx for each directory
1883 self._dirmancache = {}
1886 self._dirmancache = {}
1884 self._dirmancache[b''] = util.lrucachedict(cachesize)
1887 self._dirmancache[b''] = util.lrucachedict(cachesize)
1885
1888
1886 self._cachesize = cachesize
1889 self._cachesize = cachesize
1887
1890
1888 def __getitem__(self, node):
1891 def __getitem__(self, node):
1889 """Retrieves the manifest instance for the given node. Throws a
1892 """Retrieves the manifest instance for the given node. Throws a
1890 LookupError if not found.
1893 LookupError if not found.
1891 """
1894 """
1892 return self.get(b'', node)
1895 return self.get(b'', node)
1893
1896
1894 def get(self, tree, node, verify=True):
1897 def get(self, tree, node, verify=True):
1895 """Retrieves the manifest instance for the given node. Throws a
1898 """Retrieves the manifest instance for the given node. Throws a
1896 LookupError if not found.
1899 LookupError if not found.
1897
1900
1898 `verify` - if True an exception will be thrown if the node is not in
1901 `verify` - if True an exception will be thrown if the node is not in
1899 the revlog
1902 the revlog
1900 """
1903 """
1901 if node in self._dirmancache.get(tree, ()):
1904 if node in self._dirmancache.get(tree, ()):
1902 return self._dirmancache[tree][node]
1905 return self._dirmancache[tree][node]
1903
1906
1904 if not self._narrowmatch.always():
1907 if not self._narrowmatch.always():
1905 if not self._narrowmatch.visitdir(tree[:-1]):
1908 if not self._narrowmatch.visitdir(tree[:-1]):
1906 return excludeddirmanifestctx(tree, node)
1909 return excludeddirmanifestctx(tree, node)
1907 if tree:
1910 if tree:
1908 if self._rootstore._treeondisk:
1911 if self._rootstore._treeondisk:
1909 if verify:
1912 if verify:
1910 # Side-effect is LookupError is raised if node doesn't
1913 # Side-effect is LookupError is raised if node doesn't
1911 # exist.
1914 # exist.
1912 self.getstorage(tree).rev(node)
1915 self.getstorage(tree).rev(node)
1913
1916
1914 m = treemanifestctx(self, tree, node)
1917 m = treemanifestctx(self, tree, node)
1915 else:
1918 else:
1916 raise error.Abort(
1919 raise error.Abort(
1917 _(
1920 _(
1918 b"cannot ask for manifest directory '%s' in a flat "
1921 b"cannot ask for manifest directory '%s' in a flat "
1919 b"manifest"
1922 b"manifest"
1920 )
1923 )
1921 % tree
1924 % tree
1922 )
1925 )
1923 else:
1926 else:
1924 if verify:
1927 if verify:
1925 # Side-effect is LookupError is raised if node doesn't exist.
1928 # Side-effect is LookupError is raised if node doesn't exist.
1926 self._rootstore.rev(node)
1929 self._rootstore.rev(node)
1927
1930
1928 if self._treemanifests:
1931 if self._treemanifests:
1929 m = treemanifestctx(self, b'', node)
1932 m = treemanifestctx(self, b'', node)
1930 else:
1933 else:
1931 m = manifestctx(self, node)
1934 m = manifestctx(self, node)
1932
1935
1933 if node != nullid:
1936 if node != nullid:
1934 mancache = self._dirmancache.get(tree)
1937 mancache = self._dirmancache.get(tree)
1935 if not mancache:
1938 if not mancache:
1936 mancache = util.lrucachedict(self._cachesize)
1939 mancache = util.lrucachedict(self._cachesize)
1937 self._dirmancache[tree] = mancache
1940 self._dirmancache[tree] = mancache
1938 mancache[node] = m
1941 mancache[node] = m
1939 return m
1942 return m
1940
1943
1941 def getstorage(self, tree):
1944 def getstorage(self, tree):
1942 return self._rootstore.dirlog(tree)
1945 return self._rootstore.dirlog(tree)
1943
1946
1944 def clearcaches(self, clear_persisted_data=False):
1947 def clearcaches(self, clear_persisted_data=False):
1945 self._dirmancache.clear()
1948 self._dirmancache.clear()
1946 self._rootstore.clearcaches(clear_persisted_data=clear_persisted_data)
1949 self._rootstore.clearcaches(clear_persisted_data=clear_persisted_data)
1947
1950
1948 def rev(self, node):
1951 def rev(self, node):
1949 return self._rootstore.rev(node)
1952 return self._rootstore.rev(node)
1950
1953
1951
1954
1952 @interfaceutil.implementer(repository.imanifestrevisionwritable)
1955 @interfaceutil.implementer(repository.imanifestrevisionwritable)
1953 class memmanifestctx(object):
1956 class memmanifestctx(object):
1954 def __init__(self, manifestlog):
1957 def __init__(self, manifestlog):
1955 self._manifestlog = manifestlog
1958 self._manifestlog = manifestlog
1956 self._manifestdict = manifestdict()
1959 self._manifestdict = manifestdict()
1957
1960
1958 def _storage(self):
1961 def _storage(self):
1959 return self._manifestlog.getstorage(b'')
1962 return self._manifestlog.getstorage(b'')
1960
1963
1961 def copy(self):
1964 def copy(self):
1962 memmf = memmanifestctx(self._manifestlog)
1965 memmf = memmanifestctx(self._manifestlog)
1963 memmf._manifestdict = self.read().copy()
1966 memmf._manifestdict = self.read().copy()
1964 return memmf
1967 return memmf
1965
1968
1966 def read(self):
1969 def read(self):
1967 return self._manifestdict
1970 return self._manifestdict
1968
1971
1969 def write(self, transaction, link, p1, p2, added, removed, match=None):
1972 def write(self, transaction, link, p1, p2, added, removed, match=None):
1970 return self._storage().add(
1973 return self._storage().add(
1971 self._manifestdict,
1974 self._manifestdict,
1972 transaction,
1975 transaction,
1973 link,
1976 link,
1974 p1,
1977 p1,
1975 p2,
1978 p2,
1976 added,
1979 added,
1977 removed,
1980 removed,
1978 match=match,
1981 match=match,
1979 )
1982 )
1980
1983
1981
1984
1982 @interfaceutil.implementer(repository.imanifestrevisionstored)
1985 @interfaceutil.implementer(repository.imanifestrevisionstored)
1983 class manifestctx(object):
1986 class manifestctx(object):
1984 """A class representing a single revision of a manifest, including its
1987 """A class representing a single revision of a manifest, including its
1985 contents, its parent revs, and its linkrev.
1988 contents, its parent revs, and its linkrev.
1986 """
1989 """
1987
1990
1988 def __init__(self, manifestlog, node):
1991 def __init__(self, manifestlog, node):
1989 self._manifestlog = manifestlog
1992 self._manifestlog = manifestlog
1990 self._data = None
1993 self._data = None
1991
1994
1992 self._node = node
1995 self._node = node
1993
1996
1994 # TODO: We eventually want p1, p2, and linkrev exposed on this class,
1997 # TODO: We eventually want p1, p2, and linkrev exposed on this class,
1995 # but let's add it later when something needs it and we can load it
1998 # but let's add it later when something needs it and we can load it
1996 # lazily.
1999 # lazily.
1997 # self.p1, self.p2 = store.parents(node)
2000 # self.p1, self.p2 = store.parents(node)
1998 # rev = store.rev(node)
2001 # rev = store.rev(node)
1999 # self.linkrev = store.linkrev(rev)
2002 # self.linkrev = store.linkrev(rev)
2000
2003
2001 def _storage(self):
2004 def _storage(self):
2002 return self._manifestlog.getstorage(b'')
2005 return self._manifestlog.getstorage(b'')
2003
2006
2004 def node(self):
2007 def node(self):
2005 return self._node
2008 return self._node
2006
2009
2007 def copy(self):
2010 def copy(self):
2008 memmf = memmanifestctx(self._manifestlog)
2011 memmf = memmanifestctx(self._manifestlog)
2009 memmf._manifestdict = self.read().copy()
2012 memmf._manifestdict = self.read().copy()
2010 return memmf
2013 return memmf
2011
2014
2012 @propertycache
2015 @propertycache
2013 def parents(self):
2016 def parents(self):
2014 return self._storage().parents(self._node)
2017 return self._storage().parents(self._node)
2015
2018
2016 def read(self):
2019 def read(self):
2017 if self._data is None:
2020 if self._data is None:
2018 if self._node == nullid:
2021 if self._node == nullid:
2019 self._data = manifestdict()
2022 self._data = manifestdict()
2020 else:
2023 else:
2021 store = self._storage()
2024 store = self._storage()
2022 if self._node in store.fulltextcache:
2025 if self._node in store.fulltextcache:
2023 text = pycompat.bytestr(store.fulltextcache[self._node])
2026 text = pycompat.bytestr(store.fulltextcache[self._node])
2024 else:
2027 else:
2025 text = store.revision(self._node)
2028 text = store.revision(self._node)
2026 arraytext = bytearray(text)
2029 arraytext = bytearray(text)
2027 store.fulltextcache[self._node] = arraytext
2030 store.fulltextcache[self._node] = arraytext
2028 self._data = manifestdict(text)
2031 self._data = manifestdict(text)
2029 return self._data
2032 return self._data
2030
2033
2031 def readfast(self, shallow=False):
2034 def readfast(self, shallow=False):
2032 '''Calls either readdelta or read, based on which would be less work.
2035 '''Calls either readdelta or read, based on which would be less work.
2033 readdelta is called if the delta is against the p1, and therefore can be
2036 readdelta is called if the delta is against the p1, and therefore can be
2034 read quickly.
2037 read quickly.
2035
2038
2036 If `shallow` is True, nothing changes since this is a flat manifest.
2039 If `shallow` is True, nothing changes since this is a flat manifest.
2037 '''
2040 '''
2038 store = self._storage()
2041 store = self._storage()
2039 r = store.rev(self._node)
2042 r = store.rev(self._node)
2040 deltaparent = store.deltaparent(r)
2043 deltaparent = store.deltaparent(r)
2041 if deltaparent != nullrev and deltaparent in store.parentrevs(r):
2044 if deltaparent != nullrev and deltaparent in store.parentrevs(r):
2042 return self.readdelta()
2045 return self.readdelta()
2043 return self.read()
2046 return self.read()
2044
2047
2045 def readdelta(self, shallow=False):
2048 def readdelta(self, shallow=False):
2046 '''Returns a manifest containing just the entries that are present
2049 '''Returns a manifest containing just the entries that are present
2047 in this manifest, but not in its p1 manifest. This is efficient to read
2050 in this manifest, but not in its p1 manifest. This is efficient to read
2048 if the revlog delta is already p1.
2051 if the revlog delta is already p1.
2049
2052
2050 Changing the value of `shallow` has no effect on flat manifests.
2053 Changing the value of `shallow` has no effect on flat manifests.
2051 '''
2054 '''
2052 store = self._storage()
2055 store = self._storage()
2053 r = store.rev(self._node)
2056 r = store.rev(self._node)
2054 d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
2057 d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
2055 return manifestdict(d)
2058 return manifestdict(d)
2056
2059
2057 def find(self, key):
2060 def find(self, key):
2058 return self.read().find(key)
2061 return self.read().find(key)
2059
2062
2060
2063
2061 @interfaceutil.implementer(repository.imanifestrevisionwritable)
2064 @interfaceutil.implementer(repository.imanifestrevisionwritable)
2062 class memtreemanifestctx(object):
2065 class memtreemanifestctx(object):
2063 def __init__(self, manifestlog, dir=b''):
2066 def __init__(self, manifestlog, dir=b''):
2064 self._manifestlog = manifestlog
2067 self._manifestlog = manifestlog
2065 self._dir = dir
2068 self._dir = dir
2066 self._treemanifest = treemanifest()
2069 self._treemanifest = treemanifest()
2067
2070
2068 def _storage(self):
2071 def _storage(self):
2069 return self._manifestlog.getstorage(b'')
2072 return self._manifestlog.getstorage(b'')
2070
2073
2071 def copy(self):
2074 def copy(self):
2072 memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
2075 memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
2073 memmf._treemanifest = self._treemanifest.copy()
2076 memmf._treemanifest = self._treemanifest.copy()
2074 return memmf
2077 return memmf
2075
2078
2076 def read(self):
2079 def read(self):
2077 return self._treemanifest
2080 return self._treemanifest
2078
2081
2079 def write(self, transaction, link, p1, p2, added, removed, match=None):
2082 def write(self, transaction, link, p1, p2, added, removed, match=None):
2080 def readtree(dir, node):
2083 def readtree(dir, node):
2081 return self._manifestlog.get(dir, node).read()
2084 return self._manifestlog.get(dir, node).read()
2082
2085
2083 return self._storage().add(
2086 return self._storage().add(
2084 self._treemanifest,
2087 self._treemanifest,
2085 transaction,
2088 transaction,
2086 link,
2089 link,
2087 p1,
2090 p1,
2088 p2,
2091 p2,
2089 added,
2092 added,
2090 removed,
2093 removed,
2091 readtree=readtree,
2094 readtree=readtree,
2092 match=match,
2095 match=match,
2093 )
2096 )
2094
2097
2095
2098
2096 @interfaceutil.implementer(repository.imanifestrevisionstored)
2099 @interfaceutil.implementer(repository.imanifestrevisionstored)
2097 class treemanifestctx(object):
2100 class treemanifestctx(object):
2098 def __init__(self, manifestlog, dir, node):
2101 def __init__(self, manifestlog, dir, node):
2099 self._manifestlog = manifestlog
2102 self._manifestlog = manifestlog
2100 self._dir = dir
2103 self._dir = dir
2101 self._data = None
2104 self._data = None
2102
2105
2103 self._node = node
2106 self._node = node
2104
2107
2105 # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so that
2108 # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so that
2106 # we can instantiate treemanifestctx objects for directories we don't
2109 # we can instantiate treemanifestctx objects for directories we don't
2107 # have on disk.
2110 # have on disk.
2108 # self.p1, self.p2 = store.parents(node)
2111 # self.p1, self.p2 = store.parents(node)
2109 # rev = store.rev(node)
2112 # rev = store.rev(node)
2110 # self.linkrev = store.linkrev(rev)
2113 # self.linkrev = store.linkrev(rev)
2111
2114
2112 def _storage(self):
2115 def _storage(self):
2113 narrowmatch = self._manifestlog._narrowmatch
2116 narrowmatch = self._manifestlog._narrowmatch
2114 if not narrowmatch.always():
2117 if not narrowmatch.always():
2115 if not narrowmatch.visitdir(self._dir[:-1]):
2118 if not narrowmatch.visitdir(self._dir[:-1]):
2116 return excludedmanifestrevlog(self._dir)
2119 return excludedmanifestrevlog(self._dir)
2117 return self._manifestlog.getstorage(self._dir)
2120 return self._manifestlog.getstorage(self._dir)
2118
2121
2119 def read(self):
2122 def read(self):
2120 if self._data is None:
2123 if self._data is None:
2121 store = self._storage()
2124 store = self._storage()
2122 if self._node == nullid:
2125 if self._node == nullid:
2123 self._data = treemanifest()
2126 self._data = treemanifest()
2124 # TODO accessing non-public API
2127 # TODO accessing non-public API
2125 elif store._treeondisk:
2128 elif store._treeondisk:
2126 m = treemanifest(dir=self._dir)
2129 m = treemanifest(dir=self._dir)
2127
2130
2128 def gettext():
2131 def gettext():
2129 return store.revision(self._node)
2132 return store.revision(self._node)
2130
2133
2131 def readsubtree(dir, subm):
2134 def readsubtree(dir, subm):
2132 # Set verify to False since we need to be able to create
2135 # Set verify to False since we need to be able to create
2133 # subtrees for trees that don't exist on disk.
2136 # subtrees for trees that don't exist on disk.
2134 return self._manifestlog.get(dir, subm, verify=False).read()
2137 return self._manifestlog.get(dir, subm, verify=False).read()
2135
2138
2136 m.read(gettext, readsubtree)
2139 m.read(gettext, readsubtree)
2137 m.setnode(self._node)
2140 m.setnode(self._node)
2138 self._data = m
2141 self._data = m
2139 else:
2142 else:
2140 if self._node in store.fulltextcache:
2143 if self._node in store.fulltextcache:
2141 text = pycompat.bytestr(store.fulltextcache[self._node])
2144 text = pycompat.bytestr(store.fulltextcache[self._node])
2142 else:
2145 else:
2143 text = store.revision(self._node)
2146 text = store.revision(self._node)
2144 arraytext = bytearray(text)
2147 arraytext = bytearray(text)
2145 store.fulltextcache[self._node] = arraytext
2148 store.fulltextcache[self._node] = arraytext
2146 self._data = treemanifest(dir=self._dir, text=text)
2149 self._data = treemanifest(dir=self._dir, text=text)
2147
2150
2148 return self._data
2151 return self._data
2149
2152
2150 def node(self):
2153 def node(self):
2151 return self._node
2154 return self._node
2152
2155
2153 def copy(self):
2156 def copy(self):
2154 memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
2157 memmf = memtreemanifestctx(self._manifestlog, dir=self._dir)
2155 memmf._treemanifest = self.read().copy()
2158 memmf._treemanifest = self.read().copy()
2156 return memmf
2159 return memmf
2157
2160
2158 @propertycache
2161 @propertycache
2159 def parents(self):
2162 def parents(self):
2160 return self._storage().parents(self._node)
2163 return self._storage().parents(self._node)
2161
2164
2162 def readdelta(self, shallow=False):
2165 def readdelta(self, shallow=False):
2163 '''Returns a manifest containing just the entries that are present
2166 '''Returns a manifest containing just the entries that are present
2164 in this manifest, but not in its p1 manifest. This is efficient to read
2167 in this manifest, but not in its p1 manifest. This is efficient to read
2165 if the revlog delta is already p1.
2168 if the revlog delta is already p1.
2166
2169
2167 If `shallow` is True, this will read the delta for this directory,
2170 If `shallow` is True, this will read the delta for this directory,
2168 without recursively reading subdirectory manifests. Instead, any
2171 without recursively reading subdirectory manifests. Instead, any
2169 subdirectory entry will be reported as it appears in the manifest, i.e.
2172 subdirectory entry will be reported as it appears in the manifest, i.e.
2170 the subdirectory will be reported among files and distinguished only by
2173 the subdirectory will be reported among files and distinguished only by
2171 its 't' flag.
2174 its 't' flag.
2172 '''
2175 '''
2173 store = self._storage()
2176 store = self._storage()
2174 if shallow:
2177 if shallow:
2175 r = store.rev(self._node)
2178 r = store.rev(self._node)
2176 d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
2179 d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
2177 return manifestdict(d)
2180 return manifestdict(d)
2178 else:
2181 else:
2179 # Need to perform a slow delta
2182 # Need to perform a slow delta
2180 r0 = store.deltaparent(store.rev(self._node))
2183 r0 = store.deltaparent(store.rev(self._node))
2181 m0 = self._manifestlog.get(self._dir, store.node(r0)).read()
2184 m0 = self._manifestlog.get(self._dir, store.node(r0)).read()
2182 m1 = self.read()
2185 m1 = self.read()
2183 md = treemanifest(dir=self._dir)
2186 md = treemanifest(dir=self._dir)
2184 for f, ((n0, fl0), (n1, fl1)) in pycompat.iteritems(m0.diff(m1)):
2187 for f, ((n0, fl0), (n1, fl1)) in pycompat.iteritems(m0.diff(m1)):
2185 if n1:
2188 if n1:
2186 md[f] = n1
2189 md[f] = n1
2187 if fl1:
2190 if fl1:
2188 md.setflag(f, fl1)
2191 md.setflag(f, fl1)
2189 return md
2192 return md
2190
2193
2191 def readfast(self, shallow=False):
2194 def readfast(self, shallow=False):
2192 '''Calls either readdelta or read, based on which would be less work.
2195 '''Calls either readdelta or read, based on which would be less work.
2193 readdelta is called if the delta is against the p1, and therefore can be
2196 readdelta is called if the delta is against the p1, and therefore can be
2194 read quickly.
2197 read quickly.
2195
2198
2196 If `shallow` is True, it only returns the entries from this manifest,
2199 If `shallow` is True, it only returns the entries from this manifest,
2197 and not any submanifests.
2200 and not any submanifests.
2198 '''
2201 '''
2199 store = self._storage()
2202 store = self._storage()
2200 r = store.rev(self._node)
2203 r = store.rev(self._node)
2201 deltaparent = store.deltaparent(r)
2204 deltaparent = store.deltaparent(r)
2202 if deltaparent != nullrev and deltaparent in store.parentrevs(r):
2205 if deltaparent != nullrev and deltaparent in store.parentrevs(r):
2203 return self.readdelta(shallow=shallow)
2206 return self.readdelta(shallow=shallow)
2204
2207
2205 if shallow:
2208 if shallow:
2206 return manifestdict(store.revision(self._node))
2209 return manifestdict(store.revision(self._node))
2207 else:
2210 else:
2208 return self.read()
2211 return self.read()
2209
2212
2210 def find(self, key):
2213 def find(self, key):
2211 return self.read().find(key)
2214 return self.read().find(key)
2212
2215
2213
2216
2214 class excludeddir(treemanifest):
2217 class excludeddir(treemanifest):
2215 """Stand-in for a directory that is excluded from the repository.
2218 """Stand-in for a directory that is excluded from the repository.
2216
2219
2217 With narrowing active on a repository that uses treemanifests,
2220 With narrowing active on a repository that uses treemanifests,
2218 some of the directory revlogs will be excluded from the resulting
2221 some of the directory revlogs will be excluded from the resulting
2219 clone. This is a huge storage win for clients, but means we need
2222 clone. This is a huge storage win for clients, but means we need
2220 some sort of pseudo-manifest to surface to internals so we can
2223 some sort of pseudo-manifest to surface to internals so we can
2221 detect a merge conflict outside the narrowspec. That's what this
2224 detect a merge conflict outside the narrowspec. That's what this
2222 class is: it stands in for a directory whose node is known, but
2225 class is: it stands in for a directory whose node is known, but
2223 whose contents are unknown.
2226 whose contents are unknown.
2224 """
2227 """
2225
2228
2226 def __init__(self, dir, node):
2229 def __init__(self, dir, node):
2227 super(excludeddir, self).__init__(dir)
2230 super(excludeddir, self).__init__(dir)
2228 self._node = node
2231 self._node = node
2229 # Add an empty file, which will be included by iterators and such,
2232 # Add an empty file, which will be included by iterators and such,
2230 # appearing as the directory itself (i.e. something like "dir/")
2233 # appearing as the directory itself (i.e. something like "dir/")
2231 self._files[b''] = node
2234 self._files[b''] = node
2232 self._flags[b''] = b't'
2235 self._flags[b''] = b't'
2233
2236
2234 # Manifests outside the narrowspec should never be modified, so avoid
2237 # Manifests outside the narrowspec should never be modified, so avoid
2235 # copying. This makes a noticeable difference when there are very many
2238 # copying. This makes a noticeable difference when there are very many
2236 # directories outside the narrowspec. Also, it makes sense for the copy to
2239 # directories outside the narrowspec. Also, it makes sense for the copy to
2237 # be of the same type as the original, which would not happen with the
2240 # be of the same type as the original, which would not happen with the
2238 # super type's copy().
2241 # super type's copy().
2239 def copy(self):
2242 def copy(self):
2240 return self
2243 return self
2241
2244
2242
2245
2243 class excludeddirmanifestctx(treemanifestctx):
2246 class excludeddirmanifestctx(treemanifestctx):
2244 """context wrapper for excludeddir - see that docstring for rationale"""
2247 """context wrapper for excludeddir - see that docstring for rationale"""
2245
2248
2246 def __init__(self, dir, node):
2249 def __init__(self, dir, node):
2247 self._dir = dir
2250 self._dir = dir
2248 self._node = node
2251 self._node = node
2249
2252
2250 def read(self):
2253 def read(self):
2251 return excludeddir(self._dir, self._node)
2254 return excludeddir(self._dir, self._node)
2252
2255
2253 def write(self, *args):
2256 def write(self, *args):
2254 raise error.ProgrammingError(
2257 raise error.ProgrammingError(
2255 b'attempt to write manifest from excluded dir %s' % self._dir
2258 b'attempt to write manifest from excluded dir %s' % self._dir
2256 )
2259 )
2257
2260
2258
2261
2259 class excludedmanifestrevlog(manifestrevlog):
2262 class excludedmanifestrevlog(manifestrevlog):
2260 """Stand-in for excluded treemanifest revlogs.
2263 """Stand-in for excluded treemanifest revlogs.
2261
2264
2262 When narrowing is active on a treemanifest repository, we'll have
2265 When narrowing is active on a treemanifest repository, we'll have
2263 references to directories we can't see due to the revlog being
2266 references to directories we can't see due to the revlog being
2264 skipped. This class exists to conform to the manifestrevlog
2267 skipped. This class exists to conform to the manifestrevlog
2265 interface for those directories and proactively prevent writes to
2268 interface for those directories and proactively prevent writes to
2266 outside the narrowspec.
2269 outside the narrowspec.
2267 """
2270 """
2268
2271
2269 def __init__(self, dir):
2272 def __init__(self, dir):
2270 self._dir = dir
2273 self._dir = dir
2271
2274
2272 def __len__(self):
2275 def __len__(self):
2273 raise error.ProgrammingError(
2276 raise error.ProgrammingError(
2274 b'attempt to get length of excluded dir %s' % self._dir
2277 b'attempt to get length of excluded dir %s' % self._dir
2275 )
2278 )
2276
2279
2277 def rev(self, node):
2280 def rev(self, node):
2278 raise error.ProgrammingError(
2281 raise error.ProgrammingError(
2279 b'attempt to get rev from excluded dir %s' % self._dir
2282 b'attempt to get rev from excluded dir %s' % self._dir
2280 )
2283 )
2281
2284
2282 def linkrev(self, node):
2285 def linkrev(self, node):
2283 raise error.ProgrammingError(
2286 raise error.ProgrammingError(
2284 b'attempt to get linkrev from excluded dir %s' % self._dir
2287 b'attempt to get linkrev from excluded dir %s' % self._dir
2285 )
2288 )
2286
2289
2287 def node(self, rev):
2290 def node(self, rev):
2288 raise error.ProgrammingError(
2291 raise error.ProgrammingError(
2289 b'attempt to get node from excluded dir %s' % self._dir
2292 b'attempt to get node from excluded dir %s' % self._dir
2290 )
2293 )
2291
2294
2292 def add(self, *args, **kwargs):
2295 def add(self, *args, **kwargs):
2293 # We should never write entries in dirlogs outside the narrow clone.
2296 # We should never write entries in dirlogs outside the narrow clone.
2294 # However, the method still gets called from writesubtree() in
2297 # However, the method still gets called from writesubtree() in
2295 # _addtree(), so we need to handle it. We should possibly make that
2298 # _addtree(), so we need to handle it. We should possibly make that
2296 # avoid calling add() with a clean manifest (_dirty is always False
2299 # avoid calling add() with a clean manifest (_dirty is always False
2297 # in excludeddir instances).
2300 # in excludeddir instances).
2298 pass
2301 pass
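
The readfast()/readdelta() docstrings above boil down to one check: a delta read is only cheap when the revlog stores this revision as a delta against one of its parents; otherwise the full text has to be reconstructed anyway. A minimal standalone sketch of that decision (hypothetical helper names, not part of this change; `store` stands for the revlog-like storage object and `read`/`readdelta` for the two code paths shown above):

    nullrev = -1  # same sentinel mercurial uses for "no parent revision"

    def choose_read_path(store, node, read, readdelta):
        """Sketch of the decision made by readfast() above: prefer the
        delta only when it is stored against one of the revision's parents."""
        r = store.rev(node)
        deltaparent = store.deltaparent(r)
        if deltaparent != nullrev and deltaparent in store.parentrevs(r):
            return readdelta()
        return read()
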
@@ -1,413 +1,416 b''
1 ===================================
1 ===================================
2 Test the persistent on-disk nodemap
2 Test the persistent on-disk nodemap
3 ===================================
3 ===================================
4
4
5 $ hg init test-repo
5 $ hg init test-repo
6 $ cd test-repo
6 $ cd test-repo
7 $ cat << EOF >> .hg/hgrc
7 $ cat << EOF >> .hg/hgrc
8 > [experimental]
8 > [experimental]
9 > exp-persistent-nodemap=yes
9 > exp-persistent-nodemap=yes
10 > [devel]
10 > [devel]
11 > persistent-nodemap=yes
11 > persistent-nodemap=yes
12 > EOF
12 > EOF
13 $ hg debugbuilddag .+5000 --new-file
13 $ hg debugbuilddag .+5000 --new-file
14 $ hg debugnodemap --metadata
14 $ hg debugnodemap --metadata
15 uid: ???????????????? (glob)
15 uid: ???????????????? (glob)
16 tip-rev: 5000
16 tip-rev: 5000
17 tip-node: 6b02b8c7b96654c25e86ba69eda198d7e6ad8b3c
17 tip-node: 6b02b8c7b96654c25e86ba69eda198d7e6ad8b3c
18 data-length: 121088
18 data-length: 121088
19 data-unused: 0
19 data-unused: 0
20 data-unused: 0.000%
20 data-unused: 0.000%
21 $ f --size .hg/store/00changelog.n
21 $ f --size .hg/store/00changelog.n
22 .hg/store/00changelog.n: size=70
22 .hg/store/00changelog.n: size=70
23
23
24 Simple lookup works
24 Simple lookup works
25
25
26 $ ANYNODE=`hg log --template '{node|short}\n' --rev tip`
26 $ ANYNODE=`hg log --template '{node|short}\n' --rev tip`
27 $ hg log -r "$ANYNODE" --template '{rev}\n'
27 $ hg log -r "$ANYNODE" --template '{rev}\n'
28 5000
28 5000
29
29
30
30
31 #if rust
31 #if rust
32
32
33 $ f --sha256 .hg/store/00changelog-*.nd
33 $ f --sha256 .hg/store/00changelog-*.nd
34 .hg/store/00changelog-????????????????.nd: sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd (glob)
34 .hg/store/00changelog-????????????????.nd: sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd (glob)
35
36 $ f --sha256 .hg/store/00manifest-*.nd
37 .hg/store/00manifest-????????????????.nd: sha256=97117b1c064ea2f86664a124589e47db0e254e8d34739b5c5cc5bf31c9da2b51 (glob)
35 $ hg debugnodemap --dump-new | f --sha256 --size
38 $ hg debugnodemap --dump-new | f --sha256 --size
36 size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
39 size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
37 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
40 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
38 size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
41 size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
39 0000: 00 00 00 91 00 00 00 20 00 00 00 bb 00 00 00 e7 |....... ........|
42 0000: 00 00 00 91 00 00 00 20 00 00 00 bb 00 00 00 e7 |....... ........|
40 0010: 00 00 00 66 00 00 00 a1 00 00 01 13 00 00 01 22 |...f..........."|
43 0010: 00 00 00 66 00 00 00 a1 00 00 01 13 00 00 01 22 |...f..........."|
41 0020: 00 00 00 23 00 00 00 fc 00 00 00 ba 00 00 00 5e |...#...........^|
44 0020: 00 00 00 23 00 00 00 fc 00 00 00 ba 00 00 00 5e |...#...........^|
42 0030: 00 00 00 df 00 00 01 4e 00 00 01 65 00 00 00 ab |.......N...e....|
45 0030: 00 00 00 df 00 00 01 4e 00 00 01 65 00 00 00 ab |.......N...e....|
43 0040: 00 00 00 a9 00 00 00 95 00 00 00 73 00 00 00 38 |...........s...8|
46 0040: 00 00 00 a9 00 00 00 95 00 00 00 73 00 00 00 38 |...........s...8|
44 0050: 00 00 00 cc 00 00 00 92 00 00 00 90 00 00 00 69 |...............i|
47 0050: 00 00 00 cc 00 00 00 92 00 00 00 90 00 00 00 69 |...............i|
45 0060: 00 00 00 ec 00 00 00 8d 00 00 01 4f 00 00 00 12 |...........O....|
48 0060: 00 00 00 ec 00 00 00 8d 00 00 01 4f 00 00 00 12 |...........O....|
46 0070: 00 00 02 0c 00 00 00 77 00 00 00 9c 00 00 00 8f |.......w........|
49 0070: 00 00 02 0c 00 00 00 77 00 00 00 9c 00 00 00 8f |.......w........|
47 0080: 00 00 00 d5 00 00 00 6b 00 00 00 48 00 00 00 b3 |.......k...H....|
50 0080: 00 00 00 d5 00 00 00 6b 00 00 00 48 00 00 00 b3 |.......k...H....|
48 0090: 00 00 00 e5 00 00 00 b5 00 00 00 8e 00 00 00 ad |................|
51 0090: 00 00 00 e5 00 00 00 b5 00 00 00 8e 00 00 00 ad |................|
49 00a0: 00 00 00 7b 00 00 00 7c 00 00 00 0b 00 00 00 2b |...{...|.......+|
52 00a0: 00 00 00 7b 00 00 00 7c 00 00 00 0b 00 00 00 2b |...{...|.......+|
50 00b0: 00 00 00 c6 00 00 00 1e 00 00 01 08 00 00 00 11 |................|
53 00b0: 00 00 00 c6 00 00 00 1e 00 00 01 08 00 00 00 11 |................|
51 00c0: 00 00 01 30 00 00 00 26 00 00 01 9c 00 00 00 35 |...0...&.......5|
54 00c0: 00 00 01 30 00 00 00 26 00 00 01 9c 00 00 00 35 |...0...&.......5|
52 00d0: 00 00 00 b8 00 00 01 31 00 00 00 2c 00 00 00 55 |.......1...,...U|
55 00d0: 00 00 00 b8 00 00 01 31 00 00 00 2c 00 00 00 55 |.......1...,...U|
53 00e0: 00 00 00 8a 00 00 00 9a 00 00 00 0c 00 00 01 1e |................|
56 00e0: 00 00 00 8a 00 00 00 9a 00 00 00 0c 00 00 01 1e |................|
54 00f0: 00 00 00 a4 00 00 00 83 00 00 00 c9 00 00 00 8c |................|
57 00f0: 00 00 00 a4 00 00 00 83 00 00 00 c9 00 00 00 8c |................|
55
58
56
59
57 #else
60 #else
58
61
59 $ f --sha256 .hg/store/00changelog-*.nd
62 $ f --sha256 .hg/store/00changelog-*.nd
60 .hg/store/00changelog-????????????????.nd: sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79 (glob)
63 .hg/store/00changelog-????????????????.nd: sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79 (glob)
61 $ hg debugnodemap --dump-new | f --sha256 --size
64 $ hg debugnodemap --dump-new | f --sha256 --size
62 size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
65 size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
63 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
66 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
64 size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
67 size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
65 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
68 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
66 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
69 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
67 0020: ff ff ff ff ff ff f5 06 ff ff ff ff ff ff f3 e7 |................|
70 0020: ff ff ff ff ff ff f5 06 ff ff ff ff ff ff f3 e7 |................|
68 0030: ff ff ef ca ff ff ff ff ff ff ff ff ff ff ff ff |................|
71 0030: ff ff ef ca ff ff ff ff ff ff ff ff ff ff ff ff |................|
69 0040: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
72 0040: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
70 0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ed 08 |................|
73 0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ed 08 |................|
71 0060: ff ff ed 66 ff ff ff ff ff ff ff ff ff ff ff ff |...f............|
74 0060: ff ff ed 66 ff ff ff ff ff ff ff ff ff ff ff ff |...f............|
72 0070: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
75 0070: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
73 0080: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
76 0080: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
74 0090: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f6 ed |................|
77 0090: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f6 ed |................|
75 00a0: ff ff ff ff ff ff fe 61 ff ff ff ff ff ff ff ff |.......a........|
78 00a0: ff ff ff ff ff ff fe 61 ff ff ff ff ff ff ff ff |.......a........|
76 00b0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
79 00b0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
77 00c0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
80 00c0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
78 00d0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
81 00d0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
79 00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f1 02 |................|
82 00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f1 02 |................|
80 00f0: ff ff ff ff ff ff ed 1b ff ff ff ff ff ff ff ff |................|
83 00f0: ff ff ff ff ff ff ed 1b ff ff ff ff ff ff ff ff |................|
81
84
82 #endif
85 #endif
83
86
84 $ hg debugnodemap --check
87 $ hg debugnodemap --check
85 revision in index: 5001
88 revision in index: 5001
86 revision in nodemap: 5001
89 revision in nodemap: 5001
87
90
88 add a new commit
91 add a new commit
89
92
90 $ hg up
93 $ hg up
91 5001 files updated, 0 files merged, 0 files removed, 0 files unresolved
94 5001 files updated, 0 files merged, 0 files removed, 0 files unresolved
92 $ echo foo > foo
95 $ echo foo > foo
93 $ hg add foo
96 $ hg add foo
94 $ hg ci -m 'foo'
97 $ hg ci -m 'foo'
95
98
96 #if no-pure no-rust
99 #if no-pure no-rust
97 $ hg debugnodemap --metadata
100 $ hg debugnodemap --metadata
98 uid: ???????????????? (glob)
101 uid: ???????????????? (glob)
99 tip-rev: 5001
102 tip-rev: 5001
100 tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
103 tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
101 data-length: 121088
104 data-length: 121088
102 data-unused: 0
105 data-unused: 0
103 data-unused: 0.000%
106 data-unused: 0.000%
104 #else
107 #else
105 $ hg debugnodemap --metadata
108 $ hg debugnodemap --metadata
106 uid: ???????????????? (glob)
109 uid: ???????????????? (glob)
107 tip-rev: 5001
110 tip-rev: 5001
108 tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
111 tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
109 data-length: 121344
112 data-length: 121344
110 data-unused: 256
113 data-unused: 256
111 data-unused: 0.211%
114 data-unused: 0.211%
112 #endif
115 #endif
113
116
114 $ f --size .hg/store/00changelog.n
117 $ f --size .hg/store/00changelog.n
115 .hg/store/00changelog.n: size=70
118 .hg/store/00changelog.n: size=70
116
119
117 (The pure code uses the debug code that performs an incremental update; the C code re-encodes from scratch)
120 (The pure code uses the debug code that performs an incremental update; the C code re-encodes from scratch)
118
121
119 #if pure
122 #if pure
120 $ f --sha256 .hg/store/00changelog-*.nd --size
123 $ f --sha256 .hg/store/00changelog-*.nd --size
121 .hg/store/00changelog-????????????????.nd: size=121344, sha256=cce54c5da5bde3ad72a4938673ed4064c86231b9c64376b082b163fdb20f8f66 (glob)
124 .hg/store/00changelog-????????????????.nd: size=121344, sha256=cce54c5da5bde3ad72a4938673ed4064c86231b9c64376b082b163fdb20f8f66 (glob)
122 #endif
125 #endif
123
126
124 #if rust
127 #if rust
125 $ f --sha256 .hg/store/00changelog-*.nd --size
128 $ f --sha256 .hg/store/00changelog-*.nd --size
126 .hg/store/00changelog-????????????????.nd: size=121344, sha256=952b042fcf614ceb37b542b1b723e04f18f83efe99bee4e0f5ccd232ef470e58 (glob)
129 .hg/store/00changelog-????????????????.nd: size=121344, sha256=952b042fcf614ceb37b542b1b723e04f18f83efe99bee4e0f5ccd232ef470e58 (glob)
127 #endif
130 #endif
128
131
129 #if no-pure no-rust
132 #if no-pure no-rust
130 $ f --sha256 .hg/store/00changelog-*.nd --size
133 $ f --sha256 .hg/store/00changelog-*.nd --size
131 .hg/store/00changelog-????????????????.nd: size=121088, sha256=df7c06a035b96cb28c7287d349d603baef43240be7736fe34eea419a49702e17 (glob)
134 .hg/store/00changelog-????????????????.nd: size=121088, sha256=df7c06a035b96cb28c7287d349d603baef43240be7736fe34eea419a49702e17 (glob)
132 #endif
135 #endif
133
136
134 $ hg debugnodemap --check
137 $ hg debugnodemap --check
135 revision in index: 5002
138 revision in index: 5002
136 revision in nodemap: 5002
139 revision in nodemap: 5002
137
140
138 Test code path without mmap
141 Test code path without mmap
139 ---------------------------
142 ---------------------------
140
143
141 $ echo bar > bar
144 $ echo bar > bar
142 $ hg add bar
145 $ hg add bar
143 $ hg ci -m 'bar' --config experimental.exp-persistent-nodemap.mmap=no
146 $ hg ci -m 'bar' --config experimental.exp-persistent-nodemap.mmap=no
144
147
145 $ hg debugnodemap --check --config experimental.exp-persistent-nodemap.mmap=yes
148 $ hg debugnodemap --check --config experimental.exp-persistent-nodemap.mmap=yes
146 revision in index: 5003
149 revision in index: 5003
147 revision in nodemap: 5003
150 revision in nodemap: 5003
148 $ hg debugnodemap --check --config experimental.exp-persistent-nodemap.mmap=no
151 $ hg debugnodemap --check --config experimental.exp-persistent-nodemap.mmap=no
149 revision in index: 5003
152 revision in index: 5003
150 revision in nodemap: 5003
153 revision in nodemap: 5003
151
154
152
155
153 #if pure
156 #if pure
154 $ hg debugnodemap --metadata
157 $ hg debugnodemap --metadata
155 uid: ???????????????? (glob)
158 uid: ???????????????? (glob)
156 tip-rev: 5002
159 tip-rev: 5002
157 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
160 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
158 data-length: 121600
161 data-length: 121600
159 data-unused: 512
162 data-unused: 512
160 data-unused: 0.421%
163 data-unused: 0.421%
161 $ f --sha256 .hg/store/00changelog-*.nd --size
164 $ f --sha256 .hg/store/00changelog-*.nd --size
162 .hg/store/00changelog-????????????????.nd: size=121600, sha256=def52503d049ccb823974af313a98a935319ba61f40f3aa06a8be4d35c215054 (glob)
165 .hg/store/00changelog-????????????????.nd: size=121600, sha256=def52503d049ccb823974af313a98a935319ba61f40f3aa06a8be4d35c215054 (glob)
163 #endif
166 #endif
164 #if rust
167 #if rust
165 $ hg debugnodemap --metadata
168 $ hg debugnodemap --metadata
166 uid: ???????????????? (glob)
169 uid: ???????????????? (glob)
167 tip-rev: 5002
170 tip-rev: 5002
168 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
171 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
169 data-length: 121600
172 data-length: 121600
170 data-unused: 512
173 data-unused: 512
171 data-unused: 0.421%
174 data-unused: 0.421%
172 $ f --sha256 .hg/store/00changelog-*.nd --size
175 $ f --sha256 .hg/store/00changelog-*.nd --size
173 .hg/store/00changelog-????????????????.nd: size=121600, sha256=dacf5b5f1d4585fee7527d0e67cad5b1ba0930e6a0928f650f779aefb04ce3fb (glob)
176 .hg/store/00changelog-????????????????.nd: size=121600, sha256=dacf5b5f1d4585fee7527d0e67cad5b1ba0930e6a0928f650f779aefb04ce3fb (glob)
174 #endif
177 #endif
175 #if no-pure no-rust
178 #if no-pure no-rust
176 $ hg debugnodemap --metadata
179 $ hg debugnodemap --metadata
177 uid: ???????????????? (glob)
180 uid: ???????????????? (glob)
178 tip-rev: 5002
181 tip-rev: 5002
179 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
182 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
180 data-length: 121088
183 data-length: 121088
181 data-unused: 0
184 data-unused: 0
182 data-unused: 0.000%
185 data-unused: 0.000%
183 $ f --sha256 .hg/store/00changelog-*.nd --size
186 $ f --sha256 .hg/store/00changelog-*.nd --size
184 .hg/store/00changelog-????????????????.nd: size=121088, sha256=59fcede3e3cc587755916ceed29e3c33748cd1aa7d2f91828ac83e7979d935e8 (glob)
187 .hg/store/00changelog-????????????????.nd: size=121088, sha256=59fcede3e3cc587755916ceed29e3c33748cd1aa7d2f91828ac83e7979d935e8 (glob)
185 #endif
188 #endif
186
189
187 Test force warming the cache
190 Test force warming the cache
188
191
189 $ rm .hg/store/00changelog.n
192 $ rm .hg/store/00changelog.n
190 $ hg debugnodemap --metadata
193 $ hg debugnodemap --metadata
191 $ hg debugupdatecache
194 $ hg debugupdatecache
192 #if pure
195 #if pure
193 $ hg debugnodemap --metadata
196 $ hg debugnodemap --metadata
194 uid: ???????????????? (glob)
197 uid: ???????????????? (glob)
195 tip-rev: 5002
198 tip-rev: 5002
196 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
199 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
197 data-length: 121088
200 data-length: 121088
198 data-unused: 0
201 data-unused: 0
199 data-unused: 0.000%
202 data-unused: 0.000%
200 #else
203 #else
201 $ hg debugnodemap --metadata
204 $ hg debugnodemap --metadata
202 uid: ???????????????? (glob)
205 uid: ???????????????? (glob)
203 tip-rev: 5002
206 tip-rev: 5002
204 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
207 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
205 data-length: 121088
208 data-length: 121088
206 data-unused: 0
209 data-unused: 0
207 data-unused: 0.000%
210 data-unused: 0.000%
208 #endif
211 #endif
209
212
210 Check out of sync nodemap
213 Check out of sync nodemap
211 =========================
214 =========================
212
215
213 First copy old data on the side.
216 First copy old data on the side.
214
217
215 $ mkdir ../tmp-copies
218 $ mkdir ../tmp-copies
216 $ cp .hg/store/00changelog-????????????????.nd .hg/store/00changelog.n ../tmp-copies
219 $ cp .hg/store/00changelog-????????????????.nd .hg/store/00changelog.n ../tmp-copies
217
220
218 Nodemap lagging behind
221 Nodemap lagging behind
219 ----------------------
222 ----------------------
220
223
221 make a new commit
224 make a new commit
222
225
223 $ echo bar2 > bar
226 $ echo bar2 > bar
224 $ hg ci -m 'bar2'
227 $ hg ci -m 'bar2'
225 $ NODE=`hg log -r tip -T '{node}\n'`
228 $ NODE=`hg log -r tip -T '{node}\n'`
226 $ hg log -r "$NODE" -T '{rev}\n'
229 $ hg log -r "$NODE" -T '{rev}\n'
227 5003
230 5003
228
231
229 If the nodemap is lagging behind, it can catch up fine
232 If the nodemap is lagging behind, it can catch up fine
230
233
231 $ hg debugnodemap --metadata
234 $ hg debugnodemap --metadata
232 uid: ???????????????? (glob)
235 uid: ???????????????? (glob)
233 tip-rev: 5003
236 tip-rev: 5003
234 tip-node: c9329770f979ade2d16912267c38ba5f82fd37b3
237 tip-node: c9329770f979ade2d16912267c38ba5f82fd37b3
235 data-length: 121344 (pure !)
238 data-length: 121344 (pure !)
236 data-length: 121344 (rust !)
239 data-length: 121344 (rust !)
237 data-length: 121152 (no-rust no-pure !)
240 data-length: 121152 (no-rust no-pure !)
238 data-unused: 192 (pure !)
241 data-unused: 192 (pure !)
239 data-unused: 192 (rust !)
242 data-unused: 192 (rust !)
240 data-unused: 0 (no-rust no-pure !)
243 data-unused: 0 (no-rust no-pure !)
241 data-unused: 0.158% (pure !)
244 data-unused: 0.158% (pure !)
242 data-unused: 0.158% (rust !)
245 data-unused: 0.158% (rust !)
243 data-unused: 0.000% (no-rust no-pure !)
246 data-unused: 0.000% (no-rust no-pure !)
244 $ cp -f ../tmp-copies/* .hg/store/
247 $ cp -f ../tmp-copies/* .hg/store/
245 $ hg debugnodemap --metadata
248 $ hg debugnodemap --metadata
246 uid: ???????????????? (glob)
249 uid: ???????????????? (glob)
247 tip-rev: 5002
250 tip-rev: 5002
248 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
251 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
249 data-length: 121088
252 data-length: 121088
250 data-unused: 0
253 data-unused: 0
251 data-unused: 0.000%
254 data-unused: 0.000%
252 $ hg log -r "$NODE" -T '{rev}\n'
255 $ hg log -r "$NODE" -T '{rev}\n'
253 5003
256 5003
254
257
255 changelog altered
258 changelog altered
256 -----------------
259 -----------------
257
260
258 If the nodemap is not gated behind a requirement, an unaware client can alter
261 If the nodemap is not gated behind a requirement, an unaware client can alter
259 the repository so that the revlog used to generate the nodemap is no longer
262 the repository so that the revlog used to generate the nodemap is no longer
260 compatible with the persistent nodemap. We need to detect that.
263 compatible with the persistent nodemap. We need to detect that.
261
264
262 $ hg up "$NODE~5"
265 $ hg up "$NODE~5"
263 0 files updated, 0 files merged, 4 files removed, 0 files unresolved
266 0 files updated, 0 files merged, 4 files removed, 0 files unresolved
264 $ echo bar > babar
267 $ echo bar > babar
265 $ hg add babar
268 $ hg add babar
266 $ hg ci -m 'babar'
269 $ hg ci -m 'babar'
267 created new head
270 created new head
268 $ OTHERNODE=`hg log -r tip -T '{node}\n'`
271 $ OTHERNODE=`hg log -r tip -T '{node}\n'`
269 $ hg log -r "$OTHERNODE" -T '{rev}\n'
272 $ hg log -r "$OTHERNODE" -T '{rev}\n'
270 5004
273 5004
271
274
272 $ hg --config extensions.strip= strip --rev "$NODE~1" --no-backup
275 $ hg --config extensions.strip= strip --rev "$NODE~1" --no-backup
273
276
274 the nodemap should detect that the changelog has been tampered with and recover.
277 the nodemap should detect that the changelog has been tampered with and recover.
275
278
276 $ hg debugnodemap --metadata
279 $ hg debugnodemap --metadata
277 uid: ???????????????? (glob)
280 uid: ???????????????? (glob)
278 tip-rev: 5002
281 tip-rev: 5002
279 tip-node: b355ef8adce0949b8bdf6afc72ca853740d65944
282 tip-node: b355ef8adce0949b8bdf6afc72ca853740d65944
280 data-length: 121536 (pure !)
283 data-length: 121536 (pure !)
281 data-length: 121088 (rust !)
284 data-length: 121088 (rust !)
282 data-length: 121088 (no-pure no-rust !)
285 data-length: 121088 (no-pure no-rust !)
283 data-unused: 448 (pure !)
286 data-unused: 448 (pure !)
284 data-unused: 0 (rust !)
287 data-unused: 0 (rust !)
285 data-unused: 0 (no-pure no-rust !)
288 data-unused: 0 (no-pure no-rust !)
286 data-unused: 0.000% (rust !)
289 data-unused: 0.000% (rust !)
287 data-unused: 0.369% (pure !)
290 data-unused: 0.369% (pure !)
288 data-unused: 0.000% (no-pure no-rust !)
291 data-unused: 0.000% (no-pure no-rust !)
289
292
290 $ cp -f ../tmp-copies/* .hg/store/
293 $ cp -f ../tmp-copies/* .hg/store/
291 $ hg debugnodemap --metadata
294 $ hg debugnodemap --metadata
292 uid: ???????????????? (glob)
295 uid: ???????????????? (glob)
293 tip-rev: 5002
296 tip-rev: 5002
294 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
297 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
295 data-length: 121088
298 data-length: 121088
296 data-unused: 0
299 data-unused: 0
297 data-unused: 0.000%
300 data-unused: 0.000%
298 $ hg log -r "$OTHERNODE" -T '{rev}\n'
301 $ hg log -r "$OTHERNODE" -T '{rev}\n'
299 5002
302 5002
300
303
301 Check transaction related property
304 Check transaction related property
302 ==================================
305 ==================================
303
306
304 An up-to-date nodemap should be available to shell hooks.
307 An up-to-date nodemap should be available to shell hooks.
305
308
306 $ echo dsljfl > a
309 $ echo dsljfl > a
307 $ hg add a
310 $ hg add a
308 $ hg ci -m a
311 $ hg ci -m a
309 $ hg debugnodemap --metadata
312 $ hg debugnodemap --metadata
310 uid: ???????????????? (glob)
313 uid: ???????????????? (glob)
311 tip-rev: 5003
314 tip-rev: 5003
312 tip-node: a52c5079765b5865d97b993b303a18740113bbb2
315 tip-node: a52c5079765b5865d97b993b303a18740113bbb2
313 data-length: 121088
316 data-length: 121088
314 data-unused: 0
317 data-unused: 0
315 data-unused: 0.000%
318 data-unused: 0.000%
316 $ echo babar2 > babar
319 $ echo babar2 > babar
317 $ hg ci -m 'babar2' --config "hooks.pretxnclose.nodemap-test=hg debugnodemap --metadata"
320 $ hg ci -m 'babar2' --config "hooks.pretxnclose.nodemap-test=hg debugnodemap --metadata"
318 uid: ???????????????? (glob)
321 uid: ???????????????? (glob)
319 tip-rev: 5004
322 tip-rev: 5004
320 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
323 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
321 data-length: 121280 (pure !)
324 data-length: 121280 (pure !)
322 data-length: 121280 (rust !)
325 data-length: 121280 (rust !)
323 data-length: 121088 (no-pure no-rust !)
326 data-length: 121088 (no-pure no-rust !)
324 data-unused: 192 (pure !)
327 data-unused: 192 (pure !)
325 data-unused: 192 (rust !)
328 data-unused: 192 (rust !)
326 data-unused: 0 (no-pure no-rust !)
329 data-unused: 0 (no-pure no-rust !)
327 data-unused: 0.158% (pure !)
330 data-unused: 0.158% (pure !)
328 data-unused: 0.158% (rust !)
331 data-unused: 0.158% (rust !)
329 data-unused: 0.000% (no-pure no-rust !)
332 data-unused: 0.000% (no-pure no-rust !)
330 $ hg debugnodemap --metadata
333 $ hg debugnodemap --metadata
331 uid: ???????????????? (glob)
334 uid: ???????????????? (glob)
332 tip-rev: 5004
335 tip-rev: 5004
333 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
336 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
334 data-length: 121280 (pure !)
337 data-length: 121280 (pure !)
335 data-length: 121280 (rust !)
338 data-length: 121280 (rust !)
336 data-length: 121088 (no-pure no-rust !)
339 data-length: 121088 (no-pure no-rust !)
337 data-unused: 192 (pure !)
340 data-unused: 192 (pure !)
338 data-unused: 192 (rust !)
341 data-unused: 192 (rust !)
339 data-unused: 0 (no-pure no-rust !)
342 data-unused: 0 (no-pure no-rust !)
340 data-unused: 0.158% (pure !)
343 data-unused: 0.158% (pure !)
341 data-unused: 0.158% (rust !)
344 data-unused: 0.158% (rust !)
342 data-unused: 0.000% (no-pure no-rust !)
345 data-unused: 0.000% (no-pure no-rust !)
343
346
344 Another process does not see the pending nodemap content during the run.
347 Another process does not see the pending nodemap content during the run.
345
348
346 $ PATH=$RUNTESTDIR/testlib/:$PATH
349 $ PATH=$RUNTESTDIR/testlib/:$PATH
347 $ echo qpoasp > a
350 $ echo qpoasp > a
348 $ hg ci -m a2 \
351 $ hg ci -m a2 \
349 > --config "hooks.pretxnclose=wait-on-file 20 sync-repo-read sync-txn-pending" \
352 > --config "hooks.pretxnclose=wait-on-file 20 sync-repo-read sync-txn-pending" \
350 > --config "hooks.txnclose=touch sync-txn-close" > output.txt 2>&1 &
353 > --config "hooks.txnclose=touch sync-txn-close" > output.txt 2>&1 &
351
354
352 (read the repository while the commit transaction is pending)
355 (read the repository while the commit transaction is pending)
353
356
354 $ wait-on-file 20 sync-txn-pending && \
357 $ wait-on-file 20 sync-txn-pending && \
355 > hg debugnodemap --metadata && \
358 > hg debugnodemap --metadata && \
356 > wait-on-file 20 sync-txn-close sync-repo-read
359 > wait-on-file 20 sync-txn-close sync-repo-read
357 uid: ???????????????? (glob)
360 uid: ???????????????? (glob)
358 tip-rev: 5004
361 tip-rev: 5004
359 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
362 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
360 data-length: 121280 (pure !)
363 data-length: 121280 (pure !)
361 data-length: 121280 (rust !)
364 data-length: 121280 (rust !)
362 data-length: 121088 (no-pure no-rust !)
365 data-length: 121088 (no-pure no-rust !)
363 data-unused: 192 (pure !)
366 data-unused: 192 (pure !)
364 data-unused: 192 (rust !)
367 data-unused: 192 (rust !)
365 data-unused: 0 (no-pure no-rust !)
368 data-unused: 0 (no-pure no-rust !)
366 data-unused: 0.158% (pure !)
369 data-unused: 0.158% (pure !)
367 data-unused: 0.158% (rust !)
370 data-unused: 0.158% (rust !)
368 data-unused: 0.000% (no-pure no-rust !)
371 data-unused: 0.000% (no-pure no-rust !)
369 $ hg debugnodemap --metadata
372 $ hg debugnodemap --metadata
370 uid: ???????????????? (glob)
373 uid: ???????????????? (glob)
371 tip-rev: 5005
374 tip-rev: 5005
372 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
375 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
373 data-length: 121536 (pure !)
376 data-length: 121536 (pure !)
374 data-length: 121536 (rust !)
377 data-length: 121536 (rust !)
375 data-length: 121088 (no-pure no-rust !)
378 data-length: 121088 (no-pure no-rust !)
376 data-unused: 448 (pure !)
379 data-unused: 448 (pure !)
377 data-unused: 448 (rust !)
380 data-unused: 448 (rust !)
378 data-unused: 0 (no-pure no-rust !)
381 data-unused: 0 (no-pure no-rust !)
379 data-unused: 0.369% (pure !)
382 data-unused: 0.369% (pure !)
380 data-unused: 0.369% (rust !)
383 data-unused: 0.369% (rust !)
381 data-unused: 0.000% (no-pure no-rust !)
384 data-unused: 0.000% (no-pure no-rust !)
382
385
383 $ cat output.txt
386 $ cat output.txt
384
387
385 Check that a failing transaction will properly revert the data
388 Check that a failing transaction will properly revert the data
386
389
387 $ echo plakfe > a
390 $ echo plakfe > a
388 $ f --size --sha256 .hg/store/00changelog-*.nd
391 $ f --size --sha256 .hg/store/00changelog-*.nd
389 .hg/store/00changelog-????????????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
392 .hg/store/00changelog-????????????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
390 .hg/store/00changelog-????????????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
393 .hg/store/00changelog-????????????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
391 .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
394 .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
392 $ hg ci -m a3 --config "extensions.abort=$RUNTESTDIR/testlib/crash_transaction_late.py"
395 $ hg ci -m a3 --config "extensions.abort=$RUNTESTDIR/testlib/crash_transaction_late.py"
393 transaction abort!
396 transaction abort!
394 rollback completed
397 rollback completed
395 abort: This is a late abort
398 abort: This is a late abort
396 [255]
399 [255]
397 $ hg debugnodemap --metadata
400 $ hg debugnodemap --metadata
398 uid: ???????????????? (glob)
401 uid: ???????????????? (glob)
399 tip-rev: 5005
402 tip-rev: 5005
400 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
403 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
401 data-length: 121536 (pure !)
404 data-length: 121536 (pure !)
402 data-length: 121536 (rust !)
405 data-length: 121536 (rust !)
403 data-length: 121088 (no-pure no-rust !)
406 data-length: 121088 (no-pure no-rust !)
404 data-unused: 448 (pure !)
407 data-unused: 448 (pure !)
405 data-unused: 448 (rust !)
408 data-unused: 448 (rust !)
406 data-unused: 0 (no-pure no-rust !)
409 data-unused: 0 (no-pure no-rust !)
407 data-unused: 0.369% (pure !)
410 data-unused: 0.369% (pure !)
408 data-unused: 0.369% (rust !)
411 data-unused: 0.369% (rust !)
409 data-unused: 0.000% (no-pure no-rust !)
412 data-unused: 0.000% (no-pure no-rust !)
410 $ f --size --sha256 .hg/store/00changelog-*.nd
413 $ f --size --sha256 .hg/store/00changelog-*.nd
411 .hg/store/00changelog-????????????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
414 .hg/store/00changelog-????????????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
412 .hg/store/00changelog-????????????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
415 .hg/store/00changelog-????????????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
413 .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
416 .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
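
The transcript above drives everything through `hg debugnodemap --metadata`, whose output is a handful of `key: value` lines (uid, tip-rev, tip-node, data-length, data-unused) and which prints nothing when no persistent nodemap file exists on disk. As a small illustration of scripting against that output (a sketch, assuming `hg` is on PATH and the function is pointed at a repository):

    import subprocess

    def nodemap_tip_rev(repo_path="."):
        """Return tip-rev from `hg debugnodemap --metadata`, or None when
        the command prints nothing (no persistent nodemap on disk)."""
        out = subprocess.run(
            ["hg", "debugnodemap", "--metadata"],
            cwd=repo_path,
            capture_output=True,
            text=True,
            check=True,
        ).stdout
        for line in out.splitlines():
            if line.startswith("tip-rev:"):
                return int(line.split(":", 1)[1].strip())
        return None
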