##// END OF EJS Templates
streamclone: treat volatile file as "fullfile"...
marmoute -
r47751:aed6ceaa default
parent child Browse files
Show More
@@ -1,799 +1,810 b''
1 # store.py - repository store handling for Mercurial
1 # store.py - repository store handling for Mercurial
2 #
2 #
3 # Copyright 2008 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2008 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import functools
11 import functools
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .pycompat import getattr
16 from .pycompat import getattr
17 from .node import hex
17 from .node import hex
18 from . import (
18 from . import (
19 changelog,
19 changelog,
20 error,
20 error,
21 manifest,
21 manifest,
22 policy,
22 policy,
23 pycompat,
23 pycompat,
24 util,
24 util,
25 vfs as vfsmod,
25 vfs as vfsmod,
26 )
26 )
27 from .utils import hashutil
27 from .utils import hashutil
28
28
# optional native helper module; pure-Python fallbacks are selected below
# via getattr(parsers, ...) when a given helper is missing
parsers = policy.importmod('parsers')
# how many bytes should be read from fncache in one read
# It is done to prevent loading large fncache files into memory
fncache_chunksize = 10 ** 6
33
33
34
34
35 def _matchtrackedpath(path, matcher):
35 def _matchtrackedpath(path, matcher):
36 """parses a fncache entry and returns whether the entry is tracking a path
36 """parses a fncache entry and returns whether the entry is tracking a path
37 matched by matcher or not.
37 matched by matcher or not.
38
38
39 If matcher is None, returns True"""
39 If matcher is None, returns True"""
40
40
41 if matcher is None:
41 if matcher is None:
42 return True
42 return True
43 path = decodedir(path)
43 path = decodedir(path)
44 if path.startswith(b'data/'):
44 if path.startswith(b'data/'):
45 return matcher(path[len(b'data/') : -len(b'.i')])
45 return matcher(path[len(b'data/') : -len(b'.i')])
46 elif path.startswith(b'meta/'):
46 elif path.startswith(b'meta/'):
47 return matcher.visitdir(path[len(b'meta/') : -len(b'/00manifest.i')])
47 return matcher.visitdir(path[len(b'meta/') : -len(b'/00manifest.i')])
48
48
49 raise error.ProgrammingError(b"cannot decode path %s" % path)
49 raise error.ProgrammingError(b"cannot decode path %s" % path)
50
50
51
51
52 # This avoids a collision between a file named foo and a dir named
52 # This avoids a collision between a file named foo and a dir named
53 # foo.i or foo.d
53 # foo.i or foo.d
54 def _encodedir(path):
54 def _encodedir(path):
55 """
55 """
56 >>> _encodedir(b'data/foo.i')
56 >>> _encodedir(b'data/foo.i')
57 'data/foo.i'
57 'data/foo.i'
58 >>> _encodedir(b'data/foo.i/bla.i')
58 >>> _encodedir(b'data/foo.i/bla.i')
59 'data/foo.i.hg/bla.i'
59 'data/foo.i.hg/bla.i'
60 >>> _encodedir(b'data/foo.i.hg/bla.i')
60 >>> _encodedir(b'data/foo.i.hg/bla.i')
61 'data/foo.i.hg.hg/bla.i'
61 'data/foo.i.hg.hg/bla.i'
62 >>> _encodedir(b'data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n')
62 >>> _encodedir(b'data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n')
63 'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n'
63 'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n'
64 """
64 """
65 return (
65 return (
66 path.replace(b".hg/", b".hg.hg/")
66 path.replace(b".hg/", b".hg.hg/")
67 .replace(b".i/", b".i.hg/")
67 .replace(b".i/", b".i.hg/")
68 .replace(b".d/", b".d.hg/")
68 .replace(b".d/", b".d.hg/")
69 )
69 )
70
70
71
71
# prefer the C implementation from parsers when it is available
encodedir = getattr(parsers, 'encodedir', _encodedir)
73
73
74
74
def decodedir(path):
    """Reverse the escaping performed by ``_encodedir``.

    >>> decodedir(b'data/foo.i')
    'data/foo.i'
    >>> decodedir(b'data/foo.i.hg/bla.i')
    'data/foo.i/bla.i'
    >>> decodedir(b'data/foo.i.hg.hg/bla.i')
    'data/foo.i.hg/bla.i'
    """
    # fast path: nothing was escaped in this entry
    if b".hg/" not in path:
        return path
    decoded = path.replace(b".d.hg/", b".d/")
    decoded = decoded.replace(b".i.hg/", b".i/")
    return decoded.replace(b".hg.hg/", b".hg/")
91
91
92
92
93 def _reserved():
93 def _reserved():
94 """characters that are problematic for filesystems
94 """characters that are problematic for filesystems
95
95
96 * ascii escapes (0..31)
96 * ascii escapes (0..31)
97 * ascii hi (126..255)
97 * ascii hi (126..255)
98 * windows specials
98 * windows specials
99
99
100 these characters will be escaped by encodefunctions
100 these characters will be escaped by encodefunctions
101 """
101 """
102 winreserved = [ord(x) for x in u'\\:*?"<>|']
102 winreserved = [ord(x) for x in u'\\:*?"<>|']
103 for x in range(32):
103 for x in range(32):
104 yield x
104 yield x
105 for x in range(126, 256):
105 for x in range(126, 256):
106 yield x
106 yield x
107 for x in winreserved:
107 for x in winreserved:
108 yield x
108 yield x
109
109
110
110
def _buildencodefun():
    """Build the reversible (encode, decode) filename function pair.

    Uppercase ascii letters and ``_`` are encoded as ``_`` plus the
    lowercase letter; every byte from ``_reserved()`` becomes ``~xx`` hex;
    everything else maps to itself.

    >>> enc, dec = _buildencodefun()

    >>> enc(b'nothing/special.txt')
    'nothing/special.txt'
    >>> dec(b'nothing/special.txt')
    'nothing/special.txt'

    >>> enc(b'HELLO')
    '_h_e_l_l_o'
    >>> dec(b'_h_e_l_l_o')
    'HELLO'

    >>> enc(b'hello:world?')
    'hello~3aworld~3f'
    >>> dec(b'hello~3aworld~3f')
    'hello:world?'

    >>> enc(b'the\\x07quick\\xADshot')
    'the~07quick~adshot'
    >>> dec(b'the~07quick~adshot')
    'the\\x07quick\\xadshot'
    """
    e = b'_'
    xchr = pycompat.bytechr
    asciistr = list(map(xchr, range(127)))
    capitals = list(range(ord(b"A"), ord(b"Z") + 1))

    # start from the identity map, then overlay the escaped forms
    # (assignment order matters: later loops overwrite earlier entries)
    cmap = {x: x for x in asciistr}
    for x in _reserved():
        cmap[xchr(x)] = b"~%02x" % x
    for x in capitals + [ord(e)]:
        cmap[xchr(x)] = e + xchr(x).lower()

    # reverse mapping used by decode() below
    dmap = {}
    for k, v in pycompat.iteritems(cmap):
        dmap[v] = k

    def decode(s):
        # at each position, try token lengths 1 to 3 (e.g. 'a', '_a',
        # '~2e') until one is a known encoded form; unknown input raises
        i = 0
        while i < len(s):
            for l in pycompat.xrange(1, 4):
                try:
                    yield dmap[s[i : i + l]]
                    i += l
                    break
                except KeyError:
                    pass
            else:
                raise KeyError

    return (
        lambda s: b''.join(
            [cmap[s[c : c + 1]] for c in pycompat.xrange(len(s))]
        ),
        lambda s: b''.join(list(decode(s))),
    )
169
169
170
170
# module-level encode/decode pair shared by the helpers and stores below
_encodefname, _decodefname = _buildencodefun()
172
172
173
173
def encodefilename(s):
    """Fully encode a store path: directory escaping plus name encoding.

    >>> encodefilename(b'foo.i/bar.d/bla.hg/hi:world?/HELLO')
    'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o'
    """
    dir_safe = encodedir(s)
    return _encodefname(dir_safe)
180
180
181
181
def decodefilename(s):
    """Reverse ``encodefilename``.

    >>> decodefilename(b'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o')
    'foo.i/bar.d/bla.hg/hi:world?/HELLO'
    """
    name_decoded = _decodefname(s)
    return decodedir(name_decoded)
188
188
189
189
def _buildlowerencodefun():
    """Return a function that lower-cases and escapes a byte string.

    >>> f = _buildlowerencodefun()
    >>> f(b'nothing/special.txt')
    'nothing/special.txt'
    >>> f(b'HELLO')
    'hello'
    >>> f(b'hello:world?')
    'hello~3aworld~3f'
    >>> f(b'the\\x07quick\\xADshot')
    'the~07quick~adshot'
    """
    xchr = pycompat.bytechr
    # identity for plain ascii, overridden below for special bytes
    table = {xchr(code): xchr(code) for code in pycompat.xrange(127)}
    for code in _reserved():
        table[xchr(code)] = b"~%02x" % code
    for code in range(ord(b"A"), ord(b"Z") + 1):
        table[xchr(code)] = xchr(code).lower()

    def lowerencode(s):
        return b"".join([table[c] for c in pycompat.iterbytestr(s)])

    return lowerencode
213
213
214
214
# prefer the C implementation of lowerencode when it is available
lowerencode = getattr(parsers, 'lowerencode', None) or _buildlowerencodefun()

# Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9
_winres3 = (b'aux', b'con', b'prn', b'nul') # length 3
_winres4 = (b'com', b'lpt') # length 4 (with trailing 1..9)
220
220
221
221
222 def _auxencode(path, dotencode):
222 def _auxencode(path, dotencode):
223 """
223 """
224 Encodes filenames containing names reserved by Windows or which end in
224 Encodes filenames containing names reserved by Windows or which end in
225 period or space. Does not touch other single reserved characters c.
225 period or space. Does not touch other single reserved characters c.
226 Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
226 Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
227 Additionally encodes space or period at the beginning, if dotencode is
227 Additionally encodes space or period at the beginning, if dotencode is
228 True. Parameter path is assumed to be all lowercase.
228 True. Parameter path is assumed to be all lowercase.
229 A segment only needs encoding if a reserved name appears as a
229 A segment only needs encoding if a reserved name appears as a
230 basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux"
230 basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux"
231 doesn't need encoding.
231 doesn't need encoding.
232
232
233 >>> s = b'.foo/aux.txt/txt.aux/con/prn/nul/foo.'
233 >>> s = b'.foo/aux.txt/txt.aux/con/prn/nul/foo.'
234 >>> _auxencode(s.split(b'/'), True)
234 >>> _auxencode(s.split(b'/'), True)
235 ['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e']
235 ['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e']
236 >>> s = b'.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.'
236 >>> s = b'.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.'
237 >>> _auxencode(s.split(b'/'), False)
237 >>> _auxencode(s.split(b'/'), False)
238 ['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e']
238 ['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e']
239 >>> _auxencode([b'foo. '], True)
239 >>> _auxencode([b'foo. '], True)
240 ['foo.~20']
240 ['foo.~20']
241 >>> _auxencode([b' .foo'], True)
241 >>> _auxencode([b' .foo'], True)
242 ['~20.foo']
242 ['~20.foo']
243 """
243 """
244 for i, n in enumerate(path):
244 for i, n in enumerate(path):
245 if not n:
245 if not n:
246 continue
246 continue
247 if dotencode and n[0] in b'. ':
247 if dotencode and n[0] in b'. ':
248 n = b"~%02x" % ord(n[0:1]) + n[1:]
248 n = b"~%02x" % ord(n[0:1]) + n[1:]
249 path[i] = n
249 path[i] = n
250 else:
250 else:
251 l = n.find(b'.')
251 l = n.find(b'.')
252 if l == -1:
252 if l == -1:
253 l = len(n)
253 l = len(n)
254 if (l == 3 and n[:3] in _winres3) or (
254 if (l == 3 and n[:3] in _winres3) or (
255 l == 4
255 l == 4
256 and n[3:4] <= b'9'
256 and n[3:4] <= b'9'
257 and n[3:4] >= b'1'
257 and n[3:4] >= b'1'
258 and n[:3] in _winres4
258 and n[:3] in _winres4
259 ):
259 ):
260 # encode third letter ('aux' -> 'au~78')
260 # encode third letter ('aux' -> 'au~78')
261 ec = b"~%02x" % ord(n[2:3])
261 ec = b"~%02x" % ord(n[2:3])
262 n = n[0:2] + ec + n[3:]
262 n = n[0:2] + ec + n[3:]
263 path[i] = n
263 path[i] = n
264 if n[-1] in b'. ':
264 if n[-1] in b'. ':
265 # encode last period or space ('foo...' -> 'foo..~2e')
265 # encode last period or space ('foo...' -> 'foo..~2e')
266 path[i] = n[:-1] + b"~%02x" % ord(n[-1:])
266 path[i] = n[:-1] + b"~%02x" % ord(n[-1:])
267 return path
267 return path
268
268
269
269
# length limits used by the hashed ("dh/") path encoding below
_maxstorepathlen = 120
_dirprefixlen = 8
_maxshortdirslen = 8 * (_dirprefixlen + 1) - 4
273
273
274
274
def _hashencode(path, dotencode):
    """Hash-encode an over-long store path into an irreversible 'dh/' path.

    The result is 'dh/' + shortened directory prefixes + as much of the
    basename as fits + the sha1 hex digest of the full path + the original
    extension, kept within ``_maxstorepathlen`` characters.
    """
    digest = hex(hashutil.sha1(path).digest())
    le = lowerencode(path[5:]).split(b'/') # skips prefix 'data/' or 'meta/'
    parts = _auxencode(le, dotencode)
    basename = parts[-1]
    _root, ext = os.path.splitext(basename)
    sdirs = []
    sdirslen = 0
    for p in parts[:-1]:
        # keep at most _dirprefixlen characters of each directory level
        d = p[:_dirprefixlen]
        if d[-1] in b'. ':
            # Windows can't access dirs ending in period or space
            d = d[:-1] + b'_'
        if sdirslen == 0:
            t = len(d)
        else:
            # +1 accounts for the '/' separator
            t = sdirslen + 1 + len(d)
        if t > _maxshortdirslen:
            # stop adding levels once the directory budget is exhausted
            break
        sdirs.append(d)
        sdirslen = t
    dirs = b'/'.join(sdirs)
    if len(dirs) > 0:
        dirs += b'/'
    res = b'dh/' + dirs + digest + ext
    spaceleft = _maxstorepathlen - len(res)
    if spaceleft > 0:
        # pad with the start of the basename up to the length limit
        filler = basename[:spaceleft]
        res = b'dh/' + dirs + filler + digest + ext
    return res
305
305
306
306
def _hybridencode(path, dotencode):
    """Encode ``path`` while keeping the result under ``_maxstorepathlen``.

    Paths short enough keep the reversible default encoding: uppercase
    letters become '_x', reserved or illegal characters become '~xx' hex
    (see ``encodefilename``), and components that are Windows reserved
    names get their third character escaped (see ``_auxencode``).

    When the default-encoded result would exceed ``_maxstorepathlen``, the
    non-reversible hashed encoding of ``_hashencode`` is used instead: up
    to ``_dirprefixlen`` characters per directory level (capped at
    ``_maxshortdirslen`` in total), then as much of the basename as fits,
    the sha digest of the full path, and the preserved extension ('.i' or
    '.d').  In that case the leading 'data/' becomes 'dh/'.
    """
    dir_safe = encodedir(path)
    components = _encodefname(dir_safe).split(b'/')
    encoded = b'/'.join(_auxencode(components, dotencode))
    if len(encoded) > _maxstorepathlen:
        encoded = _hashencode(dir_safe, dotencode)
    return encoded
344
344
345
345
def _pathencode(path):
    """Pure-Python fallback for ``parsers.pathencode`` (dotencode variant).

    Applies the default reversible encoding, falling back to the hashed
    encoding whenever either the raw input or the encoded result exceeds
    ``_maxstorepathlen``.
    """
    dir_safe = encodedir(path)
    if len(path) > _maxstorepathlen:
        return _hashencode(dir_safe, True)
    components = _encodefname(dir_safe).split(b'/')
    encoded = b'/'.join(_auxencode(components, True))
    if len(encoded) > _maxstorepathlen:
        return _hashencode(dir_safe, True)
    return encoded
355
355
356
356
# prefer the C implementation of pathencode when it is available
_pathencode = getattr(parsers, 'pathencode', _pathencode)
358
358
359
359
def _plainhybridencode(f):
    """Hybrid-encode ``f`` without escaping leading periods or spaces."""
    return _hybridencode(f, dotencode=False)
362
362
363
363
364 def _calcmode(vfs):
364 def _calcmode(vfs):
365 try:
365 try:
366 # files in .hg/ will be created using this mode
366 # files in .hg/ will be created using this mode
367 mode = vfs.stat().st_mode
367 mode = vfs.stat().st_mode
368 # avoid some useless chmods
368 # avoid some useless chmods
369 if (0o777 & ~util.umask) == (0o777 & mode):
369 if (0o777 & ~util.umask) == (0o777 & mode):
370 mode = None
370 mode = None
371 except OSError:
371 except OSError:
372 mode = None
372 mode = None
373 return mode
373 return mode
374
374
375
375
# store files and directories copied verbatim on clone (see copylist())
_data = [
    b'bookmarks',
    b'narrowspec',
    b'data',
    b'meta',
    b'00manifest.d',
    b'00manifest.i',
    b'00changelog.d',
    b'00changelog.i',
    b'phaseroots',
    b'obsstore',
    b'requires',
]
389
389
# suffixes of a revlog's main entry point (its index)
REVLOG_FILES_MAIN_EXT = (b'.i', b'i.tmpcensored')
# suffixes of secondary revlog files (data, nodemap docket and data)
REVLOG_FILES_OTHER_EXT = (b'.d', b'.n', b'.nd', b'd.tmpcensored')
# files that are "volatile" and might change between listing and streaming
#
# note: the ".nd" file are nodemap data and won't "change" but they might be
# deleted.
REVLOG_FILES_VOLATILE_EXT = (b'.n', b'.nd')
392
397
393
398
def is_revlog(f, kind, st):
    """Classify directory entry ``f``: its revlog type flags, or None.

    Only regular files can be revlog files; any other entry kind
    (directory, symlink, ...) yields None.
    """
    # non-regular files are never revlogs
    return revlog_type(f) if kind == stat.S_IFREG else None
398
403
399
404
def revlog_type(f):
    """Return the FILEFLAGS combination for revlog file name ``f``.

    None is returned for names that are not revlog files.  Secondary
    files with a volatile extension additionally carry FILEFLAGS_VOLATILE,
    since they may be deleted or rewritten between listing and streaming.
    """
    if f.endswith(REVLOG_FILES_MAIN_EXT):
        return FILEFLAGS_REVLOG_MAIN
    if f.endswith(REVLOG_FILES_OTHER_EXT):
        flags = FILETYPE_FILELOG_OTHER
        if f.endswith(REVLOG_FILES_VOLATILE_EXT):
            flags |= FILEFLAGS_VOLATILE
        return flags
    return None
405
413
406
414
# the file is part of changelog data
FILEFLAGS_CHANGELOG = 1 << 13
# the file is part of manifest data
FILEFLAGS_MANIFESTLOG = 1 << 12
# the file is part of filelog data
FILEFLAGS_FILELOG = 1 << 11
# files that are not directly part of a revlog
FILEFLAGS_OTHER = 1 << 10

# the main entry point for a revlog
FILEFLAGS_REVLOG_MAIN = 1 << 1
# a secondary file for a revlog
FILEFLAGS_REVLOG_OTHER = 1 << 0

# files that are "volatile" and might change between listing and streaming
FILEFLAGS_VOLATILE = 1 << 20

# convenience combinations of the flags above
FILETYPE_CHANGELOG_MAIN = FILEFLAGS_CHANGELOG | FILEFLAGS_REVLOG_MAIN
FILETYPE_CHANGELOG_OTHER = FILEFLAGS_CHANGELOG | FILEFLAGS_REVLOG_OTHER
FILETYPE_MANIFESTLOG_MAIN = FILEFLAGS_MANIFESTLOG | FILEFLAGS_REVLOG_MAIN
FILETYPE_MANIFESTLOG_OTHER = FILEFLAGS_MANIFESTLOG | FILEFLAGS_REVLOG_OTHER
FILETYPE_FILELOG_MAIN = FILEFLAGS_FILELOG | FILEFLAGS_REVLOG_MAIN
FILETYPE_FILELOG_OTHER = FILEFLAGS_FILELOG | FILEFLAGS_REVLOG_OTHER
FILETYPE_OTHER = FILEFLAGS_OTHER
428
439
429
440
class basicstore(object):
    '''base class for local repository stores'''

    def __init__(self, path, vfstype):
        vfs = vfstype(path)
        self.path = vfs.base
        self.createmode = _calcmode(vfs)
        vfs.createmode = self.createmode
        self.rawvfs = vfs
        # this store only escapes directory names (see encodedir); file
        # names are used as-is
        self.vfs = vfsmod.filtervfs(vfs, encodedir)
        self.opener = self.vfs

    def join(self, f):
        # map a store-relative name to an on-disk path
        return self.path + b'/' + encodedir(f)

    def _walk(self, relpath, recurse):
        '''yields (file_type, unencoded, encoded, size)'''
        path = self.path
        if relpath:
            path += b'/' + relpath
        striplen = len(self.path) + 1
        l = []
        if self.rawvfs.isdir(path):
            visit = [path]
            readdir = self.rawvfs.readdir
            while visit:
                p = visit.pop()
                for f, kind, st in readdir(p, stat=True):
                    fp = p + b'/' + f
                    rl_type = is_revlog(f, kind, st)
                    if rl_type is not None:
                        n = util.pconvert(fp[striplen:])
                        l.append((rl_type, decodedir(n), n, st.st_size))
                    elif kind == stat.S_IFDIR and recurse:
                        visit.append(fp)
        l.sort()
        return l

    def changelog(self, trypending, concurrencychecker=None):
        return changelog.changelog(
            self.vfs,
            trypending=trypending,
            concurrencychecker=concurrencychecker,
        )

    def manifestlog(self, repo, storenarrowmatch):
        rootstore = manifest.manifestrevlog(repo.nodeconstants, self.vfs)
        return manifest.manifestlog(self.vfs, repo, rootstore, storenarrowmatch)

    def datafiles(self, matcher=None):
        # NOTE(review): matcher is accepted for interface compatibility but
        # not applied here — confirm subclasses are the only filtering path
        files = self._walk(b'data', True) + self._walk(b'meta', True)
        for (t, u, e, s) in files:
            yield (FILEFLAGS_FILELOG | t, u, e, s)

    def topfiles(self):
        # yield manifest before changelog
        files = reversed(self._walk(b'', False))
        for (t, u, e, s) in files:
            if u.startswith(b'00changelog'):
                yield (FILEFLAGS_CHANGELOG | t, u, e, s)
            elif u.startswith(b'00manifest'):
                yield (FILEFLAGS_MANIFESTLOG | t, u, e, s)
            else:
                yield (FILETYPE_OTHER | t, u, e, s)

    def walk(self, matcher=None):
        """return file related to data storage (ie: revlogs)

        yields (file_type, unencoded, encoded, size)

        if a matcher is passed, storage files of only those tracked paths
        are passed with matches the matcher
        """
        # yield data files first
        for x in self.datafiles(matcher):
            yield x
        for x in self.topfiles():
            yield x

    def copylist(self):
        return _data

    def write(self, tr):
        # nothing to flush for a plain store
        pass

    def invalidatecaches(self):
        # no caches to drop for a plain store
        pass

    def markremoved(self, fn):
        # no bookkeeping needed for a plain store
        pass

    def __contains__(self, path):
        '''Checks if the store contains path'''
        path = b"/".join((b"data", path))
        # file?
        if self.vfs.exists(path + b".i"):
            return True
        # dir?
        if not path.endswith(b"/"):
            path = path + b"/"
        return self.vfs.exists(path)
531
542
532
543
class encodedstore(basicstore):
    """Store whose vfs encodes full file names (see ``encodefilename``)
    and whose files live under the ``store/`` subdirectory."""

    def __init__(self, path, vfstype):
        vfs = vfstype(path + b'/store')
        self.path = vfs.base
        self.createmode = _calcmode(vfs)
        vfs.createmode = self.createmode
        self.rawvfs = vfs
        self.vfs = vfsmod.filtervfs(vfs, encodefilename)
        self.opener = self.vfs

    def datafiles(self, matcher=None):
        for t, a, b, size in super(encodedstore, self).datafiles():
            try:
                a = decodefilename(a)
            except KeyError:
                # undecodable on-disk name: yield it with no decoded form
                a = None
            if a is not None and not _matchtrackedpath(a, matcher):
                continue
            yield t, a, b, size

    def join(self, f):
        return self.path + b'/' + encodefilename(f)

    def copylist(self):
        return [b'requires', b'00changelog.i'] + [b'store/' + f for f in _data]
558
569
559
570
560 class fncache(object):
571 class fncache(object):
561 # the filename used to be partially encoded
572 # the filename used to be partially encoded
562 # hence the encodedir/decodedir dance
573 # hence the encodedir/decodedir dance
563 def __init__(self, vfs):
574 def __init__(self, vfs):
564 self.vfs = vfs
575 self.vfs = vfs
565 self.entries = None
576 self.entries = None
566 self._dirty = False
577 self._dirty = False
567 # set of new additions to fncache
578 # set of new additions to fncache
568 self.addls = set()
579 self.addls = set()
569
580
570 def ensureloaded(self, warn=None):
581 def ensureloaded(self, warn=None):
571 """read the fncache file if not already read.
582 """read the fncache file if not already read.
572
583
573 If the file on disk is corrupted, raise. If warn is provided,
584 If the file on disk is corrupted, raise. If warn is provided,
574 warn and keep going instead."""
585 warn and keep going instead."""
575 if self.entries is None:
586 if self.entries is None:
576 self._load(warn)
587 self._load(warn)
577
588
578 def _load(self, warn=None):
589 def _load(self, warn=None):
579 '''fill the entries from the fncache file'''
590 '''fill the entries from the fncache file'''
580 self._dirty = False
591 self._dirty = False
581 try:
592 try:
582 fp = self.vfs(b'fncache', mode=b'rb')
593 fp = self.vfs(b'fncache', mode=b'rb')
583 except IOError:
594 except IOError:
584 # skip nonexistent file
595 # skip nonexistent file
585 self.entries = set()
596 self.entries = set()
586 return
597 return
587
598
588 self.entries = set()
599 self.entries = set()
589 chunk = b''
600 chunk = b''
590 for c in iter(functools.partial(fp.read, fncache_chunksize), b''):
601 for c in iter(functools.partial(fp.read, fncache_chunksize), b''):
591 chunk += c
602 chunk += c
592 try:
603 try:
593 p = chunk.rindex(b'\n')
604 p = chunk.rindex(b'\n')
594 self.entries.update(decodedir(chunk[: p + 1]).splitlines())
605 self.entries.update(decodedir(chunk[: p + 1]).splitlines())
595 chunk = chunk[p + 1 :]
606 chunk = chunk[p + 1 :]
596 except ValueError:
607 except ValueError:
597 # substring '\n' not found, maybe the entry is bigger than the
608 # substring '\n' not found, maybe the entry is bigger than the
598 # chunksize, so let's keep iterating
609 # chunksize, so let's keep iterating
599 pass
610 pass
600
611
601 if chunk:
612 if chunk:
602 msg = _(b"fncache does not ends with a newline")
613 msg = _(b"fncache does not ends with a newline")
603 if warn:
614 if warn:
604 warn(msg + b'\n')
615 warn(msg + b'\n')
605 else:
616 else:
606 raise error.Abort(
617 raise error.Abort(
607 msg,
618 msg,
608 hint=_(
619 hint=_(
609 b"use 'hg debugrebuildfncache' to "
620 b"use 'hg debugrebuildfncache' to "
610 b"rebuild the fncache"
621 b"rebuild the fncache"
611 ),
622 ),
612 )
623 )
613 self._checkentries(fp, warn)
624 self._checkentries(fp, warn)
614 fp.close()
625 fp.close()
615
626
616 def _checkentries(self, fp, warn):
627 def _checkentries(self, fp, warn):
617 """ make sure there is no empty string in entries """
628 """ make sure there is no empty string in entries """
618 if b'' in self.entries:
629 if b'' in self.entries:
619 fp.seek(0)
630 fp.seek(0)
620 for n, line in enumerate(util.iterfile(fp)):
631 for n, line in enumerate(util.iterfile(fp)):
621 if not line.rstrip(b'\n'):
632 if not line.rstrip(b'\n'):
622 t = _(b'invalid entry in fncache, line %d') % (n + 1)
633 t = _(b'invalid entry in fncache, line %d') % (n + 1)
623 if warn:
634 if warn:
624 warn(t + b'\n')
635 warn(t + b'\n')
625 else:
636 else:
626 raise error.Abort(t)
637 raise error.Abort(t)
627
638
628 def write(self, tr):
639 def write(self, tr):
629 if self._dirty:
640 if self._dirty:
630 assert self.entries is not None
641 assert self.entries is not None
631 self.entries = self.entries | self.addls
642 self.entries = self.entries | self.addls
632 self.addls = set()
643 self.addls = set()
633 tr.addbackup(b'fncache')
644 tr.addbackup(b'fncache')
634 fp = self.vfs(b'fncache', mode=b'wb', atomictemp=True)
645 fp = self.vfs(b'fncache', mode=b'wb', atomictemp=True)
635 if self.entries:
646 if self.entries:
636 fp.write(encodedir(b'\n'.join(self.entries) + b'\n'))
647 fp.write(encodedir(b'\n'.join(self.entries) + b'\n'))
637 fp.close()
648 fp.close()
638 self._dirty = False
649 self._dirty = False
639 if self.addls:
650 if self.addls:
640 # if we have just new entries, let's append them to the fncache
651 # if we have just new entries, let's append them to the fncache
641 tr.addbackup(b'fncache')
652 tr.addbackup(b'fncache')
642 fp = self.vfs(b'fncache', mode=b'ab', atomictemp=True)
653 fp = self.vfs(b'fncache', mode=b'ab', atomictemp=True)
643 if self.addls:
654 if self.addls:
644 fp.write(encodedir(b'\n'.join(self.addls) + b'\n'))
655 fp.write(encodedir(b'\n'.join(self.addls) + b'\n'))
645 fp.close()
656 fp.close()
646 self.entries = None
657 self.entries = None
647 self.addls = set()
658 self.addls = set()
648
659
649 def add(self, fn):
660 def add(self, fn):
650 if self.entries is None:
661 if self.entries is None:
651 self._load()
662 self._load()
652 if fn not in self.entries:
663 if fn not in self.entries:
653 self.addls.add(fn)
664 self.addls.add(fn)
654
665
655 def remove(self, fn):
666 def remove(self, fn):
656 if self.entries is None:
667 if self.entries is None:
657 self._load()
668 self._load()
658 if fn in self.addls:
669 if fn in self.addls:
659 self.addls.remove(fn)
670 self.addls.remove(fn)
660 return
671 return
661 try:
672 try:
662 self.entries.remove(fn)
673 self.entries.remove(fn)
663 self._dirty = True
674 self._dirty = True
664 except KeyError:
675 except KeyError:
665 pass
676 pass
666
677
667 def __contains__(self, fn):
678 def __contains__(self, fn):
668 if fn in self.addls:
679 if fn in self.addls:
669 return True
680 return True
670 if self.entries is None:
681 if self.entries is None:
671 self._load()
682 self._load()
672 return fn in self.entries
683 return fn in self.entries
673
684
674 def __iter__(self):
685 def __iter__(self):
675 if self.entries is None:
686 if self.entries is None:
676 self._load()
687 self._load()
677 return iter(self.entries | self.addls)
688 return iter(self.entries | self.addls)
678
689
679
690
680 class _fncachevfs(vfsmod.proxyvfs):
691 class _fncachevfs(vfsmod.proxyvfs):
681 def __init__(self, vfs, fnc, encode):
692 def __init__(self, vfs, fnc, encode):
682 vfsmod.proxyvfs.__init__(self, vfs)
693 vfsmod.proxyvfs.__init__(self, vfs)
683 self.fncache = fnc
694 self.fncache = fnc
684 self.encode = encode
695 self.encode = encode
685
696
686 def __call__(self, path, mode=b'r', *args, **kw):
697 def __call__(self, path, mode=b'r', *args, **kw):
687 encoded = self.encode(path)
698 encoded = self.encode(path)
688 if mode not in (b'r', b'rb') and (
699 if mode not in (b'r', b'rb') and (
689 path.startswith(b'data/') or path.startswith(b'meta/')
700 path.startswith(b'data/') or path.startswith(b'meta/')
690 ):
701 ):
691 # do not trigger a fncache load when adding a file that already is
702 # do not trigger a fncache load when adding a file that already is
692 # known to exist.
703 # known to exist.
693 notload = self.fncache.entries is None and self.vfs.exists(encoded)
704 notload = self.fncache.entries is None and self.vfs.exists(encoded)
694 if notload and b'a' in mode and not self.vfs.stat(encoded).st_size:
705 if notload and b'a' in mode and not self.vfs.stat(encoded).st_size:
695 # when appending to an existing file, if the file has size zero,
706 # when appending to an existing file, if the file has size zero,
696 # it should be considered as missing. Such zero-size files are
707 # it should be considered as missing. Such zero-size files are
697 # the result of truncation when a transaction is aborted.
708 # the result of truncation when a transaction is aborted.
698 notload = False
709 notload = False
699 if not notload:
710 if not notload:
700 self.fncache.add(path)
711 self.fncache.add(path)
701 return self.vfs(encoded, mode, *args, **kw)
712 return self.vfs(encoded, mode, *args, **kw)
702
713
703 def join(self, path):
714 def join(self, path):
704 if path:
715 if path:
705 return self.vfs.join(self.encode(path))
716 return self.vfs.join(self.encode(path))
706 else:
717 else:
707 return self.vfs.join(path)
718 return self.vfs.join(path)
708
719
709
720
710 class fncachestore(basicstore):
721 class fncachestore(basicstore):
711 def __init__(self, path, vfstype, dotencode):
722 def __init__(self, path, vfstype, dotencode):
712 if dotencode:
723 if dotencode:
713 encode = _pathencode
724 encode = _pathencode
714 else:
725 else:
715 encode = _plainhybridencode
726 encode = _plainhybridencode
716 self.encode = encode
727 self.encode = encode
717 vfs = vfstype(path + b'/store')
728 vfs = vfstype(path + b'/store')
718 self.path = vfs.base
729 self.path = vfs.base
719 self.pathsep = self.path + b'/'
730 self.pathsep = self.path + b'/'
720 self.createmode = _calcmode(vfs)
731 self.createmode = _calcmode(vfs)
721 vfs.createmode = self.createmode
732 vfs.createmode = self.createmode
722 self.rawvfs = vfs
733 self.rawvfs = vfs
723 fnc = fncache(vfs)
734 fnc = fncache(vfs)
724 self.fncache = fnc
735 self.fncache = fnc
725 self.vfs = _fncachevfs(vfs, fnc, encode)
736 self.vfs = _fncachevfs(vfs, fnc, encode)
726 self.opener = self.vfs
737 self.opener = self.vfs
727
738
728 def join(self, f):
739 def join(self, f):
729 return self.pathsep + self.encode(f)
740 return self.pathsep + self.encode(f)
730
741
731 def getsize(self, path):
742 def getsize(self, path):
732 return self.rawvfs.stat(path).st_size
743 return self.rawvfs.stat(path).st_size
733
744
734 def datafiles(self, matcher=None):
745 def datafiles(self, matcher=None):
735 for f in sorted(self.fncache):
746 for f in sorted(self.fncache):
736 if not _matchtrackedpath(f, matcher):
747 if not _matchtrackedpath(f, matcher):
737 continue
748 continue
738 ef = self.encode(f)
749 ef = self.encode(f)
739 try:
750 try:
740 t = revlog_type(f)
751 t = revlog_type(f)
741 t |= FILEFLAGS_FILELOG
752 t |= FILEFLAGS_FILELOG
742 yield t, f, ef, self.getsize(ef)
753 yield t, f, ef, self.getsize(ef)
743 except OSError as err:
754 except OSError as err:
744 if err.errno != errno.ENOENT:
755 if err.errno != errno.ENOENT:
745 raise
756 raise
746
757
747 def copylist(self):
758 def copylist(self):
748 d = (
759 d = (
749 b'bookmarks',
760 b'bookmarks',
750 b'narrowspec',
761 b'narrowspec',
751 b'data',
762 b'data',
752 b'meta',
763 b'meta',
753 b'dh',
764 b'dh',
754 b'fncache',
765 b'fncache',
755 b'phaseroots',
766 b'phaseroots',
756 b'obsstore',
767 b'obsstore',
757 b'00manifest.d',
768 b'00manifest.d',
758 b'00manifest.i',
769 b'00manifest.i',
759 b'00changelog.d',
770 b'00changelog.d',
760 b'00changelog.i',
771 b'00changelog.i',
761 b'requires',
772 b'requires',
762 )
773 )
763 return [b'requires', b'00changelog.i'] + [b'store/' + f for f in d]
774 return [b'requires', b'00changelog.i'] + [b'store/' + f for f in d]
764
775
765 def write(self, tr):
776 def write(self, tr):
766 self.fncache.write(tr)
777 self.fncache.write(tr)
767
778
768 def invalidatecaches(self):
779 def invalidatecaches(self):
769 self.fncache.entries = None
780 self.fncache.entries = None
770 self.fncache.addls = set()
781 self.fncache.addls = set()
771
782
772 def markremoved(self, fn):
783 def markremoved(self, fn):
773 self.fncache.remove(fn)
784 self.fncache.remove(fn)
774
785
775 def _exists(self, f):
786 def _exists(self, f):
776 ef = self.encode(f)
787 ef = self.encode(f)
777 try:
788 try:
778 self.getsize(ef)
789 self.getsize(ef)
779 return True
790 return True
780 except OSError as err:
791 except OSError as err:
781 if err.errno != errno.ENOENT:
792 if err.errno != errno.ENOENT:
782 raise
793 raise
783 # nonexistent entry
794 # nonexistent entry
784 return False
795 return False
785
796
786 def __contains__(self, path):
797 def __contains__(self, path):
787 '''Checks if the store contains path'''
798 '''Checks if the store contains path'''
788 path = b"/".join((b"data", path))
799 path = b"/".join((b"data", path))
789 # check for files (exact match)
800 # check for files (exact match)
790 e = path + b'.i'
801 e = path + b'.i'
791 if e in self.fncache and self._exists(e):
802 if e in self.fncache and self._exists(e):
792 return True
803 return True
793 # now check for directories (prefix match)
804 # now check for directories (prefix match)
794 if not path.endswith(b'/'):
805 if not path.endswith(b'/'):
795 path += b'/'
806 path += b'/'
796 for e in self.fncache:
807 for e in self.fncache:
797 if e.startswith(path) and self._exists(e):
808 if e.startswith(path) and self._exists(e):
798 return True
809 return True
799 return False
810 return False
@@ -1,747 +1,750 b''
1 # streamclone.py - producing and consuming streaming repository data
1 # streamclone.py - producing and consuming streaming repository data
2 #
2 #
3 # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import contextlib
10 import contextlib
11 import os
11 import os
12 import struct
12 import struct
13
13
14 from .i18n import _
14 from .i18n import _
15 from .pycompat import open
15 from .pycompat import open
16 from .interfaces import repository
16 from .interfaces import repository
17 from . import (
17 from . import (
18 cacheutil,
18 cacheutil,
19 error,
19 error,
20 narrowspec,
20 narrowspec,
21 phases,
21 phases,
22 pycompat,
22 pycompat,
23 requirements as requirementsmod,
23 requirements as requirementsmod,
24 scmutil,
24 scmutil,
25 store,
25 store,
26 util,
26 util,
27 )
27 )
28
28
29
29
30 def canperformstreamclone(pullop, bundle2=False):
30 def canperformstreamclone(pullop, bundle2=False):
31 """Whether it is possible to perform a streaming clone as part of pull.
31 """Whether it is possible to perform a streaming clone as part of pull.
32
32
33 ``bundle2`` will cause the function to consider stream clone through
33 ``bundle2`` will cause the function to consider stream clone through
34 bundle2 and only through bundle2.
34 bundle2 and only through bundle2.
35
35
36 Returns a tuple of (supported, requirements). ``supported`` is True if
36 Returns a tuple of (supported, requirements). ``supported`` is True if
37 streaming clone is supported and False otherwise. ``requirements`` is
37 streaming clone is supported and False otherwise. ``requirements`` is
38 a set of repo requirements from the remote, or ``None`` if stream clone
38 a set of repo requirements from the remote, or ``None`` if stream clone
39 isn't supported.
39 isn't supported.
40 """
40 """
41 repo = pullop.repo
41 repo = pullop.repo
42 remote = pullop.remote
42 remote = pullop.remote
43
43
44 bundle2supported = False
44 bundle2supported = False
45 if pullop.canusebundle2:
45 if pullop.canusebundle2:
46 if b'v2' in pullop.remotebundle2caps.get(b'stream', []):
46 if b'v2' in pullop.remotebundle2caps.get(b'stream', []):
47 bundle2supported = True
47 bundle2supported = True
48 # else
48 # else
49 # Server doesn't support bundle2 stream clone or doesn't support
49 # Server doesn't support bundle2 stream clone or doesn't support
50 # the versions we support. Fall back and possibly allow legacy.
50 # the versions we support. Fall back and possibly allow legacy.
51
51
52 # Ensures legacy code path uses available bundle2.
52 # Ensures legacy code path uses available bundle2.
53 if bundle2supported and not bundle2:
53 if bundle2supported and not bundle2:
54 return False, None
54 return False, None
55 # Ensures bundle2 doesn't try to do a stream clone if it isn't supported.
55 # Ensures bundle2 doesn't try to do a stream clone if it isn't supported.
56 elif bundle2 and not bundle2supported:
56 elif bundle2 and not bundle2supported:
57 return False, None
57 return False, None
58
58
59 # Streaming clone only works on empty repositories.
59 # Streaming clone only works on empty repositories.
60 if len(repo):
60 if len(repo):
61 return False, None
61 return False, None
62
62
63 # Streaming clone only works if all data is being requested.
63 # Streaming clone only works if all data is being requested.
64 if pullop.heads:
64 if pullop.heads:
65 return False, None
65 return False, None
66
66
67 streamrequested = pullop.streamclonerequested
67 streamrequested = pullop.streamclonerequested
68
68
69 # If we don't have a preference, let the server decide for us. This
69 # If we don't have a preference, let the server decide for us. This
70 # likely only comes into play in LANs.
70 # likely only comes into play in LANs.
71 if streamrequested is None:
71 if streamrequested is None:
72 # The server can advertise whether to prefer streaming clone.
72 # The server can advertise whether to prefer streaming clone.
73 streamrequested = remote.capable(b'stream-preferred')
73 streamrequested = remote.capable(b'stream-preferred')
74
74
75 if not streamrequested:
75 if not streamrequested:
76 return False, None
76 return False, None
77
77
78 # In order for stream clone to work, the client has to support all the
78 # In order for stream clone to work, the client has to support all the
79 # requirements advertised by the server.
79 # requirements advertised by the server.
80 #
80 #
81 # The server advertises its requirements via the "stream" and "streamreqs"
81 # The server advertises its requirements via the "stream" and "streamreqs"
82 # capability. "stream" (a value-less capability) is advertised if and only
82 # capability. "stream" (a value-less capability) is advertised if and only
83 # if the only requirement is "revlogv1." Else, the "streamreqs" capability
83 # if the only requirement is "revlogv1." Else, the "streamreqs" capability
84 # is advertised and contains a comma-delimited list of requirements.
84 # is advertised and contains a comma-delimited list of requirements.
85 requirements = set()
85 requirements = set()
86 if remote.capable(b'stream'):
86 if remote.capable(b'stream'):
87 requirements.add(requirementsmod.REVLOGV1_REQUIREMENT)
87 requirements.add(requirementsmod.REVLOGV1_REQUIREMENT)
88 else:
88 else:
89 streamreqs = remote.capable(b'streamreqs')
89 streamreqs = remote.capable(b'streamreqs')
90 # This is weird and shouldn't happen with modern servers.
90 # This is weird and shouldn't happen with modern servers.
91 if not streamreqs:
91 if not streamreqs:
92 pullop.repo.ui.warn(
92 pullop.repo.ui.warn(
93 _(
93 _(
94 b'warning: stream clone requested but server has them '
94 b'warning: stream clone requested but server has them '
95 b'disabled\n'
95 b'disabled\n'
96 )
96 )
97 )
97 )
98 return False, None
98 return False, None
99
99
100 streamreqs = set(streamreqs.split(b','))
100 streamreqs = set(streamreqs.split(b','))
101 # Server requires something we don't support. Bail.
101 # Server requires something we don't support. Bail.
102 missingreqs = streamreqs - repo.supportedformats
102 missingreqs = streamreqs - repo.supportedformats
103 if missingreqs:
103 if missingreqs:
104 pullop.repo.ui.warn(
104 pullop.repo.ui.warn(
105 _(
105 _(
106 b'warning: stream clone requested but client is missing '
106 b'warning: stream clone requested but client is missing '
107 b'requirements: %s\n'
107 b'requirements: %s\n'
108 )
108 )
109 % b', '.join(sorted(missingreqs))
109 % b', '.join(sorted(missingreqs))
110 )
110 )
111 pullop.repo.ui.warn(
111 pullop.repo.ui.warn(
112 _(
112 _(
113 b'(see https://www.mercurial-scm.org/wiki/MissingRequirement '
113 b'(see https://www.mercurial-scm.org/wiki/MissingRequirement '
114 b'for more information)\n'
114 b'for more information)\n'
115 )
115 )
116 )
116 )
117 return False, None
117 return False, None
118 requirements = streamreqs
118 requirements = streamreqs
119
119
120 return True, requirements
120 return True, requirements
121
121
122
122
123 def maybeperformlegacystreamclone(pullop):
123 def maybeperformlegacystreamclone(pullop):
124 """Possibly perform a legacy stream clone operation.
124 """Possibly perform a legacy stream clone operation.
125
125
126 Legacy stream clones are performed as part of pull but before all other
126 Legacy stream clones are performed as part of pull but before all other
127 operations.
127 operations.
128
128
129 A legacy stream clone will not be performed if a bundle2 stream clone is
129 A legacy stream clone will not be performed if a bundle2 stream clone is
130 supported.
130 supported.
131 """
131 """
132 from . import localrepo
132 from . import localrepo
133
133
134 supported, requirements = canperformstreamclone(pullop)
134 supported, requirements = canperformstreamclone(pullop)
135
135
136 if not supported:
136 if not supported:
137 return
137 return
138
138
139 repo = pullop.repo
139 repo = pullop.repo
140 remote = pullop.remote
140 remote = pullop.remote
141
141
142 # Save remote branchmap. We will use it later to speed up branchcache
142 # Save remote branchmap. We will use it later to speed up branchcache
143 # creation.
143 # creation.
144 rbranchmap = None
144 rbranchmap = None
145 if remote.capable(b'branchmap'):
145 if remote.capable(b'branchmap'):
146 with remote.commandexecutor() as e:
146 with remote.commandexecutor() as e:
147 rbranchmap = e.callcommand(b'branchmap', {}).result()
147 rbranchmap = e.callcommand(b'branchmap', {}).result()
148
148
149 repo.ui.status(_(b'streaming all changes\n'))
149 repo.ui.status(_(b'streaming all changes\n'))
150
150
151 with remote.commandexecutor() as e:
151 with remote.commandexecutor() as e:
152 fp = e.callcommand(b'stream_out', {}).result()
152 fp = e.callcommand(b'stream_out', {}).result()
153
153
154 # TODO strictly speaking, this code should all be inside the context
154 # TODO strictly speaking, this code should all be inside the context
155 # manager because the context manager is supposed to ensure all wire state
155 # manager because the context manager is supposed to ensure all wire state
156 # is flushed when exiting. But the legacy peers don't do this, so it
156 # is flushed when exiting. But the legacy peers don't do this, so it
157 # doesn't matter.
157 # doesn't matter.
158 l = fp.readline()
158 l = fp.readline()
159 try:
159 try:
160 resp = int(l)
160 resp = int(l)
161 except ValueError:
161 except ValueError:
162 raise error.ResponseError(
162 raise error.ResponseError(
163 _(b'unexpected response from remote server:'), l
163 _(b'unexpected response from remote server:'), l
164 )
164 )
165 if resp == 1:
165 if resp == 1:
166 raise error.Abort(_(b'operation forbidden by server'))
166 raise error.Abort(_(b'operation forbidden by server'))
167 elif resp == 2:
167 elif resp == 2:
168 raise error.Abort(_(b'locking the remote repository failed'))
168 raise error.Abort(_(b'locking the remote repository failed'))
169 elif resp != 0:
169 elif resp != 0:
170 raise error.Abort(_(b'the server sent an unknown error code'))
170 raise error.Abort(_(b'the server sent an unknown error code'))
171
171
172 l = fp.readline()
172 l = fp.readline()
173 try:
173 try:
174 filecount, bytecount = map(int, l.split(b' ', 1))
174 filecount, bytecount = map(int, l.split(b' ', 1))
175 except (ValueError, TypeError):
175 except (ValueError, TypeError):
176 raise error.ResponseError(
176 raise error.ResponseError(
177 _(b'unexpected response from remote server:'), l
177 _(b'unexpected response from remote server:'), l
178 )
178 )
179
179
180 with repo.lock():
180 with repo.lock():
181 consumev1(repo, fp, filecount, bytecount)
181 consumev1(repo, fp, filecount, bytecount)
182
182
183 # new requirements = old non-format requirements +
183 # new requirements = old non-format requirements +
184 # new format-related remote requirements
184 # new format-related remote requirements
185 # requirements from the streamed-in repository
185 # requirements from the streamed-in repository
186 repo.requirements = requirements | (
186 repo.requirements = requirements | (
187 repo.requirements - repo.supportedformats
187 repo.requirements - repo.supportedformats
188 )
188 )
189 repo.svfs.options = localrepo.resolvestorevfsoptions(
189 repo.svfs.options = localrepo.resolvestorevfsoptions(
190 repo.ui, repo.requirements, repo.features
190 repo.ui, repo.requirements, repo.features
191 )
191 )
192 scmutil.writereporequirements(repo)
192 scmutil.writereporequirements(repo)
193
193
194 if rbranchmap:
194 if rbranchmap:
195 repo._branchcaches.replace(repo, rbranchmap)
195 repo._branchcaches.replace(repo, rbranchmap)
196
196
197 repo.invalidate()
197 repo.invalidate()
198
198
199
199
200 def allowservergeneration(repo):
200 def allowservergeneration(repo):
201 """Whether streaming clones are allowed from the server."""
201 """Whether streaming clones are allowed from the server."""
202 if repository.REPO_FEATURE_STREAM_CLONE not in repo.features:
202 if repository.REPO_FEATURE_STREAM_CLONE not in repo.features:
203 return False
203 return False
204
204
205 if not repo.ui.configbool(b'server', b'uncompressed', untrusted=True):
205 if not repo.ui.configbool(b'server', b'uncompressed', untrusted=True):
206 return False
206 return False
207
207
208 # The way stream clone works makes it impossible to hide secret changesets.
208 # The way stream clone works makes it impossible to hide secret changesets.
209 # So don't allow this by default.
209 # So don't allow this by default.
210 secret = phases.hassecret(repo)
210 secret = phases.hassecret(repo)
211 if secret:
211 if secret:
212 return repo.ui.configbool(b'server', b'uncompressedallowsecret')
212 return repo.ui.configbool(b'server', b'uncompressedallowsecret')
213
213
214 return True
214 return True
215
215
216
216
217 # This is it's own function so extensions can override it.
217 # This is it's own function so extensions can override it.
218 def _walkstreamfiles(repo, matcher=None):
218 def _walkstreamfiles(repo, matcher=None):
219 return repo.store.walk(matcher)
219 return repo.store.walk(matcher)
220
220
221
221
222 def generatev1(repo):
222 def generatev1(repo):
223 """Emit content for version 1 of a streaming clone.
223 """Emit content for version 1 of a streaming clone.
224
224
225 This returns a 3-tuple of (file count, byte size, data iterator).
225 This returns a 3-tuple of (file count, byte size, data iterator).
226
226
227 The data iterator consists of N entries for each file being transferred.
227 The data iterator consists of N entries for each file being transferred.
228 Each file entry starts as a line with the file name and integer size
228 Each file entry starts as a line with the file name and integer size
229 delimited by a null byte.
229 delimited by a null byte.
230
230
231 The raw file data follows. Following the raw file data is the next file
231 The raw file data follows. Following the raw file data is the next file
232 entry, or EOF.
232 entry, or EOF.
233
233
234 When used on the wire protocol, an additional line indicating protocol
234 When used on the wire protocol, an additional line indicating protocol
235 success will be prepended to the stream. This function is not responsible
235 success will be prepended to the stream. This function is not responsible
236 for adding it.
236 for adding it.
237
237
238 This function will obtain a repository lock to ensure a consistent view of
238 This function will obtain a repository lock to ensure a consistent view of
239 the store is captured. It therefore may raise LockError.
239 the store is captured. It therefore may raise LockError.
240 """
240 """
241 entries = []
241 entries = []
242 total_bytes = 0
242 total_bytes = 0
243 # Get consistent snapshot of repo, lock during scan.
243 # Get consistent snapshot of repo, lock during scan.
244 with repo.lock():
244 with repo.lock():
245 repo.ui.debug(b'scanning\n')
245 repo.ui.debug(b'scanning\n')
246 for file_type, name, ename, size in _walkstreamfiles(repo):
246 for file_type, name, ename, size in _walkstreamfiles(repo):
247 if size:
247 if size:
248 entries.append((name, size))
248 entries.append((name, size))
249 total_bytes += size
249 total_bytes += size
250 _test_sync_point_walk_1(repo)
250 _test_sync_point_walk_1(repo)
251 _test_sync_point_walk_2(repo)
251 _test_sync_point_walk_2(repo)
252
252
253 repo.ui.debug(
253 repo.ui.debug(
254 b'%d files, %d bytes to transfer\n' % (len(entries), total_bytes)
254 b'%d files, %d bytes to transfer\n' % (len(entries), total_bytes)
255 )
255 )
256
256
257 svfs = repo.svfs
257 svfs = repo.svfs
258 debugflag = repo.ui.debugflag
258 debugflag = repo.ui.debugflag
259
259
260 def emitrevlogdata():
260 def emitrevlogdata():
261 for name, size in entries:
261 for name, size in entries:
262 if debugflag:
262 if debugflag:
263 repo.ui.debug(b'sending %s (%d bytes)\n' % (name, size))
263 repo.ui.debug(b'sending %s (%d bytes)\n' % (name, size))
264 # partially encode name over the wire for backwards compat
264 # partially encode name over the wire for backwards compat
265 yield b'%s\0%d\n' % (store.encodedir(name), size)
265 yield b'%s\0%d\n' % (store.encodedir(name), size)
266 # auditing at this stage is both pointless (paths are already
266 # auditing at this stage is both pointless (paths are already
267 # trusted by the local repo) and expensive
267 # trusted by the local repo) and expensive
268 with svfs(name, b'rb', auditpath=False) as fp:
268 with svfs(name, b'rb', auditpath=False) as fp:
269 if size <= 65536:
269 if size <= 65536:
270 yield fp.read(size)
270 yield fp.read(size)
271 else:
271 else:
272 for chunk in util.filechunkiter(fp, limit=size):
272 for chunk in util.filechunkiter(fp, limit=size):
273 yield chunk
273 yield chunk
274
274
275 return len(entries), total_bytes, emitrevlogdata()
275 return len(entries), total_bytes, emitrevlogdata()
276
276
277
277
278 def generatev1wireproto(repo):
278 def generatev1wireproto(repo):
279 """Emit content for version 1 of streaming clone suitable for the wire.
279 """Emit content for version 1 of streaming clone suitable for the wire.
280
280
281 This is the data output from ``generatev1()`` with 2 header lines. The
281 This is the data output from ``generatev1()`` with 2 header lines. The
282 first line indicates overall success. The 2nd contains the file count and
282 first line indicates overall success. The 2nd contains the file count and
283 byte size of payload.
283 byte size of payload.
284
284
285 The success line contains "0" for success, "1" for stream generation not
285 The success line contains "0" for success, "1" for stream generation not
286 allowed, and "2" for error locking the repository (possibly indicating
286 allowed, and "2" for error locking the repository (possibly indicating
287 a permissions error for the server process).
287 a permissions error for the server process).
288 """
288 """
289 if not allowservergeneration(repo):
289 if not allowservergeneration(repo):
290 yield b'1\n'
290 yield b'1\n'
291 return
291 return
292
292
293 try:
293 try:
294 filecount, bytecount, it = generatev1(repo)
294 filecount, bytecount, it = generatev1(repo)
295 except error.LockError:
295 except error.LockError:
296 yield b'2\n'
296 yield b'2\n'
297 return
297 return
298
298
299 # Indicates successful response.
299 # Indicates successful response.
300 yield b'0\n'
300 yield b'0\n'
301 yield b'%d %d\n' % (filecount, bytecount)
301 yield b'%d %d\n' % (filecount, bytecount)
302 for chunk in it:
302 for chunk in it:
303 yield chunk
303 yield chunk
304
304
305
305
306 def generatebundlev1(repo, compression=b'UN'):
306 def generatebundlev1(repo, compression=b'UN'):
307 """Emit content for version 1 of a stream clone bundle.
307 """Emit content for version 1 of a stream clone bundle.
308
308
309 The first 4 bytes of the output ("HGS1") denote this as stream clone
309 The first 4 bytes of the output ("HGS1") denote this as stream clone
310 bundle version 1.
310 bundle version 1.
311
311
312 The next 2 bytes indicate the compression type. Only "UN" is currently
312 The next 2 bytes indicate the compression type. Only "UN" is currently
313 supported.
313 supported.
314
314
315 The next 16 bytes are two 64-bit big endian unsigned integers indicating
315 The next 16 bytes are two 64-bit big endian unsigned integers indicating
316 file count and byte count, respectively.
316 file count and byte count, respectively.
317
317
318 The next 2 bytes is a 16-bit big endian unsigned short declaring the length
318 The next 2 bytes is a 16-bit big endian unsigned short declaring the length
319 of the requirements string, including a trailing \0. The following N bytes
319 of the requirements string, including a trailing \0. The following N bytes
320 are the requirements string, which is ASCII containing a comma-delimited
320 are the requirements string, which is ASCII containing a comma-delimited
321 list of repo requirements that are needed to support the data.
321 list of repo requirements that are needed to support the data.
322
322
323 The remaining content is the output of ``generatev1()`` (which may be
323 The remaining content is the output of ``generatev1()`` (which may be
324 compressed in the future).
324 compressed in the future).
325
325
326 Returns a tuple of (requirements, data generator).
326 Returns a tuple of (requirements, data generator).
327 """
327 """
328 if compression != b'UN':
328 if compression != b'UN':
329 raise ValueError(b'we do not support the compression argument yet')
329 raise ValueError(b'we do not support the compression argument yet')
330
330
331 requirements = repo.requirements & repo.supportedformats
331 requirements = repo.requirements & repo.supportedformats
332 requires = b','.join(sorted(requirements))
332 requires = b','.join(sorted(requirements))
333
333
334 def gen():
334 def gen():
335 yield b'HGS1'
335 yield b'HGS1'
336 yield compression
336 yield compression
337
337
338 filecount, bytecount, it = generatev1(repo)
338 filecount, bytecount, it = generatev1(repo)
339 repo.ui.status(
339 repo.ui.status(
340 _(b'writing %d bytes for %d files\n') % (bytecount, filecount)
340 _(b'writing %d bytes for %d files\n') % (bytecount, filecount)
341 )
341 )
342
342
343 yield struct.pack(b'>QQ', filecount, bytecount)
343 yield struct.pack(b'>QQ', filecount, bytecount)
344 yield struct.pack(b'>H', len(requires) + 1)
344 yield struct.pack(b'>H', len(requires) + 1)
345 yield requires + b'\0'
345 yield requires + b'\0'
346
346
347 # This is where we'll add compression in the future.
347 # This is where we'll add compression in the future.
348 assert compression == b'UN'
348 assert compression == b'UN'
349
349
350 progress = repo.ui.makeprogress(
350 progress = repo.ui.makeprogress(
351 _(b'bundle'), total=bytecount, unit=_(b'bytes')
351 _(b'bundle'), total=bytecount, unit=_(b'bytes')
352 )
352 )
353 progress.update(0)
353 progress.update(0)
354
354
355 for chunk in it:
355 for chunk in it:
356 progress.increment(step=len(chunk))
356 progress.increment(step=len(chunk))
357 yield chunk
357 yield chunk
358
358
359 progress.complete()
359 progress.complete()
360
360
361 return requirements, gen()
361 return requirements, gen()
362
362
363
363
364 def consumev1(repo, fp, filecount, bytecount):
364 def consumev1(repo, fp, filecount, bytecount):
365 """Apply the contents from version 1 of a streaming clone file handle.
365 """Apply the contents from version 1 of a streaming clone file handle.
366
366
367 This takes the output from "stream_out" and applies it to the specified
367 This takes the output from "stream_out" and applies it to the specified
368 repository.
368 repository.
369
369
370 Like "stream_out," the status line added by the wire protocol is not
370 Like "stream_out," the status line added by the wire protocol is not
371 handled by this function.
371 handled by this function.
372 """
372 """
373 with repo.lock():
373 with repo.lock():
374 repo.ui.status(
374 repo.ui.status(
375 _(b'%d files to transfer, %s of data\n')
375 _(b'%d files to transfer, %s of data\n')
376 % (filecount, util.bytecount(bytecount))
376 % (filecount, util.bytecount(bytecount))
377 )
377 )
378 progress = repo.ui.makeprogress(
378 progress = repo.ui.makeprogress(
379 _(b'clone'), total=bytecount, unit=_(b'bytes')
379 _(b'clone'), total=bytecount, unit=_(b'bytes')
380 )
380 )
381 progress.update(0)
381 progress.update(0)
382 start = util.timer()
382 start = util.timer()
383
383
384 # TODO: get rid of (potential) inconsistency
384 # TODO: get rid of (potential) inconsistency
385 #
385 #
386 # If transaction is started and any @filecache property is
386 # If transaction is started and any @filecache property is
387 # changed at this point, it causes inconsistency between
387 # changed at this point, it causes inconsistency between
388 # in-memory cached property and streamclone-ed file on the
388 # in-memory cached property and streamclone-ed file on the
389 # disk. Nested transaction prevents transaction scope "clone"
389 # disk. Nested transaction prevents transaction scope "clone"
390 # below from writing in-memory changes out at the end of it,
390 # below from writing in-memory changes out at the end of it,
391 # even though in-memory changes are discarded at the end of it
391 # even though in-memory changes are discarded at the end of it
392 # regardless of transaction nesting.
392 # regardless of transaction nesting.
393 #
393 #
394 # But transaction nesting can't be simply prohibited, because
394 # But transaction nesting can't be simply prohibited, because
395 # nesting occurs also in ordinary case (e.g. enabling
395 # nesting occurs also in ordinary case (e.g. enabling
396 # clonebundles).
396 # clonebundles).
397
397
398 with repo.transaction(b'clone'):
398 with repo.transaction(b'clone'):
399 with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount):
399 with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount):
400 for i in pycompat.xrange(filecount):
400 for i in pycompat.xrange(filecount):
401 # XXX doesn't support '\n' or '\r' in filenames
401 # XXX doesn't support '\n' or '\r' in filenames
402 l = fp.readline()
402 l = fp.readline()
403 try:
403 try:
404 name, size = l.split(b'\0', 1)
404 name, size = l.split(b'\0', 1)
405 size = int(size)
405 size = int(size)
406 except (ValueError, TypeError):
406 except (ValueError, TypeError):
407 raise error.ResponseError(
407 raise error.ResponseError(
408 _(b'unexpected response from remote server:'), l
408 _(b'unexpected response from remote server:'), l
409 )
409 )
410 if repo.ui.debugflag:
410 if repo.ui.debugflag:
411 repo.ui.debug(
411 repo.ui.debug(
412 b'adding %s (%s)\n' % (name, util.bytecount(size))
412 b'adding %s (%s)\n' % (name, util.bytecount(size))
413 )
413 )
414 # for backwards compat, name was partially encoded
414 # for backwards compat, name was partially encoded
415 path = store.decodedir(name)
415 path = store.decodedir(name)
416 with repo.svfs(path, b'w', backgroundclose=True) as ofp:
416 with repo.svfs(path, b'w', backgroundclose=True) as ofp:
417 for chunk in util.filechunkiter(fp, limit=size):
417 for chunk in util.filechunkiter(fp, limit=size):
418 progress.increment(step=len(chunk))
418 progress.increment(step=len(chunk))
419 ofp.write(chunk)
419 ofp.write(chunk)
420
420
421 # force @filecache properties to be reloaded from
421 # force @filecache properties to be reloaded from
422 # streamclone-ed file at next access
422 # streamclone-ed file at next access
423 repo.invalidate(clearfilecache=True)
423 repo.invalidate(clearfilecache=True)
424
424
425 elapsed = util.timer() - start
425 elapsed = util.timer() - start
426 if elapsed <= 0:
426 if elapsed <= 0:
427 elapsed = 0.001
427 elapsed = 0.001
428 progress.complete()
428 progress.complete()
429 repo.ui.status(
429 repo.ui.status(
430 _(b'transferred %s in %.1f seconds (%s/sec)\n')
430 _(b'transferred %s in %.1f seconds (%s/sec)\n')
431 % (
431 % (
432 util.bytecount(bytecount),
432 util.bytecount(bytecount),
433 elapsed,
433 elapsed,
434 util.bytecount(bytecount / elapsed),
434 util.bytecount(bytecount / elapsed),
435 )
435 )
436 )
436 )
437
437
438
438
439 def readbundle1header(fp):
439 def readbundle1header(fp):
440 compression = fp.read(2)
440 compression = fp.read(2)
441 if compression != b'UN':
441 if compression != b'UN':
442 raise error.Abort(
442 raise error.Abort(
443 _(
443 _(
444 b'only uncompressed stream clone bundles are '
444 b'only uncompressed stream clone bundles are '
445 b'supported; got %s'
445 b'supported; got %s'
446 )
446 )
447 % compression
447 % compression
448 )
448 )
449
449
450 filecount, bytecount = struct.unpack(b'>QQ', fp.read(16))
450 filecount, bytecount = struct.unpack(b'>QQ', fp.read(16))
451 requireslen = struct.unpack(b'>H', fp.read(2))[0]
451 requireslen = struct.unpack(b'>H', fp.read(2))[0]
452 requires = fp.read(requireslen)
452 requires = fp.read(requireslen)
453
453
454 if not requires.endswith(b'\0'):
454 if not requires.endswith(b'\0'):
455 raise error.Abort(
455 raise error.Abort(
456 _(
456 _(
457 b'malformed stream clone bundle: '
457 b'malformed stream clone bundle: '
458 b'requirements not properly encoded'
458 b'requirements not properly encoded'
459 )
459 )
460 )
460 )
461
461
462 requirements = set(requires.rstrip(b'\0').split(b','))
462 requirements = set(requires.rstrip(b'\0').split(b','))
463
463
464 return filecount, bytecount, requirements
464 return filecount, bytecount, requirements
465
465
466
466
467 def applybundlev1(repo, fp):
467 def applybundlev1(repo, fp):
468 """Apply the content from a stream clone bundle version 1.
468 """Apply the content from a stream clone bundle version 1.
469
469
470 We assume the 4 byte header has been read and validated and the file handle
470 We assume the 4 byte header has been read and validated and the file handle
471 is at the 2 byte compression identifier.
471 is at the 2 byte compression identifier.
472 """
472 """
473 if len(repo):
473 if len(repo):
474 raise error.Abort(
474 raise error.Abort(
475 _(b'cannot apply stream clone bundle on non-empty repo')
475 _(b'cannot apply stream clone bundle on non-empty repo')
476 )
476 )
477
477
478 filecount, bytecount, requirements = readbundle1header(fp)
478 filecount, bytecount, requirements = readbundle1header(fp)
479 missingreqs = requirements - repo.supportedformats
479 missingreqs = requirements - repo.supportedformats
480 if missingreqs:
480 if missingreqs:
481 raise error.Abort(
481 raise error.Abort(
482 _(b'unable to apply stream clone: unsupported format: %s')
482 _(b'unable to apply stream clone: unsupported format: %s')
483 % b', '.join(sorted(missingreqs))
483 % b', '.join(sorted(missingreqs))
484 )
484 )
485
485
486 consumev1(repo, fp, filecount, bytecount)
486 consumev1(repo, fp, filecount, bytecount)
487
487
488
488
489 class streamcloneapplier(object):
489 class streamcloneapplier(object):
490 """Class to manage applying streaming clone bundles.
490 """Class to manage applying streaming clone bundles.
491
491
492 We need to wrap ``applybundlev1()`` in a dedicated type to enable bundle
492 We need to wrap ``applybundlev1()`` in a dedicated type to enable bundle
493 readers to perform bundle type-specific functionality.
493 readers to perform bundle type-specific functionality.
494 """
494 """
495
495
496 def __init__(self, fh):
496 def __init__(self, fh):
497 self._fh = fh
497 self._fh = fh
498
498
499 def apply(self, repo):
499 def apply(self, repo):
500 return applybundlev1(repo, self._fh)
500 return applybundlev1(repo, self._fh)
501
501
502
502
503 # type of file to stream
503 # type of file to stream
504 _fileappend = 0 # append only file
504 _fileappend = 0 # append only file
505 _filefull = 1 # full snapshot file
505 _filefull = 1 # full snapshot file
506
506
507 # Source of the file
507 # Source of the file
508 _srcstore = b's' # store (svfs)
508 _srcstore = b's' # store (svfs)
509 _srccache = b'c' # cache (cache)
509 _srccache = b'c' # cache (cache)
510
510
511 # This is it's own function so extensions can override it.
511 # This is it's own function so extensions can override it.
512 def _walkstreamfullstorefiles(repo):
512 def _walkstreamfullstorefiles(repo):
513 """list snapshot file from the store"""
513 """list snapshot file from the store"""
514 fnames = []
514 fnames = []
515 if not repo.publishing():
515 if not repo.publishing():
516 fnames.append(b'phaseroots')
516 fnames.append(b'phaseroots')
517 return fnames
517 return fnames
518
518
519
519
520 def _filterfull(entry, copy, vfsmap):
520 def _filterfull(entry, copy, vfsmap):
521 """actually copy the snapshot files"""
521 """actually copy the snapshot files"""
522 src, name, ftype, data = entry
522 src, name, ftype, data = entry
523 if ftype != _filefull:
523 if ftype != _filefull:
524 return entry
524 return entry
525 return (src, name, ftype, copy(vfsmap[src].join(name)))
525 return (src, name, ftype, copy(vfsmap[src].join(name)))
526
526
527
527
528 @contextlib.contextmanager
528 @contextlib.contextmanager
529 def maketempcopies():
529 def maketempcopies():
530 """return a function to temporary copy file"""
530 """return a function to temporary copy file"""
531 files = []
531 files = []
532 try:
532 try:
533
533
534 def copy(src):
534 def copy(src):
535 fd, dst = pycompat.mkstemp()
535 fd, dst = pycompat.mkstemp()
536 os.close(fd)
536 os.close(fd)
537 files.append(dst)
537 files.append(dst)
538 util.copyfiles(src, dst, hardlink=True)
538 util.copyfiles(src, dst, hardlink=True)
539 return dst
539 return dst
540
540
541 yield copy
541 yield copy
542 finally:
542 finally:
543 for tmp in files:
543 for tmp in files:
544 util.tryunlink(tmp)
544 util.tryunlink(tmp)
545
545
546
546
547 def _makemap(repo):
547 def _makemap(repo):
548 """make a (src -> vfs) map for the repo"""
548 """make a (src -> vfs) map for the repo"""
549 vfsmap = {
549 vfsmap = {
550 _srcstore: repo.svfs,
550 _srcstore: repo.svfs,
551 _srccache: repo.cachevfs,
551 _srccache: repo.cachevfs,
552 }
552 }
553 # we keep repo.vfs out of the on purpose, ther are too many danger there
553 # we keep repo.vfs out of the on purpose, ther are too many danger there
554 # (eg: .hg/hgrc)
554 # (eg: .hg/hgrc)
555 assert repo.vfs not in vfsmap.values()
555 assert repo.vfs not in vfsmap.values()
556
556
557 return vfsmap
557 return vfsmap
558
558
559
559
560 def _emit2(repo, entries, totalfilesize):
560 def _emit2(repo, entries, totalfilesize):
561 """actually emit the stream bundle"""
561 """actually emit the stream bundle"""
562 vfsmap = _makemap(repo)
562 vfsmap = _makemap(repo)
563 progress = repo.ui.makeprogress(
563 progress = repo.ui.makeprogress(
564 _(b'bundle'), total=totalfilesize, unit=_(b'bytes')
564 _(b'bundle'), total=totalfilesize, unit=_(b'bytes')
565 )
565 )
566 progress.update(0)
566 progress.update(0)
567 with maketempcopies() as copy, progress:
567 with maketempcopies() as copy, progress:
568 # copy is delayed until we are in the try
568 # copy is delayed until we are in the try
569 entries = [_filterfull(e, copy, vfsmap) for e in entries]
569 entries = [_filterfull(e, copy, vfsmap) for e in entries]
570 yield None # this release the lock on the repository
570 yield None # this release the lock on the repository
571 seen = 0
571 seen = 0
572
572
573 for src, name, ftype, data in entries:
573 for src, name, ftype, data in entries:
574 vfs = vfsmap[src]
574 vfs = vfsmap[src]
575 yield src
575 yield src
576 yield util.uvarintencode(len(name))
576 yield util.uvarintencode(len(name))
577 if ftype == _fileappend:
577 if ftype == _fileappend:
578 fp = vfs(name)
578 fp = vfs(name)
579 size = data
579 size = data
580 elif ftype == _filefull:
580 elif ftype == _filefull:
581 fp = open(data, b'rb')
581 fp = open(data, b'rb')
582 size = util.fstat(fp).st_size
582 size = util.fstat(fp).st_size
583 try:
583 try:
584 yield util.uvarintencode(size)
584 yield util.uvarintencode(size)
585 yield name
585 yield name
586 if size <= 65536:
586 if size <= 65536:
587 chunks = (fp.read(size),)
587 chunks = (fp.read(size),)
588 else:
588 else:
589 chunks = util.filechunkiter(fp, limit=size)
589 chunks = util.filechunkiter(fp, limit=size)
590 for chunk in chunks:
590 for chunk in chunks:
591 seen += len(chunk)
591 seen += len(chunk)
592 progress.update(seen)
592 progress.update(seen)
593 yield chunk
593 yield chunk
594 finally:
594 finally:
595 fp.close()
595 fp.close()
596
596
597
597
598 def _test_sync_point_walk_1(repo):
598 def _test_sync_point_walk_1(repo):
599 """a function for synchronisation during tests"""
599 """a function for synchronisation during tests"""
600
600
601
601
602 def _test_sync_point_walk_2(repo):
602 def _test_sync_point_walk_2(repo):
603 """a function for synchronisation during tests"""
603 """a function for synchronisation during tests"""
604
604
605
605
606 def generatev2(repo, includes, excludes, includeobsmarkers):
606 def generatev2(repo, includes, excludes, includeobsmarkers):
607 """Emit content for version 2 of a streaming clone.
607 """Emit content for version 2 of a streaming clone.
608
608
609 the data stream consists the following entries:
609 the data stream consists the following entries:
610 1) A char representing the file destination (eg: store or cache)
610 1) A char representing the file destination (eg: store or cache)
611 2) A varint containing the length of the filename
611 2) A varint containing the length of the filename
612 3) A varint containing the length of file data
612 3) A varint containing the length of file data
613 4) N bytes containing the filename (the internal, store-agnostic form)
613 4) N bytes containing the filename (the internal, store-agnostic form)
614 5) N bytes containing the file data
614 5) N bytes containing the file data
615
615
616 Returns a 3-tuple of (file count, file size, data iterator).
616 Returns a 3-tuple of (file count, file size, data iterator).
617 """
617 """
618
618
619 with repo.lock():
619 with repo.lock():
620
620
621 entries = []
621 entries = []
622 totalfilesize = 0
622 totalfilesize = 0
623
623
624 matcher = None
624 matcher = None
625 if includes or excludes:
625 if includes or excludes:
626 matcher = narrowspec.match(repo.root, includes, excludes)
626 matcher = narrowspec.match(repo.root, includes, excludes)
627
627
628 repo.ui.debug(b'scanning\n')
628 repo.ui.debug(b'scanning\n')
629 for rl_type, name, ename, size in _walkstreamfiles(repo, matcher):
629 for rl_type, name, ename, size in _walkstreamfiles(repo, matcher):
630 if size:
630 if size:
631 entries.append((_srcstore, name, _fileappend, size))
631 ft = _fileappend
632 if rl_type & store.FILEFLAGS_VOLATILE:
633 ft = _filefull
634 entries.append((_srcstore, name, ft, size))
632 totalfilesize += size
635 totalfilesize += size
633 for name in _walkstreamfullstorefiles(repo):
636 for name in _walkstreamfullstorefiles(repo):
634 if repo.svfs.exists(name):
637 if repo.svfs.exists(name):
635 totalfilesize += repo.svfs.lstat(name).st_size
638 totalfilesize += repo.svfs.lstat(name).st_size
636 entries.append((_srcstore, name, _filefull, None))
639 entries.append((_srcstore, name, _filefull, None))
637 if includeobsmarkers and repo.svfs.exists(b'obsstore'):
640 if includeobsmarkers and repo.svfs.exists(b'obsstore'):
638 totalfilesize += repo.svfs.lstat(b'obsstore').st_size
641 totalfilesize += repo.svfs.lstat(b'obsstore').st_size
639 entries.append((_srcstore, b'obsstore', _filefull, None))
642 entries.append((_srcstore, b'obsstore', _filefull, None))
640 for name in cacheutil.cachetocopy(repo):
643 for name in cacheutil.cachetocopy(repo):
641 if repo.cachevfs.exists(name):
644 if repo.cachevfs.exists(name):
642 totalfilesize += repo.cachevfs.lstat(name).st_size
645 totalfilesize += repo.cachevfs.lstat(name).st_size
643 entries.append((_srccache, name, _filefull, None))
646 entries.append((_srccache, name, _filefull, None))
644
647
645 chunks = _emit2(repo, entries, totalfilesize)
648 chunks = _emit2(repo, entries, totalfilesize)
646 first = next(chunks)
649 first = next(chunks)
647 assert first is None
650 assert first is None
648 _test_sync_point_walk_1(repo)
651 _test_sync_point_walk_1(repo)
649 _test_sync_point_walk_2(repo)
652 _test_sync_point_walk_2(repo)
650
653
651 return len(entries), totalfilesize, chunks
654 return len(entries), totalfilesize, chunks
652
655
653
656
654 @contextlib.contextmanager
657 @contextlib.contextmanager
655 def nested(*ctxs):
658 def nested(*ctxs):
656 this = ctxs[0]
659 this = ctxs[0]
657 rest = ctxs[1:]
660 rest = ctxs[1:]
658 with this:
661 with this:
659 if rest:
662 if rest:
660 with nested(*rest):
663 with nested(*rest):
661 yield
664 yield
662 else:
665 else:
663 yield
666 yield
664
667
665
668
666 def consumev2(repo, fp, filecount, filesize):
669 def consumev2(repo, fp, filecount, filesize):
667 """Apply the contents from a version 2 streaming clone.
670 """Apply the contents from a version 2 streaming clone.
668
671
669 Data is read from an object that only needs to provide a ``read(size)``
672 Data is read from an object that only needs to provide a ``read(size)``
670 method.
673 method.
671 """
674 """
672 with repo.lock():
675 with repo.lock():
673 repo.ui.status(
676 repo.ui.status(
674 _(b'%d files to transfer, %s of data\n')
677 _(b'%d files to transfer, %s of data\n')
675 % (filecount, util.bytecount(filesize))
678 % (filecount, util.bytecount(filesize))
676 )
679 )
677
680
678 start = util.timer()
681 start = util.timer()
679 progress = repo.ui.makeprogress(
682 progress = repo.ui.makeprogress(
680 _(b'clone'), total=filesize, unit=_(b'bytes')
683 _(b'clone'), total=filesize, unit=_(b'bytes')
681 )
684 )
682 progress.update(0)
685 progress.update(0)
683
686
684 vfsmap = _makemap(repo)
687 vfsmap = _makemap(repo)
685
688
686 with repo.transaction(b'clone'):
689 with repo.transaction(b'clone'):
687 ctxs = (vfs.backgroundclosing(repo.ui) for vfs in vfsmap.values())
690 ctxs = (vfs.backgroundclosing(repo.ui) for vfs in vfsmap.values())
688 with nested(*ctxs):
691 with nested(*ctxs):
689 for i in range(filecount):
692 for i in range(filecount):
690 src = util.readexactly(fp, 1)
693 src = util.readexactly(fp, 1)
691 vfs = vfsmap[src]
694 vfs = vfsmap[src]
692 namelen = util.uvarintdecodestream(fp)
695 namelen = util.uvarintdecodestream(fp)
693 datalen = util.uvarintdecodestream(fp)
696 datalen = util.uvarintdecodestream(fp)
694
697
695 name = util.readexactly(fp, namelen)
698 name = util.readexactly(fp, namelen)
696
699
697 if repo.ui.debugflag:
700 if repo.ui.debugflag:
698 repo.ui.debug(
701 repo.ui.debug(
699 b'adding [%s] %s (%s)\n'
702 b'adding [%s] %s (%s)\n'
700 % (src, name, util.bytecount(datalen))
703 % (src, name, util.bytecount(datalen))
701 )
704 )
702
705
703 with vfs(name, b'w') as ofp:
706 with vfs(name, b'w') as ofp:
704 for chunk in util.filechunkiter(fp, limit=datalen):
707 for chunk in util.filechunkiter(fp, limit=datalen):
705 progress.increment(step=len(chunk))
708 progress.increment(step=len(chunk))
706 ofp.write(chunk)
709 ofp.write(chunk)
707
710
708 # force @filecache properties to be reloaded from
711 # force @filecache properties to be reloaded from
709 # streamclone-ed file at next access
712 # streamclone-ed file at next access
710 repo.invalidate(clearfilecache=True)
713 repo.invalidate(clearfilecache=True)
711
714
712 elapsed = util.timer() - start
715 elapsed = util.timer() - start
713 if elapsed <= 0:
716 if elapsed <= 0:
714 elapsed = 0.001
717 elapsed = 0.001
715 repo.ui.status(
718 repo.ui.status(
716 _(b'transferred %s in %.1f seconds (%s/sec)\n')
719 _(b'transferred %s in %.1f seconds (%s/sec)\n')
717 % (
720 % (
718 util.bytecount(progress.pos),
721 util.bytecount(progress.pos),
719 elapsed,
722 elapsed,
720 util.bytecount(progress.pos / elapsed),
723 util.bytecount(progress.pos / elapsed),
721 )
724 )
722 )
725 )
723 progress.complete()
726 progress.complete()
724
727
725
728
726 def applybundlev2(repo, fp, filecount, filesize, requirements):
729 def applybundlev2(repo, fp, filecount, filesize, requirements):
727 from . import localrepo
730 from . import localrepo
728
731
729 missingreqs = [r for r in requirements if r not in repo.supported]
732 missingreqs = [r for r in requirements if r not in repo.supported]
730 if missingreqs:
733 if missingreqs:
731 raise error.Abort(
734 raise error.Abort(
732 _(b'unable to apply stream clone: unsupported format: %s')
735 _(b'unable to apply stream clone: unsupported format: %s')
733 % b', '.join(sorted(missingreqs))
736 % b', '.join(sorted(missingreqs))
734 )
737 )
735
738
736 consumev2(repo, fp, filecount, filesize)
739 consumev2(repo, fp, filecount, filesize)
737
740
738 # new requirements = old non-format requirements +
741 # new requirements = old non-format requirements +
739 # new format-related remote requirements
742 # new format-related remote requirements
740 # requirements from the streamed-in repository
743 # requirements from the streamed-in repository
741 repo.requirements = set(requirements) | (
744 repo.requirements = set(requirements) | (
742 repo.requirements - repo.supportedformats
745 repo.requirements - repo.supportedformats
743 )
746 )
744 repo.svfs.options = localrepo.resolvestorevfsoptions(
747 repo.svfs.options = localrepo.resolvestorevfsoptions(
745 repo.ui, repo.requirements, repo.features
748 repo.ui, repo.requirements, repo.features
746 )
749 )
747 scmutil.writereporequirements(repo)
750 scmutil.writereporequirements(repo)
@@ -1,1065 +1,1054 b''
1 ===================================
1 ===================================
2 Test the persistent on-disk nodemap
2 Test the persistent on-disk nodemap
3 ===================================
3 ===================================
4
4
5
5
6 #if no-rust
6 #if no-rust
7
7
8 $ cat << EOF >> $HGRCPATH
8 $ cat << EOF >> $HGRCPATH
9 > [format]
9 > [format]
10 > use-persistent-nodemap=yes
10 > use-persistent-nodemap=yes
11 > [devel]
11 > [devel]
12 > persistent-nodemap=yes
12 > persistent-nodemap=yes
13 > EOF
13 > EOF
14
14
15 #endif
15 #endif
16
16
17 $ hg init test-repo --config storage.revlog.persistent-nodemap.slow-path=allow
17 $ hg init test-repo --config storage.revlog.persistent-nodemap.slow-path=allow
18 $ cd test-repo
18 $ cd test-repo
19
19
20 Check handling of the default slow-path value
20 Check handling of the default slow-path value
21
21
22 #if no-pure no-rust
22 #if no-pure no-rust
23
23
24 $ hg id
24 $ hg id
25 abort: accessing `persistent-nodemap` repository without associated fast implementation.
25 abort: accessing `persistent-nodemap` repository without associated fast implementation.
26 (check `hg help config.format.use-persistent-nodemap` for details)
26 (check `hg help config.format.use-persistent-nodemap` for details)
27 [255]
27 [255]
28
28
29 Unlock further check (we are here to test the feature)
29 Unlock further check (we are here to test the feature)
30
30
31 $ cat << EOF >> $HGRCPATH
31 $ cat << EOF >> $HGRCPATH
32 > [storage]
32 > [storage]
33 > # to avoid spamming the test
33 > # to avoid spamming the test
34 > revlog.persistent-nodemap.slow-path=allow
34 > revlog.persistent-nodemap.slow-path=allow
35 > EOF
35 > EOF
36
36
37 #endif
37 #endif
38
38
39 #if rust
39 #if rust
40
40
41 Regression test for a previous bug in Rust/C FFI for the `Revlog_CAPI` capsule:
41 Regression test for a previous bug in Rust/C FFI for the `Revlog_CAPI` capsule:
42 in places where `mercurial/cext/revlog.c` function signatures use `Py_ssize_t`
42 in places where `mercurial/cext/revlog.c` function signatures use `Py_ssize_t`
43 (64 bits on Linux x86_64), corresponding declarations in `rust/hg-cpython/src/cindex.rs`
43 (64 bits on Linux x86_64), corresponding declarations in `rust/hg-cpython/src/cindex.rs`
44 incorrectly used `libc::c_int` (32 bits).
44 incorrectly used `libc::c_int` (32 bits).
45 As a result, -1 passed from Rust for the null revision became 4294967295 in C.
45 As a result, -1 passed from Rust for the null revision became 4294967295 in C.
46
46
47 $ hg log -r 00000000
47 $ hg log -r 00000000
48 changeset: -1:000000000000
48 changeset: -1:000000000000
49 tag: tip
49 tag: tip
50 user:
50 user:
51 date: Thu Jan 01 00:00:00 1970 +0000
51 date: Thu Jan 01 00:00:00 1970 +0000
52
52
53
53
54 #endif
54 #endif
55
55
56
56
57 $ hg debugformat
57 $ hg debugformat
58 format-variant repo
58 format-variant repo
59 fncache: yes
59 fncache: yes
60 dotencode: yes
60 dotencode: yes
61 generaldelta: yes
61 generaldelta: yes
62 share-safe: no
62 share-safe: no
63 sparserevlog: yes
63 sparserevlog: yes
64 persistent-nodemap: yes
64 persistent-nodemap: yes
65 copies-sdc: no
65 copies-sdc: no
66 revlog-v2: no
66 revlog-v2: no
67 plain-cl-delta: yes
67 plain-cl-delta: yes
68 compression: zlib (no-zstd !)
68 compression: zlib (no-zstd !)
69 compression: zstd (zstd !)
69 compression: zstd (zstd !)
70 compression-level: default
70 compression-level: default
71 $ hg debugbuilddag .+5000 --new-file
71 $ hg debugbuilddag .+5000 --new-file
72
72
73 $ hg debugnodemap --metadata
73 $ hg debugnodemap --metadata
74 uid: ???????????????? (glob)
74 uid: ???????????????? (glob)
75 tip-rev: 5000
75 tip-rev: 5000
76 tip-node: 6b02b8c7b96654c25e86ba69eda198d7e6ad8b3c
76 tip-node: 6b02b8c7b96654c25e86ba69eda198d7e6ad8b3c
77 data-length: 121088
77 data-length: 121088
78 data-unused: 0
78 data-unused: 0
79 data-unused: 0.000%
79 data-unused: 0.000%
80 $ f --size .hg/store/00changelog.n
80 $ f --size .hg/store/00changelog.n
81 .hg/store/00changelog.n: size=70
81 .hg/store/00changelog.n: size=70
82
82
83 Simple lookup works
83 Simple lookup works
84
84
85 $ ANYNODE=`hg log --template '{node|short}\n' --rev tip`
85 $ ANYNODE=`hg log --template '{node|short}\n' --rev tip`
86 $ hg log -r "$ANYNODE" --template '{rev}\n'
86 $ hg log -r "$ANYNODE" --template '{rev}\n'
87 5000
87 5000
88
88
89
89
90 #if rust
90 #if rust
91
91
92 $ f --sha256 .hg/store/00changelog-*.nd
92 $ f --sha256 .hg/store/00changelog-*.nd
93 .hg/store/00changelog-????????????????.nd: sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd (glob)
93 .hg/store/00changelog-????????????????.nd: sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd (glob)
94
94
95 $ f --sha256 .hg/store/00manifest-*.nd
95 $ f --sha256 .hg/store/00manifest-*.nd
96 .hg/store/00manifest-????????????????.nd: sha256=97117b1c064ea2f86664a124589e47db0e254e8d34739b5c5cc5bf31c9da2b51 (glob)
96 .hg/store/00manifest-????????????????.nd: sha256=97117b1c064ea2f86664a124589e47db0e254e8d34739b5c5cc5bf31c9da2b51 (glob)
97 $ hg debugnodemap --dump-new | f --sha256 --size
97 $ hg debugnodemap --dump-new | f --sha256 --size
98 size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
98 size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
99 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
99 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
100 size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
100 size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
101 0000: 00 00 00 91 00 00 00 20 00 00 00 bb 00 00 00 e7 |....... ........|
101 0000: 00 00 00 91 00 00 00 20 00 00 00 bb 00 00 00 e7 |....... ........|
102 0010: 00 00 00 66 00 00 00 a1 00 00 01 13 00 00 01 22 |...f..........."|
102 0010: 00 00 00 66 00 00 00 a1 00 00 01 13 00 00 01 22 |...f..........."|
103 0020: 00 00 00 23 00 00 00 fc 00 00 00 ba 00 00 00 5e |...#...........^|
103 0020: 00 00 00 23 00 00 00 fc 00 00 00 ba 00 00 00 5e |...#...........^|
104 0030: 00 00 00 df 00 00 01 4e 00 00 01 65 00 00 00 ab |.......N...e....|
104 0030: 00 00 00 df 00 00 01 4e 00 00 01 65 00 00 00 ab |.......N...e....|
105 0040: 00 00 00 a9 00 00 00 95 00 00 00 73 00 00 00 38 |...........s...8|
105 0040: 00 00 00 a9 00 00 00 95 00 00 00 73 00 00 00 38 |...........s...8|
106 0050: 00 00 00 cc 00 00 00 92 00 00 00 90 00 00 00 69 |...............i|
106 0050: 00 00 00 cc 00 00 00 92 00 00 00 90 00 00 00 69 |...............i|
107 0060: 00 00 00 ec 00 00 00 8d 00 00 01 4f 00 00 00 12 |...........O....|
107 0060: 00 00 00 ec 00 00 00 8d 00 00 01 4f 00 00 00 12 |...........O....|
108 0070: 00 00 02 0c 00 00 00 77 00 00 00 9c 00 00 00 8f |.......w........|
108 0070: 00 00 02 0c 00 00 00 77 00 00 00 9c 00 00 00 8f |.......w........|
109 0080: 00 00 00 d5 00 00 00 6b 00 00 00 48 00 00 00 b3 |.......k...H....|
109 0080: 00 00 00 d5 00 00 00 6b 00 00 00 48 00 00 00 b3 |.......k...H....|
110 0090: 00 00 00 e5 00 00 00 b5 00 00 00 8e 00 00 00 ad |................|
110 0090: 00 00 00 e5 00 00 00 b5 00 00 00 8e 00 00 00 ad |................|
111 00a0: 00 00 00 7b 00 00 00 7c 00 00 00 0b 00 00 00 2b |...{...|.......+|
111 00a0: 00 00 00 7b 00 00 00 7c 00 00 00 0b 00 00 00 2b |...{...|.......+|
112 00b0: 00 00 00 c6 00 00 00 1e 00 00 01 08 00 00 00 11 |................|
112 00b0: 00 00 00 c6 00 00 00 1e 00 00 01 08 00 00 00 11 |................|
113 00c0: 00 00 01 30 00 00 00 26 00 00 01 9c 00 00 00 35 |...0...&.......5|
113 00c0: 00 00 01 30 00 00 00 26 00 00 01 9c 00 00 00 35 |...0...&.......5|
114 00d0: 00 00 00 b8 00 00 01 31 00 00 00 2c 00 00 00 55 |.......1...,...U|
114 00d0: 00 00 00 b8 00 00 01 31 00 00 00 2c 00 00 00 55 |.......1...,...U|
115 00e0: 00 00 00 8a 00 00 00 9a 00 00 00 0c 00 00 01 1e |................|
115 00e0: 00 00 00 8a 00 00 00 9a 00 00 00 0c 00 00 01 1e |................|
116 00f0: 00 00 00 a4 00 00 00 83 00 00 00 c9 00 00 00 8c |................|
116 00f0: 00 00 00 a4 00 00 00 83 00 00 00 c9 00 00 00 8c |................|
117
117
118
118
119 #else
119 #else
120
120
121 $ f --sha256 .hg/store/00changelog-*.nd
121 $ f --sha256 .hg/store/00changelog-*.nd
122 .hg/store/00changelog-????????????????.nd: sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79 (glob)
122 .hg/store/00changelog-????????????????.nd: sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79 (glob)
123 $ hg debugnodemap --dump-new | f --sha256 --size
123 $ hg debugnodemap --dump-new | f --sha256 --size
124 size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
124 size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
125 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
125 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
126 size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
126 size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
127 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
127 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
128 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
128 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
129 0020: ff ff ff ff ff ff f5 06 ff ff ff ff ff ff f3 e7 |................|
129 0020: ff ff ff ff ff ff f5 06 ff ff ff ff ff ff f3 e7 |................|
130 0030: ff ff ef ca ff ff ff ff ff ff ff ff ff ff ff ff |................|
130 0030: ff ff ef ca ff ff ff ff ff ff ff ff ff ff ff ff |................|
131 0040: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
131 0040: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
132 0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ed 08 |................|
132 0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ed 08 |................|
133 0060: ff ff ed 66 ff ff ff ff ff ff ff ff ff ff ff ff |...f............|
133 0060: ff ff ed 66 ff ff ff ff ff ff ff ff ff ff ff ff |...f............|
134 0070: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
134 0070: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
135 0080: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
135 0080: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
136 0090: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f6 ed |................|
136 0090: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f6 ed |................|
137 00a0: ff ff ff ff ff ff fe 61 ff ff ff ff ff ff ff ff |.......a........|
137 00a0: ff ff ff ff ff ff fe 61 ff ff ff ff ff ff ff ff |.......a........|
138 00b0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
138 00b0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
139 00c0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
139 00c0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
140 00d0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
140 00d0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
141 00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f1 02 |................|
141 00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f1 02 |................|
142 00f0: ff ff ff ff ff ff ed 1b ff ff ff ff ff ff ff ff |................|
142 00f0: ff ff ff ff ff ff ed 1b ff ff ff ff ff ff ff ff |................|
143
143
144 #endif
144 #endif
145
145
146 $ hg debugnodemap --check
146 $ hg debugnodemap --check
147 revision in index: 5001
147 revision in index: 5001
148 revision in nodemap: 5001
148 revision in nodemap: 5001
149
149
150 add a new commit
150 add a new commit
151
151
152 $ hg up
152 $ hg up
153 5001 files updated, 0 files merged, 0 files removed, 0 files unresolved
153 5001 files updated, 0 files merged, 0 files removed, 0 files unresolved
154 $ echo foo > foo
154 $ echo foo > foo
155 $ hg add foo
155 $ hg add foo
156
156
157
157
158 Check slow-path config value handling
158 Check slow-path config value handling
159 -------------------------------------
159 -------------------------------------
160
160
161 #if no-pure no-rust
161 #if no-pure no-rust
162
162
163 $ hg id --config "storage.revlog.persistent-nodemap.slow-path=invalid-value"
163 $ hg id --config "storage.revlog.persistent-nodemap.slow-path=invalid-value"
164 unknown value for config "storage.revlog.persistent-nodemap.slow-path": "invalid-value"
164 unknown value for config "storage.revlog.persistent-nodemap.slow-path": "invalid-value"
165 falling back to default value: abort
165 falling back to default value: abort
166 abort: accessing `persistent-nodemap` repository without associated fast implementation.
166 abort: accessing `persistent-nodemap` repository without associated fast implementation.
167 (check `hg help config.format.use-persistent-nodemap` for details)
167 (check `hg help config.format.use-persistent-nodemap` for details)
168 [255]
168 [255]
169
169
170 $ hg log -r . --config "storage.revlog.persistent-nodemap.slow-path=warn"
170 $ hg log -r . --config "storage.revlog.persistent-nodemap.slow-path=warn"
171 warning: accessing `persistent-nodemap` repository without associated fast implementation.
171 warning: accessing `persistent-nodemap` repository without associated fast implementation.
172 (check `hg help config.format.use-persistent-nodemap` for details)
172 (check `hg help config.format.use-persistent-nodemap` for details)
173 changeset: 5000:6b02b8c7b966
173 changeset: 5000:6b02b8c7b966
174 tag: tip
174 tag: tip
175 user: debugbuilddag
175 user: debugbuilddag
176 date: Thu Jan 01 01:23:20 1970 +0000
176 date: Thu Jan 01 01:23:20 1970 +0000
177 summary: r5000
177 summary: r5000
178
178
179 $ hg ci -m 'foo' --config "storage.revlog.persistent-nodemap.slow-path=abort"
179 $ hg ci -m 'foo' --config "storage.revlog.persistent-nodemap.slow-path=abort"
180 abort: accessing `persistent-nodemap` repository without associated fast implementation.
180 abort: accessing `persistent-nodemap` repository without associated fast implementation.
181 (check `hg help config.format.use-persistent-nodemap` for details)
181 (check `hg help config.format.use-persistent-nodemap` for details)
182 [255]
182 [255]
183
183
184 #else
184 #else
185
185
186 $ hg id --config "storage.revlog.persistent-nodemap.slow-path=invalid-value"
186 $ hg id --config "storage.revlog.persistent-nodemap.slow-path=invalid-value"
187 unknown value for config "storage.revlog.persistent-nodemap.slow-path": "invalid-value"
187 unknown value for config "storage.revlog.persistent-nodemap.slow-path": "invalid-value"
188 falling back to default value: abort
188 falling back to default value: abort
189 6b02b8c7b966+ tip
189 6b02b8c7b966+ tip
190
190
191 #endif
191 #endif
192
192
193 $ hg ci -m 'foo'
193 $ hg ci -m 'foo'
194
194
195 #if no-pure no-rust
195 #if no-pure no-rust
196 $ hg debugnodemap --metadata
196 $ hg debugnodemap --metadata
197 uid: ???????????????? (glob)
197 uid: ???????????????? (glob)
198 tip-rev: 5001
198 tip-rev: 5001
199 tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
199 tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
200 data-length: 121088
200 data-length: 121088
201 data-unused: 0
201 data-unused: 0
202 data-unused: 0.000%
202 data-unused: 0.000%
203 #else
203 #else
204 $ hg debugnodemap --metadata
204 $ hg debugnodemap --metadata
205 uid: ???????????????? (glob)
205 uid: ???????????????? (glob)
206 tip-rev: 5001
206 tip-rev: 5001
207 tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
207 tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
208 data-length: 121344
208 data-length: 121344
209 data-unused: 256
209 data-unused: 256
210 data-unused: 0.211%
210 data-unused: 0.211%
211 #endif
211 #endif
212
212
213 $ f --size .hg/store/00changelog.n
213 $ f --size .hg/store/00changelog.n
214 .hg/store/00changelog.n: size=70
214 .hg/store/00changelog.n: size=70
215
215
216 (The pure code uses the debug code that performs an incremental update; the C code re-encodes from scratch)
216 (The pure code uses the debug code that performs an incremental update; the C code re-encodes from scratch)
217
217
218 #if pure
218 #if pure
219 $ f --sha256 .hg/store/00changelog-*.nd --size
219 $ f --sha256 .hg/store/00changelog-*.nd --size
220 .hg/store/00changelog-????????????????.nd: size=121344, sha256=cce54c5da5bde3ad72a4938673ed4064c86231b9c64376b082b163fdb20f8f66 (glob)
220 .hg/store/00changelog-????????????????.nd: size=121344, sha256=cce54c5da5bde3ad72a4938673ed4064c86231b9c64376b082b163fdb20f8f66 (glob)
221 #endif
221 #endif
222
222
223 #if rust
223 #if rust
224 $ f --sha256 .hg/store/00changelog-*.nd --size
224 $ f --sha256 .hg/store/00changelog-*.nd --size
225 .hg/store/00changelog-????????????????.nd: size=121344, sha256=952b042fcf614ceb37b542b1b723e04f18f83efe99bee4e0f5ccd232ef470e58 (glob)
225 .hg/store/00changelog-????????????????.nd: size=121344, sha256=952b042fcf614ceb37b542b1b723e04f18f83efe99bee4e0f5ccd232ef470e58 (glob)
226 #endif
226 #endif
227
227
228 #if no-pure no-rust
228 #if no-pure no-rust
229 $ f --sha256 .hg/store/00changelog-*.nd --size
229 $ f --sha256 .hg/store/00changelog-*.nd --size
230 .hg/store/00changelog-????????????????.nd: size=121088, sha256=df7c06a035b96cb28c7287d349d603baef43240be7736fe34eea419a49702e17 (glob)
230 .hg/store/00changelog-????????????????.nd: size=121088, sha256=df7c06a035b96cb28c7287d349d603baef43240be7736fe34eea419a49702e17 (glob)
231 #endif
231 #endif
232
232
233 $ hg debugnodemap --check
233 $ hg debugnodemap --check
234 revision in index: 5002
234 revision in index: 5002
235 revision in nodemap: 5002
235 revision in nodemap: 5002
236
236
237 Test code path without mmap
237 Test code path without mmap
238 ---------------------------
238 ---------------------------
239
239
240 $ echo bar > bar
240 $ echo bar > bar
241 $ hg add bar
241 $ hg add bar
242 $ hg ci -m 'bar' --config storage.revlog.persistent-nodemap.mmap=no
242 $ hg ci -m 'bar' --config storage.revlog.persistent-nodemap.mmap=no
243
243
244 $ hg debugnodemap --check --config storage.revlog.persistent-nodemap.mmap=yes
244 $ hg debugnodemap --check --config storage.revlog.persistent-nodemap.mmap=yes
245 revision in index: 5003
245 revision in index: 5003
246 revision in nodemap: 5003
246 revision in nodemap: 5003
247 $ hg debugnodemap --check --config storage.revlog.persistent-nodemap.mmap=no
247 $ hg debugnodemap --check --config storage.revlog.persistent-nodemap.mmap=no
248 revision in index: 5003
248 revision in index: 5003
249 revision in nodemap: 5003
249 revision in nodemap: 5003
250
250
251
251
252 #if pure
252 #if pure
253 $ hg debugnodemap --metadata
253 $ hg debugnodemap --metadata
254 uid: ???????????????? (glob)
254 uid: ???????????????? (glob)
255 tip-rev: 5002
255 tip-rev: 5002
256 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
256 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
257 data-length: 121600
257 data-length: 121600
258 data-unused: 512
258 data-unused: 512
259 data-unused: 0.421%
259 data-unused: 0.421%
260 $ f --sha256 .hg/store/00changelog-*.nd --size
260 $ f --sha256 .hg/store/00changelog-*.nd --size
261 .hg/store/00changelog-????????????????.nd: size=121600, sha256=def52503d049ccb823974af313a98a935319ba61f40f3aa06a8be4d35c215054 (glob)
261 .hg/store/00changelog-????????????????.nd: size=121600, sha256=def52503d049ccb823974af313a98a935319ba61f40f3aa06a8be4d35c215054 (glob)
262 #endif
262 #endif
263 #if rust
263 #if rust
264 $ hg debugnodemap --metadata
264 $ hg debugnodemap --metadata
265 uid: ???????????????? (glob)
265 uid: ???????????????? (glob)
266 tip-rev: 5002
266 tip-rev: 5002
267 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
267 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
268 data-length: 121600
268 data-length: 121600
269 data-unused: 512
269 data-unused: 512
270 data-unused: 0.421%
270 data-unused: 0.421%
271 $ f --sha256 .hg/store/00changelog-*.nd --size
271 $ f --sha256 .hg/store/00changelog-*.nd --size
272 .hg/store/00changelog-????????????????.nd: size=121600, sha256=dacf5b5f1d4585fee7527d0e67cad5b1ba0930e6a0928f650f779aefb04ce3fb (glob)
272 .hg/store/00changelog-????????????????.nd: size=121600, sha256=dacf5b5f1d4585fee7527d0e67cad5b1ba0930e6a0928f650f779aefb04ce3fb (glob)
273 #endif
273 #endif
274 #if no-pure no-rust
274 #if no-pure no-rust
275 $ hg debugnodemap --metadata
275 $ hg debugnodemap --metadata
276 uid: ???????????????? (glob)
276 uid: ???????????????? (glob)
277 tip-rev: 5002
277 tip-rev: 5002
278 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
278 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
279 data-length: 121088
279 data-length: 121088
280 data-unused: 0
280 data-unused: 0
281 data-unused: 0.000%
281 data-unused: 0.000%
282 $ f --sha256 .hg/store/00changelog-*.nd --size
282 $ f --sha256 .hg/store/00changelog-*.nd --size
283 .hg/store/00changelog-????????????????.nd: size=121088, sha256=59fcede3e3cc587755916ceed29e3c33748cd1aa7d2f91828ac83e7979d935e8 (glob)
283 .hg/store/00changelog-????????????????.nd: size=121088, sha256=59fcede3e3cc587755916ceed29e3c33748cd1aa7d2f91828ac83e7979d935e8 (glob)
284 #endif
284 #endif
285
285
286 Test force warming the cache
286 Test force warming the cache
287
287
288 $ rm .hg/store/00changelog.n
288 $ rm .hg/store/00changelog.n
289 $ hg debugnodemap --metadata
289 $ hg debugnodemap --metadata
290 $ hg debugupdatecache
290 $ hg debugupdatecache
291 #if pure
291 #if pure
292 $ hg debugnodemap --metadata
292 $ hg debugnodemap --metadata
293 uid: ???????????????? (glob)
293 uid: ???????????????? (glob)
294 tip-rev: 5002
294 tip-rev: 5002
295 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
295 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
296 data-length: 121088
296 data-length: 121088
297 data-unused: 0
297 data-unused: 0
298 data-unused: 0.000%
298 data-unused: 0.000%
299 #else
299 #else
300 $ hg debugnodemap --metadata
300 $ hg debugnodemap --metadata
301 uid: ???????????????? (glob)
301 uid: ???????????????? (glob)
302 tip-rev: 5002
302 tip-rev: 5002
303 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
303 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
304 data-length: 121088
304 data-length: 121088
305 data-unused: 0
305 data-unused: 0
306 data-unused: 0.000%
306 data-unused: 0.000%
307 #endif
307 #endif
308
308
309 Check out of sync nodemap
309 Check out of sync nodemap
310 =========================
310 =========================
311
311
312 First copy old data on the side.
312 First copy old data on the side.
313
313
314 $ mkdir ../tmp-copies
314 $ mkdir ../tmp-copies
315 $ cp .hg/store/00changelog-????????????????.nd .hg/store/00changelog.n ../tmp-copies
315 $ cp .hg/store/00changelog-????????????????.nd .hg/store/00changelog.n ../tmp-copies
316
316
317 Nodemap lagging behind
317 Nodemap lagging behind
318 ----------------------
318 ----------------------
319
319
320 make a new commit
320 make a new commit
321
321
322 $ echo bar2 > bar
322 $ echo bar2 > bar
323 $ hg ci -m 'bar2'
323 $ hg ci -m 'bar2'
324 $ NODE=`hg log -r tip -T '{node}\n'`
324 $ NODE=`hg log -r tip -T '{node}\n'`
325 $ hg log -r "$NODE" -T '{rev}\n'
325 $ hg log -r "$NODE" -T '{rev}\n'
326 5003
326 5003
327
327
328 If the nodemap is lagging behind, it can catch up fine
328 If the nodemap is lagging behind, it can catch up fine
329
329
330 $ hg debugnodemap --metadata
330 $ hg debugnodemap --metadata
331 uid: ???????????????? (glob)
331 uid: ???????????????? (glob)
332 tip-rev: 5003
332 tip-rev: 5003
333 tip-node: c9329770f979ade2d16912267c38ba5f82fd37b3
333 tip-node: c9329770f979ade2d16912267c38ba5f82fd37b3
334 data-length: 121344 (pure !)
334 data-length: 121344 (pure !)
335 data-length: 121344 (rust !)
335 data-length: 121344 (rust !)
336 data-length: 121152 (no-rust no-pure !)
336 data-length: 121152 (no-rust no-pure !)
337 data-unused: 192 (pure !)
337 data-unused: 192 (pure !)
338 data-unused: 192 (rust !)
338 data-unused: 192 (rust !)
339 data-unused: 0 (no-rust no-pure !)
339 data-unused: 0 (no-rust no-pure !)
340 data-unused: 0.158% (pure !)
340 data-unused: 0.158% (pure !)
341 data-unused: 0.158% (rust !)
341 data-unused: 0.158% (rust !)
342 data-unused: 0.000% (no-rust no-pure !)
342 data-unused: 0.000% (no-rust no-pure !)
343 $ cp -f ../tmp-copies/* .hg/store/
343 $ cp -f ../tmp-copies/* .hg/store/
344 $ hg debugnodemap --metadata
344 $ hg debugnodemap --metadata
345 uid: ???????????????? (glob)
345 uid: ???????????????? (glob)
346 tip-rev: 5002
346 tip-rev: 5002
347 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
347 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
348 data-length: 121088
348 data-length: 121088
349 data-unused: 0
349 data-unused: 0
350 data-unused: 0.000%
350 data-unused: 0.000%
351 $ hg log -r "$NODE" -T '{rev}\n'
351 $ hg log -r "$NODE" -T '{rev}\n'
352 5003
352 5003
353
353
354 changelog altered
354 changelog altered
355 -----------------
355 -----------------
356
356
357 If the nodemap is not gated behind a requirement, an unaware client can alter
357 If the nodemap is not gated behind a requirement, an unaware client can alter
358 the repository so the revlog used to generate the nodemap is no longer
358 the repository so the revlog used to generate the nodemap is no longer
359 compatible with the persistent nodemap. We need to detect that.
359 compatible with the persistent nodemap. We need to detect that.
360
360
361 $ hg up "$NODE~5"
361 $ hg up "$NODE~5"
362 0 files updated, 0 files merged, 4 files removed, 0 files unresolved
362 0 files updated, 0 files merged, 4 files removed, 0 files unresolved
363 $ echo bar > babar
363 $ echo bar > babar
364 $ hg add babar
364 $ hg add babar
365 $ hg ci -m 'babar'
365 $ hg ci -m 'babar'
366 created new head
366 created new head
367 $ OTHERNODE=`hg log -r tip -T '{node}\n'`
367 $ OTHERNODE=`hg log -r tip -T '{node}\n'`
368 $ hg log -r "$OTHERNODE" -T '{rev}\n'
368 $ hg log -r "$OTHERNODE" -T '{rev}\n'
369 5004
369 5004
370
370
371 $ hg --config extensions.strip= strip --rev "$NODE~1" --no-backup
371 $ hg --config extensions.strip= strip --rev "$NODE~1" --no-backup
372
372
373 the nodemap should detect that the changelog has been tampered with and recover.
373 the nodemap should detect that the changelog has been tampered with and recover.
374
374
375 $ hg debugnodemap --metadata
375 $ hg debugnodemap --metadata
376 uid: ???????????????? (glob)
376 uid: ???????????????? (glob)
377 tip-rev: 5002
377 tip-rev: 5002
378 tip-node: b355ef8adce0949b8bdf6afc72ca853740d65944
378 tip-node: b355ef8adce0949b8bdf6afc72ca853740d65944
379 data-length: 121536 (pure !)
379 data-length: 121536 (pure !)
380 data-length: 121088 (rust !)
380 data-length: 121088 (rust !)
381 data-length: 121088 (no-pure no-rust !)
381 data-length: 121088 (no-pure no-rust !)
382 data-unused: 448 (pure !)
382 data-unused: 448 (pure !)
383 data-unused: 0 (rust !)
383 data-unused: 0 (rust !)
384 data-unused: 0 (no-pure no-rust !)
384 data-unused: 0 (no-pure no-rust !)
385 data-unused: 0.000% (rust !)
385 data-unused: 0.000% (rust !)
386 data-unused: 0.369% (pure !)
386 data-unused: 0.369% (pure !)
387 data-unused: 0.000% (no-pure no-rust !)
387 data-unused: 0.000% (no-pure no-rust !)
388
388
389 $ cp -f ../tmp-copies/* .hg/store/
389 $ cp -f ../tmp-copies/* .hg/store/
390 $ hg debugnodemap --metadata
390 $ hg debugnodemap --metadata
391 uid: ???????????????? (glob)
391 uid: ???????????????? (glob)
392 tip-rev: 5002
392 tip-rev: 5002
393 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
393 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
394 data-length: 121088
394 data-length: 121088
395 data-unused: 0
395 data-unused: 0
396 data-unused: 0.000%
396 data-unused: 0.000%
397 $ hg log -r "$OTHERNODE" -T '{rev}\n'
397 $ hg log -r "$OTHERNODE" -T '{rev}\n'
398 5002
398 5002
399
399
400 missing data file
400 missing data file
401 -----------------
401 -----------------
402
402
403 $ UUID=`hg debugnodemap --metadata| grep 'uid:' | \
403 $ UUID=`hg debugnodemap --metadata| grep 'uid:' | \
404 > sed 's/uid: //'`
404 > sed 's/uid: //'`
405 $ FILE=.hg/store/00changelog-"${UUID}".nd
405 $ FILE=.hg/store/00changelog-"${UUID}".nd
406 $ mv $FILE ../tmp-data-file
406 $ mv $FILE ../tmp-data-file
407 $ cp .hg/store/00changelog.n ../tmp-docket
407 $ cp .hg/store/00changelog.n ../tmp-docket
408
408
409 Mercurial doesn't crash
409 Mercurial doesn't crash
410
410
411 $ hg log -r .
411 $ hg log -r .
412 changeset: 5002:b355ef8adce0
412 changeset: 5002:b355ef8adce0
413 tag: tip
413 tag: tip
414 parent: 4998:d918ad6d18d3
414 parent: 4998:d918ad6d18d3
415 user: test
415 user: test
416 date: Thu Jan 01 00:00:00 1970 +0000
416 date: Thu Jan 01 00:00:00 1970 +0000
417 summary: babar
417 summary: babar
418
418
419 $ hg debugnodemap --metadata
419 $ hg debugnodemap --metadata
420
420
421 $ hg debugupdatecache
421 $ hg debugupdatecache
422 $ hg debugnodemap --metadata
422 $ hg debugnodemap --metadata
423 uid: * (glob)
423 uid: * (glob)
424 tip-rev: 5002
424 tip-rev: 5002
425 tip-node: b355ef8adce0949b8bdf6afc72ca853740d65944
425 tip-node: b355ef8adce0949b8bdf6afc72ca853740d65944
426 data-length: 121088
426 data-length: 121088
427 data-unused: 0
427 data-unused: 0
428 data-unused: 0.000%
428 data-unused: 0.000%
429 $ mv ../tmp-data-file $FILE
429 $ mv ../tmp-data-file $FILE
430 $ mv ../tmp-docket .hg/store/00changelog.n
430 $ mv ../tmp-docket .hg/store/00changelog.n
431
431
432 Check transaction related property
432 Check transaction related property
433 ==================================
433 ==================================
434
434
435 An up to date nodemap should be available to shell hooks,
435 An up to date nodemap should be available to shell hooks,
436
436
437 $ echo dsljfl > a
437 $ echo dsljfl > a
438 $ hg add a
438 $ hg add a
439 $ hg ci -m a
439 $ hg ci -m a
440 $ hg debugnodemap --metadata
440 $ hg debugnodemap --metadata
441 uid: ???????????????? (glob)
441 uid: ???????????????? (glob)
442 tip-rev: 5003
442 tip-rev: 5003
443 tip-node: a52c5079765b5865d97b993b303a18740113bbb2
443 tip-node: a52c5079765b5865d97b993b303a18740113bbb2
444 data-length: 121088
444 data-length: 121088
445 data-unused: 0
445 data-unused: 0
446 data-unused: 0.000%
446 data-unused: 0.000%
447 $ echo babar2 > babar
447 $ echo babar2 > babar
448 $ hg ci -m 'babar2' --config "hooks.pretxnclose.nodemap-test=hg debugnodemap --metadata"
448 $ hg ci -m 'babar2' --config "hooks.pretxnclose.nodemap-test=hg debugnodemap --metadata"
449 uid: ???????????????? (glob)
449 uid: ???????????????? (glob)
450 tip-rev: 5004
450 tip-rev: 5004
451 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
451 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
452 data-length: 121280 (pure !)
452 data-length: 121280 (pure !)
453 data-length: 121280 (rust !)
453 data-length: 121280 (rust !)
454 data-length: 121088 (no-pure no-rust !)
454 data-length: 121088 (no-pure no-rust !)
455 data-unused: 192 (pure !)
455 data-unused: 192 (pure !)
456 data-unused: 192 (rust !)
456 data-unused: 192 (rust !)
457 data-unused: 0 (no-pure no-rust !)
457 data-unused: 0 (no-pure no-rust !)
458 data-unused: 0.158% (pure !)
458 data-unused: 0.158% (pure !)
459 data-unused: 0.158% (rust !)
459 data-unused: 0.158% (rust !)
460 data-unused: 0.000% (no-pure no-rust !)
460 data-unused: 0.000% (no-pure no-rust !)
461 $ hg debugnodemap --metadata
461 $ hg debugnodemap --metadata
462 uid: ???????????????? (glob)
462 uid: ???????????????? (glob)
463 tip-rev: 5004
463 tip-rev: 5004
464 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
464 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
465 data-length: 121280 (pure !)
465 data-length: 121280 (pure !)
466 data-length: 121280 (rust !)
466 data-length: 121280 (rust !)
467 data-length: 121088 (no-pure no-rust !)
467 data-length: 121088 (no-pure no-rust !)
468 data-unused: 192 (pure !)
468 data-unused: 192 (pure !)
469 data-unused: 192 (rust !)
469 data-unused: 192 (rust !)
470 data-unused: 0 (no-pure no-rust !)
470 data-unused: 0 (no-pure no-rust !)
471 data-unused: 0.158% (pure !)
471 data-unused: 0.158% (pure !)
472 data-unused: 0.158% (rust !)
472 data-unused: 0.158% (rust !)
473 data-unused: 0.000% (no-pure no-rust !)
473 data-unused: 0.000% (no-pure no-rust !)
474
474
475 Another process does not see the pending nodemap content during run.
475 Another process does not see the pending nodemap content during run.
476
476
477 $ PATH=$RUNTESTDIR/testlib/:$PATH
477 $ PATH=$RUNTESTDIR/testlib/:$PATH
478 $ echo qpoasp > a
478 $ echo qpoasp > a
479 $ hg ci -m a2 \
479 $ hg ci -m a2 \
480 > --config "hooks.pretxnclose=wait-on-file 20 sync-repo-read sync-txn-pending" \
480 > --config "hooks.pretxnclose=wait-on-file 20 sync-repo-read sync-txn-pending" \
481 > --config "hooks.txnclose=touch sync-txn-close" > output.txt 2>&1 &
481 > --config "hooks.txnclose=touch sync-txn-close" > output.txt 2>&1 &
482
482
483 (read the repository while the commit transaction is pending)
483 (read the repository while the commit transaction is pending)
484
484
485 $ wait-on-file 20 sync-txn-pending && \
485 $ wait-on-file 20 sync-txn-pending && \
486 > hg debugnodemap --metadata && \
486 > hg debugnodemap --metadata && \
487 > wait-on-file 20 sync-txn-close sync-repo-read
487 > wait-on-file 20 sync-txn-close sync-repo-read
488 uid: ???????????????? (glob)
488 uid: ???????????????? (glob)
489 tip-rev: 5004
489 tip-rev: 5004
490 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
490 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
491 data-length: 121280 (pure !)
491 data-length: 121280 (pure !)
492 data-length: 121280 (rust !)
492 data-length: 121280 (rust !)
493 data-length: 121088 (no-pure no-rust !)
493 data-length: 121088 (no-pure no-rust !)
494 data-unused: 192 (pure !)
494 data-unused: 192 (pure !)
495 data-unused: 192 (rust !)
495 data-unused: 192 (rust !)
496 data-unused: 0 (no-pure no-rust !)
496 data-unused: 0 (no-pure no-rust !)
497 data-unused: 0.158% (pure !)
497 data-unused: 0.158% (pure !)
498 data-unused: 0.158% (rust !)
498 data-unused: 0.158% (rust !)
499 data-unused: 0.000% (no-pure no-rust !)
499 data-unused: 0.000% (no-pure no-rust !)
500 $ hg debugnodemap --metadata
500 $ hg debugnodemap --metadata
501 uid: ???????????????? (glob)
501 uid: ???????????????? (glob)
502 tip-rev: 5005
502 tip-rev: 5005
503 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
503 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
504 data-length: 121536 (pure !)
504 data-length: 121536 (pure !)
505 data-length: 121536 (rust !)
505 data-length: 121536 (rust !)
506 data-length: 121088 (no-pure no-rust !)
506 data-length: 121088 (no-pure no-rust !)
507 data-unused: 448 (pure !)
507 data-unused: 448 (pure !)
508 data-unused: 448 (rust !)
508 data-unused: 448 (rust !)
509 data-unused: 0 (no-pure no-rust !)
509 data-unused: 0 (no-pure no-rust !)
510 data-unused: 0.369% (pure !)
510 data-unused: 0.369% (pure !)
511 data-unused: 0.369% (rust !)
511 data-unused: 0.369% (rust !)
512 data-unused: 0.000% (no-pure no-rust !)
512 data-unused: 0.000% (no-pure no-rust !)
513
513
514 $ cat output.txt
514 $ cat output.txt
515
515
516 Check that a failing transaction will properly revert the data
516 Check that a failing transaction will properly revert the data
517
517
518 $ echo plakfe > a
518 $ echo plakfe > a
519 $ f --size --sha256 .hg/store/00changelog-*.nd
519 $ f --size --sha256 .hg/store/00changelog-*.nd
520 .hg/store/00changelog-????????????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
520 .hg/store/00changelog-????????????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
521 .hg/store/00changelog-????????????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
521 .hg/store/00changelog-????????????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
522 .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
522 .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
523 $ hg ci -m a3 --config "extensions.abort=$RUNTESTDIR/testlib/crash_transaction_late.py"
523 $ hg ci -m a3 --config "extensions.abort=$RUNTESTDIR/testlib/crash_transaction_late.py"
524 transaction abort!
524 transaction abort!
525 rollback completed
525 rollback completed
526 abort: This is a late abort
526 abort: This is a late abort
527 [255]
527 [255]
528 $ hg debugnodemap --metadata
528 $ hg debugnodemap --metadata
529 uid: ???????????????? (glob)
529 uid: ???????????????? (glob)
530 tip-rev: 5005
530 tip-rev: 5005
531 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
531 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
532 data-length: 121536 (pure !)
532 data-length: 121536 (pure !)
533 data-length: 121536 (rust !)
533 data-length: 121536 (rust !)
534 data-length: 121088 (no-pure no-rust !)
534 data-length: 121088 (no-pure no-rust !)
535 data-unused: 448 (pure !)
535 data-unused: 448 (pure !)
536 data-unused: 448 (rust !)
536 data-unused: 448 (rust !)
537 data-unused: 0 (no-pure no-rust !)
537 data-unused: 0 (no-pure no-rust !)
538 data-unused: 0.369% (pure !)
538 data-unused: 0.369% (pure !)
539 data-unused: 0.369% (rust !)
539 data-unused: 0.369% (rust !)
540 data-unused: 0.000% (no-pure no-rust !)
540 data-unused: 0.000% (no-pure no-rust !)
541 $ f --size --sha256 .hg/store/00changelog-*.nd
541 $ f --size --sha256 .hg/store/00changelog-*.nd
542 .hg/store/00changelog-????????????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
542 .hg/store/00changelog-????????????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
543 .hg/store/00changelog-????????????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
543 .hg/store/00changelog-????????????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
544 .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
544 .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
545
545
546 Check that removing content does not confuse the nodemap
546 Check that removing content does not confuse the nodemap
547 --------------------------------------------------------
547 --------------------------------------------------------
548
548
549 removing data with rollback
549 removing data with rollback
550
550
551 $ echo aso > a
551 $ echo aso > a
552 $ hg ci -m a4
552 $ hg ci -m a4
553 $ hg rollback
553 $ hg rollback
554 repository tip rolled back to revision 5005 (undo commit)
554 repository tip rolled back to revision 5005 (undo commit)
555 working directory now based on revision 5005
555 working directory now based on revision 5005
556 $ hg id -r .
556 $ hg id -r .
557 90d5d3ba2fc4 tip
557 90d5d3ba2fc4 tip
558
558
559 removing data with strip
559 removing data with strip
560
560
561 $ echo aso > a
561 $ echo aso > a
562 $ hg ci -m a4
562 $ hg ci -m a4
563 $ hg --config extensions.strip= strip -r . --no-backup
563 $ hg --config extensions.strip= strip -r . --no-backup
564 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
564 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
565 $ hg id -r . --traceback
565 $ hg id -r . --traceback
566 90d5d3ba2fc4 tip
566 90d5d3ba2fc4 tip
567
567
568 Test upgrade / downgrade
568 Test upgrade / downgrade
569 ========================
569 ========================
570
570
571 downgrading
571 downgrading
572
572
573 $ cat << EOF >> .hg/hgrc
573 $ cat << EOF >> .hg/hgrc
574 > [format]
574 > [format]
575 > use-persistent-nodemap=no
575 > use-persistent-nodemap=no
576 > EOF
576 > EOF
577 $ hg debugformat -v
577 $ hg debugformat -v
578 format-variant repo config default
578 format-variant repo config default
579 fncache: yes yes yes
579 fncache: yes yes yes
580 dotencode: yes yes yes
580 dotencode: yes yes yes
581 generaldelta: yes yes yes
581 generaldelta: yes yes yes
582 share-safe: no no no
582 share-safe: no no no
583 sparserevlog: yes yes yes
583 sparserevlog: yes yes yes
584 persistent-nodemap: yes no no
584 persistent-nodemap: yes no no
585 copies-sdc: no no no
585 copies-sdc: no no no
586 revlog-v2: no no no
586 revlog-v2: no no no
587 plain-cl-delta: yes yes yes
587 plain-cl-delta: yes yes yes
588 compression: zlib zlib zlib (no-zstd !)
588 compression: zlib zlib zlib (no-zstd !)
589 compression: zstd zstd zstd (zstd !)
589 compression: zstd zstd zstd (zstd !)
590 compression-level: default default default
590 compression-level: default default default
591 $ hg debugupgraderepo --run --no-backup
591 $ hg debugupgraderepo --run --no-backup
592 upgrade will perform the following actions:
592 upgrade will perform the following actions:
593
593
594 requirements
594 requirements
595 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !)
595 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !)
596 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
596 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
597 removed: persistent-nodemap
597 removed: persistent-nodemap
598
598
599 processed revlogs:
599 processed revlogs:
600 - all-filelogs
600 - all-filelogs
601 - changelog
601 - changelog
602 - manifest
602 - manifest
603
603
604 beginning upgrade...
604 beginning upgrade...
605 repository locked and read-only
605 repository locked and read-only
606 creating temporary repository to stage upgraded data: $TESTTMP/test-repo/.hg/upgrade.* (glob)
606 creating temporary repository to stage upgraded data: $TESTTMP/test-repo/.hg/upgrade.* (glob)
607 (it is safe to interrupt this process any time before data migration completes)
607 (it is safe to interrupt this process any time before data migration completes)
608 downgrading repository to not use persistent nodemap feature
608 downgrading repository to not use persistent nodemap feature
609 removing temporary repository $TESTTMP/test-repo/.hg/upgrade.* (glob)
609 removing temporary repository $TESTTMP/test-repo/.hg/upgrade.* (glob)
610 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
610 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
611 00changelog-*.nd (glob)
611 00changelog-*.nd (glob)
612 00manifest-*.nd (glob)
612 00manifest-*.nd (glob)
613 undo.backup.00changelog.n
613 undo.backup.00changelog.n
614 undo.backup.00manifest.n
614 undo.backup.00manifest.n
615 $ hg debugnodemap --metadata
615 $ hg debugnodemap --metadata
616
616
617
617
618 upgrading
618 upgrading
619
619
620 $ cat << EOF >> .hg/hgrc
620 $ cat << EOF >> .hg/hgrc
621 > [format]
621 > [format]
622 > use-persistent-nodemap=yes
622 > use-persistent-nodemap=yes
623 > EOF
623 > EOF
624 $ hg debugformat -v
624 $ hg debugformat -v
625 format-variant repo config default
625 format-variant repo config default
626 fncache: yes yes yes
626 fncache: yes yes yes
627 dotencode: yes yes yes
627 dotencode: yes yes yes
628 generaldelta: yes yes yes
628 generaldelta: yes yes yes
629 share-safe: no no no
629 share-safe: no no no
630 sparserevlog: yes yes yes
630 sparserevlog: yes yes yes
631 persistent-nodemap: no yes no
631 persistent-nodemap: no yes no
632 copies-sdc: no no no
632 copies-sdc: no no no
633 revlog-v2: no no no
633 revlog-v2: no no no
634 plain-cl-delta: yes yes yes
634 plain-cl-delta: yes yes yes
635 compression: zlib zlib zlib (no-zstd !)
635 compression: zlib zlib zlib (no-zstd !)
636 compression: zstd zstd zstd (zstd !)
636 compression: zstd zstd zstd (zstd !)
637 compression-level: default default default
637 compression-level: default default default
638 $ hg debugupgraderepo --run --no-backup
638 $ hg debugupgraderepo --run --no-backup
639 upgrade will perform the following actions:
639 upgrade will perform the following actions:
640
640
641 requirements
641 requirements
642 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !)
642 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !)
643 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
643 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
644 added: persistent-nodemap
644 added: persistent-nodemap
645
645
646 persistent-nodemap
646 persistent-nodemap
647 Speedup revision lookup by node id.
647 Speedup revision lookup by node id.
648
648
649 processed revlogs:
649 processed revlogs:
650 - all-filelogs
650 - all-filelogs
651 - changelog
651 - changelog
652 - manifest
652 - manifest
653
653
654 beginning upgrade...
654 beginning upgrade...
655 repository locked and read-only
655 repository locked and read-only
656 creating temporary repository to stage upgraded data: $TESTTMP/test-repo/.hg/upgrade.* (glob)
656 creating temporary repository to stage upgraded data: $TESTTMP/test-repo/.hg/upgrade.* (glob)
657 (it is safe to interrupt this process any time before data migration completes)
657 (it is safe to interrupt this process any time before data migration completes)
658 upgrading repository to use persistent nodemap feature
658 upgrading repository to use persistent nodemap feature
659 removing temporary repository $TESTTMP/test-repo/.hg/upgrade.* (glob)
659 removing temporary repository $TESTTMP/test-repo/.hg/upgrade.* (glob)
660 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
660 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
661 00changelog-*.nd (glob)
661 00changelog-*.nd (glob)
662 00changelog.n
662 00changelog.n
663 00manifest-*.nd (glob)
663 00manifest-*.nd (glob)
664 00manifest.n
664 00manifest.n
665 undo.backup.00changelog.n
665 undo.backup.00changelog.n
666 undo.backup.00manifest.n
666 undo.backup.00manifest.n
667
667
668 $ hg debugnodemap --metadata
668 $ hg debugnodemap --metadata
669 uid: * (glob)
669 uid: * (glob)
670 tip-rev: 5005
670 tip-rev: 5005
671 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
671 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
672 data-length: 121088
672 data-length: 121088
673 data-unused: 0
673 data-unused: 0
674 data-unused: 0.000%
674 data-unused: 0.000%
675
675
676 Running unrelated upgrade
676 Running unrelated upgrade
677
677
678 $ hg debugupgraderepo --run --no-backup --quiet --optimize re-delta-all
678 $ hg debugupgraderepo --run --no-backup --quiet --optimize re-delta-all
679 upgrade will perform the following actions:
679 upgrade will perform the following actions:
680
680
681 requirements
681 requirements
682 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (no-zstd !)
682 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (no-zstd !)
683 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
683 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
684
684
685 optimisations: re-delta-all
685 optimisations: re-delta-all
686
686
687 processed revlogs:
687 processed revlogs:
688 - all-filelogs
688 - all-filelogs
689 - changelog
689 - changelog
690 - manifest
690 - manifest
691
691
692 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
692 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
693 00changelog-*.nd (glob)
693 00changelog-*.nd (glob)
694 00changelog.n
694 00changelog.n
695 00manifest-*.nd (glob)
695 00manifest-*.nd (glob)
696 00manifest.n
696 00manifest.n
697
697
698 $ hg debugnodemap --metadata
698 $ hg debugnodemap --metadata
699 uid: * (glob)
699 uid: * (glob)
700 tip-rev: 5005
700 tip-rev: 5005
701 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
701 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
702 data-length: 121088
702 data-length: 121088
703 data-unused: 0
703 data-unused: 0
704 data-unused: 0.000%
704 data-unused: 0.000%
705
705
706 Persistent nodemap and local/streaming clone
706 Persistent nodemap and local/streaming clone
707 ============================================
707 ============================================
708
708
709 $ cd ..
709 $ cd ..
710
710
711 standard clone
711 standard clone
712 --------------
712 --------------
713
713
714 The persistent nodemap should exist after a streaming clone
714 The persistent nodemap should exist after a streaming clone
715
715
716 $ hg clone --pull --quiet -U test-repo standard-clone
716 $ hg clone --pull --quiet -U test-repo standard-clone
717 $ ls -1 standard-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
717 $ ls -1 standard-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
718 00changelog-*.nd (glob)
718 00changelog-*.nd (glob)
719 00changelog.n
719 00changelog.n
720 00manifest-*.nd (glob)
720 00manifest-*.nd (glob)
721 00manifest.n
721 00manifest.n
722 $ hg -R standard-clone debugnodemap --metadata
722 $ hg -R standard-clone debugnodemap --metadata
723 uid: * (glob)
723 uid: * (glob)
724 tip-rev: 5005
724 tip-rev: 5005
725 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
725 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
726 data-length: 121088
726 data-length: 121088
727 data-unused: 0
727 data-unused: 0
728 data-unused: 0.000%
728 data-unused: 0.000%
729
729
730
730
731 local clone
731 local clone
732 ------------
732 ------------
733
733
734 The persistent nodemap should exist after a streaming clone
734 The persistent nodemap should exist after a streaming clone
735
735
736 $ hg clone -U test-repo local-clone
736 $ hg clone -U test-repo local-clone
737 $ ls -1 local-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
737 $ ls -1 local-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
738 00changelog-*.nd (glob)
738 00changelog-*.nd (glob)
739 00changelog.n
739 00changelog.n
740 00manifest-*.nd (glob)
740 00manifest-*.nd (glob)
741 00manifest.n
741 00manifest.n
742 $ hg -R local-clone debugnodemap --metadata
742 $ hg -R local-clone debugnodemap --metadata
743 uid: * (glob)
743 uid: * (glob)
744 tip-rev: 5005
744 tip-rev: 5005
745 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
745 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
746 data-length: 121088
746 data-length: 121088
747 data-unused: 0
747 data-unused: 0
748 data-unused: 0.000%
748 data-unused: 0.000%
749
749
750 Test various corruption cases
750 Test various corruption cases
751 ============================
751 ============================
752
752
753 Missing datafile
753 Missing datafile
754 ----------------
754 ----------------
755
755
756 Test behavior with a missing datafile
756 Test behavior with a missing datafile
757
757
758 $ hg clone --quiet --pull test-repo corruption-test-repo
758 $ hg clone --quiet --pull test-repo corruption-test-repo
759 $ ls -1 corruption-test-repo/.hg/store/00changelog*
759 $ ls -1 corruption-test-repo/.hg/store/00changelog*
760 corruption-test-repo/.hg/store/00changelog-*.nd (glob)
760 corruption-test-repo/.hg/store/00changelog-*.nd (glob)
761 corruption-test-repo/.hg/store/00changelog.d
761 corruption-test-repo/.hg/store/00changelog.d
762 corruption-test-repo/.hg/store/00changelog.i
762 corruption-test-repo/.hg/store/00changelog.i
763 corruption-test-repo/.hg/store/00changelog.n
763 corruption-test-repo/.hg/store/00changelog.n
764 $ rm corruption-test-repo/.hg/store/00changelog*.nd
764 $ rm corruption-test-repo/.hg/store/00changelog*.nd
765 $ hg log -R corruption-test-repo -r .
765 $ hg log -R corruption-test-repo -r .
766 changeset: 5005:90d5d3ba2fc4
766 changeset: 5005:90d5d3ba2fc4
767 tag: tip
767 tag: tip
768 user: test
768 user: test
769 date: Thu Jan 01 00:00:00 1970 +0000
769 date: Thu Jan 01 00:00:00 1970 +0000
770 summary: a2
770 summary: a2
771
771
772 $ ls -1 corruption-test-repo/.hg/store/00changelog*
772 $ ls -1 corruption-test-repo/.hg/store/00changelog*
773 corruption-test-repo/.hg/store/00changelog.d
773 corruption-test-repo/.hg/store/00changelog.d
774 corruption-test-repo/.hg/store/00changelog.i
774 corruption-test-repo/.hg/store/00changelog.i
775 corruption-test-repo/.hg/store/00changelog.n
775 corruption-test-repo/.hg/store/00changelog.n
776
776
777 Truncated data file
777 Truncated data file
778 -------------------
778 -------------------
779
779
780 Test behavior with a too short datafile
780 Test behavior with a too short datafile
781
781
782 rebuild the missing data
782 rebuild the missing data
783 $ hg -R corruption-test-repo debugupdatecache
783 $ hg -R corruption-test-repo debugupdatecache
784 $ ls -1 corruption-test-repo/.hg/store/00changelog*
784 $ ls -1 corruption-test-repo/.hg/store/00changelog*
785 corruption-test-repo/.hg/store/00changelog-*.nd (glob)
785 corruption-test-repo/.hg/store/00changelog-*.nd (glob)
786 corruption-test-repo/.hg/store/00changelog.d
786 corruption-test-repo/.hg/store/00changelog.d
787 corruption-test-repo/.hg/store/00changelog.i
787 corruption-test-repo/.hg/store/00changelog.i
788 corruption-test-repo/.hg/store/00changelog.n
788 corruption-test-repo/.hg/store/00changelog.n
789
789
790 truncate the file
790 truncate the file
791
791
792 $ datafilepath=`ls corruption-test-repo/.hg/store/00changelog*.nd`
792 $ datafilepath=`ls corruption-test-repo/.hg/store/00changelog*.nd`
793 $ f -s $datafilepath
793 $ f -s $datafilepath
794 corruption-test-repo/.hg/store/00changelog-*.nd: size=121088 (glob)
794 corruption-test-repo/.hg/store/00changelog-*.nd: size=121088 (glob)
795 $ dd if=$datafilepath bs=1000 count=10 of=$datafilepath-tmp status=none
795 $ dd if=$datafilepath bs=1000 count=10 of=$datafilepath-tmp status=none
796 $ mv $datafilepath-tmp $datafilepath
796 $ mv $datafilepath-tmp $datafilepath
797 $ f -s $datafilepath
797 $ f -s $datafilepath
798 corruption-test-repo/.hg/store/00changelog-*.nd: size=10000 (glob)
798 corruption-test-repo/.hg/store/00changelog-*.nd: size=10000 (glob)
799
799
800 Check Mercurial's reaction to this event
800 Check Mercurial's reaction to this event
801
801
802 $ hg -R corruption-test-repo log -r . --traceback
802 $ hg -R corruption-test-repo log -r . --traceback
803 changeset: 5005:90d5d3ba2fc4
803 changeset: 5005:90d5d3ba2fc4
804 tag: tip
804 tag: tip
805 user: test
805 user: test
806 date: Thu Jan 01 00:00:00 1970 +0000
806 date: Thu Jan 01 00:00:00 1970 +0000
807 summary: a2
807 summary: a2
808
808
809
809
810
810
811 stream clone
811 stream clone
812 ============
812 ============
813
813
814 The persistent nodemap should exist after a streaming clone
814 The persistent nodemap should exist after a streaming clone
815
815
816 Simple case
816 Simple case
817 -----------
817 -----------
818
818
819 No race condition
819 No race condition
820
820
821 $ hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone --debug | egrep '00(changelog|manifest)'
821 $ hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone --debug | egrep '00(changelog|manifest)'
822 adding [s] 00manifest.n (70 bytes)
822 adding [s] 00manifest.n (70 bytes)
823 adding [s] 00manifest.d (452 KB) (no-zstd !)
824 adding [s] 00manifest.d (491 KB) (zstd !)
825 adding [s] 00manifest-*.nd (118 KB) (glob)
823 adding [s] 00manifest-*.nd (118 KB) (glob)
826 adding [s] 00changelog.n (70 bytes)
824 adding [s] 00changelog.n (70 bytes)
825 adding [s] 00changelog-*.nd (118 KB) (glob)
826 adding [s] 00manifest.d (452 KB) (no-zstd !)
827 adding [s] 00manifest.d (491 KB) (zstd !)
827 adding [s] 00changelog.d (360 KB) (no-zstd !)
828 adding [s] 00changelog.d (360 KB) (no-zstd !)
828 adding [s] 00changelog.d (368 KB) (zstd !)
829 adding [s] 00changelog.d (368 KB) (zstd !)
829 adding [s] 00changelog-*.nd (118 KB) (glob)
830 adding [s] 00manifest.i (313 KB)
830 adding [s] 00manifest.i (313 KB)
831 adding [s] 00changelog.i (313 KB)
831 adding [s] 00changelog.i (313 KB)
832 $ ls -1 stream-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
832 $ ls -1 stream-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
833 00changelog-*.nd (glob)
833 00changelog-*.nd (glob)
834 00changelog.n
834 00changelog.n
835 00manifest-*.nd (glob)
835 00manifest-*.nd (glob)
836 00manifest.n
836 00manifest.n
837 $ hg -R stream-clone debugnodemap --metadata
837 $ hg -R stream-clone debugnodemap --metadata
838 uid: * (glob)
838 uid: * (glob)
839 tip-rev: 5005
839 tip-rev: 5005
840 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
840 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
841 data-length: 121088
841 data-length: 121088
842 data-unused: 0
842 data-unused: 0
843 data-unused: 0.000%
843 data-unused: 0.000%
844
844
845 new data appended
845 new data appended
846 -----------------
846 -----------------
847
847
848 Other commit happening on the server during the stream clone
848 Other commit happening on the server during the stream clone
849
849
850 setup the step-by-step stream cloning
850 setup the step-by-step stream cloning
851
851
852 $ HG_TEST_STREAM_WALKED_FILE_1="$TESTTMP/sync_file_walked_1"
852 $ HG_TEST_STREAM_WALKED_FILE_1="$TESTTMP/sync_file_walked_1"
853 $ export HG_TEST_STREAM_WALKED_FILE_1
853 $ export HG_TEST_STREAM_WALKED_FILE_1
854 $ HG_TEST_STREAM_WALKED_FILE_2="$TESTTMP/sync_file_walked_2"
854 $ HG_TEST_STREAM_WALKED_FILE_2="$TESTTMP/sync_file_walked_2"
855 $ export HG_TEST_STREAM_WALKED_FILE_2
855 $ export HG_TEST_STREAM_WALKED_FILE_2
856 $ HG_TEST_STREAM_WALKED_FILE_3="$TESTTMP/sync_file_walked_3"
856 $ HG_TEST_STREAM_WALKED_FILE_3="$TESTTMP/sync_file_walked_3"
857 $ export HG_TEST_STREAM_WALKED_FILE_3
857 $ export HG_TEST_STREAM_WALKED_FILE_3
858 $ cat << EOF >> test-repo/.hg/hgrc
858 $ cat << EOF >> test-repo/.hg/hgrc
859 > [extensions]
859 > [extensions]
860 > steps=$RUNTESTDIR/testlib/ext-stream-clone-steps.py
860 > steps=$RUNTESTDIR/testlib/ext-stream-clone-steps.py
861 > EOF
861 > EOF
862
862
863 Check and record file state beforehand
863 Check and record file state beforehand
864
864
865 $ f --size test-repo/.hg/store/00changelog*
865 $ f --size test-repo/.hg/store/00changelog*
866 test-repo/.hg/store/00changelog-*.nd: size=121088 (glob)
866 test-repo/.hg/store/00changelog-*.nd: size=121088 (glob)
867 test-repo/.hg/store/00changelog.d: size=376891 (zstd !)
867 test-repo/.hg/store/00changelog.d: size=376891 (zstd !)
868 test-repo/.hg/store/00changelog.d: size=368890 (no-zstd !)
868 test-repo/.hg/store/00changelog.d: size=368890 (no-zstd !)
869 test-repo/.hg/store/00changelog.i: size=320384
869 test-repo/.hg/store/00changelog.i: size=320384
870 test-repo/.hg/store/00changelog.n: size=70
870 test-repo/.hg/store/00changelog.n: size=70
871 $ hg -R test-repo debugnodemap --metadata | tee server-metadata.txt
871 $ hg -R test-repo debugnodemap --metadata | tee server-metadata.txt
872 uid: * (glob)
872 uid: * (glob)
873 tip-rev: 5005
873 tip-rev: 5005
874 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
874 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
875 data-length: 121088
875 data-length: 121088
876 data-unused: 0
876 data-unused: 0
877 data-unused: 0.000%
877 data-unused: 0.000%
878
878
879 Prepare a commit
879 Prepare a commit
880
880
881 $ echo foo >> test-repo/foo
881 $ echo foo >> test-repo/foo
882 $ hg -R test-repo/ add test-repo/foo
882 $ hg -R test-repo/ add test-repo/foo
883
883
884 Do a mix of clone and commit at the same time so that the file listed on disk differ at actual transfer time.
884 Do a mix of clone and commit at the same time so that the file listed on disk differ at actual transfer time.
885
885
886 $ (hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone-race-1 --debug 2>> clone-output | egrep '00(changelog|manifest)' >> clone-output; touch $HG_TEST_STREAM_WALKED_FILE_3) &
886 $ (hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone-race-1 --debug 2>> clone-output | egrep '00(changelog|manifest)' >> clone-output; touch $HG_TEST_STREAM_WALKED_FILE_3) &
887 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
887 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
888 $ hg -R test-repo/ commit -m foo
888 $ hg -R test-repo/ commit -m foo
889 $ touch $HG_TEST_STREAM_WALKED_FILE_2
889 $ touch $HG_TEST_STREAM_WALKED_FILE_2
890 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
890 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
891 $ cat clone-output
891 $ cat clone-output
892 remote: abort: unexpected error: [Errno 2] $ENOENT$: *'$TESTTMP/test-repo/.hg/store/00manifest-*.nd' (glob) (known-bad-output no-rust no-pure !)
893 abort: pull failed on remote (known-bad-output no-rust no-pure !)
894 adding [s] 00manifest.n (70 bytes)
892 adding [s] 00manifest.n (70 bytes)
895 adding [s] 00manifest.d (491 KB) (zstd !)
893 adding [s] 00manifest-*.nd (118 KB) (glob)
894 adding [s] 00changelog.n (70 bytes)
895 adding [s] 00changelog-*.nd (118 KB) (glob)
896 adding [s] 00manifest.d (452 KB) (no-zstd !)
896 adding [s] 00manifest.d (452 KB) (no-zstd !)
897 remote: abort: $ENOENT$: '$TESTTMP/test-repo/.hg/store/00manifest-*.nd' (glob) (known-bad-output no-rust no-pure !)
897 adding [s] 00manifest.d (491 KB) (zstd !)
898 adding [s] 00manifest-*.nd (118 KB) (glob) (rust !)
899 adding [s] 00changelog.n (70 bytes) (rust !)
900 adding [s] 00changelog.d (368 KB) (zstd rust !)
901 adding [s] 00changelog-*.nd (118 KB) (glob) (rust !)
902 adding [s] 00manifest.i (313 KB) (rust !)
903 adding [s] 00changelog.i (313 KB) (rust !)
904 adding [s] 00manifest-*.nd (118 KB) (glob) (pure !)
905 adding [s] 00changelog.n (70 bytes) (pure !)
906 adding [s] 00changelog.d (360 KB) (no-zstd !)
898 adding [s] 00changelog.d (360 KB) (no-zstd !)
907 adding [s] 00changelog-*.nd (118 KB) (glob) (pure !)
899 adding [s] 00changelog.d (368 KB) (zstd !)
908 adding [s] 00manifest.i (313 KB) (pure !)
900 adding [s] 00manifest.i (313 KB)
909 adding [s] 00changelog.i (313 KB) (pure !)
901 adding [s] 00changelog.i (313 KB)
910
902
911 Check the result state
903 Check the result state
912
904
913 $ f --size stream-clone-race-1/.hg/store/00changelog*
905 $ f --size stream-clone-race-1/.hg/store/00changelog*
914 stream-clone-race-1/.hg/store/00changelog*: file not found (known-bad-output no-rust no-pure !)
906 stream-clone-race-1/.hg/store/00changelog-*.nd: size=121088 (glob)
915 stream-clone-race-1/.hg/store/00changelog-*.nd: size=121088 (glob) (rust !)
907 stream-clone-race-1/.hg/store/00changelog.d: size=368890 (no-zstd !)
916 stream-clone-race-1/.hg/store/00changelog.d: size=376891 (zstd rust !)
908 stream-clone-race-1/.hg/store/00changelog.d: size=376891 (zstd !)
917 stream-clone-race-1/.hg/store/00changelog.i: size=320384 (rust !)
909 stream-clone-race-1/.hg/store/00changelog.i: size=320384
918 stream-clone-race-1/.hg/store/00changelog.n: size=70 (rust !)
910 stream-clone-race-1/.hg/store/00changelog.n: size=70
919 stream-clone-race-1/.hg/store/00changelog-*.nd: size=121088 (glob) (pure !)
920 stream-clone-race-1/.hg/store/00changelog.d: size=368890 (no-zstd pure !)
921 stream-clone-race-1/.hg/store/00changelog.i: size=320384 (pure !)
922 stream-clone-race-1/.hg/store/00changelog.n: size=70 (pure !)
923
911
924 $ hg -R stream-clone-race-1 debugnodemap --metadata | tee client-metadata.txt
912 $ hg -R stream-clone-race-1 debugnodemap --metadata | tee client-metadata.txt
925 abort: repository stream-clone-race-1 not found (known-bad-output no-rust no-pure !)
913 uid: * (glob)
926 uid: * (glob) (rust !)
914 tip-rev: 5005
927 tip-rev: 5005 (rust !)
915 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
928 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe (rust !)
916 data-length: 121088
929 data-length: 121088 (rust !)
917 data-unused: 0
930 data-unused: 0 (rust !)
918 data-unused: 0.000%
931 data-unused: 0.000% (rust !)
932 uid: * (glob) (pure !)
933 tip-rev: 5005 (pure !)
934 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe (pure !)
935 data-length: 121088 (pure !)
936 data-unused: 0 (pure !)
937 data-unused: 0.000% (pure !)
938
919
939 We get a usable nodemap, so no rewrite would be needed and the metadata should be identical
920 We get a usable nodemap, so no rewrite would be needed and the metadata should be identical
940 (ie: the following diff should be empty)
921 (ie: the following diff should be empty)
941
922
923 This isn't the case for the `no-rust` `no-pure` implementation as it uses a very minimal nodemap implementation that unconditionally rewrites the nodemap "all the time".
924
925 #if no-rust no-pure
942 $ diff -u server-metadata.txt client-metadata.txt
926 $ diff -u server-metadata.txt client-metadata.txt
943 --- server-metadata.txt * (glob) (known-bad-output !)
927 --- server-metadata.txt * (glob)
944 +++ client-metadata.txt * (glob) (known-bad-output !)
928 +++ client-metadata.txt * (glob)
945 @@ -1,4 +1,4 @@ (known-bad-output rust !)
929 @@ -1,4 +1,4 @@
946 @@ -1,4 +1,4 @@ (known-bad-output pure !)
930 -uid: * (glob)
947 @@ -1,6 +0,0 @@ (known-bad-output no-rust no-pure !)
931 +uid: * (glob)
948 -uid: * (glob) (known-bad-output !)
932 tip-rev: 5005
949 +uid: * (glob) (known-bad-output rust !)
933 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
950 tip-rev: 5005 (known-bad-output rust !)
934 data-length: 121088
951 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe (known-bad-output rust !)
952 data-length: 121088 (known-bad-output rust !)
953 +uid: * (glob) (known-bad-output pure !)
954 tip-rev: 5005 (known-bad-output pure !)
955 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe (known-bad-output pure !)
956 data-length: 121088 (known-bad-output pure !)
957 -tip-rev: 5005 (known-bad-output no-rust no-pure !)
958 -tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe (known-bad-output no-rust no-pure !)
959 -data-length: 121088 (known-bad-output no-rust no-pure !)
960 -data-unused: 0 (known-bad-output no-rust no-pure !)
961 -data-unused: 0.000% (known-bad-output no-rust no-pure !)
962 [1]
935 [1]
936 #else
937 $ diff -u server-metadata.txt client-metadata.txt
938 #endif
939
963
940
964 Clean up after the test.
941 Clean up after the test.
965
942
966 $ rm -f "$HG_TEST_STREAM_WALKED_FILE_1"
943 $ rm -f "$HG_TEST_STREAM_WALKED_FILE_1"
967 $ rm -f "$HG_TEST_STREAM_WALKED_FILE_2"
944 $ rm -f "$HG_TEST_STREAM_WALKED_FILE_2"
968 $ rm -f "$HG_TEST_STREAM_WALKED_FILE_3"
945 $ rm -f "$HG_TEST_STREAM_WALKED_FILE_3"
969
946
970 full regeneration
947 full regeneration
971 -----------------
948 -----------------
972
949
973 A full nodemap is generated
950 A full nodemap is generated
974
951
975 (ideally this test would append enough data to make sure the nodemap data file
952 (ideally this test would append enough data to make sure the nodemap data file
976 get changed; however, to make things simpler we will force the regeneration for
953 get changed; however, to make things simpler we will force the regeneration for
977 this test.)
954 this test.)
978
955
979 Check the initial state
956 Check the initial state
980
957
981 $ f --size test-repo/.hg/store/00changelog*
958 $ f --size test-repo/.hg/store/00changelog*
982 test-repo/.hg/store/00changelog-*.nd: size=121344 (glob) (rust !)
959 test-repo/.hg/store/00changelog-*.nd: size=121344 (glob) (rust !)
983 test-repo/.hg/store/00changelog-*.nd: size=121344 (glob) (pure !)
960 test-repo/.hg/store/00changelog-*.nd: size=121344 (glob) (pure !)
984 test-repo/.hg/store/00changelog-*.nd: size=121152 (glob) (no-rust no-pure !)
961 test-repo/.hg/store/00changelog-*.nd: size=121152 (glob) (no-rust no-pure !)
985 test-repo/.hg/store/00changelog.d: size=376950 (zstd !)
962 test-repo/.hg/store/00changelog.d: size=376950 (zstd !)
986 test-repo/.hg/store/00changelog.d: size=368949 (no-zstd !)
963 test-repo/.hg/store/00changelog.d: size=368949 (no-zstd !)
987 test-repo/.hg/store/00changelog.i: size=320448
964 test-repo/.hg/store/00changelog.i: size=320448
988 test-repo/.hg/store/00changelog.n: size=70
965 test-repo/.hg/store/00changelog.n: size=70
989 $ hg -R test-repo debugnodemap --metadata | tee server-metadata-2.txt
966 $ hg -R test-repo debugnodemap --metadata | tee server-metadata-2.txt
990 uid: * (glob)
967 uid: * (glob)
991 tip-rev: 5006
968 tip-rev: 5006
992 tip-node: ed2ec1eef9aa2a0ec5057c51483bc148d03e810b
969 tip-node: ed2ec1eef9aa2a0ec5057c51483bc148d03e810b
993 data-length: 121344 (rust !)
970 data-length: 121344 (rust !)
971 data-length: 121344 (pure !)
972 data-length: 121152 (no-rust no-pure !)
994 data-unused: 192 (rust !)
973 data-unused: 192 (rust !)
995 data-unused: 0.158% (rust !)
974 data-unused: 192 (pure !)
996 data-length: 121152 (no-rust no-pure !)
997 data-unused: 0 (no-rust no-pure !)
975 data-unused: 0 (no-rust no-pure !)
976 data-unused: 0.158% (rust !)
977 data-unused: 0.158% (pure !)
998 data-unused: 0.000% (no-rust no-pure !)
978 data-unused: 0.000% (no-rust no-pure !)
999 data-length: 121344 (pure !)
1000 data-unused: 192 (pure !)
1001 data-unused: 0.158% (pure !)
1002
979
1003 Perform the mix of clone and full refresh of the nodemap, so that the files
980 Perform the mix of clone and full refresh of the nodemap, so that the files
1004 (and filenames) are different between listing time and actual transfer time.
981 (and filenames) are different between listing time and actual transfer time.
1005
982
1006 $ (hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone-race-2 --debug 2>> clone-output-2 | egrep '00(changelog|manifest)' >> clone-output-2; touch $HG_TEST_STREAM_WALKED_FILE_3) &
983 $ (hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone-race-2 --debug 2>> clone-output-2 | egrep '00(changelog|manifest)' >> clone-output-2; touch $HG_TEST_STREAM_WALKED_FILE_3) &
1007 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
984 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
1008 $ rm test-repo/.hg/store/00changelog.n
985 $ rm test-repo/.hg/store/00changelog.n
1009 $ rm test-repo/.hg/store/00changelog-*.nd
986 $ rm test-repo/.hg/store/00changelog-*.nd
1010 $ hg -R test-repo/ debugupdatecache
987 $ hg -R test-repo/ debugupdatecache
1011 $ touch $HG_TEST_STREAM_WALKED_FILE_2
988 $ touch $HG_TEST_STREAM_WALKED_FILE_2
1012 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
989 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
1013 $ cat clone-output-2
990 $ cat clone-output-2
1014 remote: abort: unexpected error: [Errno 2] $ENOENT$: *'$TESTTMP/test-repo/.hg/store/00changelog-*.nd' (glob) (known-bad-output rust !)
1015 remote: abort: unexpected error: [Errno 2] $ENOENT$: *'$TESTTMP/test-repo/.hg/store/00changelog-*.nd' (glob) (known-bad-output pure !)
1016 remote: abort: unexpected error: [Errno 2] $ENOENT$: *'$TESTTMP/test-repo/.hg/store/00manifest-*.nd' (glob) (known-bad-output no-pure no-rust !)
1017 abort: pull failed on remote (known-bad-output !)
1018 adding [s] undo.backup.00manifest.n (70 bytes) (known-bad-output !)
991 adding [s] undo.backup.00manifest.n (70 bytes) (known-bad-output !)
1019 adding [s] undo.backup.00changelog.n (70 bytes) (known-bad-output !)
992 adding [s] undo.backup.00changelog.n (70 bytes) (known-bad-output !)
1020 adding [s] 00manifest.n (70 bytes)
993 adding [s] 00manifest.n (70 bytes)
994 adding [s] 00manifest-*.nd (118 KB) (glob)
995 adding [s] 00changelog.n (70 bytes)
996 adding [s] 00changelog-*.nd (118 KB) (glob)
1021 adding [s] 00manifest.d (492 KB) (zstd !)
997 adding [s] 00manifest.d (492 KB) (zstd !)
1022 adding [s] 00manifest.d (452 KB) (no-zstd !)
998 adding [s] 00manifest.d (452 KB) (no-zstd !)
1023 adding [s] 00manifest-*.nd (118 KB) (glob) (rust !)
1024 adding [s] 00manifest-*.nd (118 KB) (glob) (pure !)
1025 remote: abort: $ENOENT$: '$TESTTMP/test-repo/.hg/store/00changelog-*.nd' (glob) (known-bad-output rust !)
1026 remote: abort: $ENOENT$: '$TESTTMP/test-repo/.hg/store/00manifest-*.nd' (glob) (known-bad-output no-pure no-rust !)
1027 adding [s] 00changelog.n (70 bytes) (pure !)
1028 adding [s] 00changelog.d (360 KB) (no-zstd !)
999 adding [s] 00changelog.d (360 KB) (no-zstd !)
1029 remote: abort: $ENOENT$: '$TESTTMP/test-repo/.hg/store/00changelog-*.nd' (glob) (known-bad-output pure !)
1000 adding [s] 00changelog.d (368 KB) (zstd !)
1001 adding [s] 00manifest.i (313 KB)
1002 adding [s] 00changelog.i (313 KB)
1030
1003
1031 Check the result.
1004 Check the result.
1032
1005
1033 $ f --size stream-clone-race-2/.hg/store/00changelog*
1006 $ f --size stream-clone-race-2/.hg/store/00changelog*
1034 stream-clone-race-2/.hg/store/00changelog*: file not found (known-bad-output !)
1007 stream-clone-race-2/.hg/store/00changelog-*.nd: size=121344 (glob) (rust !)
1008 stream-clone-race-2/.hg/store/00changelog-*.nd: size=121344 (glob) (pure !)
1009 stream-clone-race-2/.hg/store/00changelog-*.nd: size=121152 (glob) (no-rust no-pure !)
1010 stream-clone-race-2/.hg/store/00changelog.d: size=376950 (zstd !)
1011 stream-clone-race-2/.hg/store/00changelog.d: size=368949 (no-zstd !)
1012 stream-clone-race-2/.hg/store/00changelog.i: size=320448
1013 stream-clone-race-2/.hg/store/00changelog.n: size=70
1035
1014
1036 $ hg -R stream-clone-race-2 debugnodemap --metadata | tee client-metadata-2.txt
1015 $ hg -R stream-clone-race-2 debugnodemap --metadata | tee client-metadata-2.txt
1037 abort: repository stream-clone-race-2 not found (known-bad-output !)
1016 uid: * (glob)
1017 tip-rev: 5006
1018 tip-node: ed2ec1eef9aa2a0ec5057c51483bc148d03e810b
1019 data-length: 121344 (rust !)
1020 data-unused: 192 (rust !)
1021 data-unused: 0.158% (rust !)
1022 data-length: 121152 (no-rust no-pure !)
1023 data-unused: 0 (no-rust no-pure !)
1024 data-unused: 0.000% (no-rust no-pure !)
1025 data-length: 121344 (pure !)
1026 data-unused: 192 (pure !)
1027 data-unused: 0.158% (pure !)
1038
1028
1039 We get a usable nodemap, so no rewrite would be needed and the metadata should be identical
1029 We get a usable nodemap, so no rewrite would be needed and the metadata should be identical
1040 (ie: the following diff should be empty)
1030 (ie: the following diff should be empty)
1041
1031
1032 This isn't the case for the `no-rust` `no-pure` implementation as it use a very minimal nodemap implementation that unconditionnaly rewrite the nodemap "all the time".
1033
1034 #if no-rust no-pure
1042 $ diff -u server-metadata-2.txt client-metadata-2.txt
1035 $ diff -u server-metadata-2.txt client-metadata-2.txt
1043 --- server-metadata-2.txt * (glob) (known-bad-output !)
1036 --- server-metadata-2.txt * (glob)
1044 +++ client-metadata-2.txt * (glob) (known-bad-output !)
1037 +++ client-metadata-2.txt * (glob)
1045 @@ -1,6 +0,0 @@ (known-bad-output !)
1038 @@ -1,4 +1,4 @@
1046 -uid: * (glob) (known-bad-output !)
1039 -uid: * (glob)
1047 -tip-rev: 5006 (known-bad-output !)
1040 +uid: * (glob)
1048 -tip-node: ed2ec1eef9aa2a0ec5057c51483bc148d03e810b (known-bad-output !)
1041 tip-rev: 5006
1049 -data-length: 121344 (known-bad-output rust !)
1042 tip-node: ed2ec1eef9aa2a0ec5057c51483bc148d03e810b
1050 -data-unused: 192 (known-bad-output rust !)
1043 data-length: 121152
1051 -data-unused: 0.158% (known-bad-output rust !)
1052 -data-length: 121344 (known-bad-output pure !)
1053 -data-unused: 192 (known-bad-output pure !)
1054 -data-unused: 0.158% (known-bad-output pure !)
1055 -data-length: 121152 (known-bad-output no-rust no-pure !)
1056 -data-unused: 0 (known-bad-output no-rust no-pure !)
1057 -data-unused: 0.000% (known-bad-output no-rust no-pure !)
1058 [1]
1044 [1]
1045 #else
1046 $ diff -u server-metadata-2.txt client-metadata-2.txt
1047 #endif
1059
1048
1060 Clean up after the test
1049 Clean up after the test
1061
1050
1062 $ rm -f $HG_TEST_STREAM_WALKED_FILE_1
1051 $ rm -f $HG_TEST_STREAM_WALKED_FILE_1
1063 $ rm -f $HG_TEST_STREAM_WALKED_FILE_2
1052 $ rm -f $HG_TEST_STREAM_WALKED_FILE_2
1064 $ rm -f $HG_TEST_STREAM_WALKED_FILE_3
1053 $ rm -f $HG_TEST_STREAM_WALKED_FILE_3
1065
1054
General Comments 0
You need to be logged in to leave comments. Login now