##// END OF EJS Templates
store: yield phases before changelog (changeset description, truncated)
Author: marmoute
Revision: r51406:5a62d56e, branch: default
parent child Browse files
Show More
@@ -1,1088 +1,1089 b''
1 # store.py - repository store handling for Mercurial
1 # store.py - repository store handling for Mercurial
2 #
2 #
3 # Copyright 2008 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2008 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import collections
8 import collections
9 import functools
9 import functools
10 import os
10 import os
11 import re
11 import re
12 import stat
12 import stat
13 from typing import Generator
13 from typing import Generator
14
14
15 from .i18n import _
15 from .i18n import _
16 from .pycompat import getattr
16 from .pycompat import getattr
17 from .thirdparty import attr
17 from .thirdparty import attr
18 from .node import hex
18 from .node import hex
19 from . import (
19 from . import (
20 changelog,
20 changelog,
21 error,
21 error,
22 manifest,
22 manifest,
23 policy,
23 policy,
24 pycompat,
24 pycompat,
25 util,
25 util,
26 vfs as vfsmod,
26 vfs as vfsmod,
27 )
27 )
28 from .utils import hashutil
28 from .utils import hashutil
29
29
30 parsers = policy.importmod('parsers')
30 parsers = policy.importmod('parsers')
31 # how many bytes should be read from fncache in one read
31 # how many bytes should be read from fncache in one read
32 # It is done to prevent loading large fncache files into memory
32 # It is done to prevent loading large fncache files into memory
33 fncache_chunksize = 10 ** 6
33 fncache_chunksize = 10 ** 6
34
34
35
35
36 def _match_tracked_entry(entry, matcher):
36 def _match_tracked_entry(entry, matcher):
37 """parses a fncache entry and returns whether the entry is tracking a path
37 """parses a fncache entry and returns whether the entry is tracking a path
38 matched by matcher or not.
38 matched by matcher or not.
39
39
40 If matcher is None, returns True"""
40 If matcher is None, returns True"""
41
41
42 if matcher is None:
42 if matcher is None:
43 return True
43 return True
44 if entry.is_filelog:
44 if entry.is_filelog:
45 return matcher(entry.target_id)
45 return matcher(entry.target_id)
46 elif entry.is_manifestlog:
46 elif entry.is_manifestlog:
47 return matcher.visitdir(entry.target_id.rstrip(b'/'))
47 return matcher.visitdir(entry.target_id.rstrip(b'/'))
48 raise error.ProgrammingError(b"cannot process entry %r" % entry)
48 raise error.ProgrammingError(b"cannot process entry %r" % entry)
49
49
50
50
51 # This avoids a collision between a file named foo and a dir named
51 # This avoids a collision between a file named foo and a dir named
52 # foo.i or foo.d
52 # foo.i or foo.d
53 def _encodedir(path):
53 def _encodedir(path):
54 """
54 """
55 >>> _encodedir(b'data/foo.i')
55 >>> _encodedir(b'data/foo.i')
56 'data/foo.i'
56 'data/foo.i'
57 >>> _encodedir(b'data/foo.i/bla.i')
57 >>> _encodedir(b'data/foo.i/bla.i')
58 'data/foo.i.hg/bla.i'
58 'data/foo.i.hg/bla.i'
59 >>> _encodedir(b'data/foo.i.hg/bla.i')
59 >>> _encodedir(b'data/foo.i.hg/bla.i')
60 'data/foo.i.hg.hg/bla.i'
60 'data/foo.i.hg.hg/bla.i'
61 >>> _encodedir(b'data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n')
61 >>> _encodedir(b'data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n')
62 'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n'
62 'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n'
63 """
63 """
64 return (
64 return (
65 path.replace(b".hg/", b".hg.hg/")
65 path.replace(b".hg/", b".hg.hg/")
66 .replace(b".i/", b".i.hg/")
66 .replace(b".i/", b".i.hg/")
67 .replace(b".d/", b".d.hg/")
67 .replace(b".d/", b".d.hg/")
68 )
68 )
69
69
70
70
71 encodedir = getattr(parsers, 'encodedir', _encodedir)
71 encodedir = getattr(parsers, 'encodedir', _encodedir)
72
72
73
73
def decodedir(path):
    """
    >>> decodedir(b'data/foo.i')
    'data/foo.i'
    >>> decodedir(b'data/foo.i.hg/bla.i')
    'data/foo.i/bla.i'
    >>> decodedir(b'data/foo.i.hg.hg/bla.i')
    'data/foo.i.hg/bla.i'
    """
    # fast path: nothing in the path was escaped by _encodedir
    if b".hg/" not in path:
        return path
    # undo the escaping, most specific pattern first
    for escaped, plain in (
        (b".d.hg/", b".d/"),
        (b".i.hg/", b".i/"),
        (b".hg.hg/", b".hg/"),
    ):
        path = path.replace(escaped, plain)
    return path
90
90
91
91
92 def _reserved():
92 def _reserved():
93 """characters that are problematic for filesystems
93 """characters that are problematic for filesystems
94
94
95 * ascii escapes (0..31)
95 * ascii escapes (0..31)
96 * ascii hi (126..255)
96 * ascii hi (126..255)
97 * windows specials
97 * windows specials
98
98
99 these characters will be escaped by encodefunctions
99 these characters will be escaped by encodefunctions
100 """
100 """
101 winreserved = [ord(x) for x in u'\\:*?"<>|']
101 winreserved = [ord(x) for x in u'\\:*?"<>|']
102 for x in range(32):
102 for x in range(32):
103 yield x
103 yield x
104 for x in range(126, 256):
104 for x in range(126, 256):
105 yield x
105 yield x
106 for x in winreserved:
106 for x in winreserved:
107 yield x
107 yield x
108
108
109
109
def _buildencodefun():
    """Build the (encode, decode) pair for the reversible store encoding.

    >>> enc, dec = _buildencodefun()

    >>> enc(b'nothing/special.txt')
    'nothing/special.txt'
    >>> dec(b'nothing/special.txt')
    'nothing/special.txt'

    >>> enc(b'HELLO')
    '_h_e_l_l_o'
    >>> dec(b'_h_e_l_l_o')
    'HELLO'

    >>> enc(b'hello:world?')
    'hello~3aworld~3f'
    >>> dec(b'hello~3aworld~3f')
    'hello:world?'

    >>> enc(b'the\\x07quick\\xADshot')
    'the~07quick~adshot'
    >>> dec(b'the~07quick~adshot')
    'the\\x07quick\\xadshot'
    """
    e = b'_'  # escape prefix used for uppercase letters (and for itself)
    xchr = pycompat.bytechr
    asciistr = list(map(xchr, range(127)))
    capitals = list(range(ord(b"A"), ord(b"Z") + 1))

    # encode map: identity for plain ascii, '~xx' hex escapes for the
    # reserved bytes, '_x' lowering for capitals and for '_' itself
    cmap = {x: x for x in asciistr}
    for x in _reserved():
        cmap[xchr(x)] = b"~%02x" % x
    for x in capitals + [ord(e)]:
        cmap[xchr(x)] = e + xchr(x).lower()

    # decode map is the exact inverse of the encode map
    dmap = {}
    for k, v in cmap.items():
        dmap[v] = k

    def decode(s):
        # Greedily match encoded tokens of length 1 to 3 at each position
        # (plain byte, '_x' pair, or '~xx' triple); a position that matches
        # nothing means the input was not produced by the encoder.
        i = 0
        while i < len(s):
            for l in range(1, 4):
                try:
                    yield dmap[s[i : i + l]]
                    i += l
                    break
                except KeyError:
                    pass
            else:
                raise KeyError

    return (
        lambda s: b''.join([cmap[s[c : c + 1]] for c in range(len(s))]),
        lambda s: b''.join(list(decode(s))),
    )
166
166
167
167
168 _encodefname, _decodefname = _buildencodefun()
168 _encodefname, _decodefname = _buildencodefun()
169
169
170
170
def encodefilename(s):
    """
    >>> encodefilename(b'foo.i/bar.d/bla.hg/hi:world?/HELLO')
    'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o'
    """
    # escape directory suffixes first, then apply the per-byte encoding
    dir_safe = encodedir(s)
    return _encodefname(dir_safe)
177
177
178
178
def decodefilename(s):
    """
    >>> decodefilename(b'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o')
    'foo.i/bar.d/bla.hg/hi:world?/HELLO'
    """
    # undo the per-byte encoding first, then the directory-suffix escaping
    plain = _decodefname(s)
    return decodedir(plain)
185
185
186
186
def _buildlowerencodefun():
    """
    >>> f = _buildlowerencodefun()
    >>> f(b'nothing/special.txt')
    'nothing/special.txt'
    >>> f(b'HELLO')
    'hello'
    >>> f(b'hello:world?')
    'hello~3aworld~3f'
    >>> f(b'the\\x07quick\\xADshot')
    'the~07quick~adshot'
    """
    xchr = pycompat.bytechr
    # plain ascii maps to itself...
    cmap = {xchr(code): xchr(code) for code in range(127)}
    # ...reserved bytes become '~xx' hex escapes...
    for code in _reserved():
        cmap[xchr(code)] = b"~%02x" % code
    # ...and uppercase letters are simply lowercased (not reversible)
    for code in range(ord(b"A"), ord(b"Z") + 1):
        cmap[xchr(code)] = xchr(code).lower()

    def lowerencode(s):
        return b"".join([cmap[ch] for ch in pycompat.iterbytestr(s)])

    return lowerencode
210
210
211
211
212 lowerencode = getattr(parsers, 'lowerencode', None) or _buildlowerencodefun()
212 lowerencode = getattr(parsers, 'lowerencode', None) or _buildlowerencodefun()
213
213
214 # Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9
214 # Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9
215 _winres3 = (b'aux', b'con', b'prn', b'nul') # length 3
215 _winres3 = (b'aux', b'con', b'prn', b'nul') # length 3
216 _winres4 = (b'com', b'lpt') # length 4 (with trailing 1..9)
216 _winres4 = (b'com', b'lpt') # length 4 (with trailing 1..9)
217
217
218
218
def _auxencode(path, dotencode):
    """
    Encodes filenames containing names reserved by Windows or which end in
    period or space. Does not touch other single reserved characters c.
    Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
    Additionally encodes space or period at the beginning, if dotencode is
    True. Parameter path is assumed to be all lowercase.
    A segment only needs encoding if a reserved name appears as a
    basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux"
    doesn't need encoding.

    Note: ``path`` is a list of segments; it is mutated in place and also
    returned.

    >>> s = b'.foo/aux.txt/txt.aux/con/prn/nul/foo.'
    >>> _auxencode(s.split(b'/'), True)
    ['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e']
    >>> s = b'.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.'
    >>> _auxencode(s.split(b'/'), False)
    ['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e']
    >>> _auxencode([b'foo. '], True)
    ['foo.~20']
    >>> _auxencode([b' .foo'], True)
    ['~20.foo']
    """
    for i, n in enumerate(path):
        if not n:
            continue
        if dotencode and n[0] in b'. ':
            # escape a leading period or space ('.foo' -> '~2efoo')
            n = b"~%02x" % ord(n[0:1]) + n[1:]
            path[i] = n
        else:
            # the part before the first period decides whether the segment
            # can collide with a Windows reserved name ('aux', 'com1', ...)
            l = n.find(b'.')
            if l == -1:
                l = len(n)
            if (l == 3 and n[:3] in _winres3) or (
                l == 4
                and n[3:4] <= b'9'
                and n[3:4] >= b'1'
                and n[:3] in _winres4
            ):
                # encode third letter ('aux' -> 'au~78')
                ec = b"~%02x" % ord(n[2:3])
                n = n[0:2] + ec + n[3:]
                path[i] = n
        if n[-1] in b'. ':
            # encode last period or space ('foo...' -> 'foo..~2e')
            path[i] = n[:-1] + b"~%02x" % ord(n[-1:])
    return path
265
265
266
266
267 _maxstorepathlen = 120
267 _maxstorepathlen = 120
268 _dirprefixlen = 8
268 _dirprefixlen = 8
269 _maxshortdirslen = 8 * (_dirprefixlen + 1) - 4
269 _maxshortdirslen = 8 * (_dirprefixlen + 1) - 4
270
270
271
271
def _hashencode(path, dotencode):
    """Encode ``path`` into the non-reversible hashed ('dh/') form.

    Used when the default encoding would exceed _maxstorepathlen; see
    _hybridencode for the full description of the resulting layout.
    """
    digest = hex(hashutil.sha1(path).digest())
    le = lowerencode(path[5:]).split(b'/')  # skips prefix 'data/' or 'meta/'
    parts = _auxencode(le, dotencode)
    basename = parts[-1]
    _root, ext = os.path.splitext(basename)
    sdirs = []
    sdirslen = 0
    for p in parts[:-1]:
        # shorten each directory level to at most _dirprefixlen bytes
        d = p[:_dirprefixlen]
        if d[-1] in b'. ':
            # Windows can't access dirs ending in period or space
            d = d[:-1] + b'_'
        # track the length of the '/'-joined result so far
        if sdirslen == 0:
            t = len(d)
        else:
            t = sdirslen + 1 + len(d)
        if t > _maxshortdirslen:
            # stop adding levels once the joined dirs would be too long
            break
        sdirs.append(d)
        sdirslen = t
    dirs = b'/'.join(sdirs)
    if len(dirs) > 0:
        dirs += b'/'
    res = b'dh/' + dirs + digest + ext
    spaceleft = _maxstorepathlen - len(res)
    if spaceleft > 0:
        # pad with the start of the basename, up to _maxstorepathlen total
        filler = basename[:spaceleft]
        res = b'dh/' + dirs + filler + digest + ext
    return res
302
302
303
303
def _hybridencode(path, dotencode):
    """Encode ``path``, falling back to hashing when it grows too long.

    Applies to paths beginning with 'data/'.

    Default encoding (reversible): uppercase letters 'X' become '_x',
    reserved or illegal characters become '~xx' two-digit hex escapes
    (see encodefilename), and path components that are Windows reserved
    filenames get their third character encoded ('aux' -> 'au~78', see
    _auxencode).

    Hashed encoding (not reversible): if the default-encoded path is
    longer than _maxstorepathlen, the result is built instead from up to
    _dirprefixlen characters of each directory level (no more levels than
    fit in _maxshortdirslen), a filler taken from the beginning of the
    basename, the sha digest of the full path, and the preserved
    extension ('.i' or '.d'), filling up to _maxstorepathlen characters.
    The leading 'data/' is then replaced with 'dh/'.
    """
    path = encodedir(path)
    segments = _encodefname(path).split(b'/')
    encoded = b'/'.join(_auxencode(segments, dotencode))
    if len(encoded) > _maxstorepathlen:
        # too long for the reversible form: switch to the hashed one
        encoded = _hashencode(path, dotencode)
    return encoded
341
341
342
342
def _pathencode(path):
    """Pure-Python store path encoder (dotencode always enabled).

    Hashes eagerly when the raw path already exceeds _maxstorepathlen,
    otherwise tries the reversible encoding first and hashes only if the
    encoded result is too long.
    """
    dir_encoded = encodedir(path)
    if len(path) > _maxstorepathlen:
        return _hashencode(dir_encoded, True)
    segments = _encodefname(dir_encoded).split(b'/')
    candidate = b'/'.join(_auxencode(segments, True))
    if len(candidate) > _maxstorepathlen:
        return _hashencode(dir_encoded, True)
    return candidate
352
352
353
353
354 _pathencode = getattr(parsers, 'pathencode', _pathencode)
354 _pathencode = getattr(parsers, 'pathencode', _pathencode)
355
355
356
356
def _plainhybridencode(f):
    """hybrid-encode ``f`` without escaping leading periods or spaces"""
    return _hybridencode(f, dotencode=False)
359
359
360
360
361 def _calcmode(vfs):
361 def _calcmode(vfs):
362 try:
362 try:
363 # files in .hg/ will be created using this mode
363 # files in .hg/ will be created using this mode
364 mode = vfs.stat().st_mode
364 mode = vfs.stat().st_mode
365 # avoid some useless chmods
365 # avoid some useless chmods
366 if (0o777 & ~util.umask) == (0o777 & mode):
366 if (0o777 & ~util.umask) == (0o777 & mode):
367 mode = None
367 mode = None
368 except OSError:
368 except OSError:
369 mode = None
369 mode = None
370 return mode
370 return mode
371
371
372
372
373 _data = [
373 _data = [
374 b'bookmarks',
374 b'bookmarks',
375 b'narrowspec',
375 b'narrowspec',
376 b'data',
376 b'data',
377 b'meta',
377 b'meta',
378 b'00manifest.d',
378 b'00manifest.d',
379 b'00manifest.i',
379 b'00manifest.i',
380 b'00changelog.d',
380 b'00changelog.d',
381 b'00changelog.i',
381 b'00changelog.i',
382 b'phaseroots',
382 b'phaseroots',
383 b'obsstore',
383 b'obsstore',
384 b'requires',
384 b'requires',
385 ]
385 ]
386
386
387 REVLOG_FILES_MAIN_EXT = (b'.i',)
387 REVLOG_FILES_MAIN_EXT = (b'.i',)
388 REVLOG_FILES_OTHER_EXT = (
388 REVLOG_FILES_OTHER_EXT = (
389 b'.idx',
389 b'.idx',
390 b'.d',
390 b'.d',
391 b'.dat',
391 b'.dat',
392 b'.n',
392 b'.n',
393 b'.nd',
393 b'.nd',
394 b'.sda',
394 b'.sda',
395 )
395 )
396 # file extension that also use a `-SOMELONGIDHASH.ext` form
396 # file extension that also use a `-SOMELONGIDHASH.ext` form
397 REVLOG_FILES_LONG_EXT = (
397 REVLOG_FILES_LONG_EXT = (
398 b'.nd',
398 b'.nd',
399 b'.idx',
399 b'.idx',
400 b'.dat',
400 b'.dat',
401 b'.sda',
401 b'.sda',
402 )
402 )
403 # files that are "volatile" and might change between listing and streaming
403 # files that are "volatile" and might change between listing and streaming
404 #
404 #
405 # note: the ".nd" file are nodemap data and won't "change" but they might be
405 # note: the ".nd" file are nodemap data and won't "change" but they might be
406 # deleted.
406 # deleted.
407 REVLOG_FILES_VOLATILE_EXT = (b'.n', b'.nd')
407 REVLOG_FILES_VOLATILE_EXT = (b'.n', b'.nd')
408
408
409 # some exception to the above matching
409 # some exception to the above matching
410 #
410 #
411 # XXX This is currently not in use because of issue6542
411 # XXX This is currently not in use because of issue6542
412 EXCLUDED = re.compile(br'.*undo\.[^/]+\.(nd?|i)$')
412 EXCLUDED = re.compile(br'.*undo\.[^/]+\.(nd?|i)$')
413
413
414
414
def is_revlog(f, kind, st):
    """Classify directory entry ``f``; non-regular files are never revlogs."""
    if kind == stat.S_IFREG:
        return revlog_type(f)
    return None
419
419
420
420
def revlog_type(f):
    """Return the FILETYPE/FILEFLAGS bits for file name ``f``, or None.

    XXX we need to filter `undo.` created by the transaction here, however
    being naive about it also filter revlog for `undo.*` files, leading to
    issue6542. So we no longer use EXCLUDED.
    """
    if f.endswith(REVLOG_FILES_MAIN_EXT):
        return FILEFLAGS_REVLOG_MAIN
    if f.endswith(REVLOG_FILES_OTHER_EXT):
        flags = FILETYPE_FILELOG_OTHER
        if f.endswith(REVLOG_FILES_VOLATILE_EXT):
            # nodemap-style files may disappear between listing and read
            flags |= FILEFLAGS_VOLATILE
        return flags
    return None
433
433
434
434
435 # the file is part of changelog data
435 # the file is part of changelog data
436 FILEFLAGS_CHANGELOG = 1 << 13
436 FILEFLAGS_CHANGELOG = 1 << 13
437 # the file is part of manifest data
437 # the file is part of manifest data
438 FILEFLAGS_MANIFESTLOG = 1 << 12
438 FILEFLAGS_MANIFESTLOG = 1 << 12
439 # the file is part of filelog data
439 # the file is part of filelog data
440 FILEFLAGS_FILELOG = 1 << 11
440 FILEFLAGS_FILELOG = 1 << 11
441 # file that are not directly part of a revlog
441 # file that are not directly part of a revlog
442 FILEFLAGS_OTHER = 1 << 10
442 FILEFLAGS_OTHER = 1 << 10
443
443
444 # the main entry point for a revlog
444 # the main entry point for a revlog
445 FILEFLAGS_REVLOG_MAIN = 1 << 1
445 FILEFLAGS_REVLOG_MAIN = 1 << 1
446 # a secondary file for a revlog
446 # a secondary file for a revlog
447 FILEFLAGS_REVLOG_OTHER = 1 << 0
447 FILEFLAGS_REVLOG_OTHER = 1 << 0
448
448
449 # files that are "volatile" and might change between listing and streaming
449 # files that are "volatile" and might change between listing and streaming
450 FILEFLAGS_VOLATILE = 1 << 20
450 FILEFLAGS_VOLATILE = 1 << 20
451
451
452 FILETYPE_CHANGELOG_MAIN = FILEFLAGS_CHANGELOG | FILEFLAGS_REVLOG_MAIN
452 FILETYPE_CHANGELOG_MAIN = FILEFLAGS_CHANGELOG | FILEFLAGS_REVLOG_MAIN
453 FILETYPE_CHANGELOG_OTHER = FILEFLAGS_CHANGELOG | FILEFLAGS_REVLOG_OTHER
453 FILETYPE_CHANGELOG_OTHER = FILEFLAGS_CHANGELOG | FILEFLAGS_REVLOG_OTHER
454 FILETYPE_MANIFESTLOG_MAIN = FILEFLAGS_MANIFESTLOG | FILEFLAGS_REVLOG_MAIN
454 FILETYPE_MANIFESTLOG_MAIN = FILEFLAGS_MANIFESTLOG | FILEFLAGS_REVLOG_MAIN
455 FILETYPE_MANIFESTLOG_OTHER = FILEFLAGS_MANIFESTLOG | FILEFLAGS_REVLOG_OTHER
455 FILETYPE_MANIFESTLOG_OTHER = FILEFLAGS_MANIFESTLOG | FILEFLAGS_REVLOG_OTHER
456 FILETYPE_FILELOG_MAIN = FILEFLAGS_FILELOG | FILEFLAGS_REVLOG_MAIN
456 FILETYPE_FILELOG_MAIN = FILEFLAGS_FILELOG | FILEFLAGS_REVLOG_MAIN
457 FILETYPE_FILELOG_OTHER = FILEFLAGS_FILELOG | FILEFLAGS_REVLOG_OTHER
457 FILETYPE_FILELOG_OTHER = FILEFLAGS_FILELOG | FILEFLAGS_REVLOG_OTHER
458 FILETYPE_OTHER = FILEFLAGS_OTHER
458 FILETYPE_OTHER = FILEFLAGS_OTHER
459
459
460
460
@attr.s(slots=True, init=False)
class BaseStoreEntry:
    """An entry in the store

    This is returned by `store.walk` and represent some data in the store.
    Concrete subclasses are `SimpleStoreEntry` (plain files) and
    `RevlogStoreEntry` (revlog-backed data)."""
466
466
467
467
@attr.s(slots=True, init=False)
class SimpleStoreEntry(BaseStoreEntry):
    """A generic entry in the store"""

    is_revlog = False

    # unencoded path of the file, relative to the store
    _entry_path = attr.ib()
    # whether the file may change or vanish between listing and reading
    _is_volatile = attr.ib(default=False)
    # size in bytes when already known, or None to stat() on demand
    _file_size = attr.ib(default=None)

    def __init__(
        self,
        entry_path,
        is_volatile=False,
        file_size=None,
    ):
        super().__init__()
        self._entry_path = entry_path
        self._is_volatile = is_volatile
        self._file_size = file_size

    def files(self):
        """return the list of StoreFile backing this entry (always one)"""
        return [
            StoreFile(
                unencoded_path=self._entry_path,
                file_size=self._file_size,
                is_volatile=self._is_volatile,
            )
        ]
497
497
498
498
@attr.s(slots=True, init=False)
class RevlogStoreEntry(BaseStoreEntry):
    """A revlog entry in the store"""

    is_revlog = True

    # FILEFLAGS_* bitfield describing the revlog kind
    revlog_type = attr.ib(default=None)
    # what the revlog tracks (used by _match_tracked_entry): a file path
    # for filelogs, a directory for manifest logs
    target_id = attr.ib(default=None)
    # unencoded path prefix shared by all files of this revlog
    _path_prefix = attr.ib(default=None)
    # mapping of extension -> per-file data (see _gather_revlog)
    _details = attr.ib(default=None)

    def __init__(
        self,
        revlog_type,
        path_prefix,
        target_id,
        details,
    ):
        super().__init__()
        self.revlog_type = revlog_type
        self.target_id = target_id
        self._path_prefix = path_prefix
        # a revlog always has at least its index (`.i`) file
        assert b'.i' in details, (path_prefix, details)
        self._details = details

    @property
    def is_changelog(self):
        # truthy when this entry belongs to the changelog
        return self.revlog_type & FILEFLAGS_CHANGELOG

    @property
    def is_manifestlog(self):
        # truthy when this entry belongs to a manifest log
        return self.revlog_type & FILEFLAGS_MANIFESTLOG

    @property
    def is_filelog(self):
        # truthy when this entry belongs to a filelog
        return self.revlog_type & FILEFLAGS_FILELOG

    def main_file_path(self):
        """unencoded path of the main revlog file"""
        return self._path_prefix + b'.i'

    def files(self):
        """return one StoreFile per on-disk file, `.i` last (see _ext_key)"""
        files = []
        for ext in sorted(self._details, key=_ext_key):
            path = self._path_prefix + ext
            data = self._details[ext]
            files.append(StoreFile(unencoded_path=path, **data))
        return files
547
547
548
548
@attr.s(slots=True)
class StoreFile:
    """a file matching an entry"""

    # path of the file, before store encoding
    unencoded_path = attr.ib()
    # size in bytes when already known, else None
    _file_size = attr.ib(default=None)
    # volatile files may change or vanish between listing and streaming
    is_volatile = attr.ib(default=False)

    def file_size(self, vfs):
        """return the file's size, stat-ing through ``vfs`` when unknown

        A file that vanished since listing is reported as size 0.
        """
        if self._file_size is not None:
            return self._file_size
        try:
            return vfs.stat(self.unencoded_path).st_size
        except FileNotFoundError:
            return 0
564
564
565
565
def _gather_revlog(files_data):
    """Group per-file data by revlog prefix.

    ``files_data`` yields ``(unencoded_path, value)`` pairs. The result
    is a sorted list of ``(prefix, mapping)`` items, one per revlog,
    where ``mapping`` associates each file "suffix" seen for that revlog
    with its arbitrary value.
    """
    grouped = collections.defaultdict(dict)
    for path, data in files_data:
        prefix, ext = _split_revlog_ext(path)
        grouped[prefix][ext] = data
    return sorted(grouped.items())
578
578
579
579
580 def _split_revlog_ext(filename):
580 def _split_revlog_ext(filename):
581 """split the revlog file prefix from the variable extension"""
581 """split the revlog file prefix from the variable extension"""
582 if filename.endswith(REVLOG_FILES_LONG_EXT):
582 if filename.endswith(REVLOG_FILES_LONG_EXT):
583 char = b'-'
583 char = b'-'
584 else:
584 else:
585 char = b'.'
585 char = b'.'
586 idx = filename.rfind(char)
586 idx = filename.rfind(char)
587 return filename[:idx], filename[idx:]
587 return filename[:idx], filename[idx:]
588
588
589
589
590 def _ext_key(ext):
590 def _ext_key(ext):
591 """a key to order revlog suffix
591 """a key to order revlog suffix
592
592
593 important to issue .i after other entry."""
593 important to issue .i after other entry."""
594 # the only important part of this order is to keep the `.i` last.
594 # the only important part of this order is to keep the `.i` last.
595 if ext.endswith(b'.n'):
595 if ext.endswith(b'.n'):
596 return (0, ext)
596 return (0, ext)
597 elif ext.endswith(b'.nd'):
597 elif ext.endswith(b'.nd'):
598 return (10, ext)
598 return (10, ext)
599 elif ext.endswith(b'.d'):
599 elif ext.endswith(b'.d'):
600 return (20, ext)
600 return (20, ext)
601 elif ext.endswith(b'.i'):
601 elif ext.endswith(b'.i'):
602 return (50, ext)
602 return (50, ext)
603 else:
603 else:
604 return (40, ext)
604 return (40, ext)
605
605
606
606
607 class basicstore:
607 class basicstore:
608 '''base class for local repository stores'''
608 '''base class for local repository stores'''
609
609
610 def __init__(self, path, vfstype):
610 def __init__(self, path, vfstype):
611 vfs = vfstype(path)
611 vfs = vfstype(path)
612 self.path = vfs.base
612 self.path = vfs.base
613 self.createmode = _calcmode(vfs)
613 self.createmode = _calcmode(vfs)
614 vfs.createmode = self.createmode
614 vfs.createmode = self.createmode
615 self.rawvfs = vfs
615 self.rawvfs = vfs
616 self.vfs = vfsmod.filtervfs(vfs, encodedir)
616 self.vfs = vfsmod.filtervfs(vfs, encodedir)
617 self.opener = self.vfs
617 self.opener = self.vfs
618
618
619 def join(self, f):
619 def join(self, f):
620 return self.path + b'/' + encodedir(f)
620 return self.path + b'/' + encodedir(f)
621
621
622 def _walk(self, relpath, recurse, undecodable=None):
622 def _walk(self, relpath, recurse, undecodable=None):
623 '''yields (revlog_type, unencoded, size)'''
623 '''yields (revlog_type, unencoded, size)'''
624 path = self.path
624 path = self.path
625 if relpath:
625 if relpath:
626 path += b'/' + relpath
626 path += b'/' + relpath
627 striplen = len(self.path) + 1
627 striplen = len(self.path) + 1
628 l = []
628 l = []
629 if self.rawvfs.isdir(path):
629 if self.rawvfs.isdir(path):
630 visit = [path]
630 visit = [path]
631 readdir = self.rawvfs.readdir
631 readdir = self.rawvfs.readdir
632 while visit:
632 while visit:
633 p = visit.pop()
633 p = visit.pop()
634 for f, kind, st in readdir(p, stat=True):
634 for f, kind, st in readdir(p, stat=True):
635 fp = p + b'/' + f
635 fp = p + b'/' + f
636 rl_type = is_revlog(f, kind, st)
636 rl_type = is_revlog(f, kind, st)
637 if rl_type is not None:
637 if rl_type is not None:
638 n = util.pconvert(fp[striplen:])
638 n = util.pconvert(fp[striplen:])
639 l.append((decodedir(n), (rl_type, st.st_size)))
639 l.append((decodedir(n), (rl_type, st.st_size)))
640 elif kind == stat.S_IFDIR and recurse:
640 elif kind == stat.S_IFDIR and recurse:
641 visit.append(fp)
641 visit.append(fp)
642
642
643 l.sort()
643 l.sort()
644 return l
644 return l
645
645
646 def changelog(self, trypending, concurrencychecker=None):
646 def changelog(self, trypending, concurrencychecker=None):
647 return changelog.changelog(
647 return changelog.changelog(
648 self.vfs,
648 self.vfs,
649 trypending=trypending,
649 trypending=trypending,
650 concurrencychecker=concurrencychecker,
650 concurrencychecker=concurrencychecker,
651 )
651 )
652
652
653 def manifestlog(self, repo, storenarrowmatch):
653 def manifestlog(self, repo, storenarrowmatch):
654 rootstore = manifest.manifestrevlog(repo.nodeconstants, self.vfs)
654 rootstore = manifest.manifestrevlog(repo.nodeconstants, self.vfs)
655 return manifest.manifestlog(self.vfs, repo, rootstore, storenarrowmatch)
655 return manifest.manifestlog(self.vfs, repo, rootstore, storenarrowmatch)
656
656
657 def data_entries(
657 def data_entries(
658 self, matcher=None, undecodable=None
658 self, matcher=None, undecodable=None
659 ) -> Generator[BaseStoreEntry, None, None]:
659 ) -> Generator[BaseStoreEntry, None, None]:
660 """Like walk, but excluding the changelog and root manifest.
660 """Like walk, but excluding the changelog and root manifest.
661
661
662 When [undecodable] is None, revlogs names that can't be
662 When [undecodable] is None, revlogs names that can't be
663 decoded cause an exception. When it is provided, it should
663 decoded cause an exception. When it is provided, it should
664 be a list and the filenames that can't be decoded are added
664 be a list and the filenames that can't be decoded are added
665 to it instead. This is very rarely needed."""
665 to it instead. This is very rarely needed."""
666 dirs = [
666 dirs = [
667 (b'data', FILEFLAGS_FILELOG),
667 (b'data', FILEFLAGS_FILELOG),
668 (b'meta', FILEFLAGS_MANIFESTLOG),
668 (b'meta', FILEFLAGS_MANIFESTLOG),
669 ]
669 ]
670 for base_dir, rl_type in dirs:
670 for base_dir, rl_type in dirs:
671 files = self._walk(base_dir, True, undecodable=undecodable)
671 files = self._walk(base_dir, True, undecodable=undecodable)
672 files = (f for f in files if f[1][0] is not None)
672 files = (f for f in files if f[1][0] is not None)
673 for revlog, details in _gather_revlog(files):
673 for revlog, details in _gather_revlog(files):
674 file_details = {}
674 file_details = {}
675 revlog_target_id = revlog.split(b'/', 1)[1]
675 revlog_target_id = revlog.split(b'/', 1)[1]
676 for ext, (t, s) in sorted(details.items()):
676 for ext, (t, s) in sorted(details.items()):
677 file_details[ext] = {
677 file_details[ext] = {
678 'is_volatile': bool(t & FILEFLAGS_VOLATILE),
678 'is_volatile': bool(t & FILEFLAGS_VOLATILE),
679 'file_size': s,
679 'file_size': s,
680 }
680 }
681 yield RevlogStoreEntry(
681 yield RevlogStoreEntry(
682 path_prefix=revlog,
682 path_prefix=revlog,
683 revlog_type=rl_type,
683 revlog_type=rl_type,
684 target_id=revlog_target_id,
684 target_id=revlog_target_id,
685 details=file_details,
685 details=file_details,
686 )
686 )
687
687
688 def top_entries(self, phase=False) -> Generator[BaseStoreEntry, None, None]:
688 def top_entries(self, phase=False) -> Generator[BaseStoreEntry, None, None]:
689 if phase and self.vfs.exists(b'phaseroots'):
690 yield SimpleStoreEntry(
691 entry_path=b'phaseroots',
692 is_volatile=True,
693 )
694
689 files = reversed(self._walk(b'', False))
695 files = reversed(self._walk(b'', False))
690
696
691 changelogs = collections.defaultdict(dict)
697 changelogs = collections.defaultdict(dict)
692 manifestlogs = collections.defaultdict(dict)
698 manifestlogs = collections.defaultdict(dict)
693
699
694 for u, (t, s) in files:
700 for u, (t, s) in files:
695 if u.startswith(b'00changelog'):
701 if u.startswith(b'00changelog'):
696 name, ext = _split_revlog_ext(u)
702 name, ext = _split_revlog_ext(u)
697 changelogs[name][ext] = (t, s)
703 changelogs[name][ext] = (t, s)
698 elif u.startswith(b'00manifest'):
704 elif u.startswith(b'00manifest'):
699 name, ext = _split_revlog_ext(u)
705 name, ext = _split_revlog_ext(u)
700 manifestlogs[name][ext] = (t, s)
706 manifestlogs[name][ext] = (t, s)
701 else:
707 else:
702 yield SimpleStoreEntry(
708 yield SimpleStoreEntry(
703 entry_path=u,
709 entry_path=u,
704 is_volatile=bool(t & FILEFLAGS_VOLATILE),
710 is_volatile=bool(t & FILEFLAGS_VOLATILE),
705 file_size=s,
711 file_size=s,
706 )
712 )
707 # yield manifest before changelog
713 # yield manifest before changelog
708 top_rl = [
714 top_rl = [
709 (manifestlogs, FILEFLAGS_MANIFESTLOG),
715 (manifestlogs, FILEFLAGS_MANIFESTLOG),
710 (changelogs, FILEFLAGS_CHANGELOG),
716 (changelogs, FILEFLAGS_CHANGELOG),
711 ]
717 ]
712 assert len(manifestlogs) <= 1
718 assert len(manifestlogs) <= 1
713 assert len(changelogs) <= 1
719 assert len(changelogs) <= 1
714 for data, revlog_type in top_rl:
720 for data, revlog_type in top_rl:
715 for revlog, details in sorted(data.items()):
721 for revlog, details in sorted(data.items()):
716 file_details = {}
722 file_details = {}
717 for ext, (t, s) in details.items():
723 for ext, (t, s) in details.items():
718 file_details[ext] = {
724 file_details[ext] = {
719 'is_volatile': bool(t & FILEFLAGS_VOLATILE),
725 'is_volatile': bool(t & FILEFLAGS_VOLATILE),
720 'file_size': s,
726 'file_size': s,
721 }
727 }
722 yield RevlogStoreEntry(
728 yield RevlogStoreEntry(
723 path_prefix=revlog,
729 path_prefix=revlog,
724 revlog_type=revlog_type,
730 revlog_type=revlog_type,
725 target_id=b'',
731 target_id=b'',
726 details=file_details,
732 details=file_details,
727 )
733 )
728 if phase and self.vfs.exists(b'phaseroots'):
729 yield SimpleStoreEntry(
730 entry_path=b'phaseroots',
731 is_volatile=True,
732 )
733
734
734 def walk(
735 def walk(
735 self, matcher=None, phase=False
736 self, matcher=None, phase=False
736 ) -> Generator[BaseStoreEntry, None, None]:
737 ) -> Generator[BaseStoreEntry, None, None]:
737 """return files related to data storage (ie: revlogs)
738 """return files related to data storage (ie: revlogs)
738
739
739 yields instance from BaseStoreEntry subclasses
740 yields instance from BaseStoreEntry subclasses
740
741
741 if a matcher is passed, storage files of only those tracked paths
742 if a matcher is passed, storage files of only those tracked paths
742 are passed with matches the matcher
743 are passed with matches the matcher
743 """
744 """
744 # yield data files first
745 # yield data files first
745 for x in self.data_entries(matcher):
746 for x in self.data_entries(matcher):
746 yield x
747 yield x
747 for x in self.top_entries(phase=phase):
748 for x in self.top_entries(phase=phase):
748 yield x
749 yield x
749
750
750 def copylist(self):
751 def copylist(self):
751 return _data
752 return _data
752
753
753 def write(self, tr):
754 def write(self, tr):
754 pass
755 pass
755
756
756 def invalidatecaches(self):
757 def invalidatecaches(self):
757 pass
758 pass
758
759
759 def markremoved(self, fn):
760 def markremoved(self, fn):
760 pass
761 pass
761
762
762 def __contains__(self, path):
763 def __contains__(self, path):
763 '''Checks if the store contains path'''
764 '''Checks if the store contains path'''
764 path = b"/".join((b"data", path))
765 path = b"/".join((b"data", path))
765 # file?
766 # file?
766 if self.vfs.exists(path + b".i"):
767 if self.vfs.exists(path + b".i"):
767 return True
768 return True
768 # dir?
769 # dir?
769 if not path.endswith(b"/"):
770 if not path.endswith(b"/"):
770 path = path + b"/"
771 path = path + b"/"
771 return self.vfs.exists(path)
772 return self.vfs.exists(path)
772
773
773
774
774 class encodedstore(basicstore):
775 class encodedstore(basicstore):
775 def __init__(self, path, vfstype):
776 def __init__(self, path, vfstype):
776 vfs = vfstype(path + b'/store')
777 vfs = vfstype(path + b'/store')
777 self.path = vfs.base
778 self.path = vfs.base
778 self.createmode = _calcmode(vfs)
779 self.createmode = _calcmode(vfs)
779 vfs.createmode = self.createmode
780 vfs.createmode = self.createmode
780 self.rawvfs = vfs
781 self.rawvfs = vfs
781 self.vfs = vfsmod.filtervfs(vfs, encodefilename)
782 self.vfs = vfsmod.filtervfs(vfs, encodefilename)
782 self.opener = self.vfs
783 self.opener = self.vfs
783
784
784 def _walk(self, relpath, recurse, undecodable=None):
785 def _walk(self, relpath, recurse, undecodable=None):
785 old = super()._walk(relpath, recurse)
786 old = super()._walk(relpath, recurse)
786 new = []
787 new = []
787 for f1, value in old:
788 for f1, value in old:
788 try:
789 try:
789 f2 = decodefilename(f1)
790 f2 = decodefilename(f1)
790 except KeyError:
791 except KeyError:
791 if undecodable is None:
792 if undecodable is None:
792 msg = _(b'undecodable revlog name %s') % f1
793 msg = _(b'undecodable revlog name %s') % f1
793 raise error.StorageError(msg)
794 raise error.StorageError(msg)
794 else:
795 else:
795 undecodable.append(f1)
796 undecodable.append(f1)
796 continue
797 continue
797 new.append((f2, value))
798 new.append((f2, value))
798 return new
799 return new
799
800
800 def data_entries(
801 def data_entries(
801 self, matcher=None, undecodable=None
802 self, matcher=None, undecodable=None
802 ) -> Generator[BaseStoreEntry, None, None]:
803 ) -> Generator[BaseStoreEntry, None, None]:
803 entries = super(encodedstore, self).data_entries(
804 entries = super(encodedstore, self).data_entries(
804 undecodable=undecodable
805 undecodable=undecodable
805 )
806 )
806 for entry in entries:
807 for entry in entries:
807 if _match_tracked_entry(entry, matcher):
808 if _match_tracked_entry(entry, matcher):
808 yield entry
809 yield entry
809
810
810 def join(self, f):
811 def join(self, f):
811 return self.path + b'/' + encodefilename(f)
812 return self.path + b'/' + encodefilename(f)
812
813
813 def copylist(self):
814 def copylist(self):
814 return [b'requires', b'00changelog.i'] + [b'store/' + f for f in _data]
815 return [b'requires', b'00changelog.i'] + [b'store/' + f for f in _data]
815
816
816
817
817 class fncache:
818 class fncache:
818 # the filename used to be partially encoded
819 # the filename used to be partially encoded
819 # hence the encodedir/decodedir dance
820 # hence the encodedir/decodedir dance
820 def __init__(self, vfs):
821 def __init__(self, vfs):
821 self.vfs = vfs
822 self.vfs = vfs
822 self._ignores = set()
823 self._ignores = set()
823 self.entries = None
824 self.entries = None
824 self._dirty = False
825 self._dirty = False
825 # set of new additions to fncache
826 # set of new additions to fncache
826 self.addls = set()
827 self.addls = set()
827
828
828 def ensureloaded(self, warn=None):
829 def ensureloaded(self, warn=None):
829 """read the fncache file if not already read.
830 """read the fncache file if not already read.
830
831
831 If the file on disk is corrupted, raise. If warn is provided,
832 If the file on disk is corrupted, raise. If warn is provided,
832 warn and keep going instead."""
833 warn and keep going instead."""
833 if self.entries is None:
834 if self.entries is None:
834 self._load(warn)
835 self._load(warn)
835
836
836 def _load(self, warn=None):
837 def _load(self, warn=None):
837 '''fill the entries from the fncache file'''
838 '''fill the entries from the fncache file'''
838 self._dirty = False
839 self._dirty = False
839 try:
840 try:
840 fp = self.vfs(b'fncache', mode=b'rb')
841 fp = self.vfs(b'fncache', mode=b'rb')
841 except IOError:
842 except IOError:
842 # skip nonexistent file
843 # skip nonexistent file
843 self.entries = set()
844 self.entries = set()
844 return
845 return
845
846
846 self.entries = set()
847 self.entries = set()
847 chunk = b''
848 chunk = b''
848 for c in iter(functools.partial(fp.read, fncache_chunksize), b''):
849 for c in iter(functools.partial(fp.read, fncache_chunksize), b''):
849 chunk += c
850 chunk += c
850 try:
851 try:
851 p = chunk.rindex(b'\n')
852 p = chunk.rindex(b'\n')
852 self.entries.update(decodedir(chunk[: p + 1]).splitlines())
853 self.entries.update(decodedir(chunk[: p + 1]).splitlines())
853 chunk = chunk[p + 1 :]
854 chunk = chunk[p + 1 :]
854 except ValueError:
855 except ValueError:
855 # substring '\n' not found, maybe the entry is bigger than the
856 # substring '\n' not found, maybe the entry is bigger than the
856 # chunksize, so let's keep iterating
857 # chunksize, so let's keep iterating
857 pass
858 pass
858
859
859 if chunk:
860 if chunk:
860 msg = _(b"fncache does not ends with a newline")
861 msg = _(b"fncache does not ends with a newline")
861 if warn:
862 if warn:
862 warn(msg + b'\n')
863 warn(msg + b'\n')
863 else:
864 else:
864 raise error.Abort(
865 raise error.Abort(
865 msg,
866 msg,
866 hint=_(
867 hint=_(
867 b"use 'hg debugrebuildfncache' to "
868 b"use 'hg debugrebuildfncache' to "
868 b"rebuild the fncache"
869 b"rebuild the fncache"
869 ),
870 ),
870 )
871 )
871 self._checkentries(fp, warn)
872 self._checkentries(fp, warn)
872 fp.close()
873 fp.close()
873
874
874 def _checkentries(self, fp, warn):
875 def _checkentries(self, fp, warn):
875 """make sure there is no empty string in entries"""
876 """make sure there is no empty string in entries"""
876 if b'' in self.entries:
877 if b'' in self.entries:
877 fp.seek(0)
878 fp.seek(0)
878 for n, line in enumerate(fp):
879 for n, line in enumerate(fp):
879 if not line.rstrip(b'\n'):
880 if not line.rstrip(b'\n'):
880 t = _(b'invalid entry in fncache, line %d') % (n + 1)
881 t = _(b'invalid entry in fncache, line %d') % (n + 1)
881 if warn:
882 if warn:
882 warn(t + b'\n')
883 warn(t + b'\n')
883 else:
884 else:
884 raise error.Abort(t)
885 raise error.Abort(t)
885
886
886 def write(self, tr):
887 def write(self, tr):
887 if self._dirty:
888 if self._dirty:
888 assert self.entries is not None
889 assert self.entries is not None
889 self.entries = self.entries | self.addls
890 self.entries = self.entries | self.addls
890 self.addls = set()
891 self.addls = set()
891 tr.addbackup(b'fncache')
892 tr.addbackup(b'fncache')
892 fp = self.vfs(b'fncache', mode=b'wb', atomictemp=True)
893 fp = self.vfs(b'fncache', mode=b'wb', atomictemp=True)
893 if self.entries:
894 if self.entries:
894 fp.write(encodedir(b'\n'.join(self.entries) + b'\n'))
895 fp.write(encodedir(b'\n'.join(self.entries) + b'\n'))
895 fp.close()
896 fp.close()
896 self._dirty = False
897 self._dirty = False
897 if self.addls:
898 if self.addls:
898 # if we have just new entries, let's append them to the fncache
899 # if we have just new entries, let's append them to the fncache
899 tr.addbackup(b'fncache')
900 tr.addbackup(b'fncache')
900 fp = self.vfs(b'fncache', mode=b'ab', atomictemp=True)
901 fp = self.vfs(b'fncache', mode=b'ab', atomictemp=True)
901 if self.addls:
902 if self.addls:
902 fp.write(encodedir(b'\n'.join(self.addls) + b'\n'))
903 fp.write(encodedir(b'\n'.join(self.addls) + b'\n'))
903 fp.close()
904 fp.close()
904 self.entries = None
905 self.entries = None
905 self.addls = set()
906 self.addls = set()
906
907
907 def addignore(self, fn):
908 def addignore(self, fn):
908 self._ignores.add(fn)
909 self._ignores.add(fn)
909
910
910 def add(self, fn):
911 def add(self, fn):
911 if fn in self._ignores:
912 if fn in self._ignores:
912 return
913 return
913 if self.entries is None:
914 if self.entries is None:
914 self._load()
915 self._load()
915 if fn not in self.entries:
916 if fn not in self.entries:
916 self.addls.add(fn)
917 self.addls.add(fn)
917
918
918 def remove(self, fn):
919 def remove(self, fn):
919 if self.entries is None:
920 if self.entries is None:
920 self._load()
921 self._load()
921 if fn in self.addls:
922 if fn in self.addls:
922 self.addls.remove(fn)
923 self.addls.remove(fn)
923 return
924 return
924 try:
925 try:
925 self.entries.remove(fn)
926 self.entries.remove(fn)
926 self._dirty = True
927 self._dirty = True
927 except KeyError:
928 except KeyError:
928 pass
929 pass
929
930
930 def __contains__(self, fn):
931 def __contains__(self, fn):
931 if fn in self.addls:
932 if fn in self.addls:
932 return True
933 return True
933 if self.entries is None:
934 if self.entries is None:
934 self._load()
935 self._load()
935 return fn in self.entries
936 return fn in self.entries
936
937
937 def __iter__(self):
938 def __iter__(self):
938 if self.entries is None:
939 if self.entries is None:
939 self._load()
940 self._load()
940 return iter(self.entries | self.addls)
941 return iter(self.entries | self.addls)
941
942
942
943
943 class _fncachevfs(vfsmod.proxyvfs):
944 class _fncachevfs(vfsmod.proxyvfs):
944 def __init__(self, vfs, fnc, encode):
945 def __init__(self, vfs, fnc, encode):
945 vfsmod.proxyvfs.__init__(self, vfs)
946 vfsmod.proxyvfs.__init__(self, vfs)
946 self.fncache = fnc
947 self.fncache = fnc
947 self.encode = encode
948 self.encode = encode
948
949
949 def __call__(self, path, mode=b'r', *args, **kw):
950 def __call__(self, path, mode=b'r', *args, **kw):
950 encoded = self.encode(path)
951 encoded = self.encode(path)
951 if (
952 if (
952 mode not in (b'r', b'rb')
953 mode not in (b'r', b'rb')
953 and (path.startswith(b'data/') or path.startswith(b'meta/'))
954 and (path.startswith(b'data/') or path.startswith(b'meta/'))
954 and revlog_type(path) is not None
955 and revlog_type(path) is not None
955 ):
956 ):
956 # do not trigger a fncache load when adding a file that already is
957 # do not trigger a fncache load when adding a file that already is
957 # known to exist.
958 # known to exist.
958 notload = self.fncache.entries is None and self.vfs.exists(encoded)
959 notload = self.fncache.entries is None and self.vfs.exists(encoded)
959 if notload and b'r+' in mode and not self.vfs.stat(encoded).st_size:
960 if notload and b'r+' in mode and not self.vfs.stat(encoded).st_size:
960 # when appending to an existing file, if the file has size zero,
961 # when appending to an existing file, if the file has size zero,
961 # it should be considered as missing. Such zero-size files are
962 # it should be considered as missing. Such zero-size files are
962 # the result of truncation when a transaction is aborted.
963 # the result of truncation when a transaction is aborted.
963 notload = False
964 notload = False
964 if not notload:
965 if not notload:
965 self.fncache.add(path)
966 self.fncache.add(path)
966 return self.vfs(encoded, mode, *args, **kw)
967 return self.vfs(encoded, mode, *args, **kw)
967
968
968 def join(self, path):
969 def join(self, path):
969 if path:
970 if path:
970 return self.vfs.join(self.encode(path))
971 return self.vfs.join(self.encode(path))
971 else:
972 else:
972 return self.vfs.join(path)
973 return self.vfs.join(path)
973
974
974 def register_file(self, path):
975 def register_file(self, path):
975 """generic hook point to lets fncache steer its stew"""
976 """generic hook point to lets fncache steer its stew"""
976 if path.startswith(b'data/') or path.startswith(b'meta/'):
977 if path.startswith(b'data/') or path.startswith(b'meta/'):
977 self.fncache.add(path)
978 self.fncache.add(path)
978
979
979
980
980 class fncachestore(basicstore):
981 class fncachestore(basicstore):
981 def __init__(self, path, vfstype, dotencode):
982 def __init__(self, path, vfstype, dotencode):
982 if dotencode:
983 if dotencode:
983 encode = _pathencode
984 encode = _pathencode
984 else:
985 else:
985 encode = _plainhybridencode
986 encode = _plainhybridencode
986 self.encode = encode
987 self.encode = encode
987 vfs = vfstype(path + b'/store')
988 vfs = vfstype(path + b'/store')
988 self.path = vfs.base
989 self.path = vfs.base
989 self.pathsep = self.path + b'/'
990 self.pathsep = self.path + b'/'
990 self.createmode = _calcmode(vfs)
991 self.createmode = _calcmode(vfs)
991 vfs.createmode = self.createmode
992 vfs.createmode = self.createmode
992 self.rawvfs = vfs
993 self.rawvfs = vfs
993 fnc = fncache(vfs)
994 fnc = fncache(vfs)
994 self.fncache = fnc
995 self.fncache = fnc
995 self.vfs = _fncachevfs(vfs, fnc, encode)
996 self.vfs = _fncachevfs(vfs, fnc, encode)
996 self.opener = self.vfs
997 self.opener = self.vfs
997
998
998 def join(self, f):
999 def join(self, f):
999 return self.pathsep + self.encode(f)
1000 return self.pathsep + self.encode(f)
1000
1001
1001 def getsize(self, path):
1002 def getsize(self, path):
1002 return self.rawvfs.stat(path).st_size
1003 return self.rawvfs.stat(path).st_size
1003
1004
1004 def data_entries(
1005 def data_entries(
1005 self, matcher=None, undecodable=None
1006 self, matcher=None, undecodable=None
1006 ) -> Generator[BaseStoreEntry, None, None]:
1007 ) -> Generator[BaseStoreEntry, None, None]:
1007 files = ((f, revlog_type(f)) for f in self.fncache)
1008 files = ((f, revlog_type(f)) for f in self.fncache)
1008 # Note: all files in fncache should be revlog related, However the
1009 # Note: all files in fncache should be revlog related, However the
1009 # fncache might contains such file added by previous version of
1010 # fncache might contains such file added by previous version of
1010 # Mercurial.
1011 # Mercurial.
1011 files = (f for f in files if f[1] is not None)
1012 files = (f for f in files if f[1] is not None)
1012 by_revlog = _gather_revlog(files)
1013 by_revlog = _gather_revlog(files)
1013 for revlog, details in by_revlog:
1014 for revlog, details in by_revlog:
1014 file_details = {}
1015 file_details = {}
1015 if revlog.startswith(b'data/'):
1016 if revlog.startswith(b'data/'):
1016 rl_type = FILEFLAGS_FILELOG
1017 rl_type = FILEFLAGS_FILELOG
1017 revlog_target_id = revlog.split(b'/', 1)[1]
1018 revlog_target_id = revlog.split(b'/', 1)[1]
1018 elif revlog.startswith(b'meta/'):
1019 elif revlog.startswith(b'meta/'):
1019 rl_type = FILEFLAGS_MANIFESTLOG
1020 rl_type = FILEFLAGS_MANIFESTLOG
1020 # drop the initial directory and the `00manifest` file part
1021 # drop the initial directory and the `00manifest` file part
1021 tmp = revlog.split(b'/', 1)[1]
1022 tmp = revlog.split(b'/', 1)[1]
1022 revlog_target_id = tmp.rsplit(b'/', 1)[0] + b'/'
1023 revlog_target_id = tmp.rsplit(b'/', 1)[0] + b'/'
1023 else:
1024 else:
1024 # unreachable
1025 # unreachable
1025 assert False, revlog
1026 assert False, revlog
1026 for ext, t in details.items():
1027 for ext, t in details.items():
1027 file_details[ext] = {
1028 file_details[ext] = {
1028 'is_volatile': bool(t & FILEFLAGS_VOLATILE),
1029 'is_volatile': bool(t & FILEFLAGS_VOLATILE),
1029 }
1030 }
1030 entry = RevlogStoreEntry(
1031 entry = RevlogStoreEntry(
1031 path_prefix=revlog,
1032 path_prefix=revlog,
1032 revlog_type=rl_type,
1033 revlog_type=rl_type,
1033 target_id=revlog_target_id,
1034 target_id=revlog_target_id,
1034 details=file_details,
1035 details=file_details,
1035 )
1036 )
1036 if _match_tracked_entry(entry, matcher):
1037 if _match_tracked_entry(entry, matcher):
1037 yield entry
1038 yield entry
1038
1039
1039 def copylist(self):
1040 def copylist(self):
1040 d = (
1041 d = (
1041 b'bookmarks',
1042 b'bookmarks',
1042 b'narrowspec',
1043 b'narrowspec',
1043 b'data',
1044 b'data',
1044 b'meta',
1045 b'meta',
1045 b'dh',
1046 b'dh',
1046 b'fncache',
1047 b'fncache',
1047 b'phaseroots',
1048 b'phaseroots',
1048 b'obsstore',
1049 b'obsstore',
1049 b'00manifest.d',
1050 b'00manifest.d',
1050 b'00manifest.i',
1051 b'00manifest.i',
1051 b'00changelog.d',
1052 b'00changelog.d',
1052 b'00changelog.i',
1053 b'00changelog.i',
1053 b'requires',
1054 b'requires',
1054 )
1055 )
1055 return [b'requires', b'00changelog.i'] + [b'store/' + f for f in d]
1056 return [b'requires', b'00changelog.i'] + [b'store/' + f for f in d]
1056
1057
1057 def write(self, tr):
1058 def write(self, tr):
1058 self.fncache.write(tr)
1059 self.fncache.write(tr)
1059
1060
1060 def invalidatecaches(self):
1061 def invalidatecaches(self):
1061 self.fncache.entries = None
1062 self.fncache.entries = None
1062 self.fncache.addls = set()
1063 self.fncache.addls = set()
1063
1064
1064 def markremoved(self, fn):
1065 def markremoved(self, fn):
1065 self.fncache.remove(fn)
1066 self.fncache.remove(fn)
1066
1067
1067 def _exists(self, f):
1068 def _exists(self, f):
1068 ef = self.encode(f)
1069 ef = self.encode(f)
1069 try:
1070 try:
1070 self.getsize(ef)
1071 self.getsize(ef)
1071 return True
1072 return True
1072 except FileNotFoundError:
1073 except FileNotFoundError:
1073 return False
1074 return False
1074
1075
1075 def __contains__(self, path):
1076 def __contains__(self, path):
1076 '''Checks if the store contains path'''
1077 '''Checks if the store contains path'''
1077 path = b"/".join((b"data", path))
1078 path = b"/".join((b"data", path))
1078 # check for files (exact match)
1079 # check for files (exact match)
1079 e = path + b'.i'
1080 e = path + b'.i'
1080 if e in self.fncache and self._exists(e):
1081 if e in self.fncache and self._exists(e):
1081 return True
1082 return True
1082 # now check for directories (prefix match)
1083 # now check for directories (prefix match)
1083 if not path.endswith(b'/'):
1084 if not path.endswith(b'/'):
1084 path += b'/'
1085 path += b'/'
1085 for e in self.fncache:
1086 for e in self.fncache:
1086 if e.startswith(path) and self._exists(e):
1087 if e.startswith(path) and self._exists(e):
1087 return True
1088 return True
1088 return False
1089 return False
@@ -1,181 +1,181 b''
1 #require no-reposimplestore
1 #require no-reposimplestore
2
2
3 Test creating a consuming stream bundle v2
3 Test creating a consuming stream bundle v2
4
4
5 $ getmainid() {
5 $ getmainid() {
6 > hg -R main log --template '{node}\n' --rev "$1"
6 > hg -R main log --template '{node}\n' --rev "$1"
7 > }
7 > }
8
8
9 $ cp $HGRCPATH $TESTTMP/hgrc.orig
9 $ cp $HGRCPATH $TESTTMP/hgrc.orig
10
10
11 $ cat >> $HGRCPATH << EOF
11 $ cat >> $HGRCPATH << EOF
12 > [experimental]
12 > [experimental]
13 > evolution.createmarkers=True
13 > evolution.createmarkers=True
14 > evolution.exchange=True
14 > evolution.exchange=True
15 > bundle2-output-capture=True
15 > bundle2-output-capture=True
16 > [ui]
16 > [ui]
17 > logtemplate={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
17 > logtemplate={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
18 > [web]
18 > [web]
19 > push_ssl = false
19 > push_ssl = false
20 > allow_push = *
20 > allow_push = *
21 > [phases]
21 > [phases]
22 > publish=False
22 > publish=False
23 > [extensions]
23 > [extensions]
24 > drawdag=$TESTDIR/drawdag.py
24 > drawdag=$TESTDIR/drawdag.py
25 > clonebundles=
25 > clonebundles=
26 > EOF
26 > EOF
27
27
28 The extension requires a repo (currently unused)
28 The extension requires a repo (currently unused)
29
29
30 $ hg init main
30 $ hg init main
31 $ cd main
31 $ cd main
32
32
33 $ hg debugdrawdag <<'EOF'
33 $ hg debugdrawdag <<'EOF'
34 > E
34 > E
35 > |
35 > |
36 > D
36 > D
37 > |
37 > |
38 > C
38 > C
39 > |
39 > |
40 > B
40 > B
41 > |
41 > |
42 > A
42 > A
43 > EOF
43 > EOF
44
44
45 $ hg bundle -a --type="none-v2;stream=v2" bundle.hg
45 $ hg bundle -a --type="none-v2;stream=v2" bundle.hg
46 $ hg debugbundle bundle.hg
46 $ hg debugbundle bundle.hg
47 Stream params: {}
47 Stream params: {}
48 stream2 -- {bytecount: 1693, filecount: 11, requirements: generaldelta%2Crevlogv1%2Csparserevlog} (mandatory: True) (no-zstd !)
48 stream2 -- {bytecount: 1693, filecount: 11, requirements: generaldelta%2Crevlogv1%2Csparserevlog} (mandatory: True) (no-zstd !)
49 stream2 -- {bytecount: 1693, filecount: 11, requirements: generaldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog} (mandatory: True) (zstd no-rust !)
49 stream2 -- {bytecount: 1693, filecount: 11, requirements: generaldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog} (mandatory: True) (zstd no-rust !)
50 stream2 -- {bytecount: 1693, filecount: 11, requirements: generaldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog} (mandatory: True) (rust !)
50 stream2 -- {bytecount: 1693, filecount: 11, requirements: generaldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog} (mandatory: True) (rust !)
51 $ hg debugbundle --spec bundle.hg
51 $ hg debugbundle --spec bundle.hg
52 none-v2;stream=v2;requirements%3Dgeneraldelta%2Crevlogv1%2Csparserevlog (no-zstd !)
52 none-v2;stream=v2;requirements%3Dgeneraldelta%2Crevlogv1%2Csparserevlog (no-zstd !)
53 none-v2;stream=v2;requirements%3Dgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog (zstd no-rust !)
53 none-v2;stream=v2;requirements%3Dgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog (zstd no-rust !)
54 none-v2;stream=v2;requirements%3Dgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog (rust !)
54 none-v2;stream=v2;requirements%3Dgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog (rust !)
55
55
56 Test that we can apply the bundle as a stream clone bundle
56 Test that we can apply the bundle as a stream clone bundle
57
57
58 $ cat > .hg/clonebundles.manifest << EOF
58 $ cat > .hg/clonebundles.manifest << EOF
59 > http://localhost:$HGPORT1/bundle.hg BUNDLESPEC=`hg debugbundle --spec bundle.hg`
59 > http://localhost:$HGPORT1/bundle.hg BUNDLESPEC=`hg debugbundle --spec bundle.hg`
60 > EOF
60 > EOF
61
61
62 $ hg serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
62 $ hg serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
63 $ cat hg.pid >> $DAEMON_PIDS
63 $ cat hg.pid >> $DAEMON_PIDS
64
64
65 $ "$PYTHON" $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
65 $ "$PYTHON" $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
66 $ cat http.pid >> $DAEMON_PIDS
66 $ cat http.pid >> $DAEMON_PIDS
67
67
68 $ cd ..
68 $ cd ..
69 $ hg clone http://localhost:$HGPORT streamv2-clone-implicit --debug
69 $ hg clone http://localhost:$HGPORT streamv2-clone-implicit --debug
70 using http://localhost:$HGPORT/
70 using http://localhost:$HGPORT/
71 sending capabilities command
71 sending capabilities command
72 sending clonebundles command
72 sending clonebundles command
73 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
73 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
74 bundle2-input-bundle: with-transaction
74 bundle2-input-bundle: with-transaction
75 bundle2-input-part: "stream2" (params: 3 mandatory) supported
75 bundle2-input-part: "stream2" (params: 3 mandatory) supported
76 applying stream bundle
76 applying stream bundle
77 11 files to transfer, 1.65 KB of data
77 11 files to transfer, 1.65 KB of data
78 starting 4 threads for background file closing (?)
78 starting 4 threads for background file closing (?)
79 starting 4 threads for background file closing (?)
79 starting 4 threads for background file closing (?)
80 adding [s] data/A.i (66 bytes)
80 adding [s] data/A.i (66 bytes)
81 adding [s] data/B.i (66 bytes)
81 adding [s] data/B.i (66 bytes)
82 adding [s] data/C.i (66 bytes)
82 adding [s] data/C.i (66 bytes)
83 adding [s] data/D.i (66 bytes)
83 adding [s] data/D.i (66 bytes)
84 adding [s] data/E.i (66 bytes)
84 adding [s] data/E.i (66 bytes)
85 adding [s] phaseroots (43 bytes)
85 adding [s] 00manifest.i (584 bytes)
86 adding [s] 00manifest.i (584 bytes)
86 adding [s] 00changelog.i (595 bytes)
87 adding [s] 00changelog.i (595 bytes)
87 adding [s] phaseroots (43 bytes)
88 adding [c] branch2-served (94 bytes)
88 adding [c] branch2-served (94 bytes)
89 adding [c] rbc-names-v1 (7 bytes)
89 adding [c] rbc-names-v1 (7 bytes)
90 adding [c] rbc-revs-v1 (40 bytes)
90 adding [c] rbc-revs-v1 (40 bytes)
91 transferred 1.65 KB in * seconds (* */sec) (glob)
91 transferred 1.65 KB in * seconds (* */sec) (glob)
92 bundle2-input-part: total payload size 1840
92 bundle2-input-part: total payload size 1840
93 bundle2-input-bundle: 1 parts total
93 bundle2-input-bundle: 1 parts total
94 updating the branch cache
94 updating the branch cache
95 finished applying clone bundle
95 finished applying clone bundle
96 query 1; heads
96 query 1; heads
97 sending batch command
97 sending batch command
98 searching for changes
98 searching for changes
99 all remote heads known locally
99 all remote heads known locally
100 no changes found
100 no changes found
101 sending getbundle command
101 sending getbundle command
102 bundle2-input-bundle: with-transaction
102 bundle2-input-bundle: with-transaction
103 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
103 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
104 bundle2-input-part: "phase-heads" supported
104 bundle2-input-part: "phase-heads" supported
105 bundle2-input-part: total payload size 24
105 bundle2-input-part: total payload size 24
106 bundle2-input-bundle: 2 parts total
106 bundle2-input-bundle: 2 parts total
107 checking for updated bookmarks
107 checking for updated bookmarks
108 updating to branch default
108 updating to branch default
109 resolving manifests
109 resolving manifests
110 branchmerge: False, force: False, partial: False
110 branchmerge: False, force: False, partial: False
111 ancestor: 000000000000, local: 000000000000+, remote: 9bc730a19041
111 ancestor: 000000000000, local: 000000000000+, remote: 9bc730a19041
112 A: remote created -> g
112 A: remote created -> g
113 getting A
113 getting A
114 B: remote created -> g
114 B: remote created -> g
115 getting B
115 getting B
116 C: remote created -> g
116 C: remote created -> g
117 getting C
117 getting C
118 D: remote created -> g
118 D: remote created -> g
119 getting D
119 getting D
120 E: remote created -> g
120 E: remote created -> g
121 getting E
121 getting E
122 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
122 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
123 updating the branch cache
123 updating the branch cache
124 (sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
124 (sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
125
125
126 $ hg clone --stream http://localhost:$HGPORT streamv2-clone-explicit --debug
126 $ hg clone --stream http://localhost:$HGPORT streamv2-clone-explicit --debug
127 using http://localhost:$HGPORT/
127 using http://localhost:$HGPORT/
128 sending capabilities command
128 sending capabilities command
129 sending clonebundles command
129 sending clonebundles command
130 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
130 applying clone bundle from http://localhost:$HGPORT1/bundle.hg
131 bundle2-input-bundle: with-transaction
131 bundle2-input-bundle: with-transaction
132 bundle2-input-part: "stream2" (params: 3 mandatory) supported
132 bundle2-input-part: "stream2" (params: 3 mandatory) supported
133 applying stream bundle
133 applying stream bundle
134 11 files to transfer, 1.65 KB of data
134 11 files to transfer, 1.65 KB of data
135 starting 4 threads for background file closing (?)
135 starting 4 threads for background file closing (?)
136 starting 4 threads for background file closing (?)
136 starting 4 threads for background file closing (?)
137 adding [s] data/A.i (66 bytes)
137 adding [s] data/A.i (66 bytes)
138 adding [s] data/B.i (66 bytes)
138 adding [s] data/B.i (66 bytes)
139 adding [s] data/C.i (66 bytes)
139 adding [s] data/C.i (66 bytes)
140 adding [s] data/D.i (66 bytes)
140 adding [s] data/D.i (66 bytes)
141 adding [s] data/E.i (66 bytes)
141 adding [s] data/E.i (66 bytes)
142 adding [s] phaseroots (43 bytes)
142 adding [s] 00manifest.i (584 bytes)
143 adding [s] 00manifest.i (584 bytes)
143 adding [s] 00changelog.i (595 bytes)
144 adding [s] 00changelog.i (595 bytes)
144 adding [s] phaseroots (43 bytes)
145 adding [c] branch2-served (94 bytes)
145 adding [c] branch2-served (94 bytes)
146 adding [c] rbc-names-v1 (7 bytes)
146 adding [c] rbc-names-v1 (7 bytes)
147 adding [c] rbc-revs-v1 (40 bytes)
147 adding [c] rbc-revs-v1 (40 bytes)
148 transferred 1.65 KB in * seconds (* */sec) (glob)
148 transferred 1.65 KB in * seconds (* */sec) (glob)
149 bundle2-input-part: total payload size 1840
149 bundle2-input-part: total payload size 1840
150 bundle2-input-bundle: 1 parts total
150 bundle2-input-bundle: 1 parts total
151 updating the branch cache
151 updating the branch cache
152 finished applying clone bundle
152 finished applying clone bundle
153 query 1; heads
153 query 1; heads
154 sending batch command
154 sending batch command
155 searching for changes
155 searching for changes
156 all remote heads known locally
156 all remote heads known locally
157 no changes found
157 no changes found
158 sending getbundle command
158 sending getbundle command
159 bundle2-input-bundle: with-transaction
159 bundle2-input-bundle: with-transaction
160 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
160 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
161 bundle2-input-part: "phase-heads" supported
161 bundle2-input-part: "phase-heads" supported
162 bundle2-input-part: total payload size 24
162 bundle2-input-part: total payload size 24
163 bundle2-input-bundle: 2 parts total
163 bundle2-input-bundle: 2 parts total
164 checking for updated bookmarks
164 checking for updated bookmarks
165 updating to branch default
165 updating to branch default
166 resolving manifests
166 resolving manifests
167 branchmerge: False, force: False, partial: False
167 branchmerge: False, force: False, partial: False
168 ancestor: 000000000000, local: 000000000000+, remote: 9bc730a19041
168 ancestor: 000000000000, local: 000000000000+, remote: 9bc730a19041
169 A: remote created -> g
169 A: remote created -> g
170 getting A
170 getting A
171 B: remote created -> g
171 B: remote created -> g
172 getting B
172 getting B
173 C: remote created -> g
173 C: remote created -> g
174 getting C
174 getting C
175 D: remote created -> g
175 D: remote created -> g
176 getting D
176 getting D
177 E: remote created -> g
177 E: remote created -> g
178 getting E
178 getting E
179 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
179 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
180 updating the branch cache
180 updating the branch cache
181 (sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
181 (sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
General Comments 0
You need to be logged in to leave comments. Login now