walk: no longer ignore revlogs of files starting with `undo.` (issue6542)...
marmoute
r48459:f030c7d2 default
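The regression fixed here comes from the EXCLUDED pattern consulted by revlog_type() (see the store.py hunk below): it was meant to hide transaction leftovers such as undo.backup.00changelog.i, but it equally matches the revlog of any tracked file whose name starts with `undo.`, so those revlogs were silently dropped from store.walk() and hence from stream clones and upgrades (issue6542). A minimal sketch of the collision, using the pattern copied from the hunk below and illustrative file names only:

    import re

    # Pattern formerly consulted by revlog_type(), copied from the hunk below.
    EXCLUDED = re.compile(br'.*undo\.[^/]+\.(nd?|i)$')

    # A transaction backup file that the pattern was meant to skip...
    print(bool(EXCLUDED.match(b'undo.backup.00changelog.i')))  # True

    # ...but the filelog of a tracked file named "undo.babar" matches too,
    # so it used to be ignored by store.walk() (issue6542).
    print(bool(EXCLUDED.match(b'data/undo.babar.i')))          # True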
@@ -1,829 +1,834 b''
1 1 # store.py - repository store handling for Mercurial
2 2 #
3 3 # Copyright 2008 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import functools
12 12 import os
13 13 import re
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .pycompat import getattr
18 18 from .node import hex
19 19 from . import (
20 20 changelog,
21 21 error,
22 22 manifest,
23 23 policy,
24 24 pycompat,
25 25 util,
26 26 vfs as vfsmod,
27 27 )
28 28 from .utils import hashutil
29 29
30 30 parsers = policy.importmod('parsers')
31 31 # how many bytes should be read from fncache in one read
32 32 # It is done to prevent loading large fncache files into memory
33 33 fncache_chunksize = 10 ** 6
34 34
35 35
36 36 def _matchtrackedpath(path, matcher):
37 37 """parses a fncache entry and returns whether the entry is tracking a path
38 38 matched by matcher or not.
39 39
40 40 If matcher is None, returns True"""
41 41
42 42 if matcher is None:
43 43 return True
44 44 path = decodedir(path)
45 45 if path.startswith(b'data/'):
46 46 return matcher(path[len(b'data/') : -len(b'.i')])
47 47 elif path.startswith(b'meta/'):
48 48 return matcher.visitdir(path[len(b'meta/') : -len(b'/00manifest.i')])
49 49
50 50 raise error.ProgrammingError(b"cannot decode path %s" % path)
51 51
52 52
53 53 # This avoids a collision between a file named foo and a dir named
54 54 # foo.i or foo.d
55 55 def _encodedir(path):
56 56 """
57 57 >>> _encodedir(b'data/foo.i')
58 58 'data/foo.i'
59 59 >>> _encodedir(b'data/foo.i/bla.i')
60 60 'data/foo.i.hg/bla.i'
61 61 >>> _encodedir(b'data/foo.i.hg/bla.i')
62 62 'data/foo.i.hg.hg/bla.i'
63 63 >>> _encodedir(b'data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n')
64 64 'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n'
65 65 """
66 66 return (
67 67 path.replace(b".hg/", b".hg.hg/")
68 68 .replace(b".i/", b".i.hg/")
69 69 .replace(b".d/", b".d.hg/")
70 70 )
71 71
72 72
73 73 encodedir = getattr(parsers, 'encodedir', _encodedir)
74 74
75 75
76 76 def decodedir(path):
77 77 """
78 78 >>> decodedir(b'data/foo.i')
79 79 'data/foo.i'
80 80 >>> decodedir(b'data/foo.i.hg/bla.i')
81 81 'data/foo.i/bla.i'
82 82 >>> decodedir(b'data/foo.i.hg.hg/bla.i')
83 83 'data/foo.i.hg/bla.i'
84 84 """
85 85 if b".hg/" not in path:
86 86 return path
87 87 return (
88 88 path.replace(b".d.hg/", b".d/")
89 89 .replace(b".i.hg/", b".i/")
90 90 .replace(b".hg.hg/", b".hg/")
91 91 )
92 92
93 93
94 94 def _reserved():
95 95 """characters that are problematic for filesystems
96 96
97 97 * ascii escapes (0..31)
98 98 * ascii hi (126..255)
99 99 * windows specials
100 100
101 101 these characters will be escaped by encodefunctions
102 102 """
103 103 winreserved = [ord(x) for x in u'\\:*?"<>|']
104 104 for x in range(32):
105 105 yield x
106 106 for x in range(126, 256):
107 107 yield x
108 108 for x in winreserved:
109 109 yield x
110 110
111 111
112 112 def _buildencodefun():
113 113 """
114 114 >>> enc, dec = _buildencodefun()
115 115
116 116 >>> enc(b'nothing/special.txt')
117 117 'nothing/special.txt'
118 118 >>> dec(b'nothing/special.txt')
119 119 'nothing/special.txt'
120 120
121 121 >>> enc(b'HELLO')
122 122 '_h_e_l_l_o'
123 123 >>> dec(b'_h_e_l_l_o')
124 124 'HELLO'
125 125
126 126 >>> enc(b'hello:world?')
127 127 'hello~3aworld~3f'
128 128 >>> dec(b'hello~3aworld~3f')
129 129 'hello:world?'
130 130
131 131 >>> enc(b'the\\x07quick\\xADshot')
132 132 'the~07quick~adshot'
133 133 >>> dec(b'the~07quick~adshot')
134 134 'the\\x07quick\\xadshot'
135 135 """
136 136 e = b'_'
137 137 xchr = pycompat.bytechr
138 138 asciistr = list(map(xchr, range(127)))
139 139 capitals = list(range(ord(b"A"), ord(b"Z") + 1))
140 140
141 141 cmap = {x: x for x in asciistr}
142 142 for x in _reserved():
143 143 cmap[xchr(x)] = b"~%02x" % x
144 144 for x in capitals + [ord(e)]:
145 145 cmap[xchr(x)] = e + xchr(x).lower()
146 146
147 147 dmap = {}
148 148 for k, v in pycompat.iteritems(cmap):
149 149 dmap[v] = k
150 150
151 151 def decode(s):
152 152 i = 0
153 153 while i < len(s):
154 154 for l in pycompat.xrange(1, 4):
155 155 try:
156 156 yield dmap[s[i : i + l]]
157 157 i += l
158 158 break
159 159 except KeyError:
160 160 pass
161 161 else:
162 162 raise KeyError
163 163
164 164 return (
165 165 lambda s: b''.join(
166 166 [cmap[s[c : c + 1]] for c in pycompat.xrange(len(s))]
167 167 ),
168 168 lambda s: b''.join(list(decode(s))),
169 169 )
170 170
171 171
172 172 _encodefname, _decodefname = _buildencodefun()
173 173
174 174
175 175 def encodefilename(s):
176 176 """
177 177 >>> encodefilename(b'foo.i/bar.d/bla.hg/hi:world?/HELLO')
178 178 'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o'
179 179 """
180 180 return _encodefname(encodedir(s))
181 181
182 182
183 183 def decodefilename(s):
184 184 """
185 185 >>> decodefilename(b'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o')
186 186 'foo.i/bar.d/bla.hg/hi:world?/HELLO'
187 187 """
188 188 return decodedir(_decodefname(s))
189 189
190 190
191 191 def _buildlowerencodefun():
192 192 """
193 193 >>> f = _buildlowerencodefun()
194 194 >>> f(b'nothing/special.txt')
195 195 'nothing/special.txt'
196 196 >>> f(b'HELLO')
197 197 'hello'
198 198 >>> f(b'hello:world?')
199 199 'hello~3aworld~3f'
200 200 >>> f(b'the\\x07quick\\xADshot')
201 201 'the~07quick~adshot'
202 202 """
203 203 xchr = pycompat.bytechr
204 204 cmap = {xchr(x): xchr(x) for x in pycompat.xrange(127)}
205 205 for x in _reserved():
206 206 cmap[xchr(x)] = b"~%02x" % x
207 207 for x in range(ord(b"A"), ord(b"Z") + 1):
208 208 cmap[xchr(x)] = xchr(x).lower()
209 209
210 210 def lowerencode(s):
211 211 return b"".join([cmap[c] for c in pycompat.iterbytestr(s)])
212 212
213 213 return lowerencode
214 214
215 215
216 216 lowerencode = getattr(parsers, 'lowerencode', None) or _buildlowerencodefun()
217 217
218 218 # Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9
219 219 _winres3 = (b'aux', b'con', b'prn', b'nul') # length 3
220 220 _winres4 = (b'com', b'lpt') # length 4 (with trailing 1..9)
221 221
222 222
223 223 def _auxencode(path, dotencode):
224 224 """
225 225 Encodes filenames containing names reserved by Windows or which end in
226 226 period or space. Does not touch other single reserved characters c.
227 227 Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
228 228 Additionally encodes space or period at the beginning, if dotencode is
229 229 True. Parameter path is assumed to be all lowercase.
230 230 A segment only needs encoding if a reserved name appears as a
231 231 basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux"
232 232 doesn't need encoding.
233 233
234 234 >>> s = b'.foo/aux.txt/txt.aux/con/prn/nul/foo.'
235 235 >>> _auxencode(s.split(b'/'), True)
236 236 ['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e']
237 237 >>> s = b'.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.'
238 238 >>> _auxencode(s.split(b'/'), False)
239 239 ['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e']
240 240 >>> _auxencode([b'foo. '], True)
241 241 ['foo.~20']
242 242 >>> _auxencode([b' .foo'], True)
243 243 ['~20.foo']
244 244 """
245 245 for i, n in enumerate(path):
246 246 if not n:
247 247 continue
248 248 if dotencode and n[0] in b'. ':
249 249 n = b"~%02x" % ord(n[0:1]) + n[1:]
250 250 path[i] = n
251 251 else:
252 252 l = n.find(b'.')
253 253 if l == -1:
254 254 l = len(n)
255 255 if (l == 3 and n[:3] in _winres3) or (
256 256 l == 4
257 257 and n[3:4] <= b'9'
258 258 and n[3:4] >= b'1'
259 259 and n[:3] in _winres4
260 260 ):
261 261 # encode third letter ('aux' -> 'au~78')
262 262 ec = b"~%02x" % ord(n[2:3])
263 263 n = n[0:2] + ec + n[3:]
264 264 path[i] = n
265 265 if n[-1] in b'. ':
266 266 # encode last period or space ('foo...' -> 'foo..~2e')
267 267 path[i] = n[:-1] + b"~%02x" % ord(n[-1:])
268 268 return path
269 269
270 270
271 271 _maxstorepathlen = 120
272 272 _dirprefixlen = 8
273 273 _maxshortdirslen = 8 * (_dirprefixlen + 1) - 4
274 274
275 275
276 276 def _hashencode(path, dotencode):
277 277 digest = hex(hashutil.sha1(path).digest())
278 278 le = lowerencode(path[5:]).split(b'/') # skips prefix 'data/' or 'meta/'
279 279 parts = _auxencode(le, dotencode)
280 280 basename = parts[-1]
281 281 _root, ext = os.path.splitext(basename)
282 282 sdirs = []
283 283 sdirslen = 0
284 284 for p in parts[:-1]:
285 285 d = p[:_dirprefixlen]
286 286 if d[-1] in b'. ':
287 287 # Windows can't access dirs ending in period or space
288 288 d = d[:-1] + b'_'
289 289 if sdirslen == 0:
290 290 t = len(d)
291 291 else:
292 292 t = sdirslen + 1 + len(d)
293 293 if t > _maxshortdirslen:
294 294 break
295 295 sdirs.append(d)
296 296 sdirslen = t
297 297 dirs = b'/'.join(sdirs)
298 298 if len(dirs) > 0:
299 299 dirs += b'/'
300 300 res = b'dh/' + dirs + digest + ext
301 301 spaceleft = _maxstorepathlen - len(res)
302 302 if spaceleft > 0:
303 303 filler = basename[:spaceleft]
304 304 res = b'dh/' + dirs + filler + digest + ext
305 305 return res
306 306
307 307
308 308 def _hybridencode(path, dotencode):
309 309 """encodes path with a length limit
310 310
311 311 Encodes all paths that begin with 'data/', according to the following.
312 312
313 313 Default encoding (reversible):
314 314
315 315 Encodes all uppercase letters 'X' as '_x'. All reserved or illegal
316 316 characters are encoded as '~xx', where xx is the two digit hex code
317 317 of the character (see encodefilename).
318 318 Relevant path components consisting of Windows reserved filenames are
319 319 masked by encoding the third character ('aux' -> 'au~78', see _auxencode).
320 320
321 321 Hashed encoding (not reversible):
322 322
323 323 If the default-encoded path is longer than _maxstorepathlen, a
324 324 non-reversible hybrid hashing of the path is done instead.
325 325 This encoding uses up to _dirprefixlen characters of all directory
326 326 levels of the lowerencoded path, but not more levels than can fit into
327 327 _maxshortdirslen.
328 328 Then follows the filler followed by the sha digest of the full path.
329 329 The filler is the beginning of the basename of the lowerencoded path
330 330 (the basename is everything after the last path separator). The filler
331 331 is as long as possible, filling in characters from the basename until
332 332 the encoded path has _maxstorepathlen characters (or all chars of the
333 333 basename have been taken).
334 334 The extension (e.g. '.i' or '.d') is preserved.
335 335
336 336 The string 'data/' at the beginning is replaced with 'dh/', if the hashed
337 337 encoding was used.
338 338 """
339 339 path = encodedir(path)
340 340 ef = _encodefname(path).split(b'/')
341 341 res = b'/'.join(_auxencode(ef, dotencode))
342 342 if len(res) > _maxstorepathlen:
343 343 res = _hashencode(path, dotencode)
344 344 return res
345 345
346 346
347 347 def _pathencode(path):
348 348 de = encodedir(path)
349 349 if len(path) > _maxstorepathlen:
350 350 return _hashencode(de, True)
351 351 ef = _encodefname(de).split(b'/')
352 352 res = b'/'.join(_auxencode(ef, True))
353 353 if len(res) > _maxstorepathlen:
354 354 return _hashencode(de, True)
355 355 return res
356 356
357 357
358 358 _pathencode = getattr(parsers, 'pathencode', _pathencode)
359 359
360 360
361 361 def _plainhybridencode(f):
362 362 return _hybridencode(f, False)
363 363
364 364
365 365 def _calcmode(vfs):
366 366 try:
367 367 # files in .hg/ will be created using this mode
368 368 mode = vfs.stat().st_mode
369 369 # avoid some useless chmods
370 370 if (0o777 & ~util.umask) == (0o777 & mode):
371 371 mode = None
372 372 except OSError:
373 373 mode = None
374 374 return mode
375 375
376 376
377 377 _data = [
378 378 b'bookmarks',
379 379 b'narrowspec',
380 380 b'data',
381 381 b'meta',
382 382 b'00manifest.d',
383 383 b'00manifest.i',
384 384 b'00changelog.d',
385 385 b'00changelog.i',
386 386 b'phaseroots',
387 387 b'obsstore',
388 388 b'requires',
389 389 ]
390 390
391 391 REVLOG_FILES_MAIN_EXT = (b'.i', b'i.tmpcensored')
392 392 REVLOG_FILES_OTHER_EXT = (
393 393 b'.idx',
394 394 b'.d',
395 395 b'.dat',
396 396 b'.n',
397 397 b'.nd',
398 398 b'.sda',
399 399 b'd.tmpcensored',
400 400 )
401 401 # files that are "volatile" and might change between listing and streaming
402 402 #
403 403 # note: the ".nd" files are nodemap data and won't "change" but they might be
404 404 # deleted.
405 405 REVLOG_FILES_VOLATILE_EXT = (b'.n', b'.nd')
406 406
407 407 # some exception to the above matching
408 #
409 # XXX This is currently not in use because of issue6542
408 410 EXCLUDED = re.compile(b'.*undo\.[^/]+\.(nd?|i)$')
409 411
410 412
411 413 def is_revlog(f, kind, st):
412 414 if kind != stat.S_IFREG:
413 415 return None
414 416 return revlog_type(f)
415 417
416 418
417 419 def revlog_type(f):
418 if f.endswith(REVLOG_FILES_MAIN_EXT) and EXCLUDED.match(f) is None:
420 # XXX we need to filter `undo.` files created by the transaction here, however
421 # being naive about it also filters revlogs for `undo.*` files, leading to
422 # issue6542. So we no longer use EXCLUDED.
423 if f.endswith(REVLOG_FILES_MAIN_EXT):
419 424 return FILEFLAGS_REVLOG_MAIN
420 elif f.endswith(REVLOG_FILES_OTHER_EXT) and EXCLUDED.match(f) is None:
425 elif f.endswith(REVLOG_FILES_OTHER_EXT):
421 426 t = FILETYPE_FILELOG_OTHER
422 427 if f.endswith(REVLOG_FILES_VOLATILE_EXT):
423 428 t |= FILEFLAGS_VOLATILE
424 429 return t
425 430 return None
426 431
427 432
428 433 # the file is part of changelog data
429 434 FILEFLAGS_CHANGELOG = 1 << 13
430 435 # the file is part of manifest data
431 436 FILEFLAGS_MANIFESTLOG = 1 << 12
432 437 # the file is part of filelog data
433 438 FILEFLAGS_FILELOG = 1 << 11
434 439 # files that are not directly part of a revlog
435 440 FILEFLAGS_OTHER = 1 << 10
436 441
437 442 # the main entry point for a revlog
438 443 FILEFLAGS_REVLOG_MAIN = 1 << 1
439 444 # a secondary file for a revlog
440 445 FILEFLAGS_REVLOG_OTHER = 1 << 0
441 446
442 447 # files that are "volatile" and might change between listing and streaming
443 448 FILEFLAGS_VOLATILE = 1 << 20
444 449
445 450 FILETYPE_CHANGELOG_MAIN = FILEFLAGS_CHANGELOG | FILEFLAGS_REVLOG_MAIN
446 451 FILETYPE_CHANGELOG_OTHER = FILEFLAGS_CHANGELOG | FILEFLAGS_REVLOG_OTHER
447 452 FILETYPE_MANIFESTLOG_MAIN = FILEFLAGS_MANIFESTLOG | FILEFLAGS_REVLOG_MAIN
448 453 FILETYPE_MANIFESTLOG_OTHER = FILEFLAGS_MANIFESTLOG | FILEFLAGS_REVLOG_OTHER
449 454 FILETYPE_FILELOG_MAIN = FILEFLAGS_FILELOG | FILEFLAGS_REVLOG_MAIN
450 455 FILETYPE_FILELOG_OTHER = FILEFLAGS_FILELOG | FILEFLAGS_REVLOG_OTHER
451 456 FILETYPE_OTHER = FILEFLAGS_OTHER
452 457
453 458
454 459 class basicstore(object):
455 460 '''base class for local repository stores'''
456 461
457 462 def __init__(self, path, vfstype):
458 463 vfs = vfstype(path)
459 464 self.path = vfs.base
460 465 self.createmode = _calcmode(vfs)
461 466 vfs.createmode = self.createmode
462 467 self.rawvfs = vfs
463 468 self.vfs = vfsmod.filtervfs(vfs, encodedir)
464 469 self.opener = self.vfs
465 470
466 471 def join(self, f):
467 472 return self.path + b'/' + encodedir(f)
468 473
469 474 def _walk(self, relpath, recurse):
470 475 '''yields (unencoded, encoded, size)'''
471 476 path = self.path
472 477 if relpath:
473 478 path += b'/' + relpath
474 479 striplen = len(self.path) + 1
475 480 l = []
476 481 if self.rawvfs.isdir(path):
477 482 visit = [path]
478 483 readdir = self.rawvfs.readdir
479 484 while visit:
480 485 p = visit.pop()
481 486 for f, kind, st in readdir(p, stat=True):
482 487 fp = p + b'/' + f
483 488 rl_type = is_revlog(f, kind, st)
484 489 if rl_type is not None:
485 490 n = util.pconvert(fp[striplen:])
486 491 l.append((rl_type, decodedir(n), n, st.st_size))
487 492 elif kind == stat.S_IFDIR and recurse:
488 493 visit.append(fp)
489 494 l.sort()
490 495 return l
491 496
492 497 def changelog(self, trypending, concurrencychecker=None):
493 498 return changelog.changelog(
494 499 self.vfs,
495 500 trypending=trypending,
496 501 concurrencychecker=concurrencychecker,
497 502 )
498 503
499 504 def manifestlog(self, repo, storenarrowmatch):
500 505 rootstore = manifest.manifestrevlog(repo.nodeconstants, self.vfs)
501 506 return manifest.manifestlog(self.vfs, repo, rootstore, storenarrowmatch)
502 507
503 508 def datafiles(self, matcher=None):
504 509 files = self._walk(b'data', True) + self._walk(b'meta', True)
505 510 for (t, u, e, s) in files:
506 511 yield (FILEFLAGS_FILELOG | t, u, e, s)
507 512
508 513 def topfiles(self):
509 514 # yield manifest before changelog
510 515 files = reversed(self._walk(b'', False))
511 516 for (t, u, e, s) in files:
512 517 if u.startswith(b'00changelog'):
513 518 yield (FILEFLAGS_CHANGELOG | t, u, e, s)
514 519 elif u.startswith(b'00manifest'):
515 520 yield (FILEFLAGS_MANIFESTLOG | t, u, e, s)
516 521 else:
517 522 yield (FILETYPE_OTHER | t, u, e, s)
518 523
519 524 def walk(self, matcher=None):
520 525 """return file related to data storage (ie: revlogs)
521 526
522 527 yields (file_type, unencoded, encoded, size)
523 528
524 529 if a matcher is passed, only storage files for tracked paths
525 530 matched by the matcher are yielded
526 531 """
527 532 # yield data files first
528 533 for x in self.datafiles(matcher):
529 534 yield x
530 535 for x in self.topfiles():
531 536 yield x
532 537
533 538 def copylist(self):
534 539 return _data
535 540
536 541 def write(self, tr):
537 542 pass
538 543
539 544 def invalidatecaches(self):
540 545 pass
541 546
542 547 def markremoved(self, fn):
543 548 pass
544 549
545 550 def __contains__(self, path):
546 551 '''Checks if the store contains path'''
547 552 path = b"/".join((b"data", path))
548 553 # file?
549 554 if self.vfs.exists(path + b".i"):
550 555 return True
551 556 # dir?
552 557 if not path.endswith(b"/"):
553 558 path = path + b"/"
554 559 return self.vfs.exists(path)
555 560
556 561
557 562 class encodedstore(basicstore):
558 563 def __init__(self, path, vfstype):
559 564 vfs = vfstype(path + b'/store')
560 565 self.path = vfs.base
561 566 self.createmode = _calcmode(vfs)
562 567 vfs.createmode = self.createmode
563 568 self.rawvfs = vfs
564 569 self.vfs = vfsmod.filtervfs(vfs, encodefilename)
565 570 self.opener = self.vfs
566 571
567 572 def datafiles(self, matcher=None):
568 573 for t, a, b, size in super(encodedstore, self).datafiles():
569 574 try:
570 575 a = decodefilename(a)
571 576 except KeyError:
572 577 a = None
573 578 if a is not None and not _matchtrackedpath(a, matcher):
574 579 continue
575 580 yield t, a, b, size
576 581
577 582 def join(self, f):
578 583 return self.path + b'/' + encodefilename(f)
579 584
580 585 def copylist(self):
581 586 return [b'requires', b'00changelog.i'] + [b'store/' + f for f in _data]
582 587
583 588
584 589 class fncache(object):
585 590 # the filename used to be partially encoded
586 591 # hence the encodedir/decodedir dance
587 592 def __init__(self, vfs):
588 593 self.vfs = vfs
589 594 self.entries = None
590 595 self._dirty = False
591 596 # set of new additions to fncache
592 597 self.addls = set()
593 598
594 599 def ensureloaded(self, warn=None):
595 600 """read the fncache file if not already read.
596 601
597 602 If the file on disk is corrupted, raise. If warn is provided,
598 603 warn and keep going instead."""
599 604 if self.entries is None:
600 605 self._load(warn)
601 606
602 607 def _load(self, warn=None):
603 608 '''fill the entries from the fncache file'''
604 609 self._dirty = False
605 610 try:
606 611 fp = self.vfs(b'fncache', mode=b'rb')
607 612 except IOError:
608 613 # skip nonexistent file
609 614 self.entries = set()
610 615 return
611 616
612 617 self.entries = set()
613 618 chunk = b''
614 619 for c in iter(functools.partial(fp.read, fncache_chunksize), b''):
615 620 chunk += c
616 621 try:
617 622 p = chunk.rindex(b'\n')
618 623 self.entries.update(decodedir(chunk[: p + 1]).splitlines())
619 624 chunk = chunk[p + 1 :]
620 625 except ValueError:
621 626 # substring '\n' not found, maybe the entry is bigger than the
622 627 # chunksize, so let's keep iterating
623 628 pass
624 629
625 630 if chunk:
626 631 msg = _(b"fncache does not ends with a newline")
627 632 if warn:
628 633 warn(msg + b'\n')
629 634 else:
630 635 raise error.Abort(
631 636 msg,
632 637 hint=_(
633 638 b"use 'hg debugrebuildfncache' to "
634 639 b"rebuild the fncache"
635 640 ),
636 641 )
637 642 self._checkentries(fp, warn)
638 643 fp.close()
639 644
640 645 def _checkentries(self, fp, warn):
641 646 """make sure there is no empty string in entries"""
642 647 if b'' in self.entries:
643 648 fp.seek(0)
644 649 for n, line in enumerate(util.iterfile(fp)):
645 650 if not line.rstrip(b'\n'):
646 651 t = _(b'invalid entry in fncache, line %d') % (n + 1)
647 652 if warn:
648 653 warn(t + b'\n')
649 654 else:
650 655 raise error.Abort(t)
651 656
652 657 def write(self, tr):
653 658 if self._dirty:
654 659 assert self.entries is not None
655 660 self.entries = self.entries | self.addls
656 661 self.addls = set()
657 662 tr.addbackup(b'fncache')
658 663 fp = self.vfs(b'fncache', mode=b'wb', atomictemp=True)
659 664 if self.entries:
660 665 fp.write(encodedir(b'\n'.join(self.entries) + b'\n'))
661 666 fp.close()
662 667 self._dirty = False
663 668 if self.addls:
664 669 # if we have just new entries, let's append them to the fncache
665 670 tr.addbackup(b'fncache')
666 671 fp = self.vfs(b'fncache', mode=b'ab', atomictemp=True)
667 672 if self.addls:
668 673 fp.write(encodedir(b'\n'.join(self.addls) + b'\n'))
669 674 fp.close()
670 675 self.entries = None
671 676 self.addls = set()
672 677
673 678 def add(self, fn):
674 679 if self.entries is None:
675 680 self._load()
676 681 if fn not in self.entries:
677 682 self.addls.add(fn)
678 683
679 684 def remove(self, fn):
680 685 if self.entries is None:
681 686 self._load()
682 687 if fn in self.addls:
683 688 self.addls.remove(fn)
684 689 return
685 690 try:
686 691 self.entries.remove(fn)
687 692 self._dirty = True
688 693 except KeyError:
689 694 pass
690 695
691 696 def __contains__(self, fn):
692 697 if fn in self.addls:
693 698 return True
694 699 if self.entries is None:
695 700 self._load()
696 701 return fn in self.entries
697 702
698 703 def __iter__(self):
699 704 if self.entries is None:
700 705 self._load()
701 706 return iter(self.entries | self.addls)
702 707
703 708
704 709 class _fncachevfs(vfsmod.proxyvfs):
705 710 def __init__(self, vfs, fnc, encode):
706 711 vfsmod.proxyvfs.__init__(self, vfs)
707 712 self.fncache = fnc
708 713 self.encode = encode
709 714
710 715 def __call__(self, path, mode=b'r', *args, **kw):
711 716 encoded = self.encode(path)
712 717 if mode not in (b'r', b'rb') and (
713 718 path.startswith(b'data/') or path.startswith(b'meta/')
714 719 ):
715 720 # do not trigger a fncache load when adding a file that already is
716 721 # known to exist.
717 722 notload = self.fncache.entries is None and self.vfs.exists(encoded)
718 723 if notload and b'r+' in mode and not self.vfs.stat(encoded).st_size:
719 724 # when appending to an existing file, if the file has size zero,
720 725 # it should be considered as missing. Such zero-size files are
721 726 # the result of truncation when a transaction is aborted.
722 727 notload = False
723 728 if not notload:
724 729 self.fncache.add(path)
725 730 return self.vfs(encoded, mode, *args, **kw)
726 731
727 732 def join(self, path):
728 733 if path:
729 734 return self.vfs.join(self.encode(path))
730 735 else:
731 736 return self.vfs.join(path)
732 737
733 738 def register_file(self, path):
734 739 """generic hook point to lets fncache steer its stew"""
735 740 if path.startswith(b'data/') or path.startswith(b'meta/'):
736 741 self.fncache.add(path)
737 742
738 743
739 744 class fncachestore(basicstore):
740 745 def __init__(self, path, vfstype, dotencode):
741 746 if dotencode:
742 747 encode = _pathencode
743 748 else:
744 749 encode = _plainhybridencode
745 750 self.encode = encode
746 751 vfs = vfstype(path + b'/store')
747 752 self.path = vfs.base
748 753 self.pathsep = self.path + b'/'
749 754 self.createmode = _calcmode(vfs)
750 755 vfs.createmode = self.createmode
751 756 self.rawvfs = vfs
752 757 fnc = fncache(vfs)
753 758 self.fncache = fnc
754 759 self.vfs = _fncachevfs(vfs, fnc, encode)
755 760 self.opener = self.vfs
756 761
757 762 def join(self, f):
758 763 return self.pathsep + self.encode(f)
759 764
760 765 def getsize(self, path):
761 766 return self.rawvfs.stat(path).st_size
762 767
763 768 def datafiles(self, matcher=None):
764 769 for f in sorted(self.fncache):
765 770 if not _matchtrackedpath(f, matcher):
766 771 continue
767 772 ef = self.encode(f)
768 773 try:
769 774 t = revlog_type(f)
770 775 assert t is not None, f
771 776 t |= FILEFLAGS_FILELOG
772 777 yield t, f, ef, self.getsize(ef)
773 778 except OSError as err:
774 779 if err.errno != errno.ENOENT:
775 780 raise
776 781
777 782 def copylist(self):
778 783 d = (
779 784 b'bookmarks',
780 785 b'narrowspec',
781 786 b'data',
782 787 b'meta',
783 788 b'dh',
784 789 b'fncache',
785 790 b'phaseroots',
786 791 b'obsstore',
787 792 b'00manifest.d',
788 793 b'00manifest.i',
789 794 b'00changelog.d',
790 795 b'00changelog.i',
791 796 b'requires',
792 797 )
793 798 return [b'requires', b'00changelog.i'] + [b'store/' + f for f in d]
794 799
795 800 def write(self, tr):
796 801 self.fncache.write(tr)
797 802
798 803 def invalidatecaches(self):
799 804 self.fncache.entries = None
800 805 self.fncache.addls = set()
801 806
802 807 def markremoved(self, fn):
803 808 self.fncache.remove(fn)
804 809
805 810 def _exists(self, f):
806 811 ef = self.encode(f)
807 812 try:
808 813 self.getsize(ef)
809 814 return True
810 815 except OSError as err:
811 816 if err.errno != errno.ENOENT:
812 817 raise
813 818 # nonexistent entry
814 819 return False
815 820
816 821 def __contains__(self, path):
817 822 '''Checks if the store contains path'''
818 823 path = b"/".join((b"data", path))
819 824 # check for files (exact match)
820 825 e = path + b'.i'
821 826 if e in self.fncache and self._exists(e):
822 827 return True
823 828 # now check for directories (prefix match)
824 829 if not path.endswith(b'/'):
825 830 path += b'/'
826 831 for e in self.fncache:
827 832 if e.startswith(path) and self._exists(e):
828 833 return True
829 834 return False
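The FILEFLAGS_*/FILETYPE_* values defined above are bit flags, so consumers of store.walk() (such as the upgrade code in the next hunk) test them with bitwise AND rather than equality. A small sketch of that usage, restating the relevant constant values from above; describe() is a hypothetical helper, not part of Mercurial:

    FILEFLAGS_FILELOG = 1 << 11
    FILEFLAGS_REVLOG_MAIN = 1 << 1
    FILEFLAGS_VOLATILE = 1 << 20
    FILETYPE_FILELOG_MAIN = FILEFLAGS_FILELOG | FILEFLAGS_REVLOG_MAIN

    def describe(rl_type):
        """Summarize a composite file-type value yielded by store.walk()."""
        parts = []
        if rl_type & FILEFLAGS_REVLOG_MAIN:
            parts.append('main revlog file')
        if rl_type & FILEFLAGS_FILELOG:
            parts.append('filelog data')
        if rl_type & FILEFLAGS_VOLATILE:
            parts.append('volatile')
        return ', '.join(parts) or 'other store file'

    print(describe(FILETYPE_FILELOG_MAIN))  # main revlog file, filelog data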
@@ -1,632 +1,643 b''
1 1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 2 #
3 3 # Copyright (c) 2016-present, Gregory Szorc
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import stat
11 11
12 12 from ..i18n import _
13 13 from ..pycompat import getattr
14 14 from .. import (
15 15 changelog,
16 16 error,
17 17 filelog,
18 18 manifest,
19 19 metadata,
20 20 pycompat,
21 21 requirements,
22 22 scmutil,
23 23 store,
24 24 util,
25 25 vfs as vfsmod,
26 26 )
27 27 from ..revlogutils import (
28 28 constants as revlogconst,
29 29 flagutil,
30 30 nodemap,
31 31 sidedata as sidedatamod,
32 32 )
33 33 from . import actions as upgrade_actions
34 34
35 35
36 36 def get_sidedata_helpers(srcrepo, dstrepo):
37 37 use_w = srcrepo.ui.configbool(b'experimental', b'worker.repository-upgrade')
38 38 sequential = pycompat.iswindows or not use_w
39 39 if not sequential:
40 40 srcrepo.register_sidedata_computer(
41 41 revlogconst.KIND_CHANGELOG,
42 42 sidedatamod.SD_FILES,
43 43 (sidedatamod.SD_FILES,),
44 44 metadata._get_worker_sidedata_adder(srcrepo, dstrepo),
45 45 flagutil.REVIDX_HASCOPIESINFO,
46 46 replace=True,
47 47 )
48 48 return sidedatamod.get_sidedata_helpers(srcrepo, dstrepo._wanted_sidedata)
49 49
50 50
51 51 def _revlogfrompath(repo, rl_type, path):
52 52 """Obtain a revlog from a repo path.
53 53
54 54 An instance of the appropriate class is returned.
55 55 """
56 56 if rl_type & store.FILEFLAGS_CHANGELOG:
57 57 return changelog.changelog(repo.svfs)
58 58 elif rl_type & store.FILEFLAGS_MANIFESTLOG:
59 59 mandir = b''
60 60 if b'/' in path:
61 61 mandir = path.rsplit(b'/', 1)[0]
62 62 return manifest.manifestrevlog(
63 63 repo.nodeconstants, repo.svfs, tree=mandir
64 64 )
65 65 else:
66 66 # drop the extension and the `data/` prefix
67 67 path = path.rsplit(b'.', 1)[0].split(b'/', 1)[1]
68 68 return filelog.filelog(repo.svfs, path)
69 69
70 70
71 71 def _copyrevlog(tr, destrepo, oldrl, rl_type, unencodedname):
72 72 """copy all relevant files for `oldrl` into `destrepo` store
73 73
74 74 Files are copied "as is" without any transformation. The copy is performed
75 75 without extra checks. Callers are responsible for making sure the copied
76 76 content is compatible with format of the destination repository.
77 77 """
78 78 oldrl = getattr(oldrl, '_revlog', oldrl)
79 79 newrl = _revlogfrompath(destrepo, rl_type, unencodedname)
80 80 newrl = getattr(newrl, '_revlog', newrl)
81 81
82 82 oldvfs = oldrl.opener
83 83 newvfs = newrl.opener
84 84 oldindex = oldvfs.join(oldrl._indexfile)
85 85 newindex = newvfs.join(newrl._indexfile)
86 86 olddata = oldvfs.join(oldrl._datafile)
87 87 newdata = newvfs.join(newrl._datafile)
88 88
89 89 with newvfs(newrl._indexfile, b'w'):
90 90 pass # create all the directories
91 91
92 92 util.copyfile(oldindex, newindex)
93 93 copydata = oldrl.opener.exists(oldrl._datafile)
94 94 if copydata:
95 95 util.copyfile(olddata, newdata)
96 96
97 97 if rl_type & store.FILEFLAGS_FILELOG:
98 98 destrepo.svfs.fncache.add(unencodedname)
99 99 if copydata:
100 100 destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
101 101
102 102
103 103 UPGRADE_CHANGELOG = b"changelog"
104 104 UPGRADE_MANIFEST = b"manifest"
105 105 UPGRADE_FILELOGS = b"all-filelogs"
106 106
107 107 UPGRADE_ALL_REVLOGS = frozenset(
108 108 [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS]
109 109 )
110 110
111 111
112 112 def matchrevlog(revlogfilter, rl_type):
113 113 """check if a revlog is selected for cloning.
114 114
115 115 In other words, are there any updates which need to be done on the revlog,
116 116 or can it be blindly copied.
117 117
118 118 The store entry is checked against the passed filter"""
119 119 if rl_type & store.FILEFLAGS_CHANGELOG:
120 120 return UPGRADE_CHANGELOG in revlogfilter
121 121 elif rl_type & store.FILEFLAGS_MANIFESTLOG:
122 122 return UPGRADE_MANIFEST in revlogfilter
123 123 assert rl_type & store.FILEFLAGS_FILELOG
124 124 return UPGRADE_FILELOGS in revlogfilter
125 125
126 126
127 127 def _perform_clone(
128 128 ui,
129 129 dstrepo,
130 130 tr,
131 131 old_revlog,
132 132 rl_type,
133 133 unencoded,
134 134 upgrade_op,
135 135 sidedata_helpers,
136 136 oncopiedrevision,
137 137 ):
138 138 """returns the new revlog object created"""
139 139 newrl = None
140 140 if matchrevlog(upgrade_op.revlogs_to_process, rl_type):
141 141 ui.note(
142 142 _(b'cloning %d revisions from %s\n') % (len(old_revlog), unencoded)
143 143 )
144 144 newrl = _revlogfrompath(dstrepo, rl_type, unencoded)
145 145 old_revlog.clone(
146 146 tr,
147 147 newrl,
148 148 addrevisioncb=oncopiedrevision,
149 149 deltareuse=upgrade_op.delta_reuse_mode,
150 150 forcedeltabothparents=upgrade_op.force_re_delta_both_parents,
151 151 sidedata_helpers=sidedata_helpers,
152 152 )
153 153 else:
154 154 msg = _(b'blindly copying %s containing %i revisions\n')
155 155 ui.note(msg % (unencoded, len(old_revlog)))
156 156 _copyrevlog(tr, dstrepo, old_revlog, rl_type, unencoded)
157 157
158 158 newrl = _revlogfrompath(dstrepo, rl_type, unencoded)
159 159 return newrl
160 160
161 161
162 162 def _clonerevlogs(
163 163 ui,
164 164 srcrepo,
165 165 dstrepo,
166 166 tr,
167 167 upgrade_op,
168 168 ):
169 169 """Copy revlogs between 2 repos."""
170 170 revcount = 0
171 171 srcsize = 0
172 172 srcrawsize = 0
173 173 dstsize = 0
174 174 fcount = 0
175 175 frevcount = 0
176 176 fsrcsize = 0
177 177 frawsize = 0
178 178 fdstsize = 0
179 179 mcount = 0
180 180 mrevcount = 0
181 181 msrcsize = 0
182 182 mrawsize = 0
183 183 mdstsize = 0
184 184 crevcount = 0
185 185 csrcsize = 0
186 186 crawsize = 0
187 187 cdstsize = 0
188 188
189 189 alldatafiles = list(srcrepo.store.walk())
190 190 # mapping of data files which need to be cloned
191 191 # key is unencoded filename
192 192 # value is revlog_object_from_srcrepo
193 193 manifests = {}
194 194 changelogs = {}
195 195 filelogs = {}
196 196
197 197 # Perform a pass to collect metadata. This validates we can open all
198 198 # source files and allows a unified progress bar to be displayed.
199 199 for rl_type, unencoded, encoded, size in alldatafiles:
200 200 if not rl_type & store.FILEFLAGS_REVLOG_MAIN:
201 201 continue
202 202
203 # the store.walk function will wrongly pick up transaction backups and
204 # get confused. As a quick fix for the 5.9 release, we ignore those.
205 # (this is not a module constant because it seems better to keep the
206 # hack together)
207 skip_undo = (
208 b'undo.backup.00changelog.i',
209 b'undo.backup.00manifest.i',
210 )
211 if unencoded in skip_undo:
212 continue
213
203 214 rl = _revlogfrompath(srcrepo, rl_type, unencoded)
204 215
205 216 info = rl.storageinfo(
206 217 exclusivefiles=True,
207 218 revisionscount=True,
208 219 trackedsize=True,
209 220 storedsize=True,
210 221 )
211 222
212 223 revcount += info[b'revisionscount'] or 0
213 224 datasize = info[b'storedsize'] or 0
214 225 rawsize = info[b'trackedsize'] or 0
215 226
216 227 srcsize += datasize
217 228 srcrawsize += rawsize
218 229
219 230 # This is for the separate progress bars.
220 231 if rl_type & store.FILEFLAGS_CHANGELOG:
221 232 changelogs[unencoded] = (rl_type, rl)
222 233 crevcount += len(rl)
223 234 csrcsize += datasize
224 235 crawsize += rawsize
225 236 elif rl_type & store.FILEFLAGS_MANIFESTLOG:
226 237 manifests[unencoded] = (rl_type, rl)
227 238 mcount += 1
228 239 mrevcount += len(rl)
229 240 msrcsize += datasize
230 241 mrawsize += rawsize
231 242 elif rl_type & store.FILEFLAGS_FILELOG:
232 243 filelogs[unencoded] = (rl_type, rl)
233 244 fcount += 1
234 245 frevcount += len(rl)
235 246 fsrcsize += datasize
236 247 frawsize += rawsize
237 248 else:
238 249 error.ProgrammingError(b'unknown revlog type')
239 250
240 251 if not revcount:
241 252 return
242 253
243 254 ui.status(
244 255 _(
245 256 b'migrating %d total revisions (%d in filelogs, %d in manifests, '
246 257 b'%d in changelog)\n'
247 258 )
248 259 % (revcount, frevcount, mrevcount, crevcount)
249 260 )
250 261 ui.status(
251 262 _(b'migrating %s in store; %s tracked data\n')
252 263 % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
253 264 )
254 265
255 266 # Used to keep track of progress.
256 267 progress = None
257 268
258 269 def oncopiedrevision(rl, rev, node):
259 270 progress.increment()
260 271
261 272 sidedata_helpers = get_sidedata_helpers(srcrepo, dstrepo)
262 273
263 274 # Migrating filelogs
264 275 ui.status(
265 276 _(
266 277 b'migrating %d filelogs containing %d revisions '
267 278 b'(%s in store; %s tracked data)\n'
268 279 )
269 280 % (
270 281 fcount,
271 282 frevcount,
272 283 util.bytecount(fsrcsize),
273 284 util.bytecount(frawsize),
274 285 )
275 286 )
276 287 progress = srcrepo.ui.makeprogress(_(b'file revisions'), total=frevcount)
277 288 for unencoded, (rl_type, oldrl) in sorted(filelogs.items()):
278 289 newrl = _perform_clone(
279 290 ui,
280 291 dstrepo,
281 292 tr,
282 293 oldrl,
283 294 rl_type,
284 295 unencoded,
285 296 upgrade_op,
286 297 sidedata_helpers,
287 298 oncopiedrevision,
288 299 )
289 300 info = newrl.storageinfo(storedsize=True)
290 301 fdstsize += info[b'storedsize'] or 0
291 302 ui.status(
292 303 _(
293 304 b'finished migrating %d filelog revisions across %d '
294 305 b'filelogs; change in size: %s\n'
295 306 )
296 307 % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
297 308 )
298 309
299 310 # Migrating manifests
300 311 ui.status(
301 312 _(
302 313 b'migrating %d manifests containing %d revisions '
303 314 b'(%s in store; %s tracked data)\n'
304 315 )
305 316 % (
306 317 mcount,
307 318 mrevcount,
308 319 util.bytecount(msrcsize),
309 320 util.bytecount(mrawsize),
310 321 )
311 322 )
312 323 if progress:
313 324 progress.complete()
314 325 progress = srcrepo.ui.makeprogress(
315 326 _(b'manifest revisions'), total=mrevcount
316 327 )
317 328 for unencoded, (rl_type, oldrl) in sorted(manifests.items()):
318 329 newrl = _perform_clone(
319 330 ui,
320 331 dstrepo,
321 332 tr,
322 333 oldrl,
323 334 rl_type,
324 335 unencoded,
325 336 upgrade_op,
326 337 sidedata_helpers,
327 338 oncopiedrevision,
328 339 )
329 340 info = newrl.storageinfo(storedsize=True)
330 341 mdstsize += info[b'storedsize'] or 0
331 342 ui.status(
332 343 _(
333 344 b'finished migrating %d manifest revisions across %d '
334 345 b'manifests; change in size: %s\n'
335 346 )
336 347 % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
337 348 )
338 349
339 350 # Migrating changelog
340 351 ui.status(
341 352 _(
342 353 b'migrating changelog containing %d revisions '
343 354 b'(%s in store; %s tracked data)\n'
344 355 )
345 356 % (
346 357 crevcount,
347 358 util.bytecount(csrcsize),
348 359 util.bytecount(crawsize),
349 360 )
350 361 )
351 362 if progress:
352 363 progress.complete()
353 364 progress = srcrepo.ui.makeprogress(
354 365 _(b'changelog revisions'), total=crevcount
355 366 )
356 367 for unencoded, (rl_type, oldrl) in sorted(changelogs.items()):
357 368 newrl = _perform_clone(
358 369 ui,
359 370 dstrepo,
360 371 tr,
361 372 oldrl,
362 373 rl_type,
363 374 unencoded,
364 375 upgrade_op,
365 376 sidedata_helpers,
366 377 oncopiedrevision,
367 378 )
368 379 info = newrl.storageinfo(storedsize=True)
369 380 cdstsize += info[b'storedsize'] or 0
370 381 progress.complete()
371 382 ui.status(
372 383 _(
373 384 b'finished migrating %d changelog revisions; change in size: '
374 385 b'%s\n'
375 386 )
376 387 % (crevcount, util.bytecount(cdstsize - csrcsize))
377 388 )
378 389
379 390 dstsize = fdstsize + mdstsize + cdstsize
380 391 ui.status(
381 392 _(
382 393 b'finished migrating %d total revisions; total change in store '
383 394 b'size: %s\n'
384 395 )
385 396 % (revcount, util.bytecount(dstsize - srcsize))
386 397 )
387 398
388 399
389 400 def _files_to_copy_post_revlog_clone(srcrepo):
390 401 """yields files which should be copied to destination after revlogs
391 402 are cloned"""
392 403 for path, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
393 404 # don't copy revlogs as they are already cloned
394 405 if store.revlog_type(path) is not None:
395 406 continue
396 407 # Skip transaction related files.
397 408 if path.startswith(b'undo'):
398 409 continue
399 410 # Only copy regular files.
400 411 if kind != stat.S_IFREG:
401 412 continue
402 413 # Skip other skipped files.
403 414 if path in (b'lock', b'fncache'):
404 415 continue
405 416 # TODO: should we skip cache too?
406 417
407 418 yield path
408 419
409 420
410 421 def _replacestores(currentrepo, upgradedrepo, backupvfs, upgrade_op):
411 422 """Replace the stores after current repository is upgraded
412 423
413 424 Creates a backup of current repository store at backup path
414 425 Replaces upgraded store files in current repo from upgraded one
415 426
416 427 Arguments:
417 428 currentrepo: repo object of current repository
418 429 upgradedrepo: repo object of the upgraded data
419 430 backupvfs: vfs object for the backup path
420 431 upgrade_op: upgrade operation object
421 432 to be used to decide what all is upgraded
422 433 """
423 434 # TODO: don't blindly rename everything in store
424 435 # There can be upgrades where store is not touched at all
425 436 if upgrade_op.backup_store:
426 437 util.rename(currentrepo.spath, backupvfs.join(b'store'))
427 438 else:
428 439 currentrepo.vfs.rmtree(b'store', forcibly=True)
429 440 util.rename(upgradedrepo.spath, currentrepo.spath)
430 441
431 442
432 443 def finishdatamigration(ui, srcrepo, dstrepo, requirements):
433 444 """Hook point for extensions to perform additional actions during upgrade.
434 445
435 446 This function is called after revlogs and store files have been copied but
436 447 before the new store is swapped into the original location.
437 448 """
438 449
439 450
440 451 def upgrade(ui, srcrepo, dstrepo, upgrade_op):
441 452 """Do the low-level work of upgrading a repository.
442 453
443 454 The upgrade is effectively performed as a copy between a source
444 455 repository and a temporary destination repository.
445 456
446 457 The source repository is unmodified for as long as possible so the
447 458 upgrade can abort at any time without causing loss of service for
448 459 readers and without corrupting the source repository.
449 460 """
450 461 assert srcrepo.currentwlock()
451 462 assert dstrepo.currentwlock()
452 463 backuppath = None
453 464 backupvfs = None
454 465
455 466 ui.status(
456 467 _(
457 468 b'(it is safe to interrupt this process any time before '
458 469 b'data migration completes)\n'
459 470 )
460 471 )
461 472
462 473 if upgrade_actions.dirstatev2 in upgrade_op.upgrade_actions:
463 474 ui.status(_(b'upgrading to dirstate-v2 from v1\n'))
464 475 upgrade_dirstate(ui, srcrepo, upgrade_op, b'v1', b'v2')
465 476 upgrade_op.upgrade_actions.remove(upgrade_actions.dirstatev2)
466 477
467 478 if upgrade_actions.dirstatev2 in upgrade_op.removed_actions:
468 479 ui.status(_(b'downgrading from dirstate-v2 to v1\n'))
469 480 upgrade_dirstate(ui, srcrepo, upgrade_op, b'v2', b'v1')
470 481 upgrade_op.removed_actions.remove(upgrade_actions.dirstatev2)
471 482
472 483 if not (upgrade_op.upgrade_actions or upgrade_op.removed_actions):
473 484 return
474 485
475 486 if upgrade_op.requirements_only:
476 487 ui.status(_(b'upgrading repository requirements\n'))
477 488 scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
478 489 # if there is only one action and that is persistent nodemap upgrade
479 490 # directly write the nodemap file and update requirements instead of going
480 491 # through the whole cloning process
481 492 elif (
482 493 len(upgrade_op.upgrade_actions) == 1
483 494 and b'persistent-nodemap' in upgrade_op.upgrade_actions_names
484 495 and not upgrade_op.removed_actions
485 496 ):
486 497 ui.status(
487 498 _(b'upgrading repository to use persistent nodemap feature\n')
488 499 )
489 500 with srcrepo.transaction(b'upgrade') as tr:
490 501 unfi = srcrepo.unfiltered()
491 502 cl = unfi.changelog
492 503 nodemap.persist_nodemap(tr, cl, force=True)
493 504 # we want to directly operate on the underlying revlog to force
494 505 # create a nodemap file. This is fine since this is upgrade code
495 506 # and it heavily relies on repository being revlog based
496 507 # hence accessing private attributes can be justified
497 508 nodemap.persist_nodemap(
498 509 tr, unfi.manifestlog._rootstore._revlog, force=True
499 510 )
500 511 scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
501 512 elif (
502 513 len(upgrade_op.removed_actions) == 1
503 514 and [
504 515 x
505 516 for x in upgrade_op.removed_actions
506 517 if x.name == b'persistent-nodemap'
507 518 ]
508 519 and not upgrade_op.upgrade_actions
509 520 ):
510 521 ui.status(
511 522 _(b'downgrading repository to not use persistent nodemap feature\n')
512 523 )
513 524 with srcrepo.transaction(b'upgrade') as tr:
514 525 unfi = srcrepo.unfiltered()
515 526 cl = unfi.changelog
516 527 nodemap.delete_nodemap(tr, srcrepo, cl)
517 528 # check comment 20 lines above for accessing private attributes
518 529 nodemap.delete_nodemap(
519 530 tr, srcrepo, unfi.manifestlog._rootstore._revlog
520 531 )
521 532 scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
522 533 else:
523 534 with dstrepo.transaction(b'upgrade') as tr:
524 535 _clonerevlogs(
525 536 ui,
526 537 srcrepo,
527 538 dstrepo,
528 539 tr,
529 540 upgrade_op,
530 541 )
531 542
532 543 # Now copy other files in the store directory.
533 544 for p in _files_to_copy_post_revlog_clone(srcrepo):
534 545 srcrepo.ui.status(_(b'copying %s\n') % p)
535 546 src = srcrepo.store.rawvfs.join(p)
536 547 dst = dstrepo.store.rawvfs.join(p)
537 548 util.copyfile(src, dst, copystat=True)
538 549
539 550 finishdatamigration(ui, srcrepo, dstrepo, requirements)
540 551
541 552 ui.status(_(b'data fully upgraded in a temporary repository\n'))
542 553
543 554 if upgrade_op.backup_store:
544 555 backuppath = pycompat.mkdtemp(
545 556 prefix=b'upgradebackup.', dir=srcrepo.path
546 557 )
547 558 backupvfs = vfsmod.vfs(backuppath)
548 559
549 560 # Make a backup of requires file first, as it is the first to be modified.
550 561 util.copyfile(
551 562 srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires')
552 563 )
553 564
554 565 # We install an arbitrary requirement that clients must not support
555 566 # as a mechanism to lock out new clients during the data swap. This is
556 567 # better than allowing a client to continue while the repository is in
557 568 # an inconsistent state.
558 569 ui.status(
559 570 _(
560 571 b'marking source repository as being upgraded; clients will be '
561 572 b'unable to read from repository\n'
562 573 )
563 574 )
564 575 scmutil.writereporequirements(
565 576 srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
566 577 )
567 578
568 579 ui.status(_(b'starting in-place swap of repository data\n'))
569 580 if upgrade_op.backup_store:
570 581 ui.status(
571 582 _(b'replaced files will be backed up at %s\n') % backuppath
572 583 )
573 584
574 585 # Now swap in the new store directory. Doing it as a rename should make
575 586 # the operation nearly instantaneous and atomic (at least in well-behaved
576 587 # environments).
577 588 ui.status(_(b'replacing store...\n'))
578 589 tstart = util.timer()
579 590 _replacestores(srcrepo, dstrepo, backupvfs, upgrade_op)
580 591 elapsed = util.timer() - tstart
581 592 ui.status(
582 593 _(
583 594 b'store replacement complete; repository was inconsistent for '
584 595 b'%0.1fs\n'
585 596 )
586 597 % elapsed
587 598 )
588 599
589 600 # We first write the requirements file. Any new requirements will lock
590 601 # out legacy clients.
591 602 ui.status(
592 603 _(
593 604 b'finalizing requirements file and making repository readable '
594 605 b'again\n'
595 606 )
596 607 )
597 608 scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
598 609
599 610 if upgrade_op.backup_store:
600 611 # The lock file from the old store won't be removed because nothing has a
601 612 # reference to its new location. So clean it up manually. Alternatively, we
602 613 # could update srcrepo.svfs and other variables to point to the new
603 614 # location. This is simpler.
604 615 assert backupvfs is not None # help pytype
605 616 backupvfs.unlink(b'store/lock')
606 617
607 618 return backuppath
608 619
609 620
610 621 def upgrade_dirstate(ui, srcrepo, upgrade_op, old, new):
611 622 if upgrade_op.backup_store:
612 623 backuppath = pycompat.mkdtemp(
613 624 prefix=b'upgradebackup.', dir=srcrepo.path
614 625 )
615 626 ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)
616 627 backupvfs = vfsmod.vfs(backuppath)
617 628 util.copyfile(
618 629 srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires')
619 630 )
620 631 util.copyfile(
621 632 srcrepo.vfs.join(b'dirstate'), backupvfs.join(b'dirstate')
622 633 )
623 634
624 635 assert srcrepo.dirstate._use_dirstate_v2 == (old == b'v2')
625 636 srcrepo.dirstate._map._use_dirstate_tree = True
626 637 srcrepo.dirstate._map.preload()
627 638 srcrepo.dirstate._use_dirstate_v2 = new == b'v2'
628 639 srcrepo.dirstate._map._use_dirstate_v2 = srcrepo.dirstate._use_dirstate_v2
629 640 srcrepo.dirstate._dirty = True
630 641 srcrepo.dirstate.write(None)
631 642
632 643 scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
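Because revlog_type() no longer filters `undo.*` names, store.walk() can now also report transaction backup files, which is why _clonerevlogs above skips undo.backup.00changelog.i and undo.backup.00manifest.i by exact unencoded name. A minimal sketch of that filtering, with `entries` standing in for whatever srcrepo.store.walk() yields (illustrative only):

    # Transaction backups that store.walk() may now report (see skip_undo above).
    SKIP_UNDO = (
        b'undo.backup.00changelog.i',
        b'undo.backup.00manifest.i',
    )

    def iter_cloneable(entries):
        """Yield only the (rl_type, unencoded, encoded, size) entries to clone."""
        for rl_type, unencoded, encoded, size in entries:
            if unencoded in SKIP_UNDO:
                continue
            yield rl_type, unencoded, encoded, size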
@@ -1,707 +1,868 b''
1 1 #require serve no-reposimplestore no-chg
2 2
3 3 #testcases stream-legacy stream-bundle2
4 4
5 5 #if stream-legacy
6 6 $ cat << EOF >> $HGRCPATH
7 7 > [server]
8 8 > bundle2.stream = no
9 9 > EOF
10 10 #endif
11 11
12 12 Initialize repository
13 13 the status call is to check for issue5130
14 14
15 15 $ hg init server
16 16 $ cd server
17 17 $ touch foo
18 18 $ hg -q commit -A -m initial
19 19 >>> for i in range(1024):
20 20 ... with open(str(i), 'wb') as fh:
21 21 ... fh.write(b"%d" % i) and None
22 22 $ hg -q commit -A -m 'add a lot of files'
23 23 $ hg st
24
25 add files with "tricky" name:
26
27 $ echo foo > 00changelog.i
28 $ echo foo > 00changelog.d
29 $ echo foo > 00changelog.n
30 $ echo foo > 00changelog-ab349180a0405010.nd
31 $ echo foo > 00manifest.i
32 $ echo foo > 00manifest.d
33 $ echo foo > foo.i
34 $ echo foo > foo.d
35 $ echo foo > foo.n
36 $ echo foo > undo.py
37 $ echo foo > undo.i
38 $ echo foo > undo.d
39 $ echo foo > undo.n
40 $ echo foo > undo.foo.i
41 $ echo foo > undo.foo.d
42 $ echo foo > undo.foo.n
43 $ echo foo > undo.babar
44 $ mkdir savanah
45 $ echo foo > savanah/foo.i
46 $ echo foo > savanah/foo.d
47 $ echo foo > savanah/foo.n
48 $ echo foo > savanah/undo.py
49 $ echo foo > savanah/undo.i
50 $ echo foo > savanah/undo.d
51 $ echo foo > savanah/undo.n
52 $ echo foo > savanah/undo.foo.i
53 $ echo foo > savanah/undo.foo.d
54 $ echo foo > savanah/undo.foo.n
55 $ echo foo > savanah/undo.babar
56 $ mkdir data
57 $ echo foo > data/foo.i
58 $ echo foo > data/foo.d
59 $ echo foo > data/foo.n
60 $ echo foo > data/undo.py
61 $ echo foo > data/undo.i
62 $ echo foo > data/undo.d
63 $ echo foo > data/undo.n
64 $ echo foo > data/undo.foo.i
65 $ echo foo > data/undo.foo.d
66 $ echo foo > data/undo.foo.n
67 $ echo foo > data/undo.babar
68 $ mkdir meta
69 $ echo foo > meta/foo.i
70 $ echo foo > meta/foo.d
71 $ echo foo > meta/foo.n
72 $ echo foo > meta/undo.py
73 $ echo foo > meta/undo.i
74 $ echo foo > meta/undo.d
75 $ echo foo > meta/undo.n
76 $ echo foo > meta/undo.foo.i
77 $ echo foo > meta/undo.foo.d
78 $ echo foo > meta/undo.foo.n
79 $ echo foo > meta/undo.babar
80 $ mkdir store
81 $ echo foo > store/foo.i
82 $ echo foo > store/foo.d
83 $ echo foo > store/foo.n
84 $ echo foo > store/undo.py
85 $ echo foo > store/undo.i
86 $ echo foo > store/undo.d
87 $ echo foo > store/undo.n
88 $ echo foo > store/undo.foo.i
89 $ echo foo > store/undo.foo.d
90 $ echo foo > store/undo.foo.n
91 $ echo foo > store/undo.babar
92 $ hg add .
93 adding 00changelog-ab349180a0405010.nd
94 adding 00changelog.d
95 adding 00changelog.i
96 adding 00changelog.n
97 adding 00manifest.d
98 adding 00manifest.i
99 adding data/foo.d
100 adding data/foo.i
101 adding data/foo.n
102 adding data/undo.babar
103 adding data/undo.d
104 adding data/undo.foo.d
105 adding data/undo.foo.i
106 adding data/undo.foo.n
107 adding data/undo.i
108 adding data/undo.n
109 adding data/undo.py
110 adding foo.d
111 adding foo.i
112 adding foo.n
113 adding meta/foo.d
114 adding meta/foo.i
115 adding meta/foo.n
116 adding meta/undo.babar
117 adding meta/undo.d
118 adding meta/undo.foo.d
119 adding meta/undo.foo.i
120 adding meta/undo.foo.n
121 adding meta/undo.i
122 adding meta/undo.n
123 adding meta/undo.py
124 adding savanah/foo.d
125 adding savanah/foo.i
126 adding savanah/foo.n
127 adding savanah/undo.babar
128 adding savanah/undo.d
129 adding savanah/undo.foo.d
130 adding savanah/undo.foo.i
131 adding savanah/undo.foo.n
132 adding savanah/undo.i
133 adding savanah/undo.n
134 adding savanah/undo.py
135 adding store/foo.d
136 adding store/foo.i
137 adding store/foo.n
138 adding store/undo.babar
139 adding store/undo.d
140 adding store/undo.foo.d
141 adding store/undo.foo.i
142 adding store/undo.foo.n
143 adding store/undo.i
144 adding store/undo.n
145 adding store/undo.py
146 adding undo.babar
147 adding undo.d
148 adding undo.foo.d
149 adding undo.foo.i
150 adding undo.foo.n
151 adding undo.i
152 adding undo.n
153 adding undo.py
154 $ hg ci -m 'add files with "tricky" name'
24 155 $ hg --config server.uncompressed=false serve -p $HGPORT -d --pid-file=hg.pid
25 156 $ cat hg.pid > $DAEMON_PIDS
26 157 $ cd ..
27 158
28 159 Cannot stream clone when server.uncompressed is set
29 160
30 161 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=stream_out'
31 162 200 Script output follows
32 163
33 164 1
34 165
35 166 #if stream-legacy
36 167 $ hg debugcapabilities http://localhost:$HGPORT
37 168 Main capabilities:
38 169 batch
39 170 branchmap
40 171 $USUAL_BUNDLE2_CAPS_SERVER$
41 172 changegroupsubset
42 173 compression=$BUNDLE2_COMPRESSIONS$
43 174 getbundle
44 175 httpheader=1024
45 176 httpmediatype=0.1rx,0.1tx,0.2tx
46 177 known
47 178 lookup
48 179 pushkey
49 180 unbundle=HG10GZ,HG10BZ,HG10UN
50 181 unbundlehash
51 182 Bundle2 capabilities:
52 183 HG20
53 184 bookmarks
54 185 changegroup
55 186 01
56 187 02
57 188 checkheads
58 189 related
59 190 digests
60 191 md5
61 192 sha1
62 193 sha512
63 194 error
64 195 abort
65 196 unsupportedcontent
66 197 pushraced
67 198 pushkey
68 199 hgtagsfnodes
69 200 listkeys
70 201 phases
71 202 heads
72 203 pushkey
73 204 remote-changegroup
74 205 http
75 206 https
76 207
77 208 $ hg clone --stream -U http://localhost:$HGPORT server-disabled
78 209 warning: stream clone requested but server has them disabled
79 210 requesting all changes
80 211 adding changesets
81 212 adding manifests
82 213 adding file changes
83 added 2 changesets with 1025 changes to 1025 files
84 new changesets 96ee1d7354c4:c17445101a72
214 added 3 changesets with 1086 changes to 1086 files
215 new changesets 96ee1d7354c4:7406a3463c3d
85 216
86 217 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
87 218 200 Script output follows
88 219 content-type: application/mercurial-0.2
89 220
90 221
91 222 $ f --size body --hexdump --bytes 100
92 223 body: size=232
93 224 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
94 225 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...|
95 226 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest|
96 227 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
97 228 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
98 229 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
99 230 0060: 69 73 20 66 |is f|
100 231
101 232 #endif
102 233 #if stream-bundle2
103 234 $ hg debugcapabilities http://localhost:$HGPORT
104 235 Main capabilities:
105 236 batch
106 237 branchmap
107 238 $USUAL_BUNDLE2_CAPS_SERVER$
108 239 changegroupsubset
109 240 compression=$BUNDLE2_COMPRESSIONS$
110 241 getbundle
111 242 httpheader=1024
112 243 httpmediatype=0.1rx,0.1tx,0.2tx
113 244 known
114 245 lookup
115 246 pushkey
116 247 unbundle=HG10GZ,HG10BZ,HG10UN
117 248 unbundlehash
118 249 Bundle2 capabilities:
119 250 HG20
120 251 bookmarks
121 252 changegroup
122 253 01
123 254 02
124 255 checkheads
125 256 related
126 257 digests
127 258 md5
128 259 sha1
129 260 sha512
130 261 error
131 262 abort
132 263 unsupportedcontent
133 264 pushraced
134 265 pushkey
135 266 hgtagsfnodes
136 267 listkeys
137 268 phases
138 269 heads
139 270 pushkey
140 271 remote-changegroup
141 272 http
142 273 https
143 274
144 275 $ hg clone --stream -U http://localhost:$HGPORT server-disabled
145 276 warning: stream clone requested but server has them disabled
146 277 requesting all changes
147 278 adding changesets
148 279 adding manifests
149 280 adding file changes
150 added 2 changesets with 1025 changes to 1025 files
151 new changesets 96ee1d7354c4:c17445101a72
281 added 3 changesets with 1086 changes to 1086 files
282 new changesets 96ee1d7354c4:7406a3463c3d
152 283
153 284 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
154 285 200 Script output follows
155 286 content-type: application/mercurial-0.2
156 287
157 288
158 289 $ f --size body --hexdump --bytes 100
159 290 body: size=232
160 291 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
161 292 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...|
162 293 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest|
163 294 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
164 295 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
165 296 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
166 297 0060: 69 73 20 66 |is f|
167 298
168 299 #endif
169 300
170 301 $ killdaemons.py
171 302 $ cd server
172 303 $ hg serve -p $HGPORT -d --pid-file=hg.pid --error errors.txt
173 304 $ cat hg.pid > $DAEMON_PIDS
174 305 $ cd ..
175 306
176 307 Basic clone
177 308
178 309 #if stream-legacy
179 310 $ hg clone --stream -U http://localhost:$HGPORT clone1
180 311 streaming all changes
181 1027 files to transfer, 96.3 KB of data (no-zstd !)
182 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
183 1027 files to transfer, 93.5 KB of data (zstd !)
184 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
312 1088 files to transfer, 101 KB of data (no-zstd !)
313 transferred 101 KB in * seconds (*/sec) (glob) (no-zstd !)
314 1088 files to transfer, 98.4 KB of data (zstd !)
315 transferred 98.4 KB in * seconds (*/sec) (glob) (zstd !)
185 316 searching for changes
186 317 no changes found
187 318 $ cat server/errors.txt
188 319 #endif
189 320 #if stream-bundle2
190 321 $ hg clone --stream -U http://localhost:$HGPORT clone1
191 322 streaming all changes
192 1030 files to transfer, 96.5 KB of data (no-zstd !)
193 transferred 96.5 KB in * seconds (*/sec) (glob) (no-zstd !)
194 1030 files to transfer, 93.6 KB of data (zstd !)
195 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
323 1091 files to transfer, 101 KB of data (no-zstd !)
324 transferred 101 KB in * seconds (*/sec) (glob) (no-zstd !)
325 1091 files to transfer, 98.5 KB of data (zstd !)
326 transferred 98.5 KB in * seconds (* */sec) (glob) (zstd !)
196 327
197 328 $ ls -1 clone1/.hg/cache
198 329 branch2-base
199 330 branch2-immutable
200 331 branch2-served
201 332 branch2-served.hidden
202 333 branch2-visible
203 334 branch2-visible-hidden
204 335 rbc-names-v1
205 336 rbc-revs-v1
206 337 tags2
207 338 tags2-served
208 339 $ cat server/errors.txt
209 340 #endif
210 341
211 342 getbundle requests with stream=1 are uncompressed
212 343
213 344 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto '0.1 0.2 comp=zlib,none' --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
214 345 200 Script output follows
215 346 content-type: application/mercurial-0.2
216 347
217 348
218 349 #if no-zstd no-rust
219 350 $ f --size --hex --bytes 256 body
220 body: size=112262
351 body: size=118551
221 352 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
222 0010: 7f 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......|
223 0020: 05 09 04 0c 44 62 79 74 65 63 6f 75 6e 74 39 38 |....Dbytecount98|
224 0030: 37 37 35 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |775filecount1030|
225 0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote|
226 0050: 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 25 |ncode%2Cfncache%|
227 0060: 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 32 |2Cgeneraldelta%2|
228 0070: 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 |Crevlogv1%2Cspar|
229 0080: 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 |serevlog%2Cstore|
230 0090: 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 |....s.Bdata/0.i.|
231 00a0: 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 |................|
232 00b0: 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 |................|
233 00c0: 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 |)c.I.#....Vg.g,i|
234 00d0: d1 ec 39 00 00 00 00 00 00 00 00 00 00 00 00 75 |..9............u|
235 00e0: 30 73 08 42 64 61 74 61 2f 31 2e 69 00 03 00 01 |0s.Bdata/1.i....|
236 00f0: 00 00 00 00 00 00 00 02 00 00 00 01 00 00 00 00 |................|
353 0010: 80 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......|
354 0020: 06 09 04 0c 44 62 79 74 65 63 6f 75 6e 74 31 30 |....Dbytecount10|
355 0030: 33 36 39 35 66 69 6c 65 63 6f 75 6e 74 31 30 39 |3695filecount109|
356 0040: 31 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 |1requirementsdot|
357 0050: 65 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 |encode%2Cfncache|
358 0060: 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 |%2Cgeneraldelta%|
359 0070: 32 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 |2Crevlogv1%2Cspa|
360 0080: 72 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 |rserevlog%2Cstor|
361 0090: 65 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 |e....s.Bdata/0.i|
362 00a0: 00 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 |................|
363 00b0: 00 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff |................|
364 00c0: 80 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c |.)c.I.#....Vg.g,|
365 00d0: 69 d1 ec 39 00 00 00 00 00 00 00 00 00 00 00 00 |i..9............|
366 00e0: 75 30 73 26 45 64 61 74 61 2f 30 30 63 68 61 6e |u0s&Edata/00chan|
367 00f0: 67 65 6c 6f 67 2d 61 62 33 34 39 31 38 30 61 30 |gelog-ab349180a0|
237 368 #endif
238 369 #if zstd no-rust
239 370 $ f --size --hex --bytes 256 body
240 body: size=109410
371 body: size=115738
241 372 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
242 0010: 99 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......|
243 0020: 05 09 04 0c 5e 62 79 74 65 63 6f 75 6e 74 39 35 |....^bytecount95|
244 0030: 38 39 37 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |897filecount1030|
245 0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote|
246 0050: 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 25 |ncode%2Cfncache%|
247 0060: 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 32 |2Cgeneraldelta%2|
248 0070: 43 72 65 76 6c 6f 67 2d 63 6f 6d 70 72 65 73 73 |Crevlog-compress|
249 0080: 69 6f 6e 2d 7a 73 74 64 25 32 43 72 65 76 6c 6f |ion-zstd%2Crevlo|
250 0090: 67 76 31 25 32 43 73 70 61 72 73 65 72 65 76 6c |gv1%2Csparserevl|
251 00a0: 6f 67 25 32 43 73 74 6f 72 65 00 00 80 00 73 08 |og%2Cstore....s.|
252 00b0: 42 64 61 74 61 2f 30 2e 69 00 03 00 01 00 00 00 |Bdata/0.i.......|
253 00c0: 00 00 00 00 02 00 00 00 01 00 00 00 00 00 00 00 |................|
254 00d0: 01 ff ff ff ff ff ff ff ff 80 29 63 a0 49 d3 23 |..........)c.I.#|
255 00e0: 87 bf ce fe 56 67 92 67 2c 69 d1 ec 39 00 00 00 |....Vg.g,i..9...|
256 00f0: 00 00 00 00 00 00 00 00 00 75 30 73 08 42 64 61 |.........u0s.Bda|
373 0010: 9a 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......|
374 0020: 06 09 04 0c 5e 62 79 74 65 63 6f 75 6e 74 31 30 |....^bytecount10|
375 0030: 30 38 35 36 66 69 6c 65 63 6f 75 6e 74 31 30 39 |0856filecount109|
376 0040: 31 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 |1requirementsdot|
377 0050: 65 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 |encode%2Cfncache|
378 0060: 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 |%2Cgeneraldelta%|
379 0070: 32 43 72 65 76 6c 6f 67 2d 63 6f 6d 70 72 65 73 |2Crevlog-compres|
380 0080: 73 69 6f 6e 2d 7a 73 74 64 25 32 43 72 65 76 6c |sion-zstd%2Crevl|
381 0090: 6f 67 76 31 25 32 43 73 70 61 72 73 65 72 65 76 |ogv1%2Csparserev|
382 00a0: 6c 6f 67 25 32 43 73 74 6f 72 65 00 00 80 00 73 |log%2Cstore....s|
383 00b0: 08 42 64 61 74 61 2f 30 2e 69 00 03 00 01 00 00 |.Bdata/0.i......|
384 00c0: 00 00 00 00 00 02 00 00 00 01 00 00 00 00 00 00 |................|
385 00d0: 00 01 ff ff ff ff ff ff ff ff 80 29 63 a0 49 d3 |...........)c.I.|
386 00e0: 23 87 bf ce fe 56 67 92 67 2c 69 d1 ec 39 00 00 |#....Vg.g,i..9..|
387 00f0: 00 00 00 00 00 00 00 00 00 00 75 30 73 26 45 64 |..........u0s&Ed|
257 388 #endif
258 389 #if zstd rust no-dirstate-v2
259 390 $ f --size --hex --bytes 256 body
260 body: size=109431
391 body: size=115759
261 392 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
262 0010: ae 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......|
263 0020: 05 09 04 0c 73 62 79 74 65 63 6f 75 6e 74 39 35 |....sbytecount95|
264 0030: 38 39 37 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |897filecount1030|
265 0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote|
266 0050: 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 25 |ncode%2Cfncache%|
267 0060: 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 32 |2Cgeneraldelta%2|
268 0070: 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f 64 65 |Cpersistent-node|
269 0080: 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 6f 6d |map%2Crevlog-com|
270 0090: 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 32 43 |pression-zstd%2C|
271 00a0: 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 73 |revlogv1%2Cspars|
272 00b0: 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 00 |erevlog%2Cstore.|
273 00c0: 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 03 |...s.Bdata/0.i..|
274 00d0: 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 00 |................|
275 00e0: 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 29 |...............)|
276 00f0: 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 d1 |c.I.#....Vg.g,i.|
393 0010: af 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......|
394 0020: 06 09 04 0c 73 62 79 74 65 63 6f 75 6e 74 31 30 |....sbytecount10|
395 0030: 30 38 35 36 66 69 6c 65 63 6f 75 6e 74 31 30 39 |0856filecount109|
396 0040: 31 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 |1requirementsdot|
397 0050: 65 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 |encode%2Cfncache|
398 0060: 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 |%2Cgeneraldelta%|
399 0070: 32 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f 64 |2Cpersistent-nod|
400 0080: 65 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 6f |emap%2Crevlog-co|
401 0090: 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 32 |mpression-zstd%2|
402 00a0: 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 |Crevlogv1%2Cspar|
403 00b0: 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 |serevlog%2Cstore|
404 00c0: 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 |....s.Bdata/0.i.|
405 00d0: 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 |................|
406 00e0: 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 |................|
407 00f0: 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 |)c.I.#....Vg.g,i|
277 408 #endif
278 409 #if zstd dirstate-v2
279 410 $ f --size --hex --bytes 256 body
280 411 body: size=109449
281 412 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
282 413 0010: c0 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......|
283 414 0020: 05 09 04 0c 85 62 79 74 65 63 6f 75 6e 74 39 35 |.....bytecount95|
284 415 0030: 38 39 37 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |897filecount1030|
285 416 0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote|
286 417 0050: 6e 63 6f 64 65 25 32 43 65 78 70 2d 64 69 72 73 |ncode%2Cexp-dirs|
287 418 0060: 74 61 74 65 2d 76 32 25 32 43 66 6e 63 61 63 68 |tate-v2%2Cfncach|
288 419 0070: 65 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 |e%2Cgeneraldelta|
289 420 0080: 25 32 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f |%2Cpersistent-no|
290 421 0090: 64 65 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 |demap%2Crevlog-c|
291 422 00a0: 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 |ompression-zstd%|
292 423 00b0: 32 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 |2Crevlogv1%2Cspa|
293 424 00c0: 72 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 |rserevlog%2Cstor|
294 425 00d0: 65 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 |e....s.Bdata/0.i|
295 426 00e0: 00 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 |................|
296 427 00f0: 00 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff |................|
297 428 #endif
298 429
299 430 --uncompressed is an alias to --stream
300 431
301 432 #if stream-legacy
302 433 $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
303 434 streaming all changes
304 1027 files to transfer, 96.3 KB of data (no-zstd !)
305 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
306 1027 files to transfer, 93.5 KB of data (zstd !)
307 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
435 1088 files to transfer, 101 KB of data (no-zstd !)
436 transferred 101 KB in * seconds (*/sec) (glob) (no-zstd !)
437 1088 files to transfer, 98.4 KB of data (zstd !)
438 transferred 98.4 KB in * seconds (*/sec) (glob) (zstd !)
308 439 searching for changes
309 440 no changes found
310 441 #endif
311 442 #if stream-bundle2
312 443 $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
313 444 streaming all changes
314 1030 files to transfer, 96.5 KB of data (no-zstd !)
315 transferred 96.5 KB in * seconds (* */sec) (glob) (no-zstd !)
316 1030 files to transfer, 93.6 KB of data (zstd !)
317 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
445 1091 files to transfer, 101 KB of data (no-zstd !)
446 transferred 101 KB in * seconds (* */sec) (glob) (no-zstd !)
447 1091 files to transfer, 98.5 KB of data (zstd !)
448 transferred 98.5 KB in * seconds (* */sec) (glob) (zstd !)
318 449 #endif
319 450
320 451 Clone with background file closing enabled
321 452
322 453 #if stream-legacy
323 454 $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
324 455 using http://localhost:$HGPORT/
325 456 sending capabilities command
326 457 sending branchmap command
327 458 streaming all changes
328 459 sending stream_out command
329 1027 files to transfer, 96.3 KB of data (no-zstd !)
330 1027 files to transfer, 93.5 KB of data (zstd !)
460 1088 files to transfer, 101 KB of data (no-zstd !)
461 1088 files to transfer, 98.4 KB of data (zstd !)
331 462 starting 4 threads for background file closing
332 463 updating the branch cache
333 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
334 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
464 transferred 101 KB in * seconds (*/sec) (glob) (no-zstd !)
465 transferred 98.4 KB in * seconds (*/sec) (glob) (zstd !)
335 466 query 1; heads
336 467 sending batch command
337 468 searching for changes
338 469 all remote heads known locally
339 470 no changes found
340 471 sending getbundle command
341 472 bundle2-input-bundle: with-transaction
342 473 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
343 474 bundle2-input-part: "phase-heads" supported
344 475 bundle2-input-part: total payload size 24
345 476 bundle2-input-bundle: 2 parts total
346 477 checking for updated bookmarks
347 478 updating the branch cache
348 479 (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
349 480 #endif
350 481 #if stream-bundle2
351 482 $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
352 483 using http://localhost:$HGPORT/
353 484 sending capabilities command
354 485 query 1; heads
355 486 sending batch command
356 487 streaming all changes
357 488 sending getbundle command
358 489 bundle2-input-bundle: with-transaction
359 490 bundle2-input-part: "stream2" (params: 3 mandatory) supported
360 491 applying stream bundle
361 1030 files to transfer, 96.5 KB of data (no-zstd !)
362 1030 files to transfer, 93.6 KB of data (zstd !)
492 1091 files to transfer, 101 KB of data (no-zstd !)
493 1091 files to transfer, 98.5 KB of data (zstd !)
363 494 starting 4 threads for background file closing
364 495 starting 4 threads for background file closing
365 496 updating the branch cache
366 transferred 96.5 KB in * seconds (* */sec) (glob) (no-zstd !)
367 bundle2-input-part: total payload size 112094 (no-zstd !)
368 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
369 bundle2-input-part: total payload size 109216 (zstd !)
497 transferred 101 KB in * seconds (* */sec) (glob) (no-zstd !)
498 bundle2-input-part: total payload size 118382 (no-zstd !)
499 transferred 98.5 KB in * seconds (* */sec) (glob) (zstd !)
500 bundle2-input-part: total payload size 115543 (zstd !)
370 501 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
371 502 bundle2-input-bundle: 2 parts total
372 503 checking for updated bookmarks
373 504 updating the branch cache
374 505 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
375 506 #endif
376 507
377 508 Cannot stream clone when there are secret changesets
378 509
379 510 $ hg -R server phase --force --secret -r tip
380 511 $ hg clone --stream -U http://localhost:$HGPORT secret-denied
381 512 warning: stream clone requested but server has them disabled
382 513 requesting all changes
383 514 adding changesets
384 515 adding manifests
385 516 adding file changes
386 added 1 changesets with 1 changes to 1 files
387 new changesets 96ee1d7354c4
517 added 2 changesets with 1025 changes to 1025 files
518 new changesets 96ee1d7354c4:c17445101a72
388 519
389 520 $ killdaemons.py
390 521
391 522 Streaming of secrets can be overridden by server config
392 523
393 524 $ cd server
394 525 $ hg serve --config server.uncompressedallowsecret=true -p $HGPORT -d --pid-file=hg.pid
395 526 $ cat hg.pid > $DAEMON_PIDS
396 527 $ cd ..
397 528
398 529 #if stream-legacy
399 530 $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
400 531 streaming all changes
401 1027 files to transfer, 96.3 KB of data (no-zstd !)
402 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
403 1027 files to transfer, 93.5 KB of data (zstd !)
404 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
532 1088 files to transfer, 101 KB of data (no-zstd !)
533 transferred 101 KB in * seconds (*/sec) (glob) (no-zstd !)
534 1088 files to transfer, 98.4 KB of data (zstd !)
535 transferred 98.4 KB in * seconds (*/sec) (glob) (zstd !)
405 536 searching for changes
406 537 no changes found
407 538 #endif
408 539 #if stream-bundle2
409 540 $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
410 541 streaming all changes
411 1030 files to transfer, 96.5 KB of data (no-zstd !)
412 transferred 96.5 KB in * seconds (* */sec) (glob) (no-zstd !)
413 1030 files to transfer, 93.6 KB of data (zstd !)
414 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
542 1091 files to transfer, 101 KB of data (no-zstd !)
543 transferred 101 KB in * seconds (* */sec) (glob) (no-zstd !)
544 1091 files to transfer, 98.5 KB of data (zstd !)
545 transferred 98.5 KB in * seconds (* */sec) (glob) (zstd !)
415 546 #endif
416 547
417 548 $ killdaemons.py
418 549
419 550 Verify interaction between preferuncompressed and secret presence
420 551
421 552 $ cd server
422 553 $ hg serve --config server.preferuncompressed=true -p $HGPORT -d --pid-file=hg.pid
423 554 $ cat hg.pid > $DAEMON_PIDS
424 555 $ cd ..
425 556
426 557 $ hg clone -U http://localhost:$HGPORT preferuncompressed-secret
427 558 requesting all changes
428 559 adding changesets
429 560 adding manifests
430 561 adding file changes
431 added 1 changesets with 1 changes to 1 files
432 new changesets 96ee1d7354c4
562 added 2 changesets with 1025 changes to 1025 files
563 new changesets 96ee1d7354c4:c17445101a72
433 564
434 565 $ killdaemons.py
435 566
436 567 Clone not allowed when full bundles are disabled and secrets can't be served
437 568
438 569 $ cd server
439 570 $ hg serve --config server.disablefullbundle=true -p $HGPORT -d --pid-file=hg.pid
440 571 $ cat hg.pid > $DAEMON_PIDS
441 572 $ cd ..
442 573
443 574 $ hg clone --stream http://localhost:$HGPORT secret-full-disabled
444 575 warning: stream clone requested but server has them disabled
445 576 requesting all changes
446 577 remote: abort: server has pull-based clones disabled
447 578 abort: pull failed on remote
448 579 (remove --pull if specified or upgrade Mercurial)
449 580 [100]
450 581
451 582 Local stream clone with secrets involved
452 583 (This is just a test of the behavior: if you have access to the repo's files,
453 584 there is no security boundary, so it isn't important to prevent a clone here.)
454 585
455 586 $ hg clone -U --stream server local-secret
456 587 warning: stream clone requested but server has them disabled
457 588 requesting all changes
458 589 adding changesets
459 590 adding manifests
460 591 adding file changes
461 added 1 changesets with 1 changes to 1 files
462 new changesets 96ee1d7354c4
592 added 2 changesets with 1025 changes to 1025 files
593 new changesets 96ee1d7354c4:c17445101a72
463 594
464 595 Stream clone while repo is changing:
465 596
466 597 $ mkdir changing
467 598 $ cd changing
468 599
469 600 extension for delaying the server process so we can reliably modify the repo
470 601 while cloning
471 602
472 603 $ cat > stream_steps.py <<EOF
473 604 > import os
474 605 > import sys
475 606 > from mercurial import (
476 607 > encoding,
477 608 > extensions,
478 609 > streamclone,
479 610 > testing,
480 611 > )
481 612 > WALKED_FILE_1 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_1']
482 613 > WALKED_FILE_2 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_2']
483 614 >
484 615 > def _test_sync_point_walk_1(orig, repo):
485 616 > testing.write_file(WALKED_FILE_1)
486 617 >
487 618 > def _test_sync_point_walk_2(orig, repo):
488 619 > assert repo._currentlock(repo._lockref) is None
489 620 > testing.wait_file(WALKED_FILE_2)
490 621 >
491 622 > extensions.wrapfunction(
492 623 > streamclone,
493 624 > '_test_sync_point_walk_1',
494 625 > _test_sync_point_walk_1
495 626 > )
496 627 > extensions.wrapfunction(
497 628 > streamclone,
498 629 > '_test_sync_point_walk_2',
499 630 > _test_sync_point_walk_2
500 631 > )
501 632 > EOF
502 633
503 634 prepare a repo with a small and a big file to cover both code paths in emitrevlogdata
504 635
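A minimal sketch of the idea these two files exercise (the threshold and helper name are
assumptions for illustration, not Mercurial's actual emitrevlogdata implementation): a small
file can be read in one go, while a large file should be streamed in bounded chunks.

    import os

    CHUNK_SIZE = 65536  # hypothetical cut-off between the two paths

    def emit_file_data(path):
        """Yield the content of ``path``: whole for small files, chunked for large ones."""
        size = os.path.getsize(path)
        with open(path, 'rb') as fh:
            if size <= CHUNK_SIZE:
                # small file: a single read covers it
                yield fh.read()
            else:
                # large file: stream in fixed-size chunks to bound memory use
                while True:
                    chunk = fh.read(CHUNK_SIZE)
                    if not chunk:
                        break
                    yield chunk
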
505 636 $ hg init repo
506 637 $ touch repo/f1
507 638 $ $TESTDIR/seq.py 50000 > repo/f2
508 639 $ hg -R repo ci -Aqm "0"
509 640 $ HG_TEST_STREAM_WALKED_FILE_1="$TESTTMP/sync_file_walked_1"
510 641 $ export HG_TEST_STREAM_WALKED_FILE_1
511 642 $ HG_TEST_STREAM_WALKED_FILE_2="$TESTTMP/sync_file_walked_2"
512 643 $ export HG_TEST_STREAM_WALKED_FILE_2
513 644 $ HG_TEST_STREAM_WALKED_FILE_3="$TESTTMP/sync_file_walked_3"
514 645 $ export HG_TEST_STREAM_WALKED_FILE_3
515 646 # $ cat << EOF >> $HGRCPATH
516 647 # > [hooks]
517 648 # > pre-clone=rm -f "$TESTTMP/sync_file_walked_*"
518 649 # > EOF
519 650 $ hg serve -R repo -p $HGPORT1 -d --error errors.log --pid-file=hg.pid --config extensions.stream_steps="$RUNTESTDIR/testlib/ext-stream-clone-steps.py"
520 651 $ cat hg.pid >> $DAEMON_PIDS
521 652
522 653 clone while modifying the repo between stat'ing the files with the write lock held and
523 654 actually serving the file content
524 655
525 656 $ (hg clone -q --stream -U http://localhost:$HGPORT1 clone; touch "$HG_TEST_STREAM_WALKED_FILE_3") &
526 657 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
527 658 $ echo >> repo/f1
528 659 $ echo >> repo/f2
529 660 $ hg -R repo ci -m "1" --config ui.timeout.warn=-1
530 661 $ touch $HG_TEST_STREAM_WALKED_FILE_2
531 662 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
532 663 $ hg -R clone id
533 664 000000000000
534 665 $ cat errors.log
535 666 $ cd ..
536 667
537 668 Stream repository with bookmarks
538 669 --------------------------------
539 670
540 671 (revert introduction of secret changeset)
541 672
542 673 $ hg -R server phase --draft 'secret()'
543 674
544 675 add a bookmark
545 676
546 677 $ hg -R server bookmark -r tip some-bookmark
547 678
548 679 clone it
549 680
550 681 #if stream-legacy
551 682 $ hg clone --stream http://localhost:$HGPORT with-bookmarks
552 683 streaming all changes
553 1027 files to transfer, 96.3 KB of data (no-zstd !)
554 transferred 96.3 KB in * seconds (*) (glob) (no-zstd !)
555 1027 files to transfer, 93.5 KB of data (zstd !)
556 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
684 1088 files to transfer, 101 KB of data (no-zstd !)
685 transferred 101 KB in * seconds (*) (glob) (no-zstd !)
686 1088 files to transfer, 98.4 KB of data (zstd !)
687 transferred 98.4 KB in * seconds (*/sec) (glob) (zstd !)
557 688 searching for changes
558 689 no changes found
559 690 updating to branch default
560 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
691 1086 files updated, 0 files merged, 0 files removed, 0 files unresolved
561 692 #endif
562 693 #if stream-bundle2
563 694 $ hg clone --stream http://localhost:$HGPORT with-bookmarks
564 695 streaming all changes
565 1033 files to transfer, 96.6 KB of data (no-zstd !)
566 transferred 96.6 KB in * seconds (* */sec) (glob) (no-zstd !)
567 1033 files to transfer, 93.8 KB of data (zstd !)
568 transferred 93.8 KB in * seconds (* */sec) (glob) (zstd !)
696 1094 files to transfer, 101 KB of data (no-zstd !)
697 transferred 101 KB in * seconds (* */sec) (glob) (no-zstd !)
698 1094 files to transfer, 98.7 KB of data (zstd !)
699 transferred 98.7 KB in * seconds (* */sec) (glob) (zstd !)
569 700 updating to branch default
570 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
701 1086 files updated, 0 files merged, 0 files removed, 0 files unresolved
571 702 #endif
703 $ hg verify -R with-bookmarks
704 checking changesets
705 checking manifests
706 crosschecking files in changesets and manifests
707 checking files
708 checked 3 changesets with 1086 changes to 1086 files
572 709 $ hg -R with-bookmarks bookmarks
573 some-bookmark 1:c17445101a72
710 some-bookmark 2:7406a3463c3d
574 711
575 712 Stream repository with phases
576 713 -----------------------------
577 714
578 715 Clone as publishing
579 716
580 717 $ hg -R server phase -r 'all()'
581 718 0: draft
582 719 1: draft
720 2: draft
583 721
584 722 #if stream-legacy
585 723 $ hg clone --stream http://localhost:$HGPORT phase-publish
586 724 streaming all changes
587 1027 files to transfer, 96.3 KB of data (no-zstd !)
588 transferred 96.3 KB in * seconds (*) (glob) (no-zstd !)
589 1027 files to transfer, 93.5 KB of data (zstd !)
590 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
725 1088 files to transfer, 101 KB of data (no-zstd !)
726 transferred 101 KB in * seconds (*) (glob) (no-zstd !)
727 1088 files to transfer, 98.4 KB of data (zstd !)
728 transferred 98.4 KB in * seconds (*/sec) (glob) (zstd !)
591 729 searching for changes
592 730 no changes found
593 731 updating to branch default
594 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
732 1086 files updated, 0 files merged, 0 files removed, 0 files unresolved
595 733 #endif
596 734 #if stream-bundle2
597 735 $ hg clone --stream http://localhost:$HGPORT phase-publish
598 736 streaming all changes
599 1033 files to transfer, 96.6 KB of data (no-zstd !)
600 transferred 96.6 KB in * seconds (* */sec) (glob) (no-zstd !)
601 1033 files to transfer, 93.8 KB of data (zstd !)
602 transferred 93.8 KB in * seconds (* */sec) (glob) (zstd !)
737 1094 files to transfer, 101 KB of data (no-zstd !)
738 transferred 101 KB in * seconds (* */sec) (glob) (no-zstd !)
739 1094 files to transfer, 98.7 KB of data (zstd !)
740 transferred 98.7 KB in * seconds (* */sec) (glob) (zstd !)
603 741 updating to branch default
604 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
742 1086 files updated, 0 files merged, 0 files removed, 0 files unresolved
605 743 #endif
744 $ hg verify -R phase-publish
745 checking changesets
746 checking manifests
747 crosschecking files in changesets and manifests
748 checking files
749 checked 3 changesets with 1086 changes to 1086 files
606 750 $ hg -R phase-publish phase -r 'all()'
607 751 0: public
608 752 1: public
753 2: public
609 754
610 755 Clone as non publishing
611 756
612 757 $ cat << EOF >> server/.hg/hgrc
613 758 > [phases]
614 759 > publish = False
615 760 > EOF
616 761 $ killdaemons.py
617 762 $ hg -R server serve -p $HGPORT -d --pid-file=hg.pid
618 763 $ cat hg.pid > $DAEMON_PIDS
619 764
620 765 #if stream-legacy
621 766
622 767 With v1 of the stream protocol, changesets are always cloned as public. This makes
623 768 stream v1 unsuitable for non-publishing repositories.
624 769
625 770 $ hg clone --stream http://localhost:$HGPORT phase-no-publish
626 771 streaming all changes
627 1027 files to transfer, 96.3 KB of data (no-zstd !)
628 transferred 96.3 KB in * seconds (* */sec) (glob) (no-zstd !)
629 1027 files to transfer, 93.5 KB of data (zstd !)
630 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
772 1088 files to transfer, 101 KB of data (no-zstd !)
773 transferred 101 KB in * seconds (* */sec) (glob) (no-zstd !)
774 1088 files to transfer, 98.4 KB of data (zstd !)
775 transferred 98.4 KB in * seconds (*/sec) (glob) (zstd !)
631 776 searching for changes
632 777 no changes found
633 778 updating to branch default
634 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
779 1086 files updated, 0 files merged, 0 files removed, 0 files unresolved
635 780 $ hg -R phase-no-publish phase -r 'all()'
636 781 0: public
637 782 1: public
783 2: public
638 784 #endif
639 785 #if stream-bundle2
640 786 $ hg clone --stream http://localhost:$HGPORT phase-no-publish
641 787 streaming all changes
642 1034 files to transfer, 96.7 KB of data (no-zstd !)
643 transferred 96.7 KB in * seconds (* */sec) (glob) (no-zstd !)
644 1034 files to transfer, 93.9 KB of data (zstd !)
645 transferred 93.9 KB in * seconds (* */sec) (glob) (zstd !)
788 1095 files to transfer, 101 KB of data (no-zstd !)
789 transferred 101 KB in * seconds (* */sec) (glob) (no-zstd !)
790 1095 files to transfer, 98.7 KB of data (zstd !)
791 transferred 98.7 KB in * seconds (* */sec) (glob) (zstd !)
646 792 updating to branch default
647 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
793 1086 files updated, 0 files merged, 0 files removed, 0 files unresolved
648 794 $ hg -R phase-no-publish phase -r 'all()'
649 795 0: draft
650 796 1: draft
797 2: draft
651 798 #endif
799 $ hg verify -R phase-no-publish
800 checking changesets
801 checking manifests
802 crosschecking files in changesets and manifests
803 checking files
804 checked 3 changesets with 1086 changes to 1086 files
652 805
653 806 $ killdaemons.py
654 807
655 808 #if stream-legacy
656 809
657 810 With v1 of the stream protocol, changesets are always cloned as public. There is
658 811 no obsolescence marker exchange in stream v1.
659 812
660 813 #endif
661 814 #if stream-bundle2
662 815
663 816 Stream repository with obsolescence
664 817 -----------------------------------
665 818
666 819 Clone non-publishing with obsolescence
667 820
668 821 $ cat >> $HGRCPATH << EOF
669 822 > [experimental]
670 823 > evolution=all
671 824 > EOF
672 825
673 826 $ cd server
674 827 $ echo foo > foo
675 828 $ hg -q commit -m 'about to be pruned'
676 829 $ hg debugobsolete `hg log -r . -T '{node}'` -d '0 0' -u test --record-parents
677 830 1 new obsolescence markers
678 831 obsoleted 1 changesets
679 832 $ hg up null -q
680 833 $ hg log -T '{rev}: {phase}\n'
834 2: draft
681 835 1: draft
682 836 0: draft
683 837 $ hg serve -p $HGPORT -d --pid-file=hg.pid
684 838 $ cat hg.pid > $DAEMON_PIDS
685 839 $ cd ..
686 840
687 841 $ hg clone -U --stream http://localhost:$HGPORT with-obsolescence
688 842 streaming all changes
689 1035 files to transfer, 97.1 KB of data (no-zstd !)
690 transferred 97.1 KB in * seconds (* */sec) (glob) (no-zstd !)
691 1035 files to transfer, 94.3 KB of data (zstd !)
692 transferred 94.3 KB in * seconds (* */sec) (glob) (zstd !)
843 1096 files to transfer, 102 KB of data (no-zstd !)
844 transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
845 1096 files to transfer, 99.1 KB of data (zstd !)
846 transferred 99.1 KB in * seconds (* */sec) (glob) (zstd !)
693 847 $ hg -R with-obsolescence log -T '{rev}: {phase}\n'
848 2: draft
694 849 1: draft
695 850 0: draft
696 851 $ hg debugobsolete -R with-obsolescence
697 50382b884f66690b7045cac93a540cba4d4c906f 0 {c17445101a72edac06facd130d14808dfbd5c7c2} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
852 aa82d3f59e13f41649d8ba3324e1ac8849ba78e7 0 {7406a3463c3de22c4288b4306d199705369a285a} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
853 $ hg verify -R with-obsolescence
854 checking changesets
855 checking manifests
856 crosschecking files in changesets and manifests
857 checking files
858 checked 4 changesets with 1087 changes to 1086 files
698 859
699 860 $ hg clone -U --stream --config experimental.evolution=0 http://localhost:$HGPORT with-obsolescence-no-evolution
700 861 streaming all changes
701 862 remote: abort: server has obsolescence markers, but client cannot receive them via stream clone
702 863 abort: pull failed on remote
703 864 [100]
704 865
705 866 $ killdaemons.py
706 867
707 868 #endif
@@ -1,1062 +1,1067 b''
1 1 ===================================
2 2 Test the persistent on-disk nodemap
3 3 ===================================
4 4
5 5
6 6 #if no-rust
7 7
8 8 $ cat << EOF >> $HGRCPATH
9 9 > [format]
10 10 > use-persistent-nodemap=yes
11 11 > [devel]
12 12 > persistent-nodemap=yes
13 13 > EOF
14 14
15 15 #endif
16 16
17 17 $ hg init test-repo --config storage.revlog.persistent-nodemap.slow-path=allow
18 18 $ cd test-repo
19 19
20 20 Check handling of the default slow-path value
21 21
22 22 #if no-pure no-rust
23 23
24 24 $ hg id
25 25 abort: accessing `persistent-nodemap` repository without associated fast implementation.
26 26 (check `hg help config.format.use-persistent-nodemap` for details)
27 27 [255]
28 28
29 29 Unlock further check (we are here to test the feature)
30 30
31 31 $ cat << EOF >> $HGRCPATH
32 32 > [storage]
33 33 > # to avoid spamming the test
34 34 > revlog.persistent-nodemap.slow-path=allow
35 35 > EOF
36 36
37 37 #endif
38 38
39 39 #if rust
40 40
41 41 Regression test for a previous bug in Rust/C FFI for the `Revlog_CAPI` capsule:
42 42 in places where `mercurial/cext/revlog.c` function signatures use `Py_ssize_t`
43 43 (64 bits on Linux x86_64), corresponding declarations in `rust/hg-cpython/src/cindex.rs`
44 44 incorrectly used `libc::c_int` (32 bits).
45 45 As a result, -1 passed from Rust for the null revision became 4294967295 in C.
46 46
47 47 $ hg log -r 00000000
48 48 changeset: -1:000000000000
49 49 tag: tip
50 50 user:
51 51 date: Thu Jan 01 00:00:00 1970 +0000
52 52
53 53
54 54 #endif
55 55
56 56
57 57 $ hg debugformat
58 58 format-variant repo
59 59 fncache: yes
60 60 dirstate-v2: no
61 61 dotencode: yes
62 62 generaldelta: yes
63 63 share-safe: no
64 64 sparserevlog: yes
65 65 persistent-nodemap: yes
66 66 copies-sdc: no
67 67 revlog-v2: no
68 68 changelog-v2: no
69 69 plain-cl-delta: yes
70 70 compression: zlib (no-zstd !)
71 71 compression: zstd (zstd !)
72 72 compression-level: default
73 73 $ hg debugbuilddag .+5000 --new-file
74 74
75 75 $ hg debugnodemap --metadata
76 76 uid: ???????? (glob)
77 77 tip-rev: 5000
78 78 tip-node: 6b02b8c7b96654c25e86ba69eda198d7e6ad8b3c
79 79 data-length: 121088
80 80 data-unused: 0
81 81 data-unused: 0.000%
82 82 $ f --size .hg/store/00changelog.n
83 83 .hg/store/00changelog.n: size=62
84 84
85 85 Simple lookup works
86 86
87 87 $ ANYNODE=`hg log --template '{node|short}\n' --rev tip`
88 88 $ hg log -r "$ANYNODE" --template '{rev}\n'
89 89 5000
90 90
91 91
92 92 #if rust
93 93
94 94 $ f --sha256 .hg/store/00changelog-*.nd
95 95 .hg/store/00changelog-????????.nd: sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd (glob)
96 96
97 97 $ f --sha256 .hg/store/00manifest-*.nd
98 98 .hg/store/00manifest-????????.nd: sha256=97117b1c064ea2f86664a124589e47db0e254e8d34739b5c5cc5bf31c9da2b51 (glob)
99 99 $ hg debugnodemap --dump-new | f --sha256 --size
100 100 size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
101 101 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
102 102 size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
103 103 0000: 00 00 00 91 00 00 00 20 00 00 00 bb 00 00 00 e7 |....... ........|
104 104 0010: 00 00 00 66 00 00 00 a1 00 00 01 13 00 00 01 22 |...f..........."|
105 105 0020: 00 00 00 23 00 00 00 fc 00 00 00 ba 00 00 00 5e |...#...........^|
106 106 0030: 00 00 00 df 00 00 01 4e 00 00 01 65 00 00 00 ab |.......N...e....|
107 107 0040: 00 00 00 a9 00 00 00 95 00 00 00 73 00 00 00 38 |...........s...8|
108 108 0050: 00 00 00 cc 00 00 00 92 00 00 00 90 00 00 00 69 |...............i|
109 109 0060: 00 00 00 ec 00 00 00 8d 00 00 01 4f 00 00 00 12 |...........O....|
110 110 0070: 00 00 02 0c 00 00 00 77 00 00 00 9c 00 00 00 8f |.......w........|
111 111 0080: 00 00 00 d5 00 00 00 6b 00 00 00 48 00 00 00 b3 |.......k...H....|
112 112 0090: 00 00 00 e5 00 00 00 b5 00 00 00 8e 00 00 00 ad |................|
113 113 00a0: 00 00 00 7b 00 00 00 7c 00 00 00 0b 00 00 00 2b |...{...|.......+|
114 114 00b0: 00 00 00 c6 00 00 00 1e 00 00 01 08 00 00 00 11 |................|
115 115 00c0: 00 00 01 30 00 00 00 26 00 00 01 9c 00 00 00 35 |...0...&.......5|
116 116 00d0: 00 00 00 b8 00 00 01 31 00 00 00 2c 00 00 00 55 |.......1...,...U|
117 117 00e0: 00 00 00 8a 00 00 00 9a 00 00 00 0c 00 00 01 1e |................|
118 118 00f0: 00 00 00 a4 00 00 00 83 00 00 00 c9 00 00 00 8c |................|
119 119
120 120
121 121 #else
122 122
123 123 $ f --sha256 .hg/store/00changelog-*.nd
124 124 .hg/store/00changelog-????????.nd: sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79 (glob)
125 125 $ hg debugnodemap --dump-new | f --sha256 --size
126 126 size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
127 127 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
128 128 size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
129 129 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
130 130 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
131 131 0020: ff ff ff ff ff ff f5 06 ff ff ff ff ff ff f3 e7 |................|
132 132 0030: ff ff ef ca ff ff ff ff ff ff ff ff ff ff ff ff |................|
133 133 0040: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
134 134 0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ed 08 |................|
135 135 0060: ff ff ed 66 ff ff ff ff ff ff ff ff ff ff ff ff |...f............|
136 136 0070: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
137 137 0080: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
138 138 0090: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f6 ed |................|
139 139 00a0: ff ff ff ff ff ff fe 61 ff ff ff ff ff ff ff ff |.......a........|
140 140 00b0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
141 141 00c0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
142 142 00d0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
143 143 00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f1 02 |................|
144 144 00f0: ff ff ff ff ff ff ed 1b ff ff ff ff ff ff ff ff |................|
145 145
146 146 #endif
147 147
148 148 $ hg debugnodemap --check
149 149 revision in index: 5001
150 150 revision in nodemap: 5001
151 151
152 152 add a new commit
153 153
154 154 $ hg up
155 155 5001 files updated, 0 files merged, 0 files removed, 0 files unresolved
156 156 $ echo foo > foo
157 157 $ hg add foo
158 158
159 159
160 160 Check slow-path config value handling
161 161 -------------------------------------
162 162
163 163 #if no-pure no-rust
164 164
165 165 $ hg id --config "storage.revlog.persistent-nodemap.slow-path=invalid-value"
166 166 unknown value for config "storage.revlog.persistent-nodemap.slow-path": "invalid-value"
167 167 falling back to default value: abort
168 168 abort: accessing `persistent-nodemap` repository without associated fast implementation.
169 169 (check `hg help config.format.use-persistent-nodemap` for details)
170 170 [255]
171 171
172 172 $ hg log -r . --config "storage.revlog.persistent-nodemap.slow-path=warn"
173 173 warning: accessing `persistent-nodemap` repository without associated fast implementation.
174 174 (check `hg help config.format.use-persistent-nodemap` for details)
175 175 changeset: 5000:6b02b8c7b966
176 176 tag: tip
177 177 user: debugbuilddag
178 178 date: Thu Jan 01 01:23:20 1970 +0000
179 179 summary: r5000
180 180
181 181 $ hg ci -m 'foo' --config "storage.revlog.persistent-nodemap.slow-path=abort"
182 182 abort: accessing `persistent-nodemap` repository without associated fast implementation.
183 183 (check `hg help config.format.use-persistent-nodemap` for details)
184 184 [255]
185 185
186 186 #else
187 187
188 188 $ hg id --config "storage.revlog.persistent-nodemap.slow-path=invalid-value"
189 189 unknown value for config "storage.revlog.persistent-nodemap.slow-path": "invalid-value"
190 190 falling back to default value: abort
191 191 6b02b8c7b966+ tip
192 192
193 193 #endif
194 194
195 195 $ hg ci -m 'foo'
196 196
197 197 #if no-pure no-rust
198 198 $ hg debugnodemap --metadata
199 199 uid: ???????? (glob)
200 200 tip-rev: 5001
201 201 tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
202 202 data-length: 121088
203 203 data-unused: 0
204 204 data-unused: 0.000%
205 205 #else
206 206 $ hg debugnodemap --metadata
207 207 uid: ???????? (glob)
208 208 tip-rev: 5001
209 209 tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
210 210 data-length: 121344
211 211 data-unused: 256
212 212 data-unused: 0.211%
213 213 #endif
214 214
215 215 $ f --size .hg/store/00changelog.n
216 216 .hg/store/00changelog.n: size=62
217 217
218 218 (The pure code uses the debug code that performs an incremental update; the C code re-encodes from scratch)
219 219
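The two strategies differ roughly as follows (an illustrative sketch with a made-up on-disk
layout, not the actual nodemap serialization): the incremental path appends only the new
entries after the still-valid prefix and leaves some trailing bytes unused, while the full
path rewrites the file from scratch with no unused bytes.

    def write_incremental(path, valid_length, new_entries):
        # keep the valid prefix, append the new entries after it
        with open(path, 'r+b') as fh:
            fh.seek(valid_length)
            for entry in new_entries:
                fh.write(entry)

    def write_from_scratch(path, all_entries):
        # re-encode everything; the result contains no unused data
        with open(path, 'wb') as fh:
            for entry in all_entries:
                fh.write(entry)
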
220 220 #if pure
221 221 $ f --sha256 .hg/store/00changelog-*.nd --size
222 222 .hg/store/00changelog-????????.nd: size=121344, sha256=cce54c5da5bde3ad72a4938673ed4064c86231b9c64376b082b163fdb20f8f66 (glob)
223 223 #endif
224 224
225 225 #if rust
226 226 $ f --sha256 .hg/store/00changelog-*.nd --size
227 227 .hg/store/00changelog-????????.nd: size=121344, sha256=952b042fcf614ceb37b542b1b723e04f18f83efe99bee4e0f5ccd232ef470e58 (glob)
228 228 #endif
229 229
230 230 #if no-pure no-rust
231 231 $ f --sha256 .hg/store/00changelog-*.nd --size
232 232 .hg/store/00changelog-????????.nd: size=121088, sha256=df7c06a035b96cb28c7287d349d603baef43240be7736fe34eea419a49702e17 (glob)
233 233 #endif
234 234
235 235 $ hg debugnodemap --check
236 236 revision in index: 5002
237 237 revision in nodemap: 5002
238 238
239 239 Test code path without mmap
240 240 ---------------------------
241 241
242 242 $ echo bar > bar
243 243 $ hg add bar
244 244 $ hg ci -m 'bar' --config storage.revlog.persistent-nodemap.mmap=no
245 245
246 246 $ hg debugnodemap --check --config storage.revlog.persistent-nodemap.mmap=yes
247 247 revision in index: 5003
248 248 revision in nodemap: 5003
249 249 $ hg debugnodemap --check --config storage.revlog.persistent-nodemap.mmap=no
250 250 revision in index: 5003
251 251 revision in nodemap: 5003
252 252
253 253
254 254 #if pure
255 255 $ hg debugnodemap --metadata
256 256 uid: ???????? (glob)
257 257 tip-rev: 5002
258 258 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
259 259 data-length: 121600
260 260 data-unused: 512
261 261 data-unused: 0.421%
262 262 $ f --sha256 .hg/store/00changelog-*.nd --size
263 263 .hg/store/00changelog-????????.nd: size=121600, sha256=def52503d049ccb823974af313a98a935319ba61f40f3aa06a8be4d35c215054 (glob)
264 264 #endif
265 265 #if rust
266 266 $ hg debugnodemap --metadata
267 267 uid: ???????? (glob)
268 268 tip-rev: 5002
269 269 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
270 270 data-length: 121600
271 271 data-unused: 512
272 272 data-unused: 0.421%
273 273 $ f --sha256 .hg/store/00changelog-*.nd --size
274 274 .hg/store/00changelog-????????.nd: size=121600, sha256=dacf5b5f1d4585fee7527d0e67cad5b1ba0930e6a0928f650f779aefb04ce3fb (glob)
275 275 #endif
276 276 #if no-pure no-rust
277 277 $ hg debugnodemap --metadata
278 278 uid: ???????? (glob)
279 279 tip-rev: 5002
280 280 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
281 281 data-length: 121088
282 282 data-unused: 0
283 283 data-unused: 0.000%
284 284 $ f --sha256 .hg/store/00changelog-*.nd --size
285 285 .hg/store/00changelog-????????.nd: size=121088, sha256=59fcede3e3cc587755916ceed29e3c33748cd1aa7d2f91828ac83e7979d935e8 (glob)
286 286 #endif
287 287
288 288 Test force warming the cache
289 289
290 290 $ rm .hg/store/00changelog.n
291 291 $ hg debugnodemap --metadata
292 292 $ hg debugupdatecache
293 293 #if pure
294 294 $ hg debugnodemap --metadata
295 295 uid: ???????? (glob)
296 296 tip-rev: 5002
297 297 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
298 298 data-length: 121088
299 299 data-unused: 0
300 300 data-unused: 0.000%
301 301 #else
302 302 $ hg debugnodemap --metadata
303 303 uid: ???????? (glob)
304 304 tip-rev: 5002
305 305 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
306 306 data-length: 121088
307 307 data-unused: 0
308 308 data-unused: 0.000%
309 309 #endif
310 310
311 311 Check out-of-sync nodemap
312 312 =========================
313 313
314 314 First, copy the old data to the side.
315 315
316 316 $ mkdir ../tmp-copies
317 317 $ cp .hg/store/00changelog-????????.nd .hg/store/00changelog.n ../tmp-copies
318 318
319 319 Nodemap lagging behind
320 320 ----------------------
321 321
322 322 make a new commit
323 323
324 324 $ echo bar2 > bar
325 325 $ hg ci -m 'bar2'
326 326 $ NODE=`hg log -r tip -T '{node}\n'`
327 327 $ hg log -r "$NODE" -T '{rev}\n'
328 328 5003
329 329
330 330 If the nodemap is lagging behind, it can catch up fine
331 331
332 332 $ hg debugnodemap --metadata
333 333 uid: ???????? (glob)
334 334 tip-rev: 5003
335 335 tip-node: c9329770f979ade2d16912267c38ba5f82fd37b3
336 336 data-length: 121344 (pure !)
337 337 data-length: 121344 (rust !)
338 338 data-length: 121152 (no-rust no-pure !)
339 339 data-unused: 192 (pure !)
340 340 data-unused: 192 (rust !)
341 341 data-unused: 0 (no-rust no-pure !)
342 342 data-unused: 0.158% (pure !)
343 343 data-unused: 0.158% (rust !)
344 344 data-unused: 0.000% (no-rust no-pure !)
345 345 $ cp -f ../tmp-copies/* .hg/store/
346 346 $ hg debugnodemap --metadata
347 347 uid: ???????? (glob)
348 348 tip-rev: 5002
349 349 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
350 350 data-length: 121088
351 351 data-unused: 0
352 352 data-unused: 0.000%
353 353 $ hg log -r "$NODE" -T '{rev}\n'
354 354 5003
355 355
356 356 changelog altered
357 357 -----------------
358 358
359 359 If the nodemap is not gated behind a requirement, an unaware client can alter
360 360 the repository so that the revlog used to generate the nodemap is no longer
361 361 compatible with the persistent nodemap. We need to detect that.
362 362
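One way to detect this (a hedged sketch; the ``docket`` and ``changelog`` attribute names
below are assumptions for illustration, not Mercurial's real API) is to compare the tip
revision and tip node recorded with the persisted nodemap against what the changelog
currently reports, and fall back to a full rebuild on any mismatch.

    def nodemap_is_valid(docket, changelog):
        """Return True only if the persisted nodemap still matches the changelog."""
        if docket.tip_rev >= len(changelog):
            return False  # the changelog was stripped below the recorded tip
        return changelog.node(docket.tip_rev) == docket.tip_node

    def load_nodemap(docket, changelog, rebuild):
        if docket is None or not nodemap_is_valid(docket, changelog):
            return rebuild(changelog)  # recover by recomputing from the revlog
        return docket
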
363 363 $ hg up "$NODE~5"
364 364 0 files updated, 0 files merged, 4 files removed, 0 files unresolved
365 365 $ echo bar > babar
366 366 $ hg add babar
367 367 $ hg ci -m 'babar'
368 368 created new head
369 369 $ OTHERNODE=`hg log -r tip -T '{node}\n'`
370 370 $ hg log -r "$OTHERNODE" -T '{rev}\n'
371 371 5004
372 372
373 373 $ hg --config extensions.strip= strip --rev "$NODE~1" --no-backup
374 374
375 375 The nodemap should detect that the changelog has been tampered with and recover.
376 376
377 377 $ hg debugnodemap --metadata
378 378 uid: ???????? (glob)
379 379 tip-rev: 5002
380 380 tip-node: b355ef8adce0949b8bdf6afc72ca853740d65944
381 381 data-length: 121536 (pure !)
382 382 data-length: 121088 (rust !)
383 383 data-length: 121088 (no-pure no-rust !)
384 384 data-unused: 448 (pure !)
385 385 data-unused: 0 (rust !)
386 386 data-unused: 0 (no-pure no-rust !)
387 387 data-unused: 0.000% (rust !)
388 388 data-unused: 0.369% (pure !)
389 389 data-unused: 0.000% (no-pure no-rust !)
390 390
391 391 $ cp -f ../tmp-copies/* .hg/store/
392 392 $ hg debugnodemap --metadata
393 393 uid: ???????? (glob)
394 394 tip-rev: 5002
395 395 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
396 396 data-length: 121088
397 397 data-unused: 0
398 398 data-unused: 0.000%
399 399 $ hg log -r "$OTHERNODE" -T '{rev}\n'
400 400 5002
401 401
402 402 missing data file
403 403 -----------------
404 404
405 405 $ UUID=`hg debugnodemap --metadata| grep 'uid:' | \
406 406 > sed 's/uid: //'`
407 407 $ FILE=.hg/store/00changelog-"${UUID}".nd
408 408 $ mv $FILE ../tmp-data-file
409 409 $ cp .hg/store/00changelog.n ../tmp-docket
410 410
411 411 Mercurial does not crash
412 412
413 413 $ hg log -r .
414 414 changeset: 5002:b355ef8adce0
415 415 tag: tip
416 416 parent: 4998:d918ad6d18d3
417 417 user: test
418 418 date: Thu Jan 01 00:00:00 1970 +0000
419 419 summary: babar
420 420
421 421 $ hg debugnodemap --metadata
422 422
423 423 $ hg debugupdatecache
424 424 $ hg debugnodemap --metadata
425 425 uid: * (glob)
426 426 tip-rev: 5002
427 427 tip-node: b355ef8adce0949b8bdf6afc72ca853740d65944
428 428 data-length: 121088
429 429 data-unused: 0
430 430 data-unused: 0.000%
431 431 $ mv ../tmp-data-file $FILE
432 432 $ mv ../tmp-docket .hg/store/00changelog.n
433 433
434 434 Check transaction-related property
435 435 ==================================
436 436
437 437 An up-to-date nodemap should be available to shell hooks.
438 438
439 439 $ echo dsljfl > a
440 440 $ hg add a
441 441 $ hg ci -m a
442 442 $ hg debugnodemap --metadata
443 443 uid: ???????? (glob)
444 444 tip-rev: 5003
445 445 tip-node: a52c5079765b5865d97b993b303a18740113bbb2
446 446 data-length: 121088
447 447 data-unused: 0
448 448 data-unused: 0.000%
449 449 $ echo babar2 > babar
450 450 $ hg ci -m 'babar2' --config "hooks.pretxnclose.nodemap-test=hg debugnodemap --metadata"
451 451 uid: ???????? (glob)
452 452 tip-rev: 5004
453 453 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
454 454 data-length: 121280 (pure !)
455 455 data-length: 121280 (rust !)
456 456 data-length: 121088 (no-pure no-rust !)
457 457 data-unused: 192 (pure !)
458 458 data-unused: 192 (rust !)
459 459 data-unused: 0 (no-pure no-rust !)
460 460 data-unused: 0.158% (pure !)
461 461 data-unused: 0.158% (rust !)
462 462 data-unused: 0.000% (no-pure no-rust !)
463 463 $ hg debugnodemap --metadata
464 464 uid: ???????? (glob)
465 465 tip-rev: 5004
466 466 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
467 467 data-length: 121280 (pure !)
468 468 data-length: 121280 (rust !)
469 469 data-length: 121088 (no-pure no-rust !)
470 470 data-unused: 192 (pure !)
471 471 data-unused: 192 (rust !)
472 472 data-unused: 0 (no-pure no-rust !)
473 473 data-unused: 0.158% (pure !)
474 474 data-unused: 0.158% (rust !)
475 475 data-unused: 0.000% (no-pure no-rust !)
476 476
477 477 Another process does not see the pending nodemap content during the run.
478 478
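The isolation relies on pending data not being published until the transaction closes. A
minimal sketch of that pattern (illustrative only, not the actual hg transaction machinery)
writes to a side file and atomically renames it on commit, so concurrent readers see either
the old or the new content, never the pending state:

    import os

    def write_pending(path, data):
        # other readers keep seeing the current content of ``path``
        with open(path + '.pending', 'wb') as fh:
            fh.write(data)

    def commit_pending(path):
        # atomic publication at transaction close
        os.replace(path + '.pending', path)

    def rollback_pending(path):
        # a failed transaction simply discards the pending file
        if os.path.exists(path + '.pending'):
            os.unlink(path + '.pending')
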
479 479 $ echo qpoasp > a
480 480 $ hg ci -m a2 \
481 481 > --config "hooks.pretxnclose=sh \"$RUNTESTDIR/testlib/wait-on-file\" 20 sync-repo-read sync-txn-pending" \
482 482 > --config "hooks.txnclose=touch sync-txn-close" > output.txt 2>&1 &
483 483
484 484 (read the repository while the commit transaction is pending)
485 485
486 486 $ sh "$RUNTESTDIR/testlib/wait-on-file" 20 sync-txn-pending && \
487 487 > hg debugnodemap --metadata && \
488 488 > sh "$RUNTESTDIR/testlib/wait-on-file" 20 sync-txn-close sync-repo-read
489 489 uid: ???????? (glob)
490 490 tip-rev: 5004
491 491 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
492 492 data-length: 121280 (pure !)
493 493 data-length: 121280 (rust !)
494 494 data-length: 121088 (no-pure no-rust !)
495 495 data-unused: 192 (pure !)
496 496 data-unused: 192 (rust !)
497 497 data-unused: 0 (no-pure no-rust !)
498 498 data-unused: 0.158% (pure !)
499 499 data-unused: 0.158% (rust !)
500 500 data-unused: 0.000% (no-pure no-rust !)
501 501 $ hg debugnodemap --metadata
502 502 uid: ???????? (glob)
503 503 tip-rev: 5005
504 504 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
505 505 data-length: 121536 (pure !)
506 506 data-length: 121536 (rust !)
507 507 data-length: 121088 (no-pure no-rust !)
508 508 data-unused: 448 (pure !)
509 509 data-unused: 448 (rust !)
510 510 data-unused: 0 (no-pure no-rust !)
511 511 data-unused: 0.369% (pure !)
512 512 data-unused: 0.369% (rust !)
513 513 data-unused: 0.000% (no-pure no-rust !)
514 514
515 515 $ cat output.txt
516 516
517 517 Check that a failing transaction will properly revert the data
518 518
519 519 $ echo plakfe > a
520 520 $ f --size --sha256 .hg/store/00changelog-*.nd
521 521 .hg/store/00changelog-????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
522 522 .hg/store/00changelog-????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
523 523 .hg/store/00changelog-????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
524 524 $ hg ci -m a3 --config "extensions.abort=$RUNTESTDIR/testlib/crash_transaction_late.py"
525 525 transaction abort!
526 526 rollback completed
527 527 abort: This is a late abort
528 528 [255]
529 529 $ hg debugnodemap --metadata
530 530 uid: ???????? (glob)
531 531 tip-rev: 5005
532 532 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
533 533 data-length: 121536 (pure !)
534 534 data-length: 121536 (rust !)
535 535 data-length: 121088 (no-pure no-rust !)
536 536 data-unused: 448 (pure !)
537 537 data-unused: 448 (rust !)
538 538 data-unused: 0 (no-pure no-rust !)
539 539 data-unused: 0.369% (pure !)
540 540 data-unused: 0.369% (rust !)
541 541 data-unused: 0.000% (no-pure no-rust !)
542 542 $ f --size --sha256 .hg/store/00changelog-*.nd
543 543 .hg/store/00changelog-????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
544 544 .hg/store/00changelog-????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
545 545 .hg/store/00changelog-????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
546 546
547 547 Check that removing content does not confuse the nodemap
548 548 --------------------------------------------------------
549 549
550 550 removing data with rollback
551 551
552 552 $ echo aso > a
553 553 $ hg ci -m a4
554 554 $ hg rollback
555 555 repository tip rolled back to revision 5005 (undo commit)
556 556 working directory now based on revision 5005
557 557 $ hg id -r .
558 558 90d5d3ba2fc4 tip
559 559
560 560 removing data with strip
561 561
562 562 $ echo aso > a
563 563 $ hg ci -m a4
564 564 $ hg --config extensions.strip= strip -r . --no-backup
565 565 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
566 566 $ hg id -r . --traceback
567 567 90d5d3ba2fc4 tip
568 568
569 569 Test upgrade / downgrade
570 570 ========================
571 571
572 572 downgrading
573 573
574 574 $ cat << EOF >> .hg/hgrc
575 575 > [format]
576 576 > use-persistent-nodemap=no
577 577 > EOF
578 578 $ hg debugformat -v
579 579 format-variant repo config default
580 580 fncache: yes yes yes
581 581 dirstate-v2: no no no
582 582 dotencode: yes yes yes
583 583 generaldelta: yes yes yes
584 584 share-safe: no no no
585 585 sparserevlog: yes yes yes
586 586 persistent-nodemap: yes no no
587 587 copies-sdc: no no no
588 588 revlog-v2: no no no
589 589 changelog-v2: no no no
590 590 plain-cl-delta: yes yes yes
591 591 compression: zlib zlib zlib (no-zstd !)
592 592 compression: zstd zstd zstd (zstd !)
593 593 compression-level: default default default
594 594 $ hg debugupgraderepo --run --no-backup
595 595 upgrade will perform the following actions:
596 596
597 597 requirements
598 598 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd no-dirstate-v2 !)
599 599 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd no-dirstate-v2 !)
600 600 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd dirstate-v2 !)
601 601 removed: persistent-nodemap
602 602
603 603 processed revlogs:
604 604 - all-filelogs
605 605 - changelog
606 606 - manifest
607 607
608 608 beginning upgrade...
609 609 repository locked and read-only
610 610 creating temporary repository to stage upgraded data: $TESTTMP/test-repo/.hg/upgrade.* (glob)
611 611 (it is safe to interrupt this process any time before data migration completes)
612 612 downgrading repository to not use persistent nodemap feature
613 613 removing temporary repository $TESTTMP/test-repo/.hg/upgrade.* (glob)
614 614 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
615 615 00changelog-*.nd (glob)
616 616 00manifest-*.nd (glob)
617 617 undo.backup.00changelog.n
618 618 undo.backup.00manifest.n
619 619 $ hg debugnodemap --metadata
620 620
621 621
622 622 upgrading
623 623
624 624 $ cat << EOF >> .hg/hgrc
625 625 > [format]
626 626 > use-persistent-nodemap=yes
627 627 > EOF
628 628 $ hg debugformat -v
629 629 format-variant repo config default
630 630 fncache: yes yes yes
631 631 dirstate-v2: no no no
632 632 dotencode: yes yes yes
633 633 generaldelta: yes yes yes
634 634 share-safe: no no no
635 635 sparserevlog: yes yes yes
636 636 persistent-nodemap: no yes no
637 637 copies-sdc: no no no
638 638 revlog-v2: no no no
639 639 changelog-v2: no no no
640 640 plain-cl-delta: yes yes yes
641 641 compression: zlib zlib zlib (no-zstd !)
642 642 compression: zstd zstd zstd (zstd !)
643 643 compression-level: default default default
644 644 $ hg debugupgraderepo --run --no-backup
645 645 upgrade will perform the following actions:
646 646
647 647 requirements
648 648 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd no-dirstate-v2 !)
649 649 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd no-dirstate-v2 !)
650 650 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd dirstate-v2 !)
651 651 added: persistent-nodemap
652 652
653 653 persistent-nodemap
654 654 Speedup revision lookup by node id.
655 655
656 656 processed revlogs:
657 657 - all-filelogs
658 658 - changelog
659 659 - manifest
660 660
661 661 beginning upgrade...
662 662 repository locked and read-only
663 663 creating temporary repository to stage upgraded data: $TESTTMP/test-repo/.hg/upgrade.* (glob)
664 664 (it is safe to interrupt this process any time before data migration completes)
665 665 upgrading repository to use persistent nodemap feature
666 666 removing temporary repository $TESTTMP/test-repo/.hg/upgrade.* (glob)
667 667 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
668 668 00changelog-*.nd (glob)
669 669 00changelog.n
670 670 00manifest-*.nd (glob)
671 671 00manifest.n
672 672 undo.backup.00changelog.n
673 673 undo.backup.00manifest.n
674 674
675 675 $ hg debugnodemap --metadata
676 676 uid: * (glob)
677 677 tip-rev: 5005
678 678 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
679 679 data-length: 121088
680 680 data-unused: 0
681 681 data-unused: 0.000%
682 682
683 683 Running unrelated upgrade
684 684
685 685 $ hg debugupgraderepo --run --no-backup --quiet --optimize re-delta-all
686 686 upgrade will perform the following actions:
687 687
688 688 requirements
689 689 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store (no-zstd no-dirstate-v2 !)
690 690 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd no-dirstate-v2 !)
691 691 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd dirstate-v2 !)
692 692
693 693 optimisations: re-delta-all
694 694
695 695 processed revlogs:
696 696 - all-filelogs
697 697 - changelog
698 698 - manifest
699 699
700 700 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
701 701 00changelog-*.nd (glob)
702 702 00changelog.n
703 703 00manifest-*.nd (glob)
704 704 00manifest.n
705 705
706 706 $ hg debugnodemap --metadata
707 707 uid: * (glob)
708 708 tip-rev: 5005
709 709 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
710 710 data-length: 121088
711 711 data-unused: 0
712 712 data-unused: 0.000%
713 713
714 714 Persistent nodemap and local/streaming clone
715 715 ============================================
716 716
717 717 $ cd ..
718 718
719 719 standard clone
720 720 --------------
721 721
722 722 The persistent nodemap should exist after a standard clone
723 723
724 724 $ hg clone --pull --quiet -U test-repo standard-clone
725 725 $ ls -1 standard-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
726 726 00changelog-*.nd (glob)
727 727 00changelog.n
728 728 00manifest-*.nd (glob)
729 729 00manifest.n
730 730 $ hg -R standard-clone debugnodemap --metadata
731 731 uid: * (glob)
732 732 tip-rev: 5005
733 733 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
734 734 data-length: 121088
735 735 data-unused: 0
736 736 data-unused: 0.000%
737 737
738 738
739 739 local clone
740 740 ------------
741 741
742 742 The persistent nodemap should exist after a local clone
743 743
744 744 $ hg clone -U test-repo local-clone
745 745 $ ls -1 local-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
746 746 00changelog-*.nd (glob)
747 747 00changelog.n
748 748 00manifest-*.nd (glob)
749 749 00manifest.n
750 750 $ hg -R local-clone debugnodemap --metadata
751 751 uid: * (glob)
752 752 tip-rev: 5005
753 753 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
754 754 data-length: 121088
755 755 data-unused: 0
756 756 data-unused: 0.000%
757 757
758 758 Test various corruption cases
759 759 ============================
760 760
761 761 Missing datafile
762 762 ----------------
763 763
764 764 Test behavior with a missing datafile
765 765
766 766 $ hg clone --quiet --pull test-repo corruption-test-repo
767 767 $ ls -1 corruption-test-repo/.hg/store/00changelog*
768 768 corruption-test-repo/.hg/store/00changelog-*.nd (glob)
769 769 corruption-test-repo/.hg/store/00changelog.d
770 770 corruption-test-repo/.hg/store/00changelog.i
771 771 corruption-test-repo/.hg/store/00changelog.n
772 772 $ rm corruption-test-repo/.hg/store/00changelog*.nd
773 773 $ hg log -R corruption-test-repo -r .
774 774 changeset: 5005:90d5d3ba2fc4
775 775 tag: tip
776 776 user: test
777 777 date: Thu Jan 01 00:00:00 1970 +0000
778 778 summary: a2
779 779
780 780 $ ls -1 corruption-test-repo/.hg/store/00changelog*
781 781 corruption-test-repo/.hg/store/00changelog.d
782 782 corruption-test-repo/.hg/store/00changelog.i
783 783 corruption-test-repo/.hg/store/00changelog.n
784 784
785 785 Truncated data file
786 786 -------------------
787 787
788 788 Test behavior with a too-short datafile
789 789
790 790 rebuild the missing data
791 791 $ hg -R corruption-test-repo debugupdatecache
792 792 $ ls -1 corruption-test-repo/.hg/store/00changelog*
793 793 corruption-test-repo/.hg/store/00changelog-*.nd (glob)
794 794 corruption-test-repo/.hg/store/00changelog.d
795 795 corruption-test-repo/.hg/store/00changelog.i
796 796 corruption-test-repo/.hg/store/00changelog.n
797 797
798 798 truncate the file
799 799
800 800 $ datafilepath=`ls corruption-test-repo/.hg/store/00changelog*.nd`
801 801 $ f -s $datafilepath
802 802 corruption-test-repo/.hg/store/00changelog-*.nd: size=121088 (glob)
803 803 $ dd if=$datafilepath bs=1000 count=10 of=$datafilepath-tmp status=noxfer
804 804 10+0 records in
805 805 10+0 records out
806 806 $ mv $datafilepath-tmp $datafilepath
807 807 $ f -s $datafilepath
808 808 corruption-test-repo/.hg/store/00changelog-*.nd: size=10000 (glob)
809 809
810 810 Check Mercurial's reaction to this event
811 811
812 812 $ hg -R corruption-test-repo log -r . --traceback
813 813 changeset: 5005:90d5d3ba2fc4
814 814 tag: tip
815 815 user: test
816 816 date: Thu Jan 01 00:00:00 1970 +0000
817 817 summary: a2
818 818
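The log command above still succeeds because the nodemap data file is only a cache: a reader that finds the file shorter than the data-length recorded in the docket can ignore it and fall back to scanning the revlog. A rough sketch of such a guard (hypothetical helper, not the actual Mercurial code):

    import os

    def usable_nodemap_data(data_path, docket_data_length):
        """Trust the on-disk nodemap only if it is at least as long as the
        docket claims; otherwise behave as if it were missing."""
        try:
            return os.path.getsize(data_path) >= docket_data_length
        except OSError:
            return False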
819 819
820 820
821 821 stream clone
822 822 ============
823 823
824 824 The persistent nodemap should exist after a streaming clone
825 825
826 826 Simple case
827 827 -----------
828 828
829 829 No race condition
830 830
831 831 $ hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone --debug | egrep '00(changelog|manifest)'
832 832 adding [s] 00manifest.n (62 bytes)
833 833 adding [s] 00manifest-*.nd (118 KB) (glob)
834 834 adding [s] 00changelog.n (62 bytes)
835 835 adding [s] 00changelog-*.nd (118 KB) (glob)
836 836 adding [s] 00manifest.d (452 KB) (no-zstd !)
837 837 adding [s] 00manifest.d (491 KB) (zstd !)
838 838 adding [s] 00changelog.d (360 KB) (no-zstd !)
839 839 adding [s] 00changelog.d (368 KB) (zstd !)
840 840 adding [s] 00manifest.i (313 KB)
841 841 adding [s] 00changelog.i (313 KB)
842 842 $ ls -1 stream-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
843 843 00changelog-*.nd (glob)
844 844 00changelog.n
845 845 00manifest-*.nd (glob)
846 846 00manifest.n
847 847 $ hg -R stream-clone debugnodemap --metadata
848 848 uid: * (glob)
849 849 tip-rev: 5005
850 850 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
851 851 data-length: 121088
852 852 data-unused: 0
853 853 data-unused: 0.000%
854 854
855 855 new data appended
856 856 -----------------
857 857
858 858 Other commit happening on the server during the stream clone
859 859
860 860 set up the step-by-step stream cloning
861 861
862 862 $ HG_TEST_STREAM_WALKED_FILE_1="$TESTTMP/sync_file_walked_1"
863 863 $ export HG_TEST_STREAM_WALKED_FILE_1
864 864 $ HG_TEST_STREAM_WALKED_FILE_2="$TESTTMP/sync_file_walked_2"
865 865 $ export HG_TEST_STREAM_WALKED_FILE_2
866 866 $ HG_TEST_STREAM_WALKED_FILE_3="$TESTTMP/sync_file_walked_3"
867 867 $ export HG_TEST_STREAM_WALKED_FILE_3
868 868 $ cat << EOF >> test-repo/.hg/hgrc
869 869 > [extensions]
870 870 > steps=$RUNTESTDIR/testlib/ext-stream-clone-steps.py
871 871 > EOF
872 872
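The three sync files let the test pause the cloning process between the moment it lists the store files and the moment it actually transfers them. The `wait-on-file` helper used below is essentially a bounded poll loop; a rough Python equivalent (the real helper is a script in the test suite, the names here are illustrative):

    import os
    import time

    def wait_on_file(timeout, path):
        """Poll until `path` exists or `timeout` seconds have elapsed."""
        deadline = time.time() + float(timeout)
        while time.time() < deadline:
            if os.path.exists(path):
                return True
            time.sleep(0.01)
        return False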
873 873 Check and record file state beforehand
874 874
875 875 $ f --size test-repo/.hg/store/00changelog*
876 876 test-repo/.hg/store/00changelog-*.nd: size=121088 (glob)
877 877 test-repo/.hg/store/00changelog.d: size=376891 (zstd !)
878 878 test-repo/.hg/store/00changelog.d: size=368890 (no-zstd !)
879 879 test-repo/.hg/store/00changelog.i: size=320384
880 880 test-repo/.hg/store/00changelog.n: size=62
881 881 $ hg -R test-repo debugnodemap --metadata | tee server-metadata.txt
882 882 uid: * (glob)
883 883 tip-rev: 5005
884 884 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
885 885 data-length: 121088
886 886 data-unused: 0
887 887 data-unused: 0.000%
888 888
889 889 Prepare a commit
890 890
891 891 $ echo foo >> test-repo/foo
892 892 $ hg -R test-repo/ add test-repo/foo
893 893
894 894 Do a mix of clone and commit at the same time so that the files listed on disk differ at actual transfer time.
895 895
896 896 $ (hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone-race-1 --debug 2>> clone-output | egrep '00(changelog|manifest)' >> clone-output; touch $HG_TEST_STREAM_WALKED_FILE_3) &
897 897 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
898 898 $ hg -R test-repo/ commit -m foo
899 899 $ touch $HG_TEST_STREAM_WALKED_FILE_2
900 900 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
901 901 $ cat clone-output
902 902 adding [s] 00manifest.n (62 bytes)
903 903 adding [s] 00manifest-*.nd (118 KB) (glob)
904 904 adding [s] 00changelog.n (62 bytes)
905 905 adding [s] 00changelog-*.nd (118 KB) (glob)
906 906 adding [s] 00manifest.d (452 KB) (no-zstd !)
907 907 adding [s] 00manifest.d (491 KB) (zstd !)
908 908 adding [s] 00changelog.d (360 KB) (no-zstd !)
909 909 adding [s] 00changelog.d (368 KB) (zstd !)
910 910 adding [s] 00manifest.i (313 KB)
911 911 adding [s] 00changelog.i (313 KB)
912 912
913 913 Check the result state
914 914
915 915 $ f --size stream-clone-race-1/.hg/store/00changelog*
916 916 stream-clone-race-1/.hg/store/00changelog-*.nd: size=121088 (glob)
917 917 stream-clone-race-1/.hg/store/00changelog.d: size=368890 (no-zstd !)
918 918 stream-clone-race-1/.hg/store/00changelog.d: size=376891 (zstd !)
919 919 stream-clone-race-1/.hg/store/00changelog.i: size=320384
920 920 stream-clone-race-1/.hg/store/00changelog.n: size=62
921 921
922 922 $ hg -R stream-clone-race-1 debugnodemap --metadata | tee client-metadata.txt
923 923 uid: * (glob)
924 924 tip-rev: 5005
925 925 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
926 926 data-length: 121088
927 927 data-unused: 0
928 928 data-unused: 0.000%
929 929
930 930 We get a usable nodemap, so no rewrite would be needed and the metadata should be identical
931 931 (ie: the following diff should be empty)
932 932
933 933 This isn't the case for the `no-rust` `no-pure` implementation, as it uses a very minimal nodemap implementation that unconditionally rewrites the nodemap "all the time".
934 934
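With the rust and pure implementations the writer can append new blocks to the existing data file and keep its uid, while the minimal implementation used when neither is available rewrites the file (and picks a new uid) on every update, which is why the diff is only expected to be non-empty in that configuration. A hedged sketch of that decision (not the actual Mercurial code):

    def write_strategy(existing_data_is_valid, supports_incremental_update):
        if existing_data_is_valid and supports_incremental_update:
            return 'append'   # same uid; data-unused may grow slightly
        return 'rewrite'      # fresh uid; data-unused drops back to 0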
935 935 #if no-rust no-pure
936 936 $ diff -u server-metadata.txt client-metadata.txt
937 937 --- server-metadata.txt * (glob)
938 938 +++ client-metadata.txt * (glob)
939 939 @@ -1,4 +1,4 @@
940 940 -uid: * (glob)
941 941 +uid: * (glob)
942 942 tip-rev: 5005
943 943 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
944 944 data-length: 121088
945 945 [1]
946 946 #else
947 947 $ diff -u server-metadata.txt client-metadata.txt
948 948 #endif
949 949
950 950
951 951 Clean up after the test.
952 952
953 953 $ rm -f "$HG_TEST_STREAM_WALKED_FILE_1"
954 954 $ rm -f "$HG_TEST_STREAM_WALKED_FILE_2"
955 955 $ rm -f "$HG_TEST_STREAM_WALKED_FILE_3"
956 956
957 957 full regeneration
958 958 -----------------
959 959
960 960 A full nodemap is generated
961 961
962 962 (ideally this test would append enough data to make sure the nodemap data file
963 963 gets changed; however, to make things simpler we will force the regeneration for
964 964 this test.)
965 965
966 966 Check the initial state
967 967
968 968 $ f --size test-repo/.hg/store/00changelog*
969 969 test-repo/.hg/store/00changelog-*.nd: size=121344 (glob) (rust !)
970 970 test-repo/.hg/store/00changelog-*.nd: size=121344 (glob) (pure !)
971 971 test-repo/.hg/store/00changelog-*.nd: size=121152 (glob) (no-rust no-pure !)
972 972 test-repo/.hg/store/00changelog.d: size=376950 (zstd !)
973 973 test-repo/.hg/store/00changelog.d: size=368949 (no-zstd !)
974 974 test-repo/.hg/store/00changelog.i: size=320448
975 975 test-repo/.hg/store/00changelog.n: size=62
976 976 $ hg -R test-repo debugnodemap --metadata | tee server-metadata-2.txt
977 977 uid: * (glob)
978 978 tip-rev: 5006
979 979 tip-node: ed2ec1eef9aa2a0ec5057c51483bc148d03e810b
980 980 data-length: 121344 (rust !)
981 981 data-length: 121344 (pure !)
982 982 data-length: 121152 (no-rust no-pure !)
983 983 data-unused: 192 (rust !)
984 984 data-unused: 192 (pure !)
985 985 data-unused: 0 (no-rust no-pure !)
986 986 data-unused: 0.158% (rust !)
987 987 data-unused: 0.158% (pure !)
988 988 data-unused: 0.000% (no-rust no-pure !)
989 989
990 990 Perform the mix of clone and full refresh of the nodemap, so that the files
991 991 (and filenames) are different between listing time and actual transfer time.
992 992
993 993 $ (hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone-race-2 --debug 2>> clone-output-2 | egrep '00(changelog|manifest)' >> clone-output-2; touch $HG_TEST_STREAM_WALKED_FILE_3) &
994 994 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
995 995 $ rm test-repo/.hg/store/00changelog.n
996 996 $ rm test-repo/.hg/store/00changelog-*.nd
997 997 $ hg -R test-repo/ debugupdatecache
998 998 $ touch $HG_TEST_STREAM_WALKED_FILE_2
999 999 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
1000
1001 (note: the stream clone code wrongly picks the `undo.` files)
1002
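The `known-bad-output` lines below show the problem: the walk used by stream clone also returns the transient `undo.backup.*` dockets. The distinction Mercurial has to draw (see issue6542) is between store-level `undo.*` journal/backup files, which should be skipped, and tracked files whose names merely start with `undo.`, whose revlogs under `data/` must be kept. A hedged sketch of such a filter, illustrative only and not the actual fix:

    def files_to_stream(entries):
        for name, size in entries:
            # skip store-level journal/backup files such as undo.backup.00changelog.n;
            # revlogs live under data/ or meta/ and never match this prefix check
            if name.startswith(b'undo.'):
                continue
            yield name, size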
1000 1003 $ cat clone-output-2
1004 adding [s] undo.backup.00manifest.n (62 bytes) (known-bad-output !)
1005 adding [s] undo.backup.00changelog.n (62 bytes) (known-bad-output !)
1001 1006 adding [s] 00manifest.n (62 bytes)
1002 1007 adding [s] 00manifest-*.nd (118 KB) (glob)
1003 1008 adding [s] 00changelog.n (62 bytes)
1004 1009 adding [s] 00changelog-*.nd (118 KB) (glob)
1005 1010 adding [s] 00manifest.d (492 KB) (zstd !)
1006 1011 adding [s] 00manifest.d (452 KB) (no-zstd !)
1007 1012 adding [s] 00changelog.d (360 KB) (no-zstd !)
1008 1013 adding [s] 00changelog.d (368 KB) (zstd !)
1009 1014 adding [s] 00manifest.i (313 KB)
1010 1015 adding [s] 00changelog.i (313 KB)
1011 1016
1012 1017 Check the result.
1013 1018
1014 1019 $ f --size stream-clone-race-2/.hg/store/00changelog*
1015 1020 stream-clone-race-2/.hg/store/00changelog-*.nd: size=121344 (glob) (rust !)
1016 1021 stream-clone-race-2/.hg/store/00changelog-*.nd: size=121344 (glob) (pure !)
1017 1022 stream-clone-race-2/.hg/store/00changelog-*.nd: size=121152 (glob) (no-rust no-pure !)
1018 1023 stream-clone-race-2/.hg/store/00changelog.d: size=376950 (zstd !)
1019 1024 stream-clone-race-2/.hg/store/00changelog.d: size=368949 (no-zstd !)
1020 1025 stream-clone-race-2/.hg/store/00changelog.i: size=320448
1021 1026 stream-clone-race-2/.hg/store/00changelog.n: size=62
1022 1027
1023 1028 $ hg -R stream-clone-race-2 debugnodemap --metadata | tee client-metadata-2.txt
1024 1029 uid: * (glob)
1025 1030 tip-rev: 5006
1026 1031 tip-node: ed2ec1eef9aa2a0ec5057c51483bc148d03e810b
1027 1032 data-length: 121344 (rust !)
1028 1033 data-unused: 192 (rust !)
1029 1034 data-unused: 0.158% (rust !)
1030 1035 data-length: 121152 (no-rust no-pure !)
1031 1036 data-unused: 0 (no-rust no-pure !)
1032 1037 data-unused: 0.000% (no-rust no-pure !)
1033 1038 data-length: 121344 (pure !)
1034 1039 data-unused: 192 (pure !)
1035 1040 data-unused: 0.158% (pure !)
1036 1041
1037 1042 We get a usable nodemap, so no rewrite would be needed and the metadata should be identical
1038 1043 (ie: the following diff should be empty)
1039 1044
1040 1045 This isn't the case for the `no-rust` `no-pure` implementation, as it uses a very minimal nodemap implementation that unconditionally rewrites the nodemap "all the time".
1041 1046
1042 1047 #if no-rust no-pure
1043 1048 $ diff -u server-metadata-2.txt client-metadata-2.txt
1044 1049 --- server-metadata-2.txt * (glob)
1045 1050 +++ client-metadata-2.txt * (glob)
1046 1051 @@ -1,4 +1,4 @@
1047 1052 -uid: * (glob)
1048 1053 +uid: * (glob)
1049 1054 tip-rev: 5006
1050 1055 tip-node: ed2ec1eef9aa2a0ec5057c51483bc148d03e810b
1051 1056 data-length: 121152
1052 1057 [1]
1053 1058 #else
1054 1059 $ diff -u server-metadata-2.txt client-metadata-2.txt
1055 1060 #endif
1056 1061
1057 1062 Clean up after the test
1058 1063
1059 1064 $ rm -f $HG_TEST_STREAM_WALKED_FILE_1
1060 1065 $ rm -f $HG_TEST_STREAM_WALKED_FILE_2
1061 1066 $ rm -f $HG_TEST_STREAM_WALKED_FILE_3
1062 1067