##// END OF EJS Templates
upgrade: implement partial upgrade for upgrading persistent-nodemap...
Pulkit Goyal -
r47199:98e39f04 default
parent child Browse files
Show More
@@ -1,647 +1,652 b''
1 1 # nodemap.py - nodemap related code and utilities
2 2 #
3 3 # Copyright 2019 Pierre-Yves David <pierre-yves.david@octobus.net>
4 4 # Copyright 2019 George Racinet <georges.racinet@octobus.net>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import os
13 13 import re
14 14 import struct
15 15
16 16 from ..node import hex
17 17
18 18 from .. import (
19 19 error,
20 20 util,
21 21 )
22 22
23 23
class NodeMap(dict):
    """A node -> revision mapping that errors out on unknown nodes.

    Behaves exactly like ``dict`` except that a missing key raises a
    ``RevlogError`` instead of a ``KeyError``.
    """

    def __missing__(self, key):
        raise error.RevlogError(b'unknown node: %s' % key)
27 27
28 28
def persisted_data(revlog):
    """Read the on-disk nodemap for a revlog.

    Returns a ``(docket, data)`` pair, or ``None`` whenever no usable
    persistent nodemap exists: feature disabled, missing or empty docket,
    unknown on-disk version, vanished data file, or truncated data.
    """
    if revlog.nodemap_file is None:
        return None
    raw = revlog.opener.tryread(revlog.nodemap_file)
    if not raw:
        return None

    # decode the docket: version byte, fixed header, uid, tip node
    cursor = 0
    (version,) = S_VERSION.unpack(raw[cursor : cursor + S_VERSION.size])
    if version != ONDISK_VERSION:
        # unknown layout: behave as if there were no nodemap at all
        return None
    cursor += S_VERSION.size
    headers = S_HEADER.unpack(raw[cursor : cursor + S_HEADER.size])
    uid_size, tip_rev, data_length, data_unused, tip_node_size = headers
    cursor += S_HEADER.size
    docket = NodeMapDocket(raw[cursor : cursor + uid_size])
    cursor += uid_size
    docket.tip_rev = tip_rev
    docket.tip_node = raw[cursor : cursor + tip_node_size]
    docket.data_length = data_length
    docket.data_unused = data_unused

    # load the raw trie data referenced by the docket
    filename = _rawdata_filepath(revlog, docket)
    use_mmap = revlog.opener.options.get(b"persistent-nodemap.mmap")
    try:
        with revlog.opener(filename) as fd:
            if use_mmap:
                data = util.buffer(util.mmapread(fd, data_length))
            else:
                data = fd.read(data_length)
    except (IOError, OSError) as e:
        # a missing data file is equivalent to having no nodemap
        if e.errno == errno.ENOENT:
            return None
        raise
    if len(data) < data_length:
        # truncated data file: the docket cannot be trusted
        return None
    return docket, data
67 67
68 68
def setup_persistent_nodemap(tr, revlog):
    """Register the transaction callbacks that persist a nodemap on disk.

    Nothing is registered for revlogs that should not persist a nodemap
    (inline revlogs and revlogs without a nodemap file).
    """
    if revlog._inline or revlog.nodemap_file is None:
        # inline revlogs are too small for this to be relevant, and the
        # feature is simply not enabled for revlogs without a nodemap file
        return

    # must run after the changelog finalization, hence sorting after "cl-"
    callback_id = b"nm-revlog-persistent-nodemap-%s" % revlog.nodemap_file
    if not tr.hasfinalize(callback_id):
        tr.addpending(
            callback_id, lambda tr: persist_nodemap(tr, revlog, pending=True)
        )
        tr.addfinalize(callback_id, lambda tr: persist_nodemap(tr, revlog))
87 87
88 88
89 89 class _NoTransaction(object):
90 90 """transaction like object to update the nodemap outside a transaction"""
91 91
92 92 def __init__(self):
93 93 self._postclose = {}
94 94
95 95 def addpostclose(self, callback_id, callback_func):
96 96 self._postclose[callback_id] = callback_func
97 97
98 98 def registertmp(self, *args, **kwargs):
99 99 pass
100 100
101 101 def addbackup(self, *args, **kwargs):
102 102 pass
103 103
104 104 def add(self, *args, **kwargs):
105 105 pass
106 106
107 107 def addabort(self, *args, **kwargs):
108 108 pass
109 109
110 110 def _report(self, *args):
111 111 pass
112 112
113 113
def update_persistent_nodemap(revlog):
    """Write the persistent nodemap for ``revlog`` to disk right now.

    For callers updating caches outside a normal transaction (eg,
    ``debugupdatecache``): a throw-away transaction-like object collects the
    post-close callbacks, which are then run immediately.
    """
    if revlog._inline or revlog.nodemap_file is None:
        # same eligibility rules as setup_persistent_nodemap
        return

    notr = _NoTransaction()
    persist_nodemap(notr, revlog)
    for callback_id in sorted(notr._postclose):
        notr._postclose[callback_id](None)
129 129
130 130
def persist_nodemap(tr, revlog, pending=False, force=False):
    """Write nodemap data on disk for a given revlog.

    ``pending``: write the docket to the transaction-pending ``.a`` file
    instead of the final one.
    ``force``: enable the persistent nodemap on a revlog that does not have
    one yet by computing its nodemap file name first.

    Raises ``ProgrammingError`` when called on a filtered changelog, or on a
    revlog without the feature enabled (unless ``force`` is set).
    """
    if getattr(revlog, 'filteredrevs', ()):
        raise error.ProgrammingError(
            "cannot persist nodemap of a filtered changelog"
        )
    if revlog.nodemap_file is None:
        if force:
            revlog.nodemap_file = get_nodemap_file(
                revlog.opener, revlog.indexfile
            )
        else:
            msg = "calling persist nodemap on a revlog without the feature enabled"
            raise error.ProgrammingError(msg)

    can_incremental = util.safehasattr(revlog.index, "nodemap_data_incremental")
    ondisk_docket = revlog._nodemap_docket
    feed_data = util.safehasattr(revlog.index, "update_nodemap_data")
    use_mmap = revlog.opener.options.get(b"persistent-nodemap.mmap")

    data = None
    # first attempt an incremental update of the existing data file
    if can_incremental and ondisk_docket is not None:
        target_docket = revlog._nodemap_docket.copy()
        (
            src_docket,
            data_changed_count,
            data,
        ) = revlog.index.nodemap_data_incremental()
        new_length = target_docket.data_length + len(data)
        new_unused = target_docket.data_unused + data_changed_count
        if src_docket != target_docket:
            # the on-disk data moved under us; fall back to a full rewrite
            data = None
        elif new_length <= (new_unused * 10):  # under 10% of unused data
            # too much dead data would accumulate; fall back to a full rewrite
            data = None
        else:
            datafile = _rawdata_filepath(revlog, target_docket)
            # EXP-TODO: if this is a cache, this should use a cache vfs, not a
            # store vfs
            tr.add(datafile, target_docket.data_length)
            with revlog.opener(datafile, b'r+') as fd:
                fd.seek(target_docket.data_length)
                fd.write(data)
                if feed_data:
                    # FIX: these two branches were swapped; mirror the
                    # reading side in `persisted_data`, where use_mmap means
                    # the index is fed an mmap-backed buffer.
                    if use_mmap:
                        fd.flush()
                        new_data = util.buffer(util.mmapread(fd, new_length))
                    else:
                        fd.seek(0)
                        new_data = fd.read(new_length)
            target_docket.data_length = new_length
            target_docket.data_unused = new_unused

    if data is None:
        # otherwise fallback to a full new export
        target_docket = NodeMapDocket()
        datafile = _rawdata_filepath(revlog, target_docket)
        if util.safehasattr(revlog.index, "nodemap_data_all"):
            data = revlog.index.nodemap_data_all()
        else:
            data = persistent_data(revlog.index)
        # EXP-TODO: if this is a cache, this should use a cache vfs, not a
        # store vfs

        tryunlink = revlog.opener.tryunlink

        def abortck(tr):
            tryunlink(datafile)

        callback_id = b"delete-%s" % datafile

        # some flavor of the transaction abort does not cleanup new file, it
        # simply empty them.
        tr.addabort(callback_id, abortck)
        with revlog.opener(datafile, b'w+') as fd:
            fd.write(data)
            if feed_data:
                # FIX: same branch swap as above — only mmap when the
                # "persistent-nodemap.mmap" option asks for it.
                if use_mmap:
                    fd.flush()
                    new_data = util.buffer(util.mmapread(fd, len(data)))
                else:
                    new_data = data
        target_docket.data_length = len(data)
    target_docket.tip_rev = revlog.tiprev()
    target_docket.tip_node = revlog.node(target_docket.tip_rev)
    # EXP-TODO: if this is a cache, this should use a cache vfs, not a
    # store vfs
    file_path = revlog.nodemap_file
    if pending:
        file_path += b'.a'
        tr.registertmp(file_path)
    else:
        tr.addbackup(file_path)

    with revlog.opener(file_path, b'w', atomictemp=True) as fp:
        fp.write(target_docket.serialize())
    revlog._nodemap_docket = target_docket
    if feed_data:
        revlog.index.update_nodemap_data(target_docket, new_data)

    # search for old index file in all cases, some older process might have
    # left one behind.
    olds = _other_rawdata_filepath(revlog, target_docket)
    if olds:
        realvfs = getattr(revlog, '_realopener', revlog.opener)

        def cleanup(tr):
            for oldfile in olds:
                realvfs.tryunlink(oldfile)

        callback_id = b"revlog-cleanup-nodemap-%s" % revlog.nodemap_file
        tr.addpostclose(callback_id, cleanup)
238 243
239 244
240 245 ### Nodemap docket file
241 246 #
242 247 # The nodemap data are stored on disk using 2 files:
243 248 #
244 249 # * a raw data files containing a persistent nodemap
245 250 # (see `Nodemap Trie` section)
246 251 #
247 252 # * a small "docket" file containing medatadata
248 253 #
249 254 # While the nodemap data can be multiple tens of megabytes, the "docket" is
250 255 # small, it is easy to update it automatically or to duplicated its content
251 256 # during a transaction.
252 257 #
253 258 # Multiple raw data can exist at the same time (The currently valid one and a
254 259 # new one beind used by an in progress transaction). To accomodate this, the
255 260 # filename hosting the raw data has a variable parts. The exact filename is
256 261 # specified inside the "docket" file.
257 262 #
258 263 # The docket file contains information to find, qualify and validate the raw
259 264 # data. Its content is currently very light, but it will expand as the on disk
260 265 # nodemap gains the necessary features to be used in production.
261 266
262 267 ONDISK_VERSION = 1
263 268 S_VERSION = struct.Struct(">B")
264 269 S_HEADER = struct.Struct(">BQQQQ")
265 270
266 271 ID_SIZE = 8
267 272
268 273
def _make_uid():
    """return a new unique identifier.

    The identifier is random and composed of ascii characters.

    ID_SIZE random bytes are hex-encoded, so the result is 2 * ID_SIZE
    hexadecimal characters long."""
    return hex(os.urandom(ID_SIZE))
274 279
275 280
class NodeMapDocket(object):
    """metadata associated with persistent nodemap data

    The persistent data may come from disk or be on their way to disk.
    """

    def __init__(self, uid=None):
        if uid is None:
            uid = _make_uid()
        # a unique identifier for the data file:
        # - When new data are appended, it is preserved.
        # - When a new data file is created, a new identifier is generated.
        self.uid = uid
        # the tipmost revision stored in the data file. This revision and all
        # revision before it are expected to be encoded in the data file.
        self.tip_rev = None
        # the node of that tipmost revision; if it mismatches the current
        # index the docket is invalid for that index and must be discarded.
        #
        # note: this check is not perfect, as destructive operations could
        # preserve the same tip_rev + tip_node while altering lower
        # revisions. Other caches (eg: branchmap) share this vulnerability.
        self.tip_node = None
        # the size (in bytes) of the persisted data to encode the nodemap
        # valid for `tip_rev`:
        # - data file shorter than this are corrupted,
        # - any extra data should be ignored.
        self.data_length = None
        # the amount (in bytes) of "dead" data, still in the data file but no
        # longer used for the nodemap.
        self.data_unused = 0

    def copy(self):
        """Return an independent docket carrying the same metadata."""
        other = NodeMapDocket(uid=self.uid)
        other.tip_rev = self.tip_rev
        other.tip_node = self.tip_node
        other.data_length = self.data_length
        other.data_unused = self.data_unused
        return other

    def __cmp__(self, other):
        # python2-only ordering: compare by uid first, then by data_length
        if self.uid < other.uid:
            return -1
        if self.uid > other.uid:
            return 1
        if self.data_length < other.data_length:
            return -1
        if self.data_length > other.data_length:
            return 1
        return 0

    def __eq__(self, other):
        return self.uid == other.uid and self.data_length == other.data_length

    def serialize(self):
        """return serialized bytes for a docket using the passed uid"""
        header = S_HEADER.pack(
            len(self.uid),
            self.tip_rev,
            self.data_length,
            self.data_unused,
            len(self.tip_node),
        )
        return S_VERSION.pack(ONDISK_VERSION) + header + self.uid + self.tip_node
347 352
348 353
349 354 def _rawdata_filepath(revlog, docket):
350 355 """The (vfs relative) nodemap's rawdata file for a given uid"""
351 356 if revlog.nodemap_file.endswith(b'.n.a'):
352 357 prefix = revlog.nodemap_file[:-4]
353 358 else:
354 359 prefix = revlog.nodemap_file[:-2]
355 360 return b"%s-%s.nd" % (prefix, docket.uid)
356 361
357 362
def _other_rawdata_filepath(revlog, docket):
    """List leftover rawdata files whose uid differs from ``docket``'s.

    Such files can be left behind by older processes and are candidates for
    cleanup once the transaction closes.
    """
    prefix = revlog.nodemap_file[:-2]
    pattern = re.compile(br"(^|/)%s-[0-9a-f]+\.nd$" % prefix)
    new_file_path = _rawdata_filepath(revlog, docket)
    new_file_name = revlog.opener.basename(new_file_path)
    dirpath = revlog.opener.dirname(new_file_path)
    return [
        name
        for name in revlog.opener.listdir(dirpath)
        if pattern.match(name) and name != new_file_name
    ]
369 374
370 375
371 376 ### Nodemap Trie
372 377 #
373 378 # This is a simple reference implementation to compute and persist a nodemap
374 379 # trie. This reference implementation is write only. The python version of this
375 380 # is not expected to be actually used, since it wont provide performance
376 381 # improvement over existing non-persistent C implementation.
377 382 #
378 383 # The nodemap is persisted as Trie using 4bits-address/16-entries block. each
379 384 # revision can be adressed using its node shortest prefix.
380 385 #
381 386 # The trie is stored as a sequence of block. Each block contains 16 entries
382 387 # (signed 64bit integer, big endian). Each entry can be one of the following:
383 388 #
384 389 # * value >= 0 -> index of sub-block
385 390 # * value == -1 -> no value
386 391 # * value < -1 -> encoded revision: rev = -(value+2)
387 392 #
388 393 # See REV_OFFSET and _transform_rev below.
389 394 #
390 395 # The implementation focus on simplicity, not on performance. A Rust
391 396 # implementation should provide a efficient version of the same binary
392 397 # persistence. This reference python implementation is never meant to be
393 398 # extensively use in production.
394 399
395 400
def persistent_data(index):
    """return the persistent binary form for a nodemap for a given index"""
    # build the full trie, then serialize it children-first
    return _persist_trie(_build_trie(index))
400 405
401 406
def update_persistent_data(index, root, max_idx, last_rev):
    """Return the incremental update for persistent nodemap from a given index.

    Returns ``(changed_bytes, data)``: the number of bytes invalidated in
    the existing on-disk blocks, and the new serialized blocks to append.
    """
    changed_block, trie = _update_trie(index, root, last_rev)
    new_data = _persist_trie(trie, existing_idx=max_idx)
    return changed_block * S_BLOCK.size, new_data
409 414
410 415
411 416 S_BLOCK = struct.Struct(">" + ("l" * 16))
412 417
413 418 NO_ENTRY = -1
414 419 # rev 0 need to be -2 because 0 is used by block, -1 is a special value.
415 420 REV_OFFSET = 2
416 421
417 422
def _transform_rev(rev):
    """Return the number used to represent the rev in the tree.

    (or retrieve a rev number from such representation)

    Note that this is an involution, a function equal to its inverse (i.e.
    which gives the identity when applied to itself).
    """
    # -(rev + REV_OFFSET), written without the inner parenthesis
    return -rev - REV_OFFSET
427 432
428 433
429 434 def _to_int(hex_digit):
430 435 """turn an hexadecimal digit into a proper integer"""
431 436 return int(hex_digit, 16)
432 437
433 438
class Block(dict):
    """represent a block of the Trie

    contains up to 16 entry indexed from 0 to 15"""

    def __init__(self):
        super(Block, self).__init__()
        # If this block exist on disk, here is its ID
        self.ondisk_id = None

    def __iter__(self):
        # always produce exactly 16 slots, missing entries as None
        for slot in range(16):
            yield self.get(slot)
446 451
447 452
def _build_trie(index):
    """build a nodemap trie

    The nodemap stores revision number for each unique prefix.

    Each block is a dictionary with keys in `[0, 15]`. Values are either
    another block or a revision number.
    """
    root = Block()
    for rev in range(len(index)):
        node_hex = hex(index[rev][7])
        _insert_into_block(index, 0, root, rev, node_hex)
    return root
461 466
462 467
def _update_trie(index, root, last_rev):
    """Insert every revision newer than ``last_rev`` into an existing trie.

    Returns ``(changed, root)`` where ``changed`` counts touched blocks.
    """
    changed = 0
    for rev in range(last_rev + 1, len(index)):
        node_hex = hex(index[rev][7])
        changed += _insert_into_block(index, 0, root, rev, node_hex)
    return changed, root
470 475
471 476
def _insert_into_block(index, level, block, current_rev, current_hex):
    """insert a new revision in a block

    index: the index we are adding revision for
    level: the depth of the current block in the trie
    block: the block currently being considered
    current_rev: the revision number we are adding
    current_hex: the hexadecimal representation of the of that revision

    Returns the number of blocks touched by the insertion.
    """
    changed = 1
    if block.ondisk_id is not None:
        # the block is about to mutate: it no longer matches its disk copy
        block.ondisk_id = None
    hex_digit = _to_int(current_hex[level : level + 1])
    entry = block.get(hex_digit)
    if entry is None:
        # free slot: store the revision number directly
        block[hex_digit] = current_rev
    elif isinstance(entry, dict):
        # descend into the existing sub-block
        changed += _insert_into_block(
            index, level + 1, entry, current_rev, current_hex
        )
    else:
        # the slot held a revision whose prefix was unique so far; push both
        # revisions one level down into a fresh sub-block
        other_rev = entry
        other_hex = hex(index[other_rev][7])
        new_block = Block()
        block[hex_digit] = new_block
        _insert_into_block(index, level + 1, new_block, other_rev, other_hex)
        _insert_into_block(
            index, level + 1, new_block, current_rev, current_hex
        )
    return changed
504 509
505 510
def _persist_trie(root, existing_idx=None):
    """turn a nodemap trie into persistent binary data

    See `_build_trie` for nodemap trie structure"""
    block_map = {}
    base_idx = 0 if existing_idx is None else existing_idx + 1
    chunks = []
    for node in _walk_trie(root):
        if node.ondisk_id is not None:
            # unchanged block already on disk: keep its index, emit nothing
            block_map[id(node)] = node.ondisk_id
        else:
            block_map[id(node)] = len(chunks) + base_idx
            chunks.append(_persist_block(node, block_map))
    return b''.join(chunks)
523 528
524 529
525 530 def _walk_trie(block):
526 531 """yield all the block in a trie
527 532
528 533 Children blocks are always yield before their parent block.
529 534 """
530 535 for (__, item) in sorted(block.items()):
531 536 if isinstance(item, dict):
532 537 for sub_block in _walk_trie(item):
533 538 yield sub_block
534 539 yield block
535 540
536 541
def _persist_block(block_node, block_map):
    """produce persistent binary data for a single block

    Children block are assumed to be already persisted and present in
    block_map.
    """
    values = tuple(_to_value(entry, block_map) for entry in block_node)
    return S_BLOCK.pack(*values)
545 550
546 551
def _to_value(item, block_map):
    """persist any value as an integer"""
    if item is None:
        return NO_ENTRY
    if isinstance(item, dict):
        # sub-blocks are referenced by their on-disk block index
        return block_map[id(item)]
    # revisions are stored in their transformed (negative) form
    return _transform_rev(item)
555 560
556 561
def parse_data(data):
    """Parse nodemap binary data into a nodemap Trie.

    Returns ``(root_block, last_block_index)``; for empty data, an empty
    block and ``None``.
    """
    if (len(data) % S_BLOCK.size) != 0:
        msg = "nodemap data size is not a multiple of block size (%d): %d"
        raise error.Abort(msg % (S_BLOCK.size, len(data)))
    if not data:
        return Block(), None
    # first pass: materialize one (still empty) Block per on-disk block
    block_map = {}
    pending = []
    for offset in range(0, len(data), S_BLOCK.size):
        block = Block()
        block.ondisk_id = len(block_map)
        block_map[block.ondisk_id] = block
        values = S_BLOCK.unpack(data[offset : offset + S_BLOCK.size])
        pending.append((block, values))
    # second pass: wire entries now that every referenced block exists
    for block, values in pending:
        for idx, value in enumerate(values):
            if value == NO_ENTRY:
                continue
            if value >= 0:
                # non-negative values are indices of sub-blocks
                block[idx] = block_map[value]
            else:
                # negative values encode revision numbers
                block[idx] = _transform_rev(value)
    # blocks are written children-first, so the last block is the root
    return block, offset // S_BLOCK.size
582 587
583 588
584 589 # debug utility
585 590
586 591
def check_data(ui, index, data):
    """verify that the provided nodemap data are valid for the given index

    Returns 0 when the nodemap and the index agree, 1 otherwise. A
    diagnostic line is written on ``ui`` for every mismatch found."""
    ret = 0
    ui.status((b"revision in index: %d\n") % len(index))
    root, __ = parse_data(data)
    all_revs = set(_all_revisions(root))
    ui.status((b"revision in nodemap: %d\n") % len(all_revs))
    # every revision of the index must be present in the nodemap...
    for r in range(len(index)):
        if r not in all_revs:
            msg = b" revision missing from nodemap: %d\n" % r
            ui.write_err(msg)
            ret = 1
        else:
            all_revs.remove(r)
            # ...and its node must resolve back to the same revision
            nm_rev = _find_node(root, hex(index[r][7]))
            if nm_rev is None:
                msg = b" revision node does not match any entries: %d\n" % r
                ui.write_err(msg)
                ret = 1
            elif nm_rev != r:
                msg = (
                    b" revision node does not match the expected revision: "
                    b"%d != %d\n" % (r, nm_rev)
                )
                ui.write_err(msg)
                ret = 1

    # anything left in all_revs exists in the nodemap but not in the index
    if all_revs:
        for r in sorted(all_revs):
            msg = b" extra revision in nodemap: %d\n" % r
            ui.write_err(msg)
            ret = 1
    return ret
620 625
621 626
def _all_revisions(root):
    """return all revisions stored in a Trie"""
    for block in _walk_trie(root):
        for entry in block:
            # skip empty slots and references to sub-blocks
            if entry is not None and not isinstance(entry, Block):
                yield entry
629 634
630 635
def _find_node(block, node):
    """find the revision associated with a given node

    Walks the trie one hexadecimal digit at a time (iteratively rather than
    recursively); returns the stored entry, which may be ``None``.
    """
    current = block
    remaining = node
    while True:
        entry = current.get(_to_int(remaining[0:1]))
        if not isinstance(entry, dict):
            return entry
        current = entry
        remaining = remaining[1:]
637 642
638 643
def get_nodemap_file(opener, indexfile):
    """Return the nodemap docket path matching ``indexfile``.

    For a transaction-pending index (``.i.a``), prefer the pending ``.n.a``
    docket when it exists; otherwise use the regular ``.n`` docket.
    """
    if indexfile.endswith(b'.a'):
        pending_path = indexfile[:-4] + b".n.a"
        if opener.exists(pending_path):
            return pending_path
        return indexfile[:-4] + b".n"
    return indexfile[:-2] + b".n"
@@ -1,539 +1,556 b''
1 1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 2 #
3 3 # Copyright (c) 2016-present, Gregory Szorc
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import stat
11 11
12 12 from ..i18n import _
13 13 from ..pycompat import getattr
14 14 from .. import (
15 15 changelog,
16 16 error,
17 17 filelog,
18 18 manifest,
19 19 metadata,
20 20 pycompat,
21 21 requirements,
22 22 revlog,
23 23 scmutil,
24 24 util,
25 25 vfs as vfsmod,
26 26 )
27 from ..revlogutils import nodemap
27 28
28 29
def _revlogfrompath(repo, path):
    """Obtain a revlog from a repo path.

    An instance of the appropriate class is returned.
    """
    if path == b'00changelog.i':
        return changelog.changelog(repo.svfs)
    if path.endswith(b'00manifest.i'):
        # keep the tree prefix for treemanifest revlogs
        mandir = path[: -len(b'00manifest.i')]
        return manifest.manifestrevlog(repo.svfs, tree=mandir)
    # reverse of "/".join(("data", path + ".i")): drop "data/" and ".i"
    return filelog.filelog(repo.svfs, path[5:-2])
42 43
43 44
def _copyrevlog(tr, destrepo, oldrl, unencodedname):
    """copy all relevant files for `oldrl` into `destrepo` store

    Files are copied "as is" without any transformation. The copy is performed
    without extra checks. Callers are responsible for making sure the copied
    content is compatible with format of the destination repository.
    """
    # unwrap any manifestlog/changelog facade down to the raw revlog
    oldrl = getattr(oldrl, '_revlog', oldrl)
    newrl = _revlogfrompath(destrepo, unencodedname)
    newrl = getattr(newrl, '_revlog', newrl)

    oldvfs = oldrl.opener
    newvfs = newrl.opener
    oldindex = oldvfs.join(oldrl.indexfile)
    newindex = newvfs.join(newrl.indexfile)
    olddata = oldvfs.join(oldrl.datafile)
    newdata = newvfs.join(newrl.datafile)

    with newvfs(newrl.indexfile, b'w'):
        pass  # create all the directories

    util.copyfile(oldindex, newindex)
    # inline revlogs have no separate data file
    copydata = oldrl.opener.exists(oldrl.datafile)
    if copydata:
        util.copyfile(olddata, newdata)

    if not (
        unencodedname.endswith(b'00changelog.i')
        or unencodedname.endswith(b'00manifest.i')
    ):
        # filelogs are tracked in the fncache; keep it in sync with the copy
        destrepo.svfs.fncache.add(unencodedname)
        if copydata:
            destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
77 78
78 79
79 80 UPGRADE_CHANGELOG = b"changelog"
80 81 UPGRADE_MANIFEST = b"manifest"
81 82 UPGRADE_FILELOGS = b"all-filelogs"
82 83
83 84 UPGRADE_ALL_REVLOGS = frozenset(
84 85 [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS]
85 86 )
86 87
87 88
def getsidedatacompanion(srcrepo, dstrepo):
    """Return a sidedata companion callback for the upgrade clone, or None.

    The companion depends on how the sidedata-related requirements change
    between the source and destination repositories."""
    sidedatacompanion = None
    removedreqs = srcrepo.requirements - dstrepo.requirements
    addedreqs = dstrepo.requirements - srcrepo.requirements
    if requirements.SIDEDATA_REQUIREMENT in removedreqs:

        def sidedatacompanion(rl, rev):
            # sidedata support was dropped: strip sidedata from any revision
            # that carries it
            rl = getattr(rl, '_revlog', rl)
            if rl.flags(rev) & revlog.REVIDX_SIDEDATA:
                return True, (), {}, 0, 0
            return False, (), {}, 0, 0

    elif requirements.COPIESSDC_REQUIREMENT in addedreqs:
        sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo)
    elif requirements.COPIESSDC_REQUIREMENT in removedreqs:
        sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo)
    return sidedatacompanion
105 106
106 107
def matchrevlog(revlogfilter, entry):
    """check if a revlog is selected for cloning.

    In other words, are there any updates which need to be done on revlog
    or it can be blindly copied.

    The store entry is checked against the passed filter"""
    if entry.endswith(b'00changelog.i'):
        category = UPGRADE_CHANGELOG
    elif entry.endswith(b'00manifest.i'):
        category = UPGRADE_MANIFEST
    else:
        category = UPGRADE_FILELOGS
    return category in revlogfilter
119 120
120 121
def _perform_clone(
    ui,
    dstrepo,
    tr,
    old_revlog,
    unencoded,
    upgrade_op,
    sidedatacompanion,
    oncopiedrevision,
):
    """Clone or copy one revlog into ``dstrepo``; returns the new revlog.

    Revlogs selected by ``upgrade_op.revlogs_to_process`` are cloned
    revision by revision (applying delta and sidedata policies); any other
    revlog is copied file-wise without transformation."""
    newrl = None
    if matchrevlog(upgrade_op.revlogs_to_process, unencoded):
        ui.note(
            _(b'cloning %d revisions from %s\n') % (len(old_revlog), unencoded)
        )
        newrl = _revlogfrompath(dstrepo, unencoded)
        old_revlog.clone(
            tr,
            newrl,
            addrevisioncb=oncopiedrevision,
            deltareuse=upgrade_op.delta_reuse_mode,
            forcedeltabothparents=upgrade_op.force_re_delta_both_parents,
            sidedatacompanion=sidedatacompanion,
        )
    else:
        msg = _(b'blindly copying %s containing %i revisions\n')
        ui.note(msg % (unencoded, len(old_revlog)))
        _copyrevlog(tr, dstrepo, old_revlog, unencoded)

        # re-open the freshly copied files as a proper revlog object
        newrl = _revlogfrompath(dstrepo, unencoded)
    return newrl
153 154
154 155
155 156 def _clonerevlogs(
156 157 ui,
157 158 srcrepo,
158 159 dstrepo,
159 160 tr,
160 161 upgrade_op,
161 162 ):
162 163 """Copy revlogs between 2 repos."""
163 164 revcount = 0
164 165 srcsize = 0
165 166 srcrawsize = 0
166 167 dstsize = 0
167 168 fcount = 0
168 169 frevcount = 0
169 170 fsrcsize = 0
170 171 frawsize = 0
171 172 fdstsize = 0
172 173 mcount = 0
173 174 mrevcount = 0
174 175 msrcsize = 0
175 176 mrawsize = 0
176 177 mdstsize = 0
177 178 crevcount = 0
178 179 csrcsize = 0
179 180 crawsize = 0
180 181 cdstsize = 0
181 182
182 183 alldatafiles = list(srcrepo.store.walk())
183 184 # mapping of data files which needs to be cloned
184 185 # key is unencoded filename
185 186 # value is revlog_object_from_srcrepo
186 187 manifests = {}
187 188 changelogs = {}
188 189 filelogs = {}
189 190
190 191 # Perform a pass to collect metadata. This validates we can open all
191 192 # source files and allows a unified progress bar to be displayed.
192 193 for unencoded, encoded, size in alldatafiles:
193 194 if not unencoded.endswith(b'.i'):
194 195 continue
195 196
196 197 rl = _revlogfrompath(srcrepo, unencoded)
197 198
198 199 info = rl.storageinfo(
199 200 exclusivefiles=True,
200 201 revisionscount=True,
201 202 trackedsize=True,
202 203 storedsize=True,
203 204 )
204 205
205 206 revcount += info[b'revisionscount'] or 0
206 207 datasize = info[b'storedsize'] or 0
207 208 rawsize = info[b'trackedsize'] or 0
208 209
209 210 srcsize += datasize
210 211 srcrawsize += rawsize
211 212
212 213 # This is for the separate progress bars.
213 214 if isinstance(rl, changelog.changelog):
214 215 changelogs[unencoded] = rl
215 216 crevcount += len(rl)
216 217 csrcsize += datasize
217 218 crawsize += rawsize
218 219 elif isinstance(rl, manifest.manifestrevlog):
219 220 manifests[unencoded] = rl
220 221 mcount += 1
221 222 mrevcount += len(rl)
222 223 msrcsize += datasize
223 224 mrawsize += rawsize
224 225 elif isinstance(rl, filelog.filelog):
225 226 filelogs[unencoded] = rl
226 227 fcount += 1
227 228 frevcount += len(rl)
228 229 fsrcsize += datasize
229 230 frawsize += rawsize
230 231 else:
231 232 error.ProgrammingError(b'unknown revlog type')
232 233
233 234 if not revcount:
234 235 return
235 236
236 237 ui.status(
237 238 _(
238 239 b'migrating %d total revisions (%d in filelogs, %d in manifests, '
239 240 b'%d in changelog)\n'
240 241 )
241 242 % (revcount, frevcount, mrevcount, crevcount)
242 243 )
243 244 ui.status(
244 245 _(b'migrating %s in store; %s tracked data\n')
245 246 % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
246 247 )
247 248
248 249 # Used to keep track of progress.
249 250 progress = None
250 251
251 252 def oncopiedrevision(rl, rev, node):
252 253 progress.increment()
253 254
254 255 sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo)
255 256
256 257 # Migrating filelogs
257 258 ui.status(
258 259 _(
259 260 b'migrating %d filelogs containing %d revisions '
260 261 b'(%s in store; %s tracked data)\n'
261 262 )
262 263 % (
263 264 fcount,
264 265 frevcount,
265 266 util.bytecount(fsrcsize),
266 267 util.bytecount(frawsize),
267 268 )
268 269 )
269 270 progress = srcrepo.ui.makeprogress(_(b'file revisions'), total=frevcount)
270 271 for unencoded, oldrl in sorted(filelogs.items()):
271 272 newrl = _perform_clone(
272 273 ui,
273 274 dstrepo,
274 275 tr,
275 276 oldrl,
276 277 unencoded,
277 278 upgrade_op,
278 279 sidedatacompanion,
279 280 oncopiedrevision,
280 281 )
281 282 info = newrl.storageinfo(storedsize=True)
282 283 fdstsize += info[b'storedsize'] or 0
283 284 ui.status(
284 285 _(
285 286 b'finished migrating %d filelog revisions across %d '
286 287 b'filelogs; change in size: %s\n'
287 288 )
288 289 % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
289 290 )
290 291
291 292 # Migrating manifests
292 293 ui.status(
293 294 _(
294 295 b'migrating %d manifests containing %d revisions '
295 296 b'(%s in store; %s tracked data)\n'
296 297 )
297 298 % (
298 299 mcount,
299 300 mrevcount,
300 301 util.bytecount(msrcsize),
301 302 util.bytecount(mrawsize),
302 303 )
303 304 )
304 305 if progress:
305 306 progress.complete()
306 307 progress = srcrepo.ui.makeprogress(
307 308 _(b'manifest revisions'), total=mrevcount
308 309 )
309 310 for unencoded, oldrl in sorted(manifests.items()):
310 311 newrl = _perform_clone(
311 312 ui,
312 313 dstrepo,
313 314 tr,
314 315 oldrl,
315 316 unencoded,
316 317 upgrade_op,
317 318 sidedatacompanion,
318 319 oncopiedrevision,
319 320 )
320 321 info = newrl.storageinfo(storedsize=True)
321 322 mdstsize += info[b'storedsize'] or 0
322 323 ui.status(
323 324 _(
324 325 b'finished migrating %d manifest revisions across %d '
325 326 b'manifests; change in size: %s\n'
326 327 )
327 328 % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
328 329 )
329 330
330 331 # Migrating changelog
331 332 ui.status(
332 333 _(
333 334 b'migrating changelog containing %d revisions '
334 335 b'(%s in store; %s tracked data)\n'
335 336 )
336 337 % (
337 338 crevcount,
338 339 util.bytecount(csrcsize),
339 340 util.bytecount(crawsize),
340 341 )
341 342 )
342 343 if progress:
343 344 progress.complete()
344 345 progress = srcrepo.ui.makeprogress(
345 346 _(b'changelog revisions'), total=crevcount
346 347 )
347 348 for unencoded, oldrl in sorted(changelogs.items()):
348 349 newrl = _perform_clone(
349 350 ui,
350 351 dstrepo,
351 352 tr,
352 353 oldrl,
353 354 unencoded,
354 355 upgrade_op,
355 356 sidedatacompanion,
356 357 oncopiedrevision,
357 358 )
358 359 info = newrl.storageinfo(storedsize=True)
359 360 cdstsize += info[b'storedsize'] or 0
360 361 progress.complete()
361 362 ui.status(
362 363 _(
363 364 b'finished migrating %d changelog revisions; change in size: '
364 365 b'%s\n'
365 366 )
366 367 % (crevcount, util.bytecount(cdstsize - csrcsize))
367 368 )
368 369
369 370 dstsize = fdstsize + mdstsize + cdstsize
370 371 ui.status(
371 372 _(
372 373 b'finished migrating %d total revisions; total change in store '
373 374 b'size: %s\n'
374 375 )
375 376 % (revcount, util.bytecount(dstsize - srcsize))
376 377 )
377 378
378 379
379 380 def _files_to_copy_post_revlog_clone(srcrepo):
380 381 """yields files which should be copied to destination after revlogs
381 382 are cloned"""
382 383 for path, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
383 384 # don't copy revlogs as they are already cloned
384 385 if path.endswith((b'.i', b'.d', b'.n', b'.nd')):
385 386 continue
386 387 # Skip transaction related files.
387 388 if path.startswith(b'undo'):
388 389 continue
389 390 # Only copy regular files.
390 391 if kind != stat.S_IFREG:
391 392 continue
392 393 # Skip other skipped files.
393 394 if path in (b'lock', b'fncache'):
394 395 continue
395 396 # TODO: should we skip cache too?
396 397
397 398 yield path
398 399
399 400
400 401 def _replacestores(currentrepo, upgradedrepo, backupvfs, upgrade_op):
401 402 """Replace the stores after current repository is upgraded
402 403
403 404 Creates a backup of current repository store at backup path
404 405 Replaces upgraded store files in current repo from upgraded one
405 406
406 407 Arguments:
407 408 currentrepo: repo object of current repository
408 409 upgradedrepo: repo object of the upgraded data
409 410 backupvfs: vfs object for the backup path
410 411 upgrade_op: upgrade operation object
411 412 to be used to decide what all is upgraded
412 413 """
413 414 # TODO: don't blindly rename everything in store
414 415 # There can be upgrades where store is not touched at all
415 416 if upgrade_op.backup_store:
416 417 util.rename(currentrepo.spath, backupvfs.join(b'store'))
417 418 else:
418 419 currentrepo.vfs.rmtree(b'store', forcibly=True)
419 420 util.rename(upgradedrepo.spath, currentrepo.spath)
420 421
421 422
422 423 def finishdatamigration(ui, srcrepo, dstrepo, requirements):
423 424 """Hook point for extensions to perform additional actions during upgrade.
424 425
425 426 This function is called after revlogs and store files have been copied but
426 427 before the new store is swapped into the original location.
427 428 """
428 429
429 430
430 431 def upgrade(ui, srcrepo, dstrepo, upgrade_op):
431 432 """Do the low-level work of upgrading a repository.
432 433
433 434 The upgrade is effectively performed as a copy between a source
434 435 repository and a temporary destination repository.
435 436
436 437 The source repository is unmodified for as long as possible so the
437 438 upgrade can abort at any time without causing loss of service for
438 439 readers and without corrupting the source repository.
439 440 """
440 441 assert srcrepo.currentwlock()
441 442 assert dstrepo.currentwlock()
442 443 backuppath = None
443 444 backupvfs = None
444 445
445 446 ui.status(
446 447 _(
447 448 b'(it is safe to interrupt this process any time before '
448 449 b'data migration completes)\n'
449 450 )
450 451 )
451 452
452 453 if upgrade_op.requirements_only:
453 454 ui.status(_(b'upgrading repository requirements\n'))
454 455 scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
456 # if there is only one action and that is persistent nodemap upgrade
457 # directly write the nodemap file and update requirements instead of going
458 # through the whole cloning process
459 elif (
460 len(upgrade_op.upgrade_actions) == 1
461 and b'persistent-nodemap' in upgrade_op._upgrade_actions_names
462 and not upgrade_op.removed_actions
463 ):
464 ui.status(
465 _(b'upgrading repository to use persistent nodemap feature\n')
466 )
467 with srcrepo.transaction(b'upgrade') as tr:
468 unfi = srcrepo.unfiltered()
469 cl = unfi.changelog
470 nodemap.persist_nodemap(tr, cl, force=True)
471 scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
455 472 else:
456 473 with dstrepo.transaction(b'upgrade') as tr:
457 474 _clonerevlogs(
458 475 ui,
459 476 srcrepo,
460 477 dstrepo,
461 478 tr,
462 479 upgrade_op,
463 480 )
464 481
465 482 # Now copy other files in the store directory.
466 483 for p in _files_to_copy_post_revlog_clone(srcrepo):
467 484 srcrepo.ui.status(_(b'copying %s\n') % p)
468 485 src = srcrepo.store.rawvfs.join(p)
469 486 dst = dstrepo.store.rawvfs.join(p)
470 487 util.copyfile(src, dst, copystat=True)
471 488
472 489 finishdatamigration(ui, srcrepo, dstrepo, requirements)
473 490
474 491 ui.status(_(b'data fully upgraded in a temporary repository\n'))
475 492
476 493 if upgrade_op.backup_store:
477 494 backuppath = pycompat.mkdtemp(
478 495 prefix=b'upgradebackup.', dir=srcrepo.path
479 496 )
480 497 backupvfs = vfsmod.vfs(backuppath)
481 498
482 499 # Make a backup of requires file first, as it is the first to be modified.
483 500 util.copyfile(
484 501 srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires')
485 502 )
486 503
487 504 # We install an arbitrary requirement that clients must not support
488 505 # as a mechanism to lock out new clients during the data swap. This is
489 506 # better than allowing a client to continue while the repository is in
490 507 # an inconsistent state.
491 508 ui.status(
492 509 _(
493 510 b'marking source repository as being upgraded; clients will be '
494 511 b'unable to read from repository\n'
495 512 )
496 513 )
497 514 scmutil.writereporequirements(
498 515 srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
499 516 )
500 517
501 518 ui.status(_(b'starting in-place swap of repository data\n'))
502 519 if upgrade_op.backup_store:
503 520 ui.status(
504 521 _(b'replaced files will be backed up at %s\n') % backuppath
505 522 )
506 523
507 524 # Now swap in the new store directory. Doing it as a rename should make
508 525 # the operation nearly instantaneous and atomic (at least in well-behaved
509 526 # environments).
510 527 ui.status(_(b'replacing store...\n'))
511 528 tstart = util.timer()
512 529 _replacestores(srcrepo, dstrepo, backupvfs, upgrade_op)
513 530 elapsed = util.timer() - tstart
514 531 ui.status(
515 532 _(
516 533 b'store replacement complete; repository was inconsistent for '
517 534 b'%0.1fs\n'
518 535 )
519 536 % elapsed
520 537 )
521 538
522 539 # We first write the requirements file. Any new requirements will lock
523 540 # out legacy clients.
524 541 ui.status(
525 542 _(
526 543 b'finalizing requirements file and making repository readable '
527 544 b'again\n'
528 545 )
529 546 )
530 547 scmutil.writereporequirements(srcrepo, upgrade_op.new_requirements)
531 548
532 549 if upgrade_op.backup_store:
533 550 # The lock file from the old store won't be removed because nothing has a
534 551 # reference to its new location. So clean it up manually. Alternatively, we
535 552 # could update srcrepo.svfs and other variables to point to the new
536 553 # location. This is simpler.
537 554 backupvfs.unlink(b'store/lock')
538 555
539 556 return backuppath
@@ -1,768 +1,751 b''
1 1 ===================================
2 2 Test the persistent on-disk nodemap
3 3 ===================================
4 4
5 5 $ cat << EOF >> $HGRCPATH
6 6 > [format]
7 7 > use-persistent-nodemap=yes
8 8 > [devel]
9 9 > persistent-nodemap=yes
10 10 > EOF
11 11
12 12 $ hg init test-repo --config storage.revlog.persistent-nodemap.slow-path=allow
13 13 $ cd test-repo
14 14
15 15 Check handling of the default slow-path value
16 16
17 17 #if no-pure no-rust
18 18
19 19 $ hg id
20 20 abort: accessing `persistent-nodemap` repository without associated fast implementation.
21 21 (check `hg help config.format.use-persistent-nodemap` for details)
22 22 [255]
23 23
24 24 Unlock further check (we are here to test the feature)
25 25
26 26 $ cat << EOF >> $HGRCPATH
27 27 > [storage]
28 28 > # to avoid spamming the test
29 29 > revlog.persistent-nodemap.slow-path=allow
30 30 > EOF
31 31
32 32 #endif
33 33
34 34 #if rust
35 35
36 36 Regression test for a previous bug in Rust/C FFI for the `Revlog_CAPI` capsule:
37 37 in places where `mercurial/cext/revlog.c` function signatures use `Py_ssize_t`
38 38 (64 bits on Linux x86_64), corresponding declarations in `rust/hg-cpython/src/cindex.rs`
39 39 incorrectly used `libc::c_int` (32 bits).
40 40 As a result, -1 passed from Rust for the null revision became 4294967295 in C.
41 41
42 42 $ hg log -r 00000000
43 43 changeset: -1:000000000000
44 44 tag: tip
45 45 user:
46 46 date: Thu Jan 01 00:00:00 1970 +0000
47 47
48 48
49 49 #endif
50 50
51 51
52 52 $ hg debugformat
53 53 format-variant repo
54 54 fncache: yes
55 55 dotencode: yes
56 56 generaldelta: yes
57 57 share-safe: no
58 58 sparserevlog: yes
59 59 sidedata: no
60 60 persistent-nodemap: yes
61 61 copies-sdc: no
62 62 plain-cl-delta: yes
63 63 compression: zlib
64 64 compression-level: default
65 65 $ hg debugbuilddag .+5000 --new-file
66 66
67 67 $ hg debugnodemap --metadata
68 68 uid: ???????????????? (glob)
69 69 tip-rev: 5000
70 70 tip-node: 6b02b8c7b96654c25e86ba69eda198d7e6ad8b3c
71 71 data-length: 121088
72 72 data-unused: 0
73 73 data-unused: 0.000%
74 74 $ f --size .hg/store/00changelog.n
75 75 .hg/store/00changelog.n: size=70
76 76
77 77 Simple lookup works
78 78
79 79 $ ANYNODE=`hg log --template '{node|short}\n' --rev tip`
80 80 $ hg log -r "$ANYNODE" --template '{rev}\n'
81 81 5000
82 82
83 83
84 84 #if rust
85 85
86 86 $ f --sha256 .hg/store/00changelog-*.nd
87 87 .hg/store/00changelog-????????????????.nd: sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd (glob)
88 88
89 89 $ f --sha256 .hg/store/00manifest-*.nd
90 90 .hg/store/00manifest-????????????????.nd: sha256=97117b1c064ea2f86664a124589e47db0e254e8d34739b5c5cc5bf31c9da2b51 (glob)
91 91 $ hg debugnodemap --dump-new | f --sha256 --size
92 92 size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
93 93 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
94 94 size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
95 95 0000: 00 00 00 91 00 00 00 20 00 00 00 bb 00 00 00 e7 |....... ........|
96 96 0010: 00 00 00 66 00 00 00 a1 00 00 01 13 00 00 01 22 |...f..........."|
97 97 0020: 00 00 00 23 00 00 00 fc 00 00 00 ba 00 00 00 5e |...#...........^|
98 98 0030: 00 00 00 df 00 00 01 4e 00 00 01 65 00 00 00 ab |.......N...e....|
99 99 0040: 00 00 00 a9 00 00 00 95 00 00 00 73 00 00 00 38 |...........s...8|
100 100 0050: 00 00 00 cc 00 00 00 92 00 00 00 90 00 00 00 69 |...............i|
101 101 0060: 00 00 00 ec 00 00 00 8d 00 00 01 4f 00 00 00 12 |...........O....|
102 102 0070: 00 00 02 0c 00 00 00 77 00 00 00 9c 00 00 00 8f |.......w........|
103 103 0080: 00 00 00 d5 00 00 00 6b 00 00 00 48 00 00 00 b3 |.......k...H....|
104 104 0090: 00 00 00 e5 00 00 00 b5 00 00 00 8e 00 00 00 ad |................|
105 105 00a0: 00 00 00 7b 00 00 00 7c 00 00 00 0b 00 00 00 2b |...{...|.......+|
106 106 00b0: 00 00 00 c6 00 00 00 1e 00 00 01 08 00 00 00 11 |................|
107 107 00c0: 00 00 01 30 00 00 00 26 00 00 01 9c 00 00 00 35 |...0...&.......5|
108 108 00d0: 00 00 00 b8 00 00 01 31 00 00 00 2c 00 00 00 55 |.......1...,...U|
109 109 00e0: 00 00 00 8a 00 00 00 9a 00 00 00 0c 00 00 01 1e |................|
110 110 00f0: 00 00 00 a4 00 00 00 83 00 00 00 c9 00 00 00 8c |................|
111 111
112 112
113 113 #else
114 114
115 115 $ f --sha256 .hg/store/00changelog-*.nd
116 116 .hg/store/00changelog-????????????????.nd: sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79 (glob)
117 117 $ hg debugnodemap --dump-new | f --sha256 --size
118 118 size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
119 119 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
120 120 size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
121 121 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
122 122 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
123 123 0020: ff ff ff ff ff ff f5 06 ff ff ff ff ff ff f3 e7 |................|
124 124 0030: ff ff ef ca ff ff ff ff ff ff ff ff ff ff ff ff |................|
125 125 0040: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
126 126 0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ed 08 |................|
127 127 0060: ff ff ed 66 ff ff ff ff ff ff ff ff ff ff ff ff |...f............|
128 128 0070: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
129 129 0080: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
130 130 0090: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f6 ed |................|
131 131 00a0: ff ff ff ff ff ff fe 61 ff ff ff ff ff ff ff ff |.......a........|
132 132 00b0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
133 133 00c0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
134 134 00d0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
135 135 00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f1 02 |................|
136 136 00f0: ff ff ff ff ff ff ed 1b ff ff ff ff ff ff ff ff |................|
137 137
138 138 #endif
139 139
140 140 $ hg debugnodemap --check
141 141 revision in index: 5001
142 142 revision in nodemap: 5001
143 143
144 144 add a new commit
145 145
146 146 $ hg up
147 147 5001 files updated, 0 files merged, 0 files removed, 0 files unresolved
148 148 $ echo foo > foo
149 149 $ hg add foo
150 150
151 151
152 152 Check slow-path config value handling
153 153 -------------------------------------
154 154
155 155 #if no-pure no-rust
156 156
157 157 $ hg id --config "storage.revlog.persistent-nodemap.slow-path=invalid-value"
158 158 unknown value for config "storage.revlog.persistent-nodemap.slow-path": "invalid-value"
159 159 falling back to default value: abort
160 160 abort: accessing `persistent-nodemap` repository without associated fast implementation.
161 161 (check `hg help config.format.use-persistent-nodemap` for details)
162 162 [255]
163 163
164 164 $ hg log -r . --config "storage.revlog.persistent-nodemap.slow-path=warn"
165 165 warning: accessing `persistent-nodemap` repository without associated fast implementation.
166 166 (check `hg help config.format.use-persistent-nodemap` for details)
167 167 changeset: 5000:6b02b8c7b966
168 168 tag: tip
169 169 user: debugbuilddag
170 170 date: Thu Jan 01 01:23:20 1970 +0000
171 171 summary: r5000
172 172
173 173 $ hg ci -m 'foo' --config "storage.revlog.persistent-nodemap.slow-path=abort"
174 174 abort: accessing `persistent-nodemap` repository without associated fast implementation.
175 175 (check `hg help config.format.use-persistent-nodemap` for details)
176 176 [255]
177 177
178 178 #else
179 179
180 180 $ hg id --config "storage.revlog.persistent-nodemap.slow-path=invalid-value"
181 181 unknown value for config "storage.revlog.persistent-nodemap.slow-path": "invalid-value"
182 182 falling back to default value: abort
183 183 6b02b8c7b966+ tip
184 184
185 185 #endif
186 186
187 187 $ hg ci -m 'foo'
188 188
189 189 #if no-pure no-rust
190 190 $ hg debugnodemap --metadata
191 191 uid: ???????????????? (glob)
192 192 tip-rev: 5001
193 193 tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
194 194 data-length: 121088
195 195 data-unused: 0
196 196 data-unused: 0.000%
197 197 #else
198 198 $ hg debugnodemap --metadata
199 199 uid: ???????????????? (glob)
200 200 tip-rev: 5001
201 201 tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
202 202 data-length: 121344
203 203 data-unused: 256
204 204 data-unused: 0.211%
205 205 #endif
206 206
207 207 $ f --size .hg/store/00changelog.n
208 208 .hg/store/00changelog.n: size=70
209 209
210 210 (The pure code use the debug code that perform incremental update, the C code reencode from scratch)
211 211
212 212 #if pure
213 213 $ f --sha256 .hg/store/00changelog-*.nd --size
214 214 .hg/store/00changelog-????????????????.nd: size=121344, sha256=cce54c5da5bde3ad72a4938673ed4064c86231b9c64376b082b163fdb20f8f66 (glob)
215 215 #endif
216 216
217 217 #if rust
218 218 $ f --sha256 .hg/store/00changelog-*.nd --size
219 219 .hg/store/00changelog-????????????????.nd: size=121344, sha256=952b042fcf614ceb37b542b1b723e04f18f83efe99bee4e0f5ccd232ef470e58 (glob)
220 220 #endif
221 221
222 222 #if no-pure no-rust
223 223 $ f --sha256 .hg/store/00changelog-*.nd --size
224 224 .hg/store/00changelog-????????????????.nd: size=121088, sha256=df7c06a035b96cb28c7287d349d603baef43240be7736fe34eea419a49702e17 (glob)
225 225 #endif
226 226
227 227 $ hg debugnodemap --check
228 228 revision in index: 5002
229 229 revision in nodemap: 5002
230 230
231 231 Test code path without mmap
232 232 ---------------------------
233 233
234 234 $ echo bar > bar
235 235 $ hg add bar
236 236 $ hg ci -m 'bar' --config storage.revlog.persistent-nodemap.mmap=no
237 237
238 238 $ hg debugnodemap --check --config storage.revlog.persistent-nodemap.mmap=yes
239 239 revision in index: 5003
240 240 revision in nodemap: 5003
241 241 $ hg debugnodemap --check --config storage.revlog.persistent-nodemap.mmap=no
242 242 revision in index: 5003
243 243 revision in nodemap: 5003
244 244
245 245
246 246 #if pure
247 247 $ hg debugnodemap --metadata
248 248 uid: ???????????????? (glob)
249 249 tip-rev: 5002
250 250 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
251 251 data-length: 121600
252 252 data-unused: 512
253 253 data-unused: 0.421%
254 254 $ f --sha256 .hg/store/00changelog-*.nd --size
255 255 .hg/store/00changelog-????????????????.nd: size=121600, sha256=def52503d049ccb823974af313a98a935319ba61f40f3aa06a8be4d35c215054 (glob)
256 256 #endif
257 257 #if rust
258 258 $ hg debugnodemap --metadata
259 259 uid: ???????????????? (glob)
260 260 tip-rev: 5002
261 261 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
262 262 data-length: 121600
263 263 data-unused: 512
264 264 data-unused: 0.421%
265 265 $ f --sha256 .hg/store/00changelog-*.nd --size
266 266 .hg/store/00changelog-????????????????.nd: size=121600, sha256=dacf5b5f1d4585fee7527d0e67cad5b1ba0930e6a0928f650f779aefb04ce3fb (glob)
267 267 #endif
268 268 #if no-pure no-rust
269 269 $ hg debugnodemap --metadata
270 270 uid: ???????????????? (glob)
271 271 tip-rev: 5002
272 272 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
273 273 data-length: 121088
274 274 data-unused: 0
275 275 data-unused: 0.000%
276 276 $ f --sha256 .hg/store/00changelog-*.nd --size
277 277 .hg/store/00changelog-????????????????.nd: size=121088, sha256=59fcede3e3cc587755916ceed29e3c33748cd1aa7d2f91828ac83e7979d935e8 (glob)
278 278 #endif
279 279
280 280 Test force warming the cache
281 281
282 282 $ rm .hg/store/00changelog.n
283 283 $ hg debugnodemap --metadata
284 284 $ hg debugupdatecache
285 285 #if pure
286 286 $ hg debugnodemap --metadata
287 287 uid: ???????????????? (glob)
288 288 tip-rev: 5002
289 289 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
290 290 data-length: 121088
291 291 data-unused: 0
292 292 data-unused: 0.000%
293 293 #else
294 294 $ hg debugnodemap --metadata
295 295 uid: ???????????????? (glob)
296 296 tip-rev: 5002
297 297 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
298 298 data-length: 121088
299 299 data-unused: 0
300 300 data-unused: 0.000%
301 301 #endif
302 302
303 303 Check out of sync nodemap
304 304 =========================
305 305
306 306 First copy old data on the side.
307 307
308 308 $ mkdir ../tmp-copies
309 309 $ cp .hg/store/00changelog-????????????????.nd .hg/store/00changelog.n ../tmp-copies
310 310
311 311 Nodemap lagging behind
312 312 ----------------------
313 313
314 314 make a new commit
315 315
316 316 $ echo bar2 > bar
317 317 $ hg ci -m 'bar2'
318 318 $ NODE=`hg log -r tip -T '{node}\n'`
319 319 $ hg log -r "$NODE" -T '{rev}\n'
320 320 5003
321 321
322 322 If the nodemap is lagging behind, it can catch up fine
323 323
324 324 $ hg debugnodemap --metadata
325 325 uid: ???????????????? (glob)
326 326 tip-rev: 5003
327 327 tip-node: c9329770f979ade2d16912267c38ba5f82fd37b3
328 328 data-length: 121344 (pure !)
329 329 data-length: 121344 (rust !)
330 330 data-length: 121152 (no-rust no-pure !)
331 331 data-unused: 192 (pure !)
332 332 data-unused: 192 (rust !)
333 333 data-unused: 0 (no-rust no-pure !)
334 334 data-unused: 0.158% (pure !)
335 335 data-unused: 0.158% (rust !)
336 336 data-unused: 0.000% (no-rust no-pure !)
337 337 $ cp -f ../tmp-copies/* .hg/store/
338 338 $ hg debugnodemap --metadata
339 339 uid: ???????????????? (glob)
340 340 tip-rev: 5002
341 341 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
342 342 data-length: 121088
343 343 data-unused: 0
344 344 data-unused: 0.000%
345 345 $ hg log -r "$NODE" -T '{rev}\n'
346 346 5003
347 347
348 348 changelog altered
349 349 -----------------
350 350
351 351 If the nodemap is not gated behind a requirements, an unaware client can alter
352 352 the repository so the revlog used to generate the nodemap is not longer
353 353 compatible with the persistent nodemap. We need to detect that.
354 354
355 355 $ hg up "$NODE~5"
356 356 0 files updated, 0 files merged, 4 files removed, 0 files unresolved
357 357 $ echo bar > babar
358 358 $ hg add babar
359 359 $ hg ci -m 'babar'
360 360 created new head
361 361 $ OTHERNODE=`hg log -r tip -T '{node}\n'`
362 362 $ hg log -r "$OTHERNODE" -T '{rev}\n'
363 363 5004
364 364
365 365 $ hg --config extensions.strip= strip --rev "$NODE~1" --no-backup
366 366
367 367 the nodemap should detect the changelog have been tampered with and recover.
368 368
369 369 $ hg debugnodemap --metadata
370 370 uid: ???????????????? (glob)
371 371 tip-rev: 5002
372 372 tip-node: b355ef8adce0949b8bdf6afc72ca853740d65944
373 373 data-length: 121536 (pure !)
374 374 data-length: 121088 (rust !)
375 375 data-length: 121088 (no-pure no-rust !)
376 376 data-unused: 448 (pure !)
377 377 data-unused: 0 (rust !)
378 378 data-unused: 0 (no-pure no-rust !)
379 379 data-unused: 0.000% (rust !)
380 380 data-unused: 0.369% (pure !)
381 381 data-unused: 0.000% (no-pure no-rust !)
382 382
383 383 $ cp -f ../tmp-copies/* .hg/store/
384 384 $ hg debugnodemap --metadata
385 385 uid: ???????????????? (glob)
386 386 tip-rev: 5002
387 387 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
388 388 data-length: 121088
389 389 data-unused: 0
390 390 data-unused: 0.000%
391 391 $ hg log -r "$OTHERNODE" -T '{rev}\n'
392 392 5002
393 393
394 394 missing data file
395 395 -----------------
396 396
397 397 $ UUID=`hg debugnodemap --metadata| grep 'uid:' | \
398 398 > sed 's/uid: //'`
399 399 $ FILE=.hg/store/00changelog-"${UUID}".nd
400 400 $ mv $FILE ../tmp-data-file
401 401 $ cp .hg/store/00changelog.n ../tmp-docket
402 402
403 403 mercurial don't crash
404 404
405 405 $ hg log -r .
406 406 changeset: 5002:b355ef8adce0
407 407 tag: tip
408 408 parent: 4998:d918ad6d18d3
409 409 user: test
410 410 date: Thu Jan 01 00:00:00 1970 +0000
411 411 summary: babar
412 412
413 413 $ hg debugnodemap --metadata
414 414
415 415 $ hg debugupdatecache
416 416 $ hg debugnodemap --metadata
417 417 uid: * (glob)
418 418 tip-rev: 5002
419 419 tip-node: b355ef8adce0949b8bdf6afc72ca853740d65944
420 420 data-length: 121088
421 421 data-unused: 0
422 422 data-unused: 0.000%
423 423 $ mv ../tmp-data-file $FILE
424 424 $ mv ../tmp-docket .hg/store/00changelog.n
425 425
426 426 Check transaction related property
427 427 ==================================
428 428
429 429 An up to date nodemap should be available to shell hooks,
430 430
431 431 $ echo dsljfl > a
432 432 $ hg add a
433 433 $ hg ci -m a
434 434 $ hg debugnodemap --metadata
435 435 uid: ???????????????? (glob)
436 436 tip-rev: 5003
437 437 tip-node: a52c5079765b5865d97b993b303a18740113bbb2
438 438 data-length: 121088
439 439 data-unused: 0
440 440 data-unused: 0.000%
441 441 $ echo babar2 > babar
442 442 $ hg ci -m 'babar2' --config "hooks.pretxnclose.nodemap-test=hg debugnodemap --metadata"
443 443 uid: ???????????????? (glob)
444 444 tip-rev: 5004
445 445 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
446 446 data-length: 121280 (pure !)
447 447 data-length: 121280 (rust !)
448 448 data-length: 121088 (no-pure no-rust !)
449 449 data-unused: 192 (pure !)
450 450 data-unused: 192 (rust !)
451 451 data-unused: 0 (no-pure no-rust !)
452 452 data-unused: 0.158% (pure !)
453 453 data-unused: 0.158% (rust !)
454 454 data-unused: 0.000% (no-pure no-rust !)
455 455 $ hg debugnodemap --metadata
456 456 uid: ???????????????? (glob)
457 457 tip-rev: 5004
458 458 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
459 459 data-length: 121280 (pure !)
460 460 data-length: 121280 (rust !)
461 461 data-length: 121088 (no-pure no-rust !)
462 462 data-unused: 192 (pure !)
463 463 data-unused: 192 (rust !)
464 464 data-unused: 0 (no-pure no-rust !)
465 465 data-unused: 0.158% (pure !)
466 466 data-unused: 0.158% (rust !)
467 467 data-unused: 0.000% (no-pure no-rust !)
468 468
469 469 Another process does not see the pending nodemap content during run.
470 470
471 471 $ PATH=$RUNTESTDIR/testlib/:$PATH
472 472 $ echo qpoasp > a
473 473 $ hg ci -m a2 \
474 474 > --config "hooks.pretxnclose=wait-on-file 20 sync-repo-read sync-txn-pending" \
475 475 > --config "hooks.txnclose=touch sync-txn-close" > output.txt 2>&1 &
476 476
477 477 (read the repository while the commit transaction is pending)
478 478
479 479 $ wait-on-file 20 sync-txn-pending && \
480 480 > hg debugnodemap --metadata && \
481 481 > wait-on-file 20 sync-txn-close sync-repo-read
482 482 uid: ???????????????? (glob)
483 483 tip-rev: 5004
484 484 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
485 485 data-length: 121280 (pure !)
486 486 data-length: 121280 (rust !)
487 487 data-length: 121088 (no-pure no-rust !)
488 488 data-unused: 192 (pure !)
489 489 data-unused: 192 (rust !)
490 490 data-unused: 0 (no-pure no-rust !)
491 491 data-unused: 0.158% (pure !)
492 492 data-unused: 0.158% (rust !)
493 493 data-unused: 0.000% (no-pure no-rust !)
494 494 $ hg debugnodemap --metadata
495 495 uid: ???????????????? (glob)
496 496 tip-rev: 5005
497 497 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
498 498 data-length: 121536 (pure !)
499 499 data-length: 121536 (rust !)
500 500 data-length: 121088 (no-pure no-rust !)
501 501 data-unused: 448 (pure !)
502 502 data-unused: 448 (rust !)
503 503 data-unused: 0 (no-pure no-rust !)
504 504 data-unused: 0.369% (pure !)
505 505 data-unused: 0.369% (rust !)
506 506 data-unused: 0.000% (no-pure no-rust !)
507 507
508 508 $ cat output.txt
509 509
510 510 Check that a failing transaction will properly revert the data
511 511
512 512 $ echo plakfe > a
513 513 $ f --size --sha256 .hg/store/00changelog-*.nd
514 514 .hg/store/00changelog-????????????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
515 515 .hg/store/00changelog-????????????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
516 516 .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
517 517 $ hg ci -m a3 --config "extensions.abort=$RUNTESTDIR/testlib/crash_transaction_late.py"
518 518 transaction abort!
519 519 rollback completed
520 520 abort: This is a late abort
521 521 [255]
522 522 $ hg debugnodemap --metadata
523 523 uid: ???????????????? (glob)
524 524 tip-rev: 5005
525 525 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
526 526 data-length: 121536 (pure !)
527 527 data-length: 121536 (rust !)
528 528 data-length: 121088 (no-pure no-rust !)
529 529 data-unused: 448 (pure !)
530 530 data-unused: 448 (rust !)
531 531 data-unused: 0 (no-pure no-rust !)
532 532 data-unused: 0.369% (pure !)
533 533 data-unused: 0.369% (rust !)
534 534 data-unused: 0.000% (no-pure no-rust !)
535 535 $ f --size --sha256 .hg/store/00changelog-*.nd
536 536 .hg/store/00changelog-????????????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
537 537 .hg/store/00changelog-????????????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
538 538 .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
539 539
540 540 Check that removing content does not confuse the nodemap
541 541 --------------------------------------------------------
542 542
543 543 removing data with rollback
544 544
545 545 $ echo aso > a
546 546 $ hg ci -m a4
547 547 $ hg rollback
548 548 repository tip rolled back to revision 5005 (undo commit)
549 549 working directory now based on revision 5005
550 550 $ hg id -r .
551 551 90d5d3ba2fc4 tip
552 552
553 553 roming data with strip
554 554
555 555 $ echo aso > a
556 556 $ hg ci -m a4
557 557 $ hg --config extensions.strip= strip -r . --no-backup
558 558 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
559 559 $ hg id -r . --traceback
560 560 90d5d3ba2fc4 tip
561 561
562 562 Test upgrade / downgrade
563 563 ========================
564 564
565 565 downgrading
566 566
567 567 $ cat << EOF >> .hg/hgrc
568 568 > [format]
569 569 > use-persistent-nodemap=no
570 570 > EOF
571 571 $ hg debugformat -v
572 572 format-variant repo config default
573 573 fncache: yes yes yes
574 574 dotencode: yes yes yes
575 575 generaldelta: yes yes yes
576 576 share-safe: no no no
577 577 sparserevlog: yes yes yes
578 578 sidedata: no no no
579 579 persistent-nodemap: yes no no
580 580 copies-sdc: no no no
581 581 plain-cl-delta: yes yes yes
582 582 compression: zlib zlib zlib
583 583 compression-level: default default default
584 584 $ hg debugupgraderepo --run --no-backup --quiet
585 585 upgrade will perform the following actions:
586 586
587 587 requirements
588 588 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
589 589 removed: persistent-nodemap
590 590
591 591 processed revlogs:
592 592 - all-filelogs
593 593 - changelog
594 594 - manifest
595 595
596 596 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
597 597 [1]
598 598 $ hg debugnodemap --metadata
599 599
600 600
601 601 upgrading
602 602
603 603 $ cat << EOF >> .hg/hgrc
604 604 > [format]
605 605 > use-persistent-nodemap=yes
606 606 > EOF
607 607 $ hg debugformat -v
608 608 format-variant repo config default
609 609 fncache: yes yes yes
610 610 dotencode: yes yes yes
611 611 generaldelta: yes yes yes
612 612 share-safe: no no no
613 613 sparserevlog: yes yes yes
614 614 sidedata: no no no
615 615 persistent-nodemap: no yes no
616 616 copies-sdc: no no no
617 617 plain-cl-delta: yes yes yes
618 618 compression: zlib zlib zlib
619 619 compression-level: default default default
620 620 $ hg debugupgraderepo --run --no-backup
621 621 upgrade will perform the following actions:
622 622
623 623 requirements
624 624 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
625 625 added: persistent-nodemap
626 626
627 627 persistent-nodemap
628 628 Speedup revision lookup by node id.
629 629
630 630 processed revlogs:
631 631 - all-filelogs
632 632 - changelog
633 633 - manifest
634 634
635 635 beginning upgrade...
636 636 repository locked and read-only
637 637 creating temporary repository to stage upgraded data: $TESTTMP/test-repo/.hg/upgrade.* (glob)
638 638 (it is safe to interrupt this process any time before data migration completes)
639 migrating 15018 total revisions (5006 in filelogs, 5006 in manifests, 5006 in changelog)
640 migrating 1.74 MB in store; 569 MB tracked data
641 migrating 5004 filelogs containing 5006 revisions (346 KB in store; 28.2 KB tracked data)
642 finished migrating 5006 filelog revisions across 5004 filelogs; change in size: 0 bytes
643 migrating 1 manifests containing 5006 revisions (765 KB in store; 569 MB tracked data)
644 finished migrating 5006 manifest revisions across 1 manifests; change in size: 0 bytes
645 migrating changelog containing 5006 revisions (673 KB in store; 363 KB tracked data)
646 finished migrating 5006 changelog revisions; change in size: 0 bytes
647 finished migrating 15018 total revisions; total change in store size: 0 bytes
648 copying phaseroots
649 data fully upgraded in a temporary repository
650 marking source repository as being upgraded; clients will be unable to read from repository
651 starting in-place swap of repository data
652 replacing store...
653 store replacement complete; repository was inconsistent for *s (glob)
654 finalizing requirements file and making repository readable again
639 upgrading repository to use persistent nodemap feature
655 640 removing temporary repository $TESTTMP/test-repo/.hg/upgrade.* (glob)
656 641 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
657 642 00changelog-*.nd (glob)
658 643 00changelog.n
659 00manifest-*.nd (glob)
660 00manifest.n
661 644
662 645 $ hg debugnodemap --metadata
663 646 uid: * (glob)
664 647 tip-rev: 5005
665 648 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
666 649 data-length: 121088
667 650 data-unused: 0
668 651 data-unused: 0.000%
669 652
670 653 Running unrelated upgrade
671 654
672 655 $ hg debugupgraderepo --run --no-backup --quiet --optimize re-delta-all
673 656 upgrade will perform the following actions:
674 657
675 658 requirements
676 659 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store
677 660
678 661 optimisations: re-delta-all
679 662
680 663 processed revlogs:
681 664 - all-filelogs
682 665 - changelog
683 666 - manifest
684 667
685 668 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
686 669 00changelog-*.nd (glob)
687 670 00changelog.n
688 671 00manifest-*.nd (glob)
689 672 00manifest.n
690 673
691 674 $ hg debugnodemap --metadata
692 675 uid: * (glob)
693 676 tip-rev: 5005
694 677 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
695 678 data-length: 121088
696 679 data-unused: 0
697 680 data-unused: 0.000%
698 681
699 682 Persistent nodemap and local/streaming clone
700 683 ============================================
701 684
702 685 $ cd ..
703 686
704 687 standard clone
705 688 --------------
706 689
707 690 The persistent nodemap should exist after a standard clone
708 691
709 692 $ hg clone --pull --quiet -U test-repo standard-clone
710 693 $ ls -1 standard-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
711 694 00changelog-*.nd (glob)
712 695 00changelog.n
713 696 00manifest-*.nd (glob)
714 697 00manifest.n
715 698 $ hg -R standard-clone debugnodemap --metadata
716 699 uid: * (glob)
717 700 tip-rev: 5005
718 701 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
719 702 data-length: 121088
720 703 data-unused: 0
721 704 data-unused: 0.000%
722 705
723 706
724 707 local clone
725 708 ------------
726 709
727 710 The persistent nodemap should exist after a local clone
728 711
729 712 $ hg clone -U test-repo local-clone
730 713 $ ls -1 local-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
731 714 00changelog-*.nd (glob)
732 715 00changelog.n
733 716 00manifest-*.nd (glob)
734 717 00manifest.n
735 718 $ hg -R local-clone debugnodemap --metadata
736 719 uid: * (glob)
737 720 tip-rev: 5005
738 721 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
739 722 data-length: 121088
740 723 data-unused: 0
741 724 data-unused: 0.000%
742 725
743 726 stream clone
744 727 ------------
745 728
746 729 The persistent nodemap should exist after a streaming clone
747 730
748 731 $ hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone --debug | egrep '00(changelog|manifest)'
749 732 adding [s] 00manifest.n (70 bytes)
750 733 adding [s] 00manifest.i (313 KB)
751 734 adding [s] 00manifest.d (452 KB)
752 735 adding [s] 00manifest-*.nd (118 KB) (glob)
753 736 adding [s] 00changelog.n (70 bytes)
754 737 adding [s] 00changelog.i (313 KB)
755 738 adding [s] 00changelog.d (360 KB)
756 739 adding [s] 00changelog-*.nd (118 KB) (glob)
757 740 $ ls -1 stream-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
758 741 00changelog-*.nd (glob)
759 742 00changelog.n
760 743 00manifest-*.nd (glob)
761 744 00manifest.n
762 745 $ hg -R stream-clone debugnodemap --metadata
763 746 uid: * (glob)
764 747 tip-rev: 5005
765 748 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
766 749 data-length: 121088
767 750 data-unused: 0
768 751 data-unused: 0.000%
General Comments 0
You need to be logged in to leave comments. Login now