##// END OF EJS Templates
persistent-nodemap: avoid writing nodemap for empty revlog...
marmoute -
r52068:1486d8c6 stable
parent child Browse files
Show More
@@ -1,668 +1,670 b''
1 1 # nodemap.py - nodemap related code and utilities
2 2 #
3 3 # Copyright 2019 Pierre-Yves David <pierre-yves.david@octobus.net>
4 4 # Copyright 2019 George Racinet <georges.racinet@octobus.net>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import re
11 11 import struct
12 12
13 13 from ..node import hex
14 14
15 15 from .. import (
16 16 error,
17 17 requirements,
18 18 util,
19 19 )
20 20 from . import docket as docket_mod
21 21
22 22
class NodeMap(dict):
    """a node -> rev mapping that reports unknown nodes as revlog errors"""

    def __missing__(self, key):
        # plain dict lookup failed: the node is not part of this revlog
        raise error.RevlogError(b'unknown node: %s' % key)
27 27
def test_race_hook_1():
    """hook point for tests

    Lets tests make something happen between the docket read and the data
    read (see `persisted_data`). The production implementation does
    nothing."""
34 34
35 35
def post_stream_cleanup(repo):
    """The stream clone might need to remove some files if the persistent
    nodemap was dropped while stream cloning
    """
    has_revlogv1 = requirements.REVLOGV1_REQUIREMENT in repo.requirements
    keeps_nodemap = requirements.NODEMAP_REQUIREMENT in repo.requirements
    if not has_revlogv1 or keeps_nodemap:
        # nothing to clean up: either no revlog-v1 store, or the nodemap
        # requirement is still active
        return
    unfi = repo.unfiltered()
    delete_nodemap(None, unfi, unfi.changelog)
    delete_nodemap(None, repo, unfi.manifestlog._rootstore._revlog)
47 47
48 48
def persisted_data(revlog):
    """read the nodemap for a revlog from disk

    Returns a ``(docket, data)`` pair, or ``None`` when no usable persistent
    nodemap exists (feature disabled, missing/empty docket, unknown format
    version, missing data file, or data shorter than the docket advertises).
    """
    if revlog._nodemap_file is None:
        # persistent nodemap is not enabled for this revlog
        return None
    pdata = revlog.opener.tryread(revlog._nodemap_file)
    if not pdata:
        return None
    offset = 0
    # docket layout: version byte, fixed header, uid, tip node (see the
    # "Nodemap docket file" section below)
    (version,) = S_VERSION.unpack(pdata[offset : offset + S_VERSION.size])
    if version != ONDISK_VERSION:
        # we only understand a single on-disk format version
        return None
    offset += S_VERSION.size
    headers = S_HEADER.unpack(pdata[offset : offset + S_HEADER.size])
    uid_size, tip_rev, data_length, data_unused, tip_node_size = headers
    offset += S_HEADER.size
    docket = NodeMapDocket(pdata[offset : offset + uid_size])
    offset += uid_size
    docket.tip_rev = tip_rev
    docket.tip_node = pdata[offset : offset + tip_node_size]
    docket.data_length = data_length
    docket.data_unused = data_unused

    filename = _rawdata_filepath(revlog, docket)
    use_mmap = revlog.opener.options.get(b"persistent-nodemap.mmap")

    # let tests interleave actions between the docket read and the data read
    test_race_hook_1()
    try:
        with revlog.opener(filename) as fd:
            if use_mmap:
                try:
                    data = util.buffer(util.mmapread(fd, data_length))
                except ValueError:
                    # raised when the read file is too small
                    data = b''
            else:
                data = fd.read(data_length)
    except FileNotFoundError:
        # data file vanished (e.g. concurrently cleaned up) — treat as no
        # persistent nodemap
        return None
    if len(data) < data_length:
        # truncated data file: ignore it rather than use partial data
        return None
    return docket, data
90 90
91 91
def setup_persistent_nodemap(tr, revlog):
    """Install whatever is needed transaction side to persist a nodemap on disk

    (only actually persist the nodemap if this is relevant for this revlog)
    """
    if revlog._inline:
        return  # inlined revlog are too small for this to be relevant
    if revlog._nodemap_file is None:
        return  # we do not use persistent_nodemap on this revlog

    # we need to happen after the changelog finalization, in that use "cl-"
    callback_id = b"nm-revlog-persistent-nodemap-%s" % revlog._nodemap_file
    if tr.hasfinalize(callback_id):
        return  # no need to register again
    # while the transaction is pending, persist under the '.a' suffixed name
    # (see persist_nodemap)
    tr.addpending(
        callback_id, lambda tr: persist_nodemap(tr, revlog, pending=True)
    )
    # write the final docket when the transaction is finalized
    tr.addfinalize(callback_id, lambda tr: persist_nodemap(tr, revlog))
110 110
111 111
112 112 class _NoTransaction:
113 113 """transaction like object to update the nodemap outside a transaction"""
114 114
115 115 def __init__(self):
116 116 self._postclose = {}
117 117
118 118 def addpostclose(self, callback_id, callback_func):
119 119 self._postclose[callback_id] = callback_func
120 120
121 121 def registertmp(self, *args, **kwargs):
122 122 pass
123 123
124 124 def addbackup(self, *args, **kwargs):
125 125 pass
126 126
127 127 def add(self, *args, **kwargs):
128 128 pass
129 129
130 130 def addabort(self, *args, **kwargs):
131 131 pass
132 132
133 133 def _report(self, *args):
134 134 pass
135 135
136 136
def update_persistent_nodemap(revlog):
    """update the persistent nodemap right now

    To be used for updating the nodemap on disk outside of a normal transaction
    setup (eg, `debugupdatecache`).
    """
    # same relevance checks as setup_persistent_nodemap
    if revlog._inline or revlog._nodemap_file is None:
        return

    fake_tr = _NoTransaction()
    persist_nodemap(fake_tr, revlog)
    # replay the post-close callbacks the persist step registered
    for callback_id in sorted(fake_tr._postclose):
        fake_tr._postclose[callback_id](None)
152 152
153 153
def delete_nodemap(tr, repo, revlog):
    """Delete nodemap data on disk for a given revlog"""
    # matches both the raw data files ("<radix>-<uid>.nd") and the docket
    # ("<radix>.n", plus its ".n.a" pending variant)
    matcher = re.compile(
        br"(^|/)%s(-[0-9a-f]+\.nd|\.n(\.a)?)$" % revlog.radix
    )
    directory = revlog.opener.dirname(revlog._indexfile)
    for entry in revlog.opener.listdir(directory):
        if matcher.match(entry):
            repo.svfs.tryunlink(entry)
162 162
163 163
def persist_nodemap(tr, revlog, pending=False, force=False):
    """Write nodemap data on disk for a given revlog

    - `pending`: write the docket under a `.a` suffixed name (data produced
      by a not-yet-finalized transaction),
    - `force`: derive the nodemap file name even if the feature is not
      enabled for this revlog.
    """
    if len(revlog.index) <= 0:
        # empty revlog: there is nothing to map, avoid creating useless
        # (empty) nodemap files on disk
        return
    if getattr(revlog, 'filteredrevs', ()):
        raise error.ProgrammingError(
            "cannot persist nodemap of a filtered changelog"
        )
    if revlog._nodemap_file is None:
        if force:
            revlog._nodemap_file = get_nodemap_file(revlog)
        else:
            msg = "calling persist nodemap on a revlog without the feature enabled"
            raise error.ProgrammingError(msg)

    # capability probing: C/Rust indexes may support incremental dumps and
    # direct feeding of the freshly written data
    can_incremental = hasattr(revlog.index, "nodemap_data_incremental")
    ondisk_docket = revlog._nodemap_docket
    feed_data = hasattr(revlog.index, "update_nodemap_data")
    use_mmap = revlog.opener.options.get(b"persistent-nodemap.mmap")

    data = None
    # first attempt an incremental update of the data
    if can_incremental and ondisk_docket is not None:
        target_docket = revlog._nodemap_docket.copy()
        (
            src_docket,
            data_changed_count,
            data,
        ) = revlog.index.nodemap_data_incremental()
        new_length = target_docket.data_length + len(data)
        new_unused = target_docket.data_unused + data_changed_count
        if src_docket != target_docket:
            # the on-disk data moved under us: fall back to a full rewrite
            data = None
        elif new_length <= (new_unused * 10):  # under 10% of unused data
            # too much dead data would accumulate: fall back to a full rewrite
            data = None
        else:
            datafile = _rawdata_filepath(revlog, target_docket)
            # EXP-TODO: if this is a cache, this should use a cache vfs, not a
            # store vfs
            tr.add(datafile, target_docket.data_length)
            with revlog.opener(datafile, b'r+') as fd:
                # append the new trie blocks after the existing data
                fd.seek(target_docket.data_length)
                fd.write(data)
                if feed_data:
                    if use_mmap:
                        fd.seek(0)
                        new_data = fd.read(new_length)
                    else:
                        fd.flush()
                        new_data = util.buffer(util.mmapread(fd, new_length))
            target_docket.data_length = new_length
            target_docket.data_unused = new_unused

    if data is None:
        # otherwise fallback to a full new export
        target_docket = NodeMapDocket()
        datafile = _rawdata_filepath(revlog, target_docket)
        if hasattr(revlog.index, "nodemap_data_all"):
            data = revlog.index.nodemap_data_all()
        else:
            # pure-python fallback (see "Nodemap Trie" section below)
            data = persistent_data(revlog.index)
        # EXP-TODO: if this is a cache, this should use a cache vfs, not a
        # store vfs

        tryunlink = revlog.opener.tryunlink

        def abortck(tr):
            # drop the half-written data file on transaction abort
            tryunlink(datafile)

        callback_id = b"delete-%s" % datafile

        # some flavor of the transaction abort does not cleanup new file, it
        # simply empty them.
        tr.addabort(callback_id, abortck)
        with revlog.opener(datafile, b'w+') as fd:
            fd.write(data)
            if feed_data:
                if use_mmap:
                    new_data = data
                else:
                    fd.flush()
                    new_data = util.buffer(util.mmapread(fd, len(data)))
        target_docket.data_length = len(data)
    target_docket.tip_rev = revlog.tiprev()
    target_docket.tip_node = revlog.node(target_docket.tip_rev)
    # EXP-TODO: if this is a cache, this should use a cache vfs, not a
    # store vfs
    file_path = revlog._nodemap_file
    if pending:
        # pending data must stay invisible until the transaction finalizes
        file_path += b'.a'
        tr.registertmp(file_path)
    else:
        tr.addbackup(file_path)

    with revlog.opener(file_path, b'w', atomictemp=True) as fp:
        fp.write(target_docket.serialize())
    revlog._nodemap_docket = target_docket
    if feed_data:
        # hand the freshly persisted bytes back to the native index
        revlog.index.update_nodemap_data(target_docket, new_data)

    # search for old index file in all cases, some older process might have
    # left one behind.
    olds = _other_rawdata_filepath(revlog, target_docket)
    if olds:
        realvfs = getattr(revlog, '_realopener', revlog.opener)

        def cleanup(tr):
            for oldfile in olds:
                realvfs.tryunlink(oldfile)

        callback_id = b"revlog-cleanup-nodemap-%s" % revlog._nodemap_file
        tr.addpostclose(callback_id, cleanup)
274 276
275 277
276 278 ### Nodemap docket file
277 279 #
278 280 # The nodemap data are stored on disk using 2 files:
279 281 #
280 282 # * a raw data files containing a persistent nodemap
281 283 # (see `Nodemap Trie` section)
282 284 #
# * a small "docket" file containing metadata
284 286 #
285 287 # While the nodemap data can be multiple tens of megabytes, the "docket" is
# small, it is easy to update it automatically or to duplicate its content
287 289 # during a transaction.
288 290 #
289 291 # Multiple raw data can exist at the same time (The currently valid one and a
# new one being used by an in progress transaction). To accommodate this, the
291 293 # filename hosting the raw data has a variable parts. The exact filename is
292 294 # specified inside the "docket" file.
293 295 #
294 296 # The docket file contains information to find, qualify and validate the raw
295 297 # data. Its content is currently very light, but it will expand as the on disk
296 298 # nodemap gains the necessary features to be used in production.
297 299
# on-disk docket format version; bumped on incompatible layout changes
ONDISK_VERSION = 1
# single version byte at the start of the docket
S_VERSION = struct.Struct(">B")
# fixed header: uid size, tip rev, data length, unused data, tip node size
S_HEADER = struct.Struct(">BQQQQ")
301 303
302 304
class NodeMapDocket:
    """metadata associated with persistent nodemap data

    The persistent data may come from disk or be on their way to disk.
    """

    def __init__(self, uid=None):
        if uid is None:
            uid = docket_mod.make_uid()
        # a unique identifier for the data file:
        # - When new data are appended, it is preserved.
        # - When a new data file is created, a new identifier is generated.
        self.uid = uid
        # the tipmost revision stored in the data file. This revision and all
        # revision before it are expected to be encoded in the data file.
        self.tip_rev = None
        # the node of that tipmost revision, if it mismatch the current index
        # data the docket is not valid for the current index and should be
        # discarded.
        #
        # note: this check is not perfect as some destructive operation could
        # preserve the same tip_rev + tip_node while altering lower revision.
        # However multiple other caches have the same vulnerability (eg:
        # branchmap cache).
        self.tip_node = None
        # the size (in bytes) of the persisted data to encode the nodemap valid
        # for `tip_rev`.
        # - data file shorter than this are corrupted,
        # - any extra data should be ignored.
        self.data_length = None
        # the amount (in bytes) of "dead" data, still in the data file but no
        # longer used for the nodemap.
        self.data_unused = 0

    def copy(self):
        """return an independent docket carrying the same uid and metadata"""
        new = NodeMapDocket(uid=self.uid)
        new.tip_rev = self.tip_rev
        new.tip_node = self.tip_node
        new.data_length = self.data_length
        new.data_unused = self.data_unused
        return new

    def __cmp__(self, other):
        # three-way comparison on (uid, data_length)
        #
        # NOTE(review): `__cmp__` is a Python 2 protocol and is ignored by
        # Python 3; ordering comparisons on dockets would raise TypeError.
        # Kept as-is.
        if self.uid < other.uid:
            return -1
        if self.uid > other.uid:
            return 1
        elif self.data_length < other.data_length:
            return -1
        elif self.data_length > other.data_length:
            return 1
        return 0

    def __eq__(self, other):
        # dockets match when they point at the same data file (uid) and the
        # same amount of valid data; note this also makes instances
        # unhashable (no matching __hash__ is defined)
        return self.uid == other.uid and self.data_length == other.data_length

    def serialize(self):
        """return serialized bytes for a docket using the passed uid"""
        # layout must mirror what `persisted_data` reads back
        data = []
        data.append(S_VERSION.pack(ONDISK_VERSION))
        headers = (
            len(self.uid),
            self.tip_rev,
            self.data_length,
            self.data_unused,
            len(self.tip_node),
        )
        data.append(S_HEADER.pack(*headers))
        data.append(self.uid)
        data.append(self.tip_node)
        return b''.join(data)
374 376
375 377
376 378 def _rawdata_filepath(revlog, docket):
377 379 """The (vfs relative) nodemap's rawdata file for a given uid"""
378 380 prefix = revlog.radix
379 381 return b"%s-%s.nd" % (prefix, docket.uid)
380 382
381 383
def _other_rawdata_filepath(revlog, docket):
    """list existing nodemap rawdata files that do not belong to `docket`

    These are leftovers from previous dockets and are candidates for
    cleanup."""
    matcher = re.compile(br"(^|/)%s-[0-9a-f]+\.nd$" % revlog.radix)
    current_path = _rawdata_filepath(revlog, docket)
    current_name = revlog.opener.basename(current_path)
    dirpath = revlog.opener.dirname(current_path)
    return [
        entry
        for entry in revlog.opener.listdir(dirpath)
        if matcher.match(entry) and entry != current_name
    ]
393 395
394 396
395 397 ### Nodemap Trie
396 398 #
397 399 # This is a simple reference implementation to compute and persist a nodemap
398 400 # trie. This reference implementation is write only. The python version of this
399 401 # is not expected to be actually used, since it wont provide performance
400 402 # improvement over existing non-persistent C implementation.
401 403 #
402 404 # The nodemap is persisted as Trie using 4bits-address/16-entries block. each
# revision can be addressed using its node shortest prefix.
404 406 #
405 407 # The trie is stored as a sequence of block. Each block contains 16 entries
# (signed 32bit integer, big endian). Each entry can be one of the following:
407 409 #
408 410 # * value >= 0 -> index of sub-block
409 411 # * value == -1 -> no value
410 412 # * value < -1 -> encoded revision: rev = -(value+2)
411 413 #
412 414 # See REV_OFFSET and _transform_rev below.
413 415 #
414 416 # The implementation focus on simplicity, not on performance. A Rust
415 417 # implementation should provide a efficient version of the same binary
416 418 # persistence. This reference python implementation is never meant to be
417 419 # extensively use in production.
418 420
419 421
def persistent_data(index):
    """return the persistent binary form for a nodemap for a given index"""
    # build the full trie, then serialize it block by block
    return _persist_trie(_build_trie(index))
424 426
425 427
def update_persistent_data(index, root, max_idx, last_rev):
    """return the incremental update for persistent nodemap from a given index"""
    changed_block, trie = _update_trie(index, root, last_rev)
    # report the amount of now-dead bytes alongside the new serialization
    new_data = _persist_trie(trie, existing_idx=max_idx)
    return changed_block * S_BLOCK.size, new_data
433 435
434 436
# one trie block: 16 consecutive big-endian signed 32bit entries
S_BLOCK = struct.Struct(">" + ("l" * 16))

# sentinel for an absent entry in a block
NO_ENTRY = -1
# rev 0 need to be -2 because 0 is used by block, -1 is a special value.
REV_OFFSET = 2
440 442
441 443
def _transform_rev(rev):
    """Return the number used to represent the rev in the tree.

    (or retrieve a rev number from such representation)

    Note that this is an involution, a function equal to its inverse (i.e.
    which gives the identity when applied to itself).
    """
    # shift by REV_OFFSET then negate, so rev 0 maps to -2 and every
    # encoded revision is strictly below -1 (NO_ENTRY)
    return -rev - REV_OFFSET
451 453
452 454
def _to_int(hex_digit):
    """turn an hexadecimal digit into a proper integer"""
    # `hex_digit` is a one-character bytes (or str) slice; int() parses both
    return int(hex_digit, 16)
456 458
457 459
class Block(dict):
    """represent a block of the Trie

    contains up to 16 entry indexed from 0 to 15"""

    def __init__(self):
        super().__init__()
        # identifier of this block on disk, or None if it was never persisted
        self.ondisk_id = None

    def __iter__(self):
        # always yield exactly 16 slots, with None for absent entries
        for slot in range(16):
            yield self.get(slot)
470 472
471 473
def _build_trie(index):
    """build a nodemap trie

    The nodemap stores revision number for each unique prefix.

    Each block is a dictionary with keys in `[0, 15]`. Values are either
    another block or a revision number.
    """
    root = Block()
    nb_revs = len(index)
    for rev in range(nb_revs):
        # index[rev][7] is the binary node of that revision
        node_hex = hex(index[rev][7])
        _insert_into_block(index, 0, root, rev, node_hex)
    return root
485 487
486 488
def _update_trie(index, root, last_rev):
    """add the revisions appended after `last_rev` to an existing trie

    Returns a `(changed, root)` pair where `changed` is the number of block
    modifications performed by the insertions.
    """
    touched = 0
    for rev in range(last_rev + 1, len(index)):
        node_hex = hex(index[rev][7])
        touched += _insert_into_block(index, 0, root, rev, node_hex)
    return touched, root
494 496
495 497
def _insert_into_block(index, level, block, current_rev, current_hex):
    """insert a new revision in a block

    index: the index we are adding revision for
    level: the depth of the current block in the trie
    block: the block currently being considered
    current_rev: the revision number we are adding
    current_hex: the hexadecimal representation of the node of that revision

    Returns the number of blocks modified along the insertion path.
    """
    changed = 1
    if block.ondisk_id is not None:
        # this block is being mutated, its on-disk copy is now stale
        block.ondisk_id = None
    hex_digit = _to_int(current_hex[level : level + 1])
    entry = block.get(hex_digit)
    if entry is None:
        # no entry, simply store the revision number
        block[hex_digit] = current_rev
    elif isinstance(entry, dict):
        # need to recurse to an underlying block
        changed += _insert_into_block(
            index, level + 1, entry, current_rev, current_hex
        )
    else:
        # collision with a previously unique prefix, inserting new
        # vertices to fit both entry.
        other_hex = hex(index[entry][7])
        other_rev = entry
        new = Block()
        block[hex_digit] = new
        # NOTE(review): the counts returned by these two recursive calls are
        # deliberately discarded — the newly created blocks never existed on
        # disk, so they do not contribute to the "unused data" accounting.
        _insert_into_block(index, level + 1, new, other_rev, other_hex)
        _insert_into_block(index, level + 1, new, current_rev, current_hex)
    return changed
528 530
529 531
def _persist_trie(root, existing_idx=None):
    """turn a nodemap trie into persistent binary data

    See `_build_trie` for nodemap trie structure"""
    block_map = {}
    # new blocks are numbered after the ones already on disk
    base_idx = 0 if existing_idx is None else existing_idx + 1
    chunks = []
    # children are walked before parents, so every child id is known when
    # its parent block gets serialized
    for node in _walk_trie(root):
        if node.ondisk_id is not None:
            # unchanged block: reuse its on-disk position
            block_map[id(node)] = node.ondisk_id
        else:
            block_map[id(node)] = base_idx + len(chunks)
            chunks.append(_persist_block(node, block_map))
    return b''.join(chunks)
547 549
548 550
549 551 def _walk_trie(block):
550 552 """yield all the block in a trie
551 553
552 554 Children blocks are always yield before their parent block.
553 555 """
554 556 for (__, item) in sorted(block.items()):
555 557 if isinstance(item, dict):
556 558 for sub_block in _walk_trie(item):
557 559 yield sub_block
558 560 yield block
559 561
560 562
def _persist_block(block_node, block_map):
    """produce persistent binary data for a single block

    Children block are assumed to be already persisted and present in
    block_map.
    """
    # Block.__iter__ yields all 16 slots (None for the absent ones)
    values = [_to_value(entry, block_map) for entry in block_node]
    return S_BLOCK.pack(*values)
569 571
570 572
def _to_value(item, block_map):
    """persist any value as an integer"""
    if isinstance(item, dict):
        # sub-block: store its (already assigned) block index
        return block_map[id(item)]
    if item is None:
        return NO_ENTRY
    # revision number, stored in its encoded (negative) form
    return _transform_rev(item)
579 581
580 582
def parse_data(data):
    """parse nodemap data into a nodemap Trie

    Returns a ``(root, max_idx)`` pair: the root block of the trie and the
    index of the last on-disk block (``None`` when `data` is empty).
    """
    if (len(data) % S_BLOCK.size) != 0:
        msg = b"nodemap data size is not a multiple of block size (%d): %d"
        raise error.Abort(msg % (S_BLOCK.size, len(data)))
    if not data:
        return Block(), None
    block_map = {}
    new_blocks = []
    # first pass: materialize one Block per on-disk chunk
    for i in range(0, len(data), S_BLOCK.size):
        block = Block()
        block.ondisk_id = len(block_map)
        block_map[block.ondisk_id] = block
        block_data = data[i : i + S_BLOCK.size]
        values = S_BLOCK.unpack(block_data)
        new_blocks.append((block, values))
    # second pass: resolve entries (>= 0 -> child block, < -1 -> revision)
    for b, values in new_blocks:
        for idx, v in enumerate(values):
            if v == NO_ENTRY:
                continue
            elif v >= 0:
                b[idx] = block_map[v]
            else:
                b[idx] = _transform_rev(v)
    # blocks are serialized children-first (see _walk_trie), so the last
    # block built in the loop above (still bound to `block`) is the root
    return block, i // S_BLOCK.size
606 608
607 609
608 610 # debug utility
609 611
610 612
def check_data(ui, index, data):
    """verify that the provided nodemap data are valid for the given index

    Returns 0 when the nodemap matches the index, 1 otherwise; details about
    each mismatch are written to the ui error channel.
    """
    ret = 0
    ui.status((b"revisions in index:   %d\n") % len(index))
    root, __ = parse_data(data)
    all_revs = set(_all_revisions(root))
    ui.status((b"revisions in nodemap: %d\n") % len(all_revs))
    for r in range(len(index)):
        if r not in all_revs:
            msg = b"  revision missing from nodemap: %d\n" % r
            ui.write_err(msg)
            ret = 1
        else:
            all_revs.remove(r)
            # the node must also resolve back to the same revision
            nm_rev = _find_node(root, hex(index[r][7]))
            if nm_rev is None:
                msg = b"  revision node does not match any entries: %d\n" % r
                ui.write_err(msg)
                ret = 1
            elif nm_rev != r:
                msg = (
                    b"  revision node does not match the expected revision: "
                    b"%d != %d\n" % (r, nm_rev)
                )
                ui.write_err(msg)
                ret = 1

    # whatever remains was in the nodemap but not in the index
    if all_revs:
        for r in sorted(all_revs):
            msg = b"  extra revisions in  nodemap: %d\n" % r
            ui.write_err(msg)
            ret = 1
    return ret
644 646
645 647
def _all_revisions(root):
    """return all revisions stored in a Trie"""
    for block in _walk_trie(root):
        for entry in block:
            # skip empty slots and sub-blocks, keep only revision numbers
            if entry is not None and not isinstance(entry, Block):
                yield entry
653 655
654 656
def _find_node(block, node):
    """find the revision associated with a given node"""
    # walk down the trie, consuming one hex digit per level
    while True:
        entry = block.get(_to_int(node[0:1]))
        if not isinstance(entry, dict):
            # either a revision number or None (no such node)
            return entry
        block = entry
        node = node[1:]
661 663
662 664
def get_nodemap_file(revlog):
    """return the docket path for this revlog, favoring pending data if any"""
    docket_path = revlog.radix + b".n"
    if not revlog._trypending:
        return docket_path
    # a transaction may have left pending data under the '.a' suffix
    pending_path = docket_path + b".a"
    if revlog.opener.exists(pending_path):
        return pending_path
    return docket_path
General Comments 0
You need to be logged in to leave comments. Login now