merge with stable
Gregory Szorc
r39183:b95b48a5 merge default
@@ -1,1889 +1,1946 b''
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance'''
3 3
4 4 # "historical portability" policy of perf.py:
5 5 #
6 6 # We have to:
7 7 # - make perf.py "loadable" with as wide a range of Mercurial versions as possible
8 8 # This doesn't mean that perf commands work correctly with that Mercurial.
9 9 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
10 10 # - make historical perf commands work correctly with as wide a range of
11 11 # Mercurial versions as possible
12 12 #
13 13 # We have to, if possible at reasonable cost:
14 14 # - make recent perf commands for historical features work correctly
15 15 # with early Mercurial
16 16 #
17 17 # We don't have to:
18 18 # - make perf commands for recent features work correctly with early
19 19 # Mercurial
20 20
21 21 from __future__ import absolute_import
22 22 import functools
23 23 import gc
24 24 import os
25 25 import random
26 26 import struct
27 27 import sys
28 28 import threading
29 29 import time
30 30 from mercurial import (
31 31 changegroup,
32 32 cmdutil,
33 33 commands,
34 34 copies,
35 35 error,
36 36 extensions,
37 37 mdiff,
38 38 merge,
39 39 revlog,
40 40 util,
41 41 )
42 42
43 43 # for "historical portability":
44 44 # try to import modules separately (in dict order), and ignore
45 45 # failure, because these aren't available with early Mercurial
46 46 try:
47 47 from mercurial import branchmap # since 2.5 (or bcee63733aad)
48 48 except ImportError:
49 49 pass
50 50 try:
51 51 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
52 52 except ImportError:
53 53 pass
54 54 try:
55 55 from mercurial import registrar # since 3.7 (or 37d50250b696)
56 56 dir(registrar) # forcibly load it
57 57 except ImportError:
58 58 registrar = None
59 59 try:
60 60 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
61 61 except ImportError:
62 62 pass
63 63 try:
64 64 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
65 65 except ImportError:
66 66 pass
67 67 try:
68 68 from mercurial import pycompat
69 69 getargspec = pycompat.getargspec # added to module after 4.5
70 70 except (ImportError, AttributeError):
71 71 import inspect
72 72 getargspec = inspect.getargspec
73 73
74 74 try:
75 75 # 4.7+
76 76 queue = pycompat.queue.Queue
77 77 except (AttributeError, ImportError):
78 78 # <4.7.
79 79 try:
80 80 queue = pycompat.queue
81 81 except (AttributeError, ImportError):
82 82 queue = util.queue
83 83
84 84 try:
85 85 from mercurial import logcmdutil
86 86 makelogtemplater = logcmdutil.maketemplater
87 87 except (AttributeError, ImportError):
88 88 try:
89 89 makelogtemplater = cmdutil.makelogtemplater
90 90 except (AttributeError, ImportError):
91 91 makelogtemplater = None
92 92
93 93 # for "historical portability":
94 94 # define util.safehasattr forcibly, because util.safehasattr has been
95 95 # available since 1.9.3 (or 94b200a11cf7)
96 96 _undefined = object()
97 97 def safehasattr(thing, attr):
98 98 return getattr(thing, attr, _undefined) is not _undefined
99 99 setattr(util, 'safehasattr', safehasattr)
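# for example (illustrative doctest-style sketch): a falsy-but-present
# attribute still counts as present:
#   >>> class C(object):
#   ...     attr = 0
#   >>> safehasattr(C, 'attr')
#   True
#   >>> safehasattr(C, 'missing')
#   False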
100 100
101 101 # for "historical portability":
102 102 # define util.timer forcibly, because util.timer has been available
103 103 # since ae5d60bb70c9
104 104 if safehasattr(time, 'perf_counter'):
105 105 util.timer = time.perf_counter
106 106 elif os.name == 'nt':
107 107 util.timer = time.clock
108 108 else:
109 109 util.timer = time.time
110 110
111 111 # for "historical portability":
112 112 # use locally defined empty option list, if formatteropts isn't
113 113 # available, because commands.formatteropts has been available since
114 114 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
115 115 # available since 2.2 (or ae5f92e154d3)
116 116 formatteropts = getattr(cmdutil, "formatteropts",
117 117 getattr(commands, "formatteropts", []))
118 118
119 119 # for "historical portability":
120 120 # use locally defined option list, if debugrevlogopts isn't available,
121 121 # because commands.debugrevlogopts has been available since 3.7 (or
122 122 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
123 123 # since 1.9 (or a79fea6b3e77).
124 124 revlogopts = getattr(cmdutil, "debugrevlogopts",
125 125 getattr(commands, "debugrevlogopts", [
126 126 ('c', 'changelog', False, ('open changelog')),
127 127 ('m', 'manifest', False, ('open manifest')),
128 128 ('', 'dir', False, ('open directory manifest')),
129 129 ]))
130 130
131 131 cmdtable = {}
132 132
133 133 # for "historical portability":
134 134 # define parsealiases locally, because cmdutil.parsealiases has been
135 135 # available since 1.5 (or 6252852b4332)
136 136 def parsealiases(cmd):
137 137 return cmd.lstrip("^").split("|")
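# for example: parsealiases("^commit|ci") -> ['commit', 'ci']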
138 138
139 139 if safehasattr(registrar, 'command'):
140 140 command = registrar.command(cmdtable)
141 141 elif safehasattr(cmdutil, 'command'):
142 142 command = cmdutil.command(cmdtable)
143 143 if 'norepo' not in getargspec(command).args:
144 144 # for "historical portability":
145 145 # wrap original cmdutil.command, because "norepo" option has
146 146 # been available since 3.1 (or 75a96326cecb)
147 147 _command = command
148 148 def command(name, options=(), synopsis=None, norepo=False):
149 149 if norepo:
150 150 commands.norepo += ' %s' % ' '.join(parsealiases(name))
151 151 return _command(name, list(options), synopsis)
152 152 else:
153 153 # for "historical portability":
154 154 # define "@command" annotation locally, because cmdutil.command
155 155 # has been available since 1.9 (or 2daa5179e73f)
156 156 def command(name, options=(), synopsis=None, norepo=False):
157 157 def decorator(func):
158 158 if synopsis:
159 159 cmdtable[name] = func, list(options), synopsis
160 160 else:
161 161 cmdtable[name] = func, list(options)
162 162 if norepo:
163 163 commands.norepo += ' %s' % ' '.join(parsealiases(name))
164 164 return func
165 165 return decorator
166 166
167 167 try:
168 168 import mercurial.registrar
169 169 import mercurial.configitems
170 170 configtable = {}
171 171 configitem = mercurial.registrar.configitem(configtable)
172 172 configitem('perf', 'presleep',
173 173 default=mercurial.configitems.dynamicdefault,
174 174 )
175 175 configitem('perf', 'stub',
176 176 default=mercurial.configitems.dynamicdefault,
177 177 )
178 178 configitem('perf', 'parentscount',
179 179 default=mercurial.configitems.dynamicdefault,
180 180 )
181 181 configitem('perf', 'all-timing',
182 182 default=mercurial.configitems.dynamicdefault,
183 183 )
184 184 except (ImportError, AttributeError):
185 185 pass
186 186
187 187 def getlen(ui):
188 188 if ui.configbool("perf", "stub", False):
189 189 return lambda x: 1
190 190 return len
191 191
192 192 def gettimer(ui, opts=None):
193 193 """return a timer function and formatter: (timer, formatter)
194 194
195 195 This function exists to gather the creation of formatter in a single
196 196 place instead of duplicating it in all performance commands."""
197 197
198 198 # enforce an idle period before execution to counteract power management
199 199 # experimental config: perf.presleep
200 200 time.sleep(getint(ui, "perf", "presleep", 1))
201 201
202 202 if opts is None:
203 203 opts = {}
204 204 # redirect all to stderr unless buffer api is in use
205 205 if not ui._buffers:
206 206 ui = ui.copy()
207 207 uifout = safeattrsetter(ui, 'fout', ignoremissing=True)
208 208 if uifout:
209 209 # for "historical portability":
210 210 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
211 211 uifout.set(ui.ferr)
212 212
213 213 # get a formatter
214 214 uiformatter = getattr(ui, 'formatter', None)
215 215 if uiformatter:
216 216 fm = uiformatter('perf', opts)
217 217 else:
218 218 # for "historical portability":
219 219 # define formatter locally, because ui.formatter has been
220 220 # available since 2.2 (or ae5f92e154d3)
221 221 from mercurial import node
222 222 class defaultformatter(object):
223 223 """Minimized composition of baseformatter and plainformatter
224 224 """
225 225 def __init__(self, ui, topic, opts):
226 226 self._ui = ui
227 227 if ui.debugflag:
228 228 self.hexfunc = node.hex
229 229 else:
230 230 self.hexfunc = node.short
231 231 def __nonzero__(self):
232 232 return False
233 233 __bool__ = __nonzero__
234 234 def startitem(self):
235 235 pass
236 236 def data(self, **data):
237 237 pass
238 238 def write(self, fields, deftext, *fielddata, **opts):
239 239 self._ui.write(deftext % fielddata, **opts)
240 240 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
241 241 if cond:
242 242 self._ui.write(deftext % fielddata, **opts)
243 243 def plain(self, text, **opts):
244 244 self._ui.write(text, **opts)
245 245 def end(self):
246 246 pass
247 247 fm = defaultformatter(ui, 'perf', opts)
248 248
249 249 # stub function, runs code only once instead of in a loop
250 250 # experimental config: perf.stub
251 251 if ui.configbool("perf", "stub", False):
252 252 return functools.partial(stub_timer, fm), fm
253 253
254 254 # experimental config: perf.all-timing
255 255 displayall = ui.configbool("perf", "all-timing", False)
256 256 return functools.partial(_timer, fm, displayall=displayall), fm
257 257
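# typical use of gettimer() in a perf command (a minimal sketch; `d` stands
# for a zero-argument callable to be measured):
#   timer, fm = gettimer(ui, opts)
#   timer(d, title='my benchmark')
#   fm.end()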
258 258 def stub_timer(fm, func, title=None):
259 259 func()
260 260
261 261 def _timer(fm, func, title=None, displayall=False):
262 262 gc.collect()
263 263 results = []
264 264 begin = util.timer()
265 265 count = 0
266 266 while True:
267 267 ostart = os.times()
268 268 cstart = util.timer()
269 269 r = func()
270 270 cstop = util.timer()
271 271 ostop = os.times()
272 272 count += 1
273 273 a, b = ostart, ostop
274 274 results.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
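# keep sampling until there is enough data: at least 100 runs within the
# first 3 seconds, or at least 3 runs once 10 seconds have elapsed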
275 275 if cstop - begin > 3 and count >= 100:
276 276 break
277 277 if cstop - begin > 10 and count >= 3:
278 278 break
279 279
280 280 fm.startitem()
281 281
282 282 if title:
283 283 fm.write('title', '! %s\n', title)
284 284 if r:
285 285 fm.write('result', '! result: %s\n', r)
286 286 def display(role, entry):
287 287 prefix = ''
288 288 if role != 'best':
289 289 prefix = '%s.' % role
290 290 fm.plain('!')
291 291 fm.write(prefix + 'wall', ' wall %f', entry[0])
292 292 fm.write(prefix + 'comb', ' comb %f', entry[1] + entry[2])
293 293 fm.write(prefix + 'user', ' user %f', entry[1])
294 294 fm.write(prefix + 'sys', ' sys %f', entry[2])
295 295 fm.write(prefix + 'count', ' (%s of %d)', role, count)
296 296 fm.plain('\n')
297 297 results.sort()
298 298 min_val = results[0]
299 299 display('best', min_val)
300 300 if displayall:
301 301 max_val = results[-1]
302 302 display('max', max_val)
303 303 avg = tuple([sum(x) / count for x in zip(*results)])
304 304 display('avg', avg)
305 305 median = results[len(results) // 2]
306 306 display('median', median)
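# example of a resulting output line (illustrative numbers; the max/avg/
# median lines appear only with perf.all-timing):
#   ! wall 0.004013 comb 0.010000 user 0.010000 sys 0.000000 (best of 624)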
307 307
308 308 # utilities for historical portability
309 309
310 310 def getint(ui, section, name, default):
311 311 # for "historical portability":
312 312 # ui.configint has been available since 1.9 (or fa2b596db182)
313 313 v = ui.config(section, name, None)
314 314 if v is None:
315 315 return default
316 316 try:
317 317 return int(v)
318 318 except ValueError:
319 319 raise error.ConfigError(("%s.%s is not an integer ('%s')")
320 320 % (section, name, v))
321 321
322 322 def safeattrsetter(obj, name, ignoremissing=False):
323 323 """Ensure that 'obj' has 'name' attribute before subsequent setattr
324 324
325 325 This function is aborted, if 'obj' doesn't have 'name' attribute
326 326 at runtime. This avoids overlooking removal of an attribute, which
327 327 breaks assumption of performance measurement, in the future.
328 328
329 329 This function returns the object to (1) assign a new value, and
330 330 (2) restore an original value to the attribute.
331 331
332 332 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
333 333 abortion, and this function returns None. This is useful to
334 334 examine an attribute, which isn't ensured in all Mercurial
335 335 versions.
336 336 """
337 337 if not util.safehasattr(obj, name):
338 338 if ignoremissing:
339 339 return None
340 340 raise error.Abort(("missing attribute %s of %s might break assumption"
341 341 " of performance measurement") % (name, obj))
342 342
343 343 origvalue = getattr(obj, name)
344 344 class attrutil(object):
345 345 def set(self, newvalue):
346 346 setattr(obj, name, newvalue)
347 347 def restore(self):
348 348 setattr(obj, name, origvalue)
349 349
350 350 return attrutil()
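# example use (mirroring gettimer() above): temporarily redirect ui.fout,
# then restore it when done:
#   uifout = safeattrsetter(ui, 'fout', ignoremissing=True)
#   if uifout:
#       uifout.set(ui.ferr)
#   # ... measure ...
#   uifout.restore()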
351 351
352 352 # utilities to examine each internal API changes
353 353
354 354 def getbranchmapsubsettable():
355 355 # for "historical portability":
356 356 # subsettable is defined in:
357 357 # - branchmap since 2.9 (or 175c6fd8cacc)
358 358 # - repoview since 2.5 (or 59a9f18d4587)
359 359 for mod in (branchmap, repoview):
360 360 subsettable = getattr(mod, 'subsettable', None)
361 361 if subsettable:
362 362 return subsettable
363 363
364 364 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
365 365 # branchmap and repoview modules exist, but subsettable attribute
366 366 # doesn't)
367 367 raise error.Abort(("perfbranchmap not available with this Mercurial"),
368 368 hint="use 2.5 or later")
369 369
370 370 def getsvfs(repo):
371 371 """Return appropriate object to access files under .hg/store
372 372 """
373 373 # for "historical portability":
374 374 # repo.svfs has been available since 2.3 (or 7034365089bf)
375 375 svfs = getattr(repo, 'svfs', None)
376 376 if svfs:
377 377 return svfs
378 378 else:
379 379 return getattr(repo, 'sopener')
380 380
381 381 def getvfs(repo):
382 382 """Return appropriate object to access files under .hg
383 383 """
384 384 # for "historical portability":
385 385 # repo.vfs has been available since 2.3 (or 7034365089bf)
386 386 vfs = getattr(repo, 'vfs', None)
387 387 if vfs:
388 388 return vfs
389 389 else:
390 390 return getattr(repo, 'opener')
391 391
392 392 def repocleartagscachefunc(repo):
393 393 """Return the function to clear tags cache according to repo internal API
394 394 """
395 395 if util.safehasattr(repo, '_tagscache'): # since 2.0 (or 9dca7653b525)
396 396 # in this case, setattr(repo, '_tagscache', None) or so isn't
397 397 # correct way to clear tags cache, because existing code paths
398 398 # expect _tagscache to be a structured object.
399 399 def clearcache():
400 400 # _tagscache has been filteredpropertycache since 2.5 (or
401 401 # 98c867ac1330), and delattr() can't work in such case
402 402 if '_tagscache' in vars(repo):
403 403 del repo.__dict__['_tagscache']
404 404 return clearcache
405 405
406 406 repotags = safeattrsetter(repo, '_tags', ignoremissing=True)
407 407 if repotags: # since 1.4 (or 5614a628d173)
408 408 return lambda: repotags.set(None)
409 409
410 410 repotagscache = safeattrsetter(repo, 'tagscache', ignoremissing=True)
411 411 if repotagscache: # since 0.6 (or d7df759d0e97)
412 412 return lambda: repotagscache.set(None)
413 413
414 414 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
415 415 # this point, but it isn't so problematic, because:
416 416 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
417 417 # in perftags() causes failure soon
418 418 # - perf.py itself has been available since 1.1 (or eb240755386d)
419 419 raise error.Abort(("tags API of this hg command is unknown"))
420 420
421 421 # utilities to clear cache
422 422
423 423 def clearfilecache(repo, attrname):
424 424 unfi = repo.unfiltered()
425 425 if attrname in vars(unfi):
426 426 delattr(unfi, attrname)
427 427 unfi._filecache.pop(attrname, None)
428 428
429 429 # perf commands
430 430
431 431 @command('perfwalk', formatteropts)
432 432 def perfwalk(ui, repo, *pats, **opts):
433 433 timer, fm = gettimer(ui, opts)
434 434 m = scmutil.match(repo[None], pats, {})
435 435 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
436 436 ignored=False))))
437 437 fm.end()
438 438
439 439 @command('perfannotate', formatteropts)
440 440 def perfannotate(ui, repo, f, **opts):
441 441 timer, fm = gettimer(ui, opts)
442 442 fc = repo['.'][f]
443 443 timer(lambda: len(fc.annotate(True)))
444 444 fm.end()
445 445
446 446 @command('perfstatus',
447 447 [('u', 'unknown', False,
448 448 'ask status to look for unknown files')] + formatteropts)
449 449 def perfstatus(ui, repo, **opts):
450 450 #m = match.always(repo.root, repo.getcwd())
451 451 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
452 452 # False))))
453 453 timer, fm = gettimer(ui, opts)
454 454 timer(lambda: sum(map(len, repo.status(unknown=opts['unknown']))))
455 455 fm.end()
456 456
457 457 @command('perfaddremove', formatteropts)
458 458 def perfaddremove(ui, repo, **opts):
459 459 timer, fm = gettimer(ui, opts)
460 460 try:
461 461 oldquiet = repo.ui.quiet
462 462 repo.ui.quiet = True
463 463 matcher = scmutil.match(repo[None])
464 464 opts['dry_run'] = True
465 465 timer(lambda: scmutil.addremove(repo, matcher, "", opts))
466 466 finally:
467 467 repo.ui.quiet = oldquiet
468 468 fm.end()
469 469
470 470 def clearcaches(cl):
471 471 # behave somewhat consistently across internal API changes
472 472 if util.safehasattr(cl, 'clearcaches'):
473 473 cl.clearcaches()
474 474 elif util.safehasattr(cl, '_nodecache'):
475 475 from mercurial.node import nullid, nullrev
476 476 cl._nodecache = {nullid: nullrev}
477 477 cl._nodepos = None
478 478
479 479 @command('perfheads', formatteropts)
480 480 def perfheads(ui, repo, **opts):
481 481 timer, fm = gettimer(ui, opts)
482 482 cl = repo.changelog
483 483 def d():
484 484 len(cl.headrevs())
485 485 clearcaches(cl)
486 486 timer(d)
487 487 fm.end()
488 488
489 489 @command('perftags', formatteropts)
490 490 def perftags(ui, repo, **opts):
491 491 import mercurial.changelog
492 492 import mercurial.manifest
493 493 timer, fm = gettimer(ui, opts)
494 494 svfs = getsvfs(repo)
495 495 repocleartagscache = repocleartagscachefunc(repo)
496 496 def t():
497 497 repo.changelog = mercurial.changelog.changelog(svfs)
498 498 repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo)
499 499 repocleartagscache()
500 500 return len(repo.tags())
501 501 timer(t)
502 502 fm.end()
503 503
504 504 @command('perfancestors', formatteropts)
505 505 def perfancestors(ui, repo, **opts):
506 506 timer, fm = gettimer(ui, opts)
507 507 heads = repo.changelog.headrevs()
508 508 def d():
509 509 for a in repo.changelog.ancestors(heads):
510 510 pass
511 511 timer(d)
512 512 fm.end()
513 513
514 514 @command('perfancestorset', formatteropts)
515 515 def perfancestorset(ui, repo, revset, **opts):
516 516 timer, fm = gettimer(ui, opts)
517 517 revs = repo.revs(revset)
518 518 heads = repo.changelog.headrevs()
519 519 def d():
520 520 s = repo.changelog.ancestors(heads)
521 521 for rev in revs:
522 522 rev in s
523 523 timer(d)
524 524 fm.end()
525 525
526 526 @command('perfbookmarks', formatteropts)
527 527 def perfbookmarks(ui, repo, **opts):
528 528 """benchmark parsing bookmarks from disk to memory"""
529 529 timer, fm = gettimer(ui, opts)
530 530 def d():
531 531 clearfilecache(repo, '_bookmarks')
532 532 repo._bookmarks
533 533 timer(d)
534 534 fm.end()
535 535
536 536 @command('perfbundleread', formatteropts, 'BUNDLE')
537 537 def perfbundleread(ui, repo, bundlepath, **opts):
538 538 """Benchmark reading of bundle files.
539 539
540 540 This command is meant to isolate the I/O part of bundle reading as
541 541 much as possible.
542 542 """
543 543 from mercurial import (
544 544 bundle2,
545 545 exchange,
546 546 streamclone,
547 547 )
548 548
549 549 def makebench(fn):
550 550 def run():
551 551 with open(bundlepath, 'rb') as fh:
552 552 bundle = exchange.readbundle(ui, fh, bundlepath)
553 553 fn(bundle)
554 554
555 555 return run
556 556
557 557 def makereadnbytes(size):
558 558 def run():
559 559 with open(bundlepath, 'rb') as fh:
560 560 bundle = exchange.readbundle(ui, fh, bundlepath)
561 561 while bundle.read(size):
562 562 pass
563 563
564 564 return run
565 565
566 566 def makestdioread(size):
567 567 def run():
568 568 with open(bundlepath, 'rb') as fh:
569 569 while fh.read(size):
570 570 pass
571 571
572 572 return run
573 573
574 574 # bundle1
575 575
576 576 def deltaiter(bundle):
577 577 for delta in bundle.deltaiter():
578 578 pass
579 579
580 580 def iterchunks(bundle):
581 581 for chunk in bundle.getchunks():
582 582 pass
583 583
584 584 # bundle2
585 585
586 586 def forwardchunks(bundle):
587 587 for chunk in bundle._forwardchunks():
588 588 pass
589 589
590 590 def iterparts(bundle):
591 591 for part in bundle.iterparts():
592 592 pass
593 593
594 594 def iterpartsseekable(bundle):
595 595 for part in bundle.iterparts(seekable=True):
596 596 pass
597 597
598 598 def seek(bundle):
599 599 for part in bundle.iterparts(seekable=True):
600 600 part.seek(0, os.SEEK_END)
601 601
602 602 def makepartreadnbytes(size):
603 603 def run():
604 604 with open(bundlepath, 'rb') as fh:
605 605 bundle = exchange.readbundle(ui, fh, bundlepath)
606 606 for part in bundle.iterparts():
607 607 while part.read(size):
608 608 pass
609 609
610 610 return run
611 611
612 612 benches = [
613 613 (makestdioread(8192), 'read(8k)'),
614 614 (makestdioread(16384), 'read(16k)'),
615 615 (makestdioread(32768), 'read(32k)'),
616 616 (makestdioread(131072), 'read(128k)'),
617 617 ]
618 618
619 619 with open(bundlepath, 'rb') as fh:
620 620 bundle = exchange.readbundle(ui, fh, bundlepath)
621 621
622 622 if isinstance(bundle, changegroup.cg1unpacker):
623 623 benches.extend([
624 624 (makebench(deltaiter), 'cg1 deltaiter()'),
625 625 (makebench(iterchunks), 'cg1 getchunks()'),
626 626 (makereadnbytes(8192), 'cg1 read(8k)'),
627 627 (makereadnbytes(16384), 'cg1 read(16k)'),
628 628 (makereadnbytes(32768), 'cg1 read(32k)'),
629 629 (makereadnbytes(131072), 'cg1 read(128k)'),
630 630 ])
631 631 elif isinstance(bundle, bundle2.unbundle20):
632 632 benches.extend([
633 633 (makebench(forwardchunks), 'bundle2 forwardchunks()'),
634 634 (makebench(iterparts), 'bundle2 iterparts()'),
635 635 (makebench(iterpartsseekable), 'bundle2 iterparts() seekable'),
636 636 (makebench(seek), 'bundle2 part seek()'),
637 637 (makepartreadnbytes(8192), 'bundle2 part read(8k)'),
638 638 (makepartreadnbytes(16384), 'bundle2 part read(16k)'),
639 639 (makepartreadnbytes(32768), 'bundle2 part read(32k)'),
640 640 (makepartreadnbytes(131072), 'bundle2 part read(128k)'),
641 641 ])
642 642 elif isinstance(bundle, streamclone.streamcloneapplier):
643 643 raise error.Abort('stream clone bundles not supported')
644 644 else:
645 645 raise error.Abort('unhandled bundle type: %s' % type(bundle))
646 646
647 647 for fn, title in benches:
648 648 timer, fm = gettimer(ui, opts)
649 649 timer(fn, title=title)
650 650 fm.end()
651 651
652 652 @command('perfchangegroupchangelog', formatteropts +
653 653 [('', 'version', '02', 'changegroup version'),
654 654 ('r', 'rev', '', 'revisions to add to changegroup')])
655 655 def perfchangegroupchangelog(ui, repo, version='02', rev=None, **opts):
656 656 """Benchmark producing a changelog group for a changegroup.
657 657
658 658 This measures the time spent processing the changelog during a
659 659 bundle operation. This occurs during `hg bundle` and on a server
660 660 processing a `getbundle` wire protocol request (which handles
661 661 clones and pulls).
662 662
663 663 By default, all revisions are added to the changegroup.
664 664 """
665 665 cl = repo.changelog
666 666 nodes = [cl.lookup(r) for r in repo.revs(rev or 'all()')]
667 667 bundler = changegroup.getbundler(version, repo)
668 668
669 669 def d():
670 670 state, chunks = bundler._generatechangelog(cl, nodes)
671 671 for chunk in chunks:
672 672 pass
673 673
674 674 timer, fm = gettimer(ui, opts)
675 675
676 676 # Terminal printing can interfere with timing. So disable it.
677 677 with ui.configoverride({('progress', 'disable'): True}):
678 678 timer(d)
679 679
680 680 fm.end()
681 681
682 682 @command('perfdirs', formatteropts)
683 683 def perfdirs(ui, repo, **opts):
684 684 timer, fm = gettimer(ui, opts)
685 685 dirstate = repo.dirstate
686 686 'a' in dirstate
687 687 def d():
688 688 dirstate.hasdir('a')
689 689 del dirstate._map._dirs
690 690 timer(d)
691 691 fm.end()
692 692
693 693 @command('perfdirstate', formatteropts)
694 694 def perfdirstate(ui, repo, **opts):
695 695 timer, fm = gettimer(ui, opts)
696 696 "a" in repo.dirstate
697 697 def d():
698 698 repo.dirstate.invalidate()
699 699 "a" in repo.dirstate
700 700 timer(d)
701 701 fm.end()
702 702
703 703 @command('perfdirstatedirs', formatteropts)
704 704 def perfdirstatedirs(ui, repo, **opts):
705 705 timer, fm = gettimer(ui, opts)
706 706 "a" in repo.dirstate
707 707 def d():
708 708 repo.dirstate.hasdir("a")
709 709 del repo.dirstate._map._dirs
710 710 timer(d)
711 711 fm.end()
712 712
713 713 @command('perfdirstatefoldmap', formatteropts)
714 714 def perfdirstatefoldmap(ui, repo, **opts):
715 715 timer, fm = gettimer(ui, opts)
716 716 dirstate = repo.dirstate
717 717 'a' in dirstate
718 718 def d():
719 719 dirstate._map.filefoldmap.get('a')
720 720 del dirstate._map.filefoldmap
721 721 timer(d)
722 722 fm.end()
723 723
724 724 @command('perfdirfoldmap', formatteropts)
725 725 def perfdirfoldmap(ui, repo, **opts):
726 726 timer, fm = gettimer(ui, opts)
727 727 dirstate = repo.dirstate
728 728 'a' in dirstate
729 729 def d():
730 730 dirstate._map.dirfoldmap.get('a')
731 731 del dirstate._map.dirfoldmap
732 732 del dirstate._map._dirs
733 733 timer(d)
734 734 fm.end()
735 735
736 736 @command('perfdirstatewrite', formatteropts)
737 737 def perfdirstatewrite(ui, repo, **opts):
738 738 timer, fm = gettimer(ui, opts)
739 739 ds = repo.dirstate
740 740 "a" in ds
741 741 def d():
742 742 ds._dirty = True
743 743 ds.write(repo.currenttransaction())
744 744 timer(d)
745 745 fm.end()
746 746
747 747 @command('perfmergecalculate',
748 748 [('r', 'rev', '.', 'rev to merge against')] + formatteropts)
749 749 def perfmergecalculate(ui, repo, rev, **opts):
750 750 timer, fm = gettimer(ui, opts)
751 751 wctx = repo[None]
752 752 rctx = scmutil.revsingle(repo, rev, rev)
753 753 ancestor = wctx.ancestor(rctx)
754 754 # we don't want working dir files to be stat'd in the benchmark, so prime
755 755 # that cache
756 756 wctx.dirty()
757 757 def d():
758 758 # acceptremote is True because we don't want prompts in the middle of
759 759 # our benchmark
760 760 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
761 761 acceptremote=True, followcopies=True)
762 762 timer(d)
763 763 fm.end()
764 764
765 765 @command('perfpathcopies', [], "REV REV")
766 766 def perfpathcopies(ui, repo, rev1, rev2, **opts):
767 767 timer, fm = gettimer(ui, opts)
768 768 ctx1 = scmutil.revsingle(repo, rev1, rev1)
769 769 ctx2 = scmutil.revsingle(repo, rev2, rev2)
770 770 def d():
771 771 copies.pathcopies(ctx1, ctx2)
772 772 timer(d)
773 773 fm.end()
774 774
775 775 @command('perfphases',
776 776 [('', 'full', False, 'include file reading time too'),
777 777 ], "")
778 778 def perfphases(ui, repo, **opts):
779 779 """benchmark phasesets computation"""
780 780 timer, fm = gettimer(ui, opts)
781 781 _phases = repo._phasecache
782 782 full = opts.get('full')
783 783 def d():
784 784 phases = _phases
785 785 if full:
786 786 clearfilecache(repo, '_phasecache')
787 787 phases = repo._phasecache
788 788 phases.invalidate()
789 789 phases.loadphaserevs(repo)
790 790 timer(d)
791 791 fm.end()
792 792
793 @command('perfphasesremote',
794 [], "[DEST]")
795 def perfphasesremote(ui, repo, dest=None, **opts):
796 """benchmark time needed to analyse phases of the remote server"""
797 from mercurial.node import (
798 bin,
799 )
800 from mercurial import (
801 exchange,
802 hg,
803 phases,
804 )
805 timer, fm = gettimer(ui, opts)
806
807 path = ui.paths.getpath(dest, default=('default-push', 'default'))
808 if not path:
809 raise error.Abort(('default repository not configured!'),
810 hint=("see 'hg help config.paths'"))
811 dest = path.pushloc or path.loc
812 branches = (path.branch, opts.get('branch') or [])
813 ui.status(('analysing phase of %s\n') % util.hidepassword(dest))
814 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
815 other = hg.peer(repo, opts, dest)
816
817 # easier to perform discovery through the operation
818 op = exchange.pushoperation(repo, other)
819 exchange._pushdiscoverychangeset(op)
820
821 remotesubset = op.fallbackheads
822
823 with other.commandexecutor() as e:
824 remotephases = e.callcommand('listkeys',
825 {'namespace': 'phases'}).result()
826 del other
827 publishing = remotephases.get('publishing', False)
828 if publishing:
829 ui.status(('publishing: yes\n'))
830 else:
831 ui.status(('publishing: no\n'))
832
833 nodemap = repo.changelog.nodemap
834 nonpublishroots = 0
835 for nhex, phase in remotephases.iteritems():
836 if nhex == 'publishing': # ignore data related to publish option
837 continue
838 node = bin(nhex)
839 if node in nodemap and int(phase):
840 nonpublishroots += 1
841 ui.status(('number of roots: %d\n') % len(remotephases))
842 ui.status(('number of known non public roots: %d\n') % nonpublishroots)
843 def d():
844 phases.remotephasessummary(repo,
845 remotesubset,
846 remotephases)
847 timer(d)
848 fm.end()
849
793 850 @command('perfmanifest',[
794 851 ('m', 'manifest-rev', False, 'Look up a manifest node revision'),
795 852 ('', 'clear-disk', False, 'clear on-disk caches too'),
796 853 ], 'REV|NODE')
797 854 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
798 855 """benchmark the time to read a manifest from disk and return a usable
799 856 dict-like object
800 857
801 858 Manifest caches are cleared before retrieval."""
802 859 timer, fm = gettimer(ui, opts)
803 860 if not manifest_rev:
804 861 ctx = scmutil.revsingle(repo, rev, rev)
805 862 t = ctx.manifestnode()
806 863 else:
807 864 t = repo.manifestlog._revlog.lookup(rev)
808 865 def d():
809 866 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
810 867 repo.manifestlog[t].read()
811 868 timer(d)
812 869 fm.end()
813 870
814 871 @command('perfchangeset', formatteropts)
815 872 def perfchangeset(ui, repo, rev, **opts):
816 873 timer, fm = gettimer(ui, opts)
817 874 n = scmutil.revsingle(repo, rev).node()
818 875 def d():
819 876 repo.changelog.read(n)
820 877 #repo.changelog._cache = None
821 878 timer(d)
822 879 fm.end()
823 880
824 881 @command('perfindex', formatteropts)
825 882 def perfindex(ui, repo, **opts):
826 883 import mercurial.revlog
827 884 timer, fm = gettimer(ui, opts)
828 885 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
829 886 n = repo["tip"].node()
830 887 svfs = getsvfs(repo)
831 888 def d():
832 889 cl = mercurial.revlog.revlog(svfs, "00changelog.i")
833 890 cl.rev(n)
834 891 timer(d)
835 892 fm.end()
836 893
837 894 @command('perfstartup', formatteropts)
838 895 def perfstartup(ui, repo, **opts):
839 896 timer, fm = gettimer(ui, opts)
840 897 cmd = sys.argv[0]
841 898 def d():
842 899 if os.name != 'nt':
843 900 os.system("HGRCPATH= %s version -q > /dev/null" % cmd)
844 901 else:
845 902 os.environ['HGRCPATH'] = ' '
846 903 os.system("%s version -q > NUL" % cmd)
847 904 timer(d)
848 905 fm.end()
849 906
850 907 @command('perfparents', formatteropts)
851 908 def perfparents(ui, repo, **opts):
852 909 timer, fm = gettimer(ui, opts)
853 910 # control the number of commits perfparents iterates over
854 911 # experimental config: perf.parentscount
855 912 count = getint(ui, "perf", "parentscount", 1000)
856 913 if len(repo.changelog) < count:
857 914 raise error.Abort("repo needs %d commits for this test" % count)
858 915 repo = repo.unfiltered()
859 916 nl = [repo.changelog.node(i) for i in xrange(count)]
860 917 def d():
861 918 for n in nl:
862 919 repo.changelog.parents(n)
863 920 timer(d)
864 921 fm.end()
865 922
866 923 @command('perfctxfiles', formatteropts)
867 924 def perfctxfiles(ui, repo, x, **opts):
868 925 x = int(x)
869 926 timer, fm = gettimer(ui, opts)
870 927 def d():
871 928 len(repo[x].files())
872 929 timer(d)
873 930 fm.end()
874 931
875 932 @command('perfrawfiles', formatteropts)
876 933 def perfrawfiles(ui, repo, x, **opts):
877 934 x = int(x)
878 935 timer, fm = gettimer(ui, opts)
879 936 cl = repo.changelog
880 937 def d():
881 938 len(cl.read(x)[3])
882 939 timer(d)
883 940 fm.end()
884 941
885 942 @command('perflookup', formatteropts)
886 943 def perflookup(ui, repo, rev, **opts):
887 944 timer, fm = gettimer(ui, opts)
888 945 timer(lambda: len(repo.lookup(rev)))
889 946 fm.end()
890 947
891 948 @command('perflinelogedits',
892 949 [('n', 'edits', 10000, 'number of edits'),
893 950 ('', 'max-hunk-lines', 10, 'max lines in a hunk'),
894 951 ], norepo=True)
895 952 def perflinelogedits(ui, **opts):
896 953 from mercurial import linelog
897 954
898 955 edits = opts['edits']
899 956 maxhunklines = opts['max_hunk_lines']
900 957
901 958 maxb1 = 100000
902 959 random.seed(0)
903 960 randint = random.randint
904 961 currentlines = 0
905 962 arglist = []
906 963 for rev in xrange(edits):
907 964 a1 = randint(0, currentlines)
908 965 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
909 966 b1 = randint(0, maxb1)
910 967 b2 = randint(b1, b1 + maxhunklines)
911 968 currentlines += (b2 - b1) - (a2 - a1)
912 969 arglist.append((rev, a1, a2, b1, b2))
913 970
914 971 def d():
915 972 ll = linelog.linelog()
916 973 for args in arglist:
917 974 ll.replacelines(*args)
918 975
919 976 timer, fm = gettimer(ui, opts)
920 977 timer(d)
921 978 fm.end()
922 979
923 980 @command('perfrevrange', formatteropts)
924 981 def perfrevrange(ui, repo, *specs, **opts):
925 982 timer, fm = gettimer(ui, opts)
926 983 revrange = scmutil.revrange
927 984 timer(lambda: len(revrange(repo, specs)))
928 985 fm.end()
929 986
930 987 @command('perfnodelookup', formatteropts)
931 988 def perfnodelookup(ui, repo, rev, **opts):
932 989 timer, fm = gettimer(ui, opts)
933 990 import mercurial.revlog
934 991 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
935 992 n = scmutil.revsingle(repo, rev).node()
936 993 cl = mercurial.revlog.revlog(getsvfs(repo), "00changelog.i")
937 994 def d():
938 995 cl.rev(n)
939 996 clearcaches(cl)
940 997 timer(d)
941 998 fm.end()
942 999
943 1000 @command('perflog',
944 1001 [('', 'rename', False, 'ask log to follow renames')] + formatteropts)
945 1002 def perflog(ui, repo, rev=None, **opts):
946 1003 if rev is None:
947 1004 rev = []
948 1005 timer, fm = gettimer(ui, opts)
949 1006 ui.pushbuffer()
950 1007 timer(lambda: commands.log(ui, repo, rev=rev, date='', user='',
951 1008 copies=opts.get('rename')))
952 1009 ui.popbuffer()
953 1010 fm.end()
954 1011
955 1012 @command('perfmoonwalk', formatteropts)
956 1013 def perfmoonwalk(ui, repo, **opts):
957 1014 """benchmark walking the changelog backwards
958 1015
959 1016 This also loads the changelog data for each revision in the changelog.
960 1017 """
961 1018 timer, fm = gettimer(ui, opts)
962 1019 def moonwalk():
963 1020 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
964 1021 ctx = repo[i]
965 1022 ctx.branch() # read changelog data (in addition to the index)
966 1023 timer(moonwalk)
967 1024 fm.end()
968 1025
969 1026 @command('perftemplating',
970 1027 [('r', 'rev', [], 'revisions to run the template on'),
971 1028 ] + formatteropts)
972 1029 def perftemplating(ui, repo, testedtemplate=None, **opts):
973 1030 """test the rendering time of a given template"""
974 1031 if makelogtemplater is None:
975 1032 raise error.Abort(("perftemplating not available with this Mercurial"),
976 1033 hint="use 4.3 or later")
977 1034
978 1035 nullui = ui.copy()
979 1036 nullui.fout = open(os.devnull, 'wb')
980 1037 nullui.disablepager()
981 1038 revs = opts.get('rev')
982 1039 if not revs:
983 1040 revs = ['all()']
984 1041 revs = list(scmutil.revrange(repo, revs))
985 1042
986 1043 defaulttemplate = ('{date|shortdate} [{rev}:{node|short}]'
987 1044 ' {author|person}: {desc|firstline}\n')
988 1045 if testedtemplate is None:
989 1046 testedtemplate = defaulttemplate
990 1047 displayer = makelogtemplater(nullui, repo, testedtemplate)
991 1048 def format():
992 1049 for r in revs:
993 1050 ctx = repo[r]
994 1051 displayer.show(ctx)
995 1052 displayer.flush(ctx)
996 1053
997 1054 timer, fm = gettimer(ui, opts)
998 1055 timer(format)
999 1056 fm.end()
1000 1057
1001 1058 @command('perfcca', formatteropts)
1002 1059 def perfcca(ui, repo, **opts):
1003 1060 timer, fm = gettimer(ui, opts)
1004 1061 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1005 1062 fm.end()
1006 1063
1007 1064 @command('perffncacheload', formatteropts)
1008 1065 def perffncacheload(ui, repo, **opts):
1009 1066 timer, fm = gettimer(ui, opts)
1010 1067 s = repo.store
1011 1068 def d():
1012 1069 s.fncache._load()
1013 1070 timer(d)
1014 1071 fm.end()
1015 1072
1016 1073 @command('perffncachewrite', formatteropts)
1017 1074 def perffncachewrite(ui, repo, **opts):
1018 1075 timer, fm = gettimer(ui, opts)
1019 1076 s = repo.store
1020 1077 lock = repo.lock()
1021 1078 s.fncache._load()
1022 1079 tr = repo.transaction('perffncachewrite')
1023 1080 tr.addbackup('fncache')
1024 1081 def d():
1025 1082 s.fncache._dirty = True
1026 1083 s.fncache.write(tr)
1027 1084 timer(d)
1028 1085 tr.close()
1029 1086 lock.release()
1030 1087 fm.end()
1031 1088
1032 1089 @command('perffncacheencode', formatteropts)
1033 1090 def perffncacheencode(ui, repo, **opts):
1034 1091 timer, fm = gettimer(ui, opts)
1035 1092 s = repo.store
1036 1093 s.fncache._load()
1037 1094 def d():
1038 1095 for p in s.fncache.entries:
1039 1096 s.encode(p)
1040 1097 timer(d)
1041 1098 fm.end()
1042 1099
1043 1100 def _bdiffworker(q, blocks, xdiff, ready, done):
1044 1101 while not done.is_set():
1045 1102 pair = q.get()
1046 1103 while pair is not None:
1047 1104 if xdiff:
1048 1105 mdiff.bdiff.xdiffblocks(*pair)
1049 1106 elif blocks:
1050 1107 mdiff.bdiff.blocks(*pair)
1051 1108 else:
1052 1109 mdiff.textdiff(*pair)
1053 1110 q.task_done()
1054 1111 pair = q.get()
1055 1112 q.task_done() # for the None one
1056 1113 with ready:
1057 1114 ready.wait()
1058 1115
1059 1116 @command('perfbdiff', revlogopts + formatteropts + [
1060 1117 ('', 'count', 1, 'number of revisions to test (when using --startrev)'),
1061 1118 ('', 'alldata', False, 'test bdiffs for all associated revisions'),
1062 1119 ('', 'threads', 0, 'number of thread to use (disable with 0)'),
1063 1120 ('', 'blocks', False, 'test computing diffs into blocks'),
1064 1121 ('', 'xdiff', False, 'use xdiff algorithm'),
1065 1122 ],
1067 1124 '-c|-m|FILE REV')
1068 1125 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1069 1126 """benchmark a bdiff between revisions
1070 1127
1071 1128 By default, benchmark a bdiff between its delta parent and itself.
1072 1129
1073 1130 With ``--count``, benchmark bdiffs between delta parents and self for N
1074 1131 revisions starting at the specified revision.
1075 1132
1076 1133 With ``--alldata``, assume the requested revision is a changeset and
1077 1134 measure bdiffs for all changes related to that changeset (manifest
1078 1135 and filelogs).
1079 1136 """
1080 1137 opts = pycompat.byteskwargs(opts)
1081 1138
1082 1139 if opts['xdiff'] and not opts['blocks']:
1083 1140 raise error.CommandError('perfbdiff', '--xdiff requires --blocks')
1084 1141
1085 1142 if opts['alldata']:
1086 1143 opts['changelog'] = True
1087 1144
1088 1145 if opts.get('changelog') or opts.get('manifest'):
1089 1146 file_, rev = None, file_
1090 1147 elif rev is None:
1091 1148 raise error.CommandError('perfbdiff', 'invalid arguments')
1092 1149
1093 1150 blocks = opts['blocks']
1094 1151 xdiff = opts['xdiff']
1095 1152 textpairs = []
1096 1153
1097 1154 r = cmdutil.openrevlog(repo, 'perfbdiff', file_, opts)
1098 1155
1099 1156 startrev = r.rev(r.lookup(rev))
1100 1157 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1101 1158 if opts['alldata']:
1102 1159 # Load revisions associated with changeset.
1103 1160 ctx = repo[rev]
1104 1161 mtext = repo.manifestlog._revlog.revision(ctx.manifestnode())
1105 1162 for pctx in ctx.parents():
1106 1163 pman = repo.manifestlog._revlog.revision(pctx.manifestnode())
1107 1164 textpairs.append((pman, mtext))
1108 1165
1109 1166 # Load filelog revisions by iterating manifest delta.
1110 1167 man = ctx.manifest()
1111 1168 pman = ctx.p1().manifest()
1112 1169 for filename, change in pman.diff(man).items():
1113 1170 fctx = repo.file(filename)
1114 1171 f1 = fctx.revision(change[0][0] or -1)
1115 1172 f2 = fctx.revision(change[1][0] or -1)
1116 1173 textpairs.append((f1, f2))
1117 1174 else:
1118 1175 dp = r.deltaparent(rev)
1119 1176 textpairs.append((r.revision(dp), r.revision(rev)))
1120 1177
1121 1178 withthreads = threads > 0
1122 1179 if not withthreads:
1123 1180 def d():
1124 1181 for pair in textpairs:
1125 1182 if xdiff:
1126 1183 mdiff.bdiff.xdiffblocks(*pair)
1127 1184 elif blocks:
1128 1185 mdiff.bdiff.blocks(*pair)
1129 1186 else:
1130 1187 mdiff.textdiff(*pair)
1131 1188 else:
1132 1189 q = queue()
1133 1190 for i in xrange(threads):
1134 1191 q.put(None)
1135 1192 ready = threading.Condition()
1136 1193 done = threading.Event()
1137 1194 for i in xrange(threads):
1138 1195 threading.Thread(target=_bdiffworker,
1139 1196 args=(q, blocks, xdiff, ready, done)).start()
1140 1197 q.join()
1141 1198 def d():
1142 1199 for pair in textpairs:
1143 1200 q.put(pair)
1144 1201 for i in xrange(threads):
1145 1202 q.put(None)
1146 1203 with ready:
1147 1204 ready.notify_all()
1148 1205 q.join()
1149 1206 timer, fm = gettimer(ui, opts)
1150 1207 timer(d)
1151 1208 fm.end()
1152 1209
1153 1210 if withthreads:
1154 1211 done.set()
1155 1212 for i in xrange(threads):
1156 1213 q.put(None)
1157 1214 with ready:
1158 1215 ready.notify_all()
1159 1216
1160 1217 @command('perfunidiff', revlogopts + formatteropts + [
1161 1218 ('', 'count', 1, 'number of revisions to test (when using --startrev)'),
1162 1219 ('', 'alldata', False, 'test unidiffs for all associated revisions'),
1163 1220 ], '-c|-m|FILE REV')
1164 1221 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1165 1222 """benchmark a unified diff between revisions
1166 1223
1167 1224 This doesn't include any copy tracing - it's just a unified diff
1168 1225 of the texts.
1169 1226
1170 1227 By default, benchmark a diff between its delta parent and itself.
1171 1228
1172 1229 With ``--count``, benchmark diffs between delta parents and self for N
1173 1230 revisions starting at the specified revision.
1174 1231
1175 1232 With ``--alldata``, assume the requested revision is a changeset and
1176 1233 measure diffs for all changes related to that changeset (manifest
1177 1234 and filelogs).
1178 1235 """
1179 1236 if opts['alldata']:
1180 1237 opts['changelog'] = True
1181 1238
1182 1239 if opts.get('changelog') or opts.get('manifest'):
1183 1240 file_, rev = None, file_
1184 1241 elif rev is None:
1185 1242 raise error.CommandError('perfunidiff', 'invalid arguments')
1186 1243
1187 1244 textpairs = []
1188 1245
1189 1246 r = cmdutil.openrevlog(repo, 'perfunidiff', file_, opts)
1190 1247
1191 1248 startrev = r.rev(r.lookup(rev))
1192 1249 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1193 1250 if opts['alldata']:
1194 1251 # Load revisions associated with changeset.
1195 1252 ctx = repo[rev]
1196 1253 mtext = repo.manifestlog._revlog.revision(ctx.manifestnode())
1197 1254 for pctx in ctx.parents():
1198 1255 pman = repo.manifestlog._revlog.revision(pctx.manifestnode())
1199 1256 textpairs.append((pman, mtext))
1200 1257
1201 1258 # Load filelog revisions by iterating manifest delta.
1202 1259 man = ctx.manifest()
1203 1260 pman = ctx.p1().manifest()
1204 1261 for filename, change in pman.diff(man).items():
1205 1262 fctx = repo.file(filename)
1206 1263 f1 = fctx.revision(change[0][0] or -1)
1207 1264 f2 = fctx.revision(change[1][0] or -1)
1208 1265 textpairs.append((f1, f2))
1209 1266 else:
1210 1267 dp = r.deltaparent(rev)
1211 1268 textpairs.append((r.revision(dp), r.revision(rev)))
1212 1269
1213 1270 def d():
1214 1271 for left, right in textpairs:
1215 1272 # The date strings don't matter, so we pass empty strings.
1216 1273 headerlines, hunks = mdiff.unidiff(
1217 1274 left, '', right, '', 'left', 'right', binary=False)
1218 1275 # consume iterators in roughly the way patch.py does
1219 1276 b'\n'.join(headerlines)
1220 1277 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1221 1278 timer, fm = gettimer(ui, opts)
1222 1279 timer(d)
1223 1280 fm.end()
1224 1281
1225 1282 @command('perfdiffwd', formatteropts)
1226 1283 def perfdiffwd(ui, repo, **opts):
1227 1284 """Profile diff of working directory changes"""
1228 1285 timer, fm = gettimer(ui, opts)
1229 1286 options = {
1230 1287 'w': 'ignore_all_space',
1231 1288 'b': 'ignore_space_change',
1232 1289 'B': 'ignore_blank_lines',
1233 1290 }
1234 1291
1235 1292 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1236 1293 opts = dict((options[c], '1') for c in diffopt)
1237 1294 def d():
1238 1295 ui.pushbuffer()
1239 1296 commands.diff(ui, repo, **opts)
1240 1297 ui.popbuffer()
1241 1298 title = 'diffopts: %s' % (diffopt and ('-' + diffopt) or 'none')
1242 1299 timer(d, title)
1243 1300 fm.end()
1244 1301
1245 1302 @command('perfrevlogindex', revlogopts + formatteropts,
1246 1303 '-c|-m|FILE')
1247 1304 def perfrevlogindex(ui, repo, file_=None, **opts):
1248 1305 """Benchmark operations against a revlog index.
1249 1306
1250 1307 This tests constructing a revlog instance, reading index data,
1251 1308 parsing index data, and performing various operations related to
1252 1309 index data.
1253 1310 """
1254 1311
1255 1312 rl = cmdutil.openrevlog(repo, 'perfrevlogindex', file_, opts)
1256 1313
1257 1314 opener = getattr(rl, 'opener') # trick linter
1258 1315 indexfile = rl.indexfile
1259 1316 data = opener.read(indexfile)
1260 1317
1261 1318 header = struct.unpack('>I', data[0:4])[0]
1262 1319 version = header & 0xFFFF
1263 1320 if version == 1:
1264 1321 revlogio = revlog.revlogio()
1265 1322 inline = header & (1 << 16)
1266 1323 else:
1267 1324 raise error.Abort(('unsupported revlog version: %d') % version)
1268 1325
1269 1326 rllen = len(rl)
1270 1327
1271 1328 node0 = rl.node(0)
1272 1329 node25 = rl.node(rllen // 4)
1273 1330 node50 = rl.node(rllen // 2)
1274 1331 node75 = rl.node(rllen // 4 * 3)
1275 1332 node100 = rl.node(rllen - 1)
1276 1333
1277 1334 allrevs = range(rllen)
1278 1335 allrevsrev = list(reversed(allrevs))
1279 1336 allnodes = [rl.node(rev) for rev in range(rllen)]
1280 1337 allnodesrev = list(reversed(allnodes))
1281 1338
1282 1339 def constructor():
1283 1340 revlog.revlog(opener, indexfile)
1284 1341
1285 1342 def read():
1286 1343 with opener(indexfile) as fh:
1287 1344 fh.read()
1288 1345
1289 1346 def parseindex():
1290 1347 revlogio.parseindex(data, inline)
1291 1348
1292 1349 def getentry(revornode):
1293 1350 index = revlogio.parseindex(data, inline)[0]
1294 1351 index[revornode]
1295 1352
1296 1353 def getentries(revs, count=1):
1297 1354 index = revlogio.parseindex(data, inline)[0]
1298 1355
1299 1356 for i in range(count):
1300 1357 for rev in revs:
1301 1358 index[rev]
1302 1359
1303 1360 def resolvenode(node):
1304 1361 nodemap = revlogio.parseindex(data, inline)[1]
1305 1362 # This only works for the C code.
1306 1363 if nodemap is None:
1307 1364 return
1308 1365
1309 1366 try:
1310 1367 nodemap[node]
1311 1368 except error.RevlogError:
1312 1369 pass
1313 1370
1314 1371 def resolvenodes(nodes, count=1):
1315 1372 nodemap = revlogio.parseindex(data, inline)[1]
1316 1373 if nodemap is None:
1317 1374 return
1318 1375
1319 1376 for i in range(count):
1320 1377 for node in nodes:
1321 1378 try:
1322 1379 nodemap[node]
1323 1380 except error.RevlogError:
1324 1381 pass
1325 1382
1326 1383 benches = [
1327 1384 (constructor, 'revlog constructor'),
1328 1385 (read, 'read'),
1329 1386 (parseindex, 'create index object'),
1330 1387 (lambda: getentry(0), 'retrieve index entry for rev 0'),
1331 1388 (lambda: resolvenode('a' * 20), 'look up missing node'),
1332 1389 (lambda: resolvenode(node0), 'look up node at rev 0'),
1333 1390 (lambda: resolvenode(node25), 'look up node at 1/4 len'),
1334 1391 (lambda: resolvenode(node50), 'look up node at 1/2 len'),
1335 1392 (lambda: resolvenode(node75), 'look up node at 3/4 len'),
1336 1393 (lambda: resolvenode(node100), 'look up node at tip'),
1337 1394 # 2x variation is to measure caching impact.
1338 1395 (lambda: resolvenodes(allnodes),
1339 1396 'look up all nodes (forward)'),
1340 1397 (lambda: resolvenodes(allnodes, 2),
1341 1398 'look up all nodes 2x (forward)'),
1342 1399 (lambda: resolvenodes(allnodesrev),
1343 1400 'look up all nodes (reverse)'),
1344 1401 (lambda: resolvenodes(allnodesrev, 2),
1345 1402 'look up all nodes 2x (reverse)'),
1346 1403 (lambda: getentries(allrevs),
1347 1404 'retrieve all index entries (forward)'),
1348 1405 (lambda: getentries(allrevs, 2),
1349 1406 'retrieve all index entries 2x (forward)'),
1350 1407 (lambda: getentries(allrevsrev),
1351 1408 'retrieve all index entries (reverse)'),
1352 1409 (lambda: getentries(allrevsrev, 2),
1353 1410 'retrieve all index entries 2x (reverse)'),
1354 1411 ]
1355 1412
1356 1413 for fn, title in benches:
1357 1414 timer, fm = gettimer(ui, opts)
1358 1415 timer(fn, title=title)
1359 1416 fm.end()
1360 1417
1361 1418 @command('perfrevlogrevisions', revlogopts + formatteropts +
1362 1419 [('d', 'dist', 100, 'distance between the revisions'),
1363 1420 ('s', 'startrev', 0, 'revision to start reading at'),
1364 1421 ('', 'reverse', False, 'read in reverse')],
1365 1422 '-c|-m|FILE')
1366 1423 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
1367 1424 **opts):
1368 1425 """Benchmark reading a series of revisions from a revlog.
1369 1426
1370 1427 By default, we read every ``-d/--dist`` revision from 0 to tip of
1371 1428 the specified revlog.
1372 1429
1373 1430 The start revision can be defined via ``-s/--startrev``.
1374 1431 """
1375 1432 rl = cmdutil.openrevlog(repo, 'perfrevlogrevisions', file_, opts)
1376 1433 rllen = getlen(ui)(rl)
1377 1434
1378 1435 def d():
1379 1436 rl.clearcaches()
1380 1437
1381 1438 beginrev = startrev
1382 1439 endrev = rllen
1383 1440 dist = opts['dist']
1384 1441
1385 1442 if reverse:
1386 1443 beginrev, endrev = endrev, beginrev
1387 1444 dist = -1 * dist
1388 1445
1389 1446 for x in xrange(beginrev, endrev, dist):
1390 1447 # Old revisions don't support passing int.
1391 1448 n = rl.node(x)
1392 1449 rl.revision(n)
1393 1450
1394 1451 timer, fm = gettimer(ui, opts)
1395 1452 timer(d)
1396 1453 fm.end()
1397 1454
1398 1455 @command('perfrevlogchunks', revlogopts + formatteropts +
1399 1456 [('e', 'engines', '', 'compression engines to use'),
1400 1457 ('s', 'startrev', 0, 'revision to start at')],
1401 1458 '-c|-m|FILE')
1402 1459 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
1403 1460 """Benchmark operations on revlog chunks.
1404 1461
1405 1462 Logically, each revlog is a collection of fulltext revisions. However,
1406 1463 stored within each revlog are "chunks" of possibly compressed data. This
1407 1464 data needs to be read and decompressed or compressed and written.
1408 1465
1409 1466 This command measures the time it takes to read+decompress and recompress
1410 1467 chunks in a revlog. It effectively isolates I/O and compression performance.
1411 1468 For measurements of higher-level operations like resolving revisions,
1412 1469 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
1413 1470 """
1414 1471 rl = cmdutil.openrevlog(repo, 'perfrevlogchunks', file_, opts)
1415 1472
1416 1473 # _chunkraw was renamed to _getsegmentforrevs.
1417 1474 try:
1418 1475 segmentforrevs = rl._getsegmentforrevs
1419 1476 except AttributeError:
1420 1477 segmentforrevs = rl._chunkraw
1421 1478
1422 1479 # Verify engines argument.
1423 1480 if engines:
1424 1481 engines = set(e.strip() for e in engines.split(','))
1425 1482 for engine in engines:
1426 1483 try:
1427 1484 util.compressionengines[engine]
1428 1485 except KeyError:
1429 1486 raise error.Abort('unknown compression engine: %s' % engine)
1430 1487 else:
1431 1488 engines = []
1432 1489 for e in util.compengines:
1433 1490 engine = util.compengines[e]
1434 1491 try:
1435 1492 if engine.available():
1436 1493 engine.revlogcompressor().compress('dummy')
1437 1494 engines.append(e)
1438 1495 except NotImplementedError:
1439 1496 pass
1440 1497
1441 1498 revs = list(rl.revs(startrev, len(rl) - 1))
1442 1499
1443 1500 def rlfh(rl):
1444 1501 if rl._inline:
1445 1502 return getsvfs(repo)(rl.indexfile)
1446 1503 else:
1447 1504 return getsvfs(repo)(rl.datafile)
1448 1505
1449 1506 def doread():
1450 1507 rl.clearcaches()
1451 1508 for rev in revs:
1452 1509 segmentforrevs(rev, rev)
1453 1510
1454 1511 def doreadcachedfh():
1455 1512 rl.clearcaches()
1456 1513 fh = rlfh(rl)
1457 1514 for rev in revs:
1458 1515 segmentforrevs(rev, rev, df=fh)
1459 1516
1460 1517 def doreadbatch():
1461 1518 rl.clearcaches()
1462 1519 segmentforrevs(revs[0], revs[-1])
1463 1520
1464 1521 def doreadbatchcachedfh():
1465 1522 rl.clearcaches()
1466 1523 fh = rlfh(rl)
1467 1524 segmentforrevs(revs[0], revs[-1], df=fh)
1468 1525
1469 1526 def dochunk():
1470 1527 rl.clearcaches()
1471 1528 fh = rlfh(rl)
1472 1529 for rev in revs:
1473 1530 rl._chunk(rev, df=fh)
1474 1531
1475 1532 chunks = [None]
1476 1533
1477 1534 def dochunkbatch():
1478 1535 rl.clearcaches()
1479 1536 fh = rlfh(rl)
1480 1537 # Save chunks as a side-effect.
1481 1538 chunks[0] = rl._chunks(revs, df=fh)
1482 1539
1483 1540 def docompress(compressor):
1484 1541 rl.clearcaches()
1485 1542
1486 1543 try:
1487 1544 # Swap in the requested compression engine.
1488 1545 oldcompressor = rl._compressor
1489 1546 rl._compressor = compressor
1490 1547 for chunk in chunks[0]:
1491 1548 rl.compress(chunk)
1492 1549 finally:
1493 1550 rl._compressor = oldcompressor
1494 1551
1495 1552 benches = [
1496 1553 (lambda: doread(), 'read'),
1497 1554 (lambda: doreadcachedfh(), 'read w/ reused fd'),
1498 1555 (lambda: doreadbatch(), 'read batch'),
1499 1556 (lambda: doreadbatchcachedfh(), 'read batch w/ reused fd'),
1500 1557 (lambda: dochunk(), 'chunk'),
1501 1558 (lambda: dochunkbatch(), 'chunk batch'),
1502 1559 ]
1503 1560
1504 1561 for engine in sorted(engines):
1505 1562 compressor = util.compengines[engine].revlogcompressor()
1506 1563 benches.append((functools.partial(docompress, compressor),
1507 1564 'compress w/ %s' % engine))
1508 1565
1509 1566 for fn, title in benches:
1510 1567 timer, fm = gettimer(ui, opts)
1511 1568 timer(fn, title=title)
1512 1569 fm.end()
1513 1570
1514 1571 @command('perfrevlogrevision', revlogopts + formatteropts +
1515 1572 [('', 'cache', False, 'use caches instead of clearing')],
1516 1573 '-c|-m|FILE REV')
1517 1574 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
1518 1575 """Benchmark obtaining a revlog revision.
1519 1576
1520 1577 Obtaining a revlog revision consists of roughly the following steps:
1521 1578
1522 1579 1. Compute the delta chain
1523 1580 2. Obtain the raw chunks for that delta chain
1524 1581 3. Decompress each raw chunk
1525 1582 4. Apply binary patches to obtain fulltext
1526 1583 5. Verify hash of fulltext
1527 1584
1528 1585 This command measures the time spent in each of these phases.
1529 1586 """
1530 1587 if opts.get('changelog') or opts.get('manifest'):
1531 1588 file_, rev = None, file_
1532 1589 elif rev is None:
1533 1590 raise error.CommandError('perfrevlogrevision', 'invalid arguments')
1534 1591
1535 1592 r = cmdutil.openrevlog(repo, 'perfrevlogrevision', file_, opts)
1536 1593
1537 1594 # _chunkraw was renamed to _getsegmentforrevs.
1538 1595 try:
1539 1596 segmentforrevs = r._getsegmentforrevs
1540 1597 except AttributeError:
1541 1598 segmentforrevs = r._chunkraw
1542 1599
1543 1600 node = r.lookup(rev)
1544 1601 rev = r.rev(node)
1545 1602
1546 1603 def getrawchunks(data, chain):
1547 1604 start = r.start
1548 1605 length = r.length
1549 1606 inline = r._inline
1550 1607 iosize = r._io.size
1551 1608 buffer = util.buffer
1552 1609 offset = start(chain[0])
1553 1610
1554 1611 chunks = []
1555 1612 ladd = chunks.append
1556 1613
1557 1614 for rev in chain:
1558 1615 chunkstart = start(rev)
1559 1616 if inline:
1560 1617 chunkstart += (rev + 1) * iosize
1561 1618 chunklength = length(rev)
1562 1619 ladd(buffer(data, chunkstart - offset, chunklength))
1563 1620
1564 1621 return chunks
1565 1622
1566 1623 def dodeltachain(rev):
1567 1624 if not cache:
1568 1625 r.clearcaches()
1569 1626 r._deltachain(rev)
1570 1627
1571 1628 def doread(chain):
1572 1629 if not cache:
1573 1630 r.clearcaches()
1574 1631 segmentforrevs(chain[0], chain[-1])
1575 1632
1576 1633 def dorawchunks(data, chain):
1577 1634 if not cache:
1578 1635 r.clearcaches()
1579 1636 getrawchunks(data, chain)
1580 1637
1581 1638 def dodecompress(chunks):
1582 1639 decomp = r.decompress
1583 1640 for chunk in chunks:
1584 1641 decomp(chunk)
1585 1642
1586 1643 def dopatch(text, bins):
1587 1644 if not cache:
1588 1645 r.clearcaches()
1589 1646 mdiff.patches(text, bins)
1590 1647
1591 1648 def dohash(text):
1592 1649 if not cache:
1593 1650 r.clearcaches()
1594 1651 r.checkhash(text, node, rev=rev)
1595 1652
1596 1653 def dorevision():
1597 1654 if not cache:
1598 1655 r.clearcaches()
1599 1656 r.revision(node)
1600 1657
1601 1658 chain = r._deltachain(rev)[0]
1602 1659 data = segmentforrevs(chain[0], chain[-1])[1]
1603 1660 rawchunks = getrawchunks(data, chain)
1604 1661 bins = r._chunks(chain)
1605 1662 text = str(bins[0])
1606 1663 bins = bins[1:]
1607 1664 text = mdiff.patches(text, bins)
1608 1665
1609 1666 benches = [
1610 1667 (lambda: dorevision(), 'full'),
1611 1668 (lambda: dodeltachain(rev), 'deltachain'),
1612 1669 (lambda: doread(chain), 'read'),
1613 1670 (lambda: dorawchunks(data, chain), 'rawchunks'),
1614 1671 (lambda: dodecompress(rawchunks), 'decompress'),
1615 1672 (lambda: dopatch(text, bins), 'patch'),
1616 1673 (lambda: dohash(text), 'hash'),
1617 1674 ]
1618 1675
1619 1676 for fn, title in benches:
1620 1677 timer, fm = gettimer(ui, opts)
1621 1678 timer(fn, title=title)
1622 1679 fm.end()
1623 1680
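# Illustrative usage sketch (hypothetical revision number):
#
#   $ hg perfrevlogrevision -m 1000
#
# would report, for manifest revision 1000, the time spent in each step
# listed in the docstring above (deltachain, read, rawchunks, decompress,
# patch, hash) plus the end-to-end 'full' revision time.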
1624 1681 @command('perfrevset',
1625 1682 [('C', 'clear', False, 'clear volatile cache between each call.'),
1626 1683 ('', 'contexts', False, 'obtain changectx for each revision')]
1627 1684 + formatteropts, "REVSET")
1628 1685 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
1629 1686 """benchmark the execution time of a revset
1630 1687
1631 1688     Use the --clear option if you need to evaluate the impact of building the
1632 1689     volatile revision set cache on revset execution. The volatile cache holds
1633 1690     the filtering- and obsolescence-related caches."""
1634 1691 timer, fm = gettimer(ui, opts)
1635 1692 def d():
1636 1693 if clear:
1637 1694 repo.invalidatevolatilesets()
1638 1695 if contexts:
1639 1696 for ctx in repo.set(expr): pass
1640 1697 else:
1641 1698 for r in repo.revs(expr): pass
1642 1699 timer(d)
1643 1700 fm.end()
1644 1701
1645 1702 @command('perfvolatilesets',
1646 1703 [('', 'clear-obsstore', False, 'drop obsstore between each call.'),
1647 1704 ] + formatteropts)
1648 1705 def perfvolatilesets(ui, repo, *names, **opts):
1649 1706 """benchmark the computation of various volatile set
1650 1707
1651 1708 Volatile set computes element related to filtering and obsolescence."""
1652 1709 timer, fm = gettimer(ui, opts)
1653 1710 repo = repo.unfiltered()
1654 1711
1655 1712 def getobs(name):
1656 1713 def d():
1657 1714 repo.invalidatevolatilesets()
1658 1715 if opts['clear_obsstore']:
1659 1716 clearfilecache(repo, 'obsstore')
1660 1717 obsolete.getrevs(repo, name)
1661 1718 return d
1662 1719
1663 1720 allobs = sorted(obsolete.cachefuncs)
1664 1721 if names:
1665 1722 allobs = [n for n in allobs if n in names]
1666 1723
1667 1724 for name in allobs:
1668 1725 timer(getobs(name), title=name)
1669 1726
1670 1727 def getfiltered(name):
1671 1728 def d():
1672 1729 repo.invalidatevolatilesets()
1673 1730 if opts['clear_obsstore']:
1674 1731 clearfilecache(repo, 'obsstore')
1675 1732 repoview.filterrevs(repo, name)
1676 1733 return d
1677 1734
1678 1735 allfilter = sorted(repoview.filtertable)
1679 1736 if names:
1680 1737 allfilter = [n for n in allfilter if n in names]
1681 1738
1682 1739 for name in allfilter:
1683 1740 timer(getfiltered(name), title=name)
1684 1741 fm.end()
1685 1742
1686 1743 @command('perfbranchmap',
1687 1744 [('f', 'full', False,
1688 1745 'Includes build time of subset'),
1689 1746 ('', 'clear-revbranch', False,
1690 1747 'purge the revbranch cache between computation'),
1691 1748 ] + formatteropts)
1692 1749 def perfbranchmap(ui, repo, *filternames, **opts):
1693 1750 """benchmark the update of a branchmap
1694 1751
1695 1752 This benchmarks the full repo.branchmap() call with read and write disabled
1696 1753 """
1697 1754 full = opts.get("full", False)
1698 1755 clear_revbranch = opts.get("clear_revbranch", False)
1699 1756 timer, fm = gettimer(ui, opts)
1700 1757 def getbranchmap(filtername):
1701 1758 """generate a benchmark function for the filtername"""
1702 1759 if filtername is None:
1703 1760 view = repo
1704 1761 else:
1705 1762 view = repo.filtered(filtername)
1706 1763 def d():
1707 1764 if clear_revbranch:
1708 1765 repo.revbranchcache()._clear()
1709 1766 if full:
1710 1767 view._branchcaches.clear()
1711 1768 else:
1712 1769 view._branchcaches.pop(filtername, None)
1713 1770 view.branchmap()
1714 1771 return d
1715 1772     # order the filters from smaller subsets to bigger subsets
1716 1773 possiblefilters = set(repoview.filtertable)
1717 1774 if filternames:
1718 1775 possiblefilters &= set(filternames)
1719 1776 subsettable = getbranchmapsubsettable()
1720 1777 allfilters = []
1721 1778 while possiblefilters:
1722 1779 for name in possiblefilters:
1723 1780 subset = subsettable.get(name)
1724 1781 if subset not in possiblefilters:
1725 1782 break
1726 1783 else:
1727 1784 assert False, 'subset cycle %s!' % possiblefilters
1728 1785 allfilters.append(name)
1729 1786 possiblefilters.remove(name)
1730 1787
1731 1788 # warm the cache
1732 1789 if not full:
1733 1790 for name in allfilters:
1734 1791 repo.filtered(name).branchmap()
1735 1792 if not filternames or 'unfiltered' in filternames:
1736 1793 # add unfiltered
1737 1794 allfilters.append(None)
1738 1795
1739 1796 branchcacheread = safeattrsetter(branchmap, 'read')
1740 1797 branchcachewrite = safeattrsetter(branchmap.branchcache, 'write')
1741 1798 branchcacheread.set(lambda repo: None)
1742 1799 branchcachewrite.set(lambda bc, repo: None)
1743 1800 try:
1744 1801 for name in allfilters:
1745 1802 printname = name
1746 1803 if name is None:
1747 1804 printname = 'unfiltered'
1748 1805 timer(getbranchmap(name), title=str(printname))
1749 1806 finally:
1750 1807 branchcacheread.restore()
1751 1808 branchcachewrite.restore()
1752 1809 fm.end()
1753 1810
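# Illustrative usage sketch (filter names come from repoview.filtertable):
#
#   $ hg perfbranchmap visible served
#
# times a branchmap update for the 'visible' and 'served' repoview filters;
# the smaller subsets are warmed first so only the incremental update is
# timed (unless --full is given).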
1754 1811 @command('perfbranchmapload', [
1755 1812 ('f', 'filter', '', 'Specify repoview filter'),
1756 1813     ('', 'list', False, 'List branchmap filter caches'),
1757 1814 ] + formatteropts)
1758 1815 def perfbranchmapread(ui, repo, filter='', list=False, **opts):
1759 1816 """benchmark reading the branchmap"""
1760 1817 if list:
1761 1818 for name, kind, st in repo.cachevfs.readdir(stat=True):
1762 1819 if name.startswith('branch2'):
1763 1820 filtername = name.partition('-')[2] or 'unfiltered'
1764 1821 ui.status('%s - %s\n'
1765 1822 % (filtername, util.bytecount(st.st_size)))
1766 1823 return
1767 1824 if filter:
1768 1825 repo = repoview.repoview(repo, filter)
1769 1826 else:
1770 1827 repo = repo.unfiltered()
1771 1828 # try once without timer, the filter may not be cached
1772 1829 if branchmap.read(repo) is None:
1773 1830         raise error.Abort('No branchmap cached for %s repo'
1774 1831 % (filter or 'unfiltered'))
1775 1832 timer, fm = gettimer(ui, opts)
1776 1833 timer(lambda: branchmap.read(repo) and None)
1777 1834 fm.end()
1778 1835
1779 1836 @command('perfloadmarkers')
1780 1837 def perfloadmarkers(ui, repo):
1781 1838 """benchmark the time to parse the on-disk markers for a repo
1782 1839
1783 1840 Result is the number of markers in the repo."""
1784 1841 timer, fm = gettimer(ui)
1785 1842 svfs = getsvfs(repo)
1786 1843 timer(lambda: len(obsolete.obsstore(svfs)))
1787 1844 fm.end()
1788 1845
1789 1846 @command('perflrucachedict', formatteropts +
1790 1847 [('', 'size', 4, 'size of cache'),
1791 1848 ('', 'gets', 10000, 'number of key lookups'),
1792 1849 ('', 'sets', 10000, 'number of key sets'),
1793 1850 ('', 'mixed', 10000, 'number of mixed mode operations'),
1794 1851 ('', 'mixedgetfreq', 50, 'frequency of get vs set ops in mixed mode')],
1795 1852 norepo=True)
1796 1853 def perflrucache(ui, size=4, gets=10000, sets=10000, mixed=10000,
1797 1854 mixedgetfreq=50, **opts):
1798 1855 def doinit():
1799 1856 for i in xrange(10000):
1800 1857 util.lrucachedict(size)
1801 1858
1802 1859 values = []
1803 1860 for i in xrange(size):
1804 1861 values.append(random.randint(0, sys.maxint))
1805 1862
1806 1863 # Get mode fills the cache and tests raw lookup performance with no
1807 1864 # eviction.
1808 1865 getseq = []
1809 1866 for i in xrange(gets):
1810 1867 getseq.append(random.choice(values))
1811 1868
1812 1869 def dogets():
1813 1870 d = util.lrucachedict(size)
1814 1871 for v in values:
1815 1872 d[v] = v
1816 1873 for key in getseq:
1817 1874 value = d[key]
1818 1875 value # silence pyflakes warning
1819 1876
1820 1877 # Set mode tests insertion speed with cache eviction.
1821 1878 setseq = []
1822 1879 for i in xrange(sets):
1823 1880 setseq.append(random.randint(0, sys.maxint))
1824 1881
1825 1882 def dosets():
1826 1883 d = util.lrucachedict(size)
1827 1884 for v in setseq:
1828 1885 d[v] = v
1829 1886
1830 1887 # Mixed mode randomly performs gets and sets with eviction.
1831 1888 mixedops = []
1832 1889 for i in xrange(mixed):
1833 1890 r = random.randint(0, 100)
1834 1891 if r < mixedgetfreq:
1835 1892 op = 0
1836 1893 else:
1837 1894 op = 1
1838 1895
1839 1896 mixedops.append((op, random.randint(0, size * 2)))
1840 1897
1841 1898 def domixed():
1842 1899 d = util.lrucachedict(size)
1843 1900
1844 1901 for op, v in mixedops:
1845 1902 if op == 0:
1846 1903 try:
1847 1904 d[v]
1848 1905 except KeyError:
1849 1906 pass
1850 1907 else:
1851 1908 d[v] = v
1852 1909
1853 1910 benches = [
1854 1911 (doinit, 'init'),
1855 1912 (dogets, 'gets'),
1856 1913 (dosets, 'sets'),
1857 1914 (domixed, 'mixed')
1858 1915 ]
1859 1916
1860 1917 for fn, title in benches:
1861 1918 timer, fm = gettimer(ui, opts)
1862 1919 timer(fn, title=title)
1863 1920 fm.end()
1864 1921
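# Illustrative usage sketch (hypothetical parameters):
#
#   $ hg perflrucachedict --size 4 --gets 10000 --mixedgetfreq 90
#
# benchmarks a 4-entry util.lrucachedict under a pure-get workload and a
# mixed workload that is 90% gets / 10% sets.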
1865 1922 @command('perfwrite', formatteropts)
1866 1923 def perfwrite(ui, repo, **opts):
1867 1924 """microbenchmark ui.write
1868 1925 """
1869 1926 timer, fm = gettimer(ui, opts)
1870 1927 def write():
1871 1928 for i in range(100000):
1872 1929 ui.write(('Testing write performance\n'))
1873 1930 timer(write)
1874 1931 fm.end()
1875 1932
1876 1933 def uisetup(ui):
1877 1934 if (util.safehasattr(cmdutil, 'openrevlog') and
1878 1935 not util.safehasattr(commands, 'debugrevlogopts')):
1879 1936 # for "historical portability":
1880 1937 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
1881 1938 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
1882 1939 # openrevlog() should cause failure, because it has been
1883 1940 # available since 3.5 (or 49c583ca48c4).
1884 1941 def openrevlog(orig, repo, cmd, file_, opts):
1885 1942 if opts.get('dir') and not util.safehasattr(repo, 'dirlog'):
1886 1943 raise error.Abort("This version doesn't support --dir option",
1887 1944 hint="use 3.5 or later")
1888 1945 return orig(repo, cmd, file_, opts)
1889 1946 extensions.wrapfunction(cmdutil, 'openrevlog', openrevlog)
@@ -1,698 +1,728 b''
1 1 """ Mercurial phases support code
2 2
3 3 ---
4 4
5 5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
6 6 Logilab SA <contact@logilab.fr>
7 7 Augie Fackler <durin42@gmail.com>
8 8
9 9 This software may be used and distributed according to the terms
10 10 of the GNU General Public License version 2 or any later version.
11 11
12 12 ---
13 13
14 14 This module implements most phase logic in mercurial.
15 15
16 16
17 17 Basic Concept
18 18 =============
19 19
20 20 A 'changeset phase' is an indicator that tells us how a changeset is
21 21 manipulated and communicated. The details of each phase is described
22 22 below, here we describe the properties they have in common.
23 23
24 24 Like bookmarks, phases are not stored in history and thus are not
25 25 permanent and leave no audit trail.
26 26
27 27 First, no changeset can be in two phases at once. Phases are ordered,
28 28 so they can be considered from lowest to highest. The default, lowest
29 29 phase is 'public' - this is the normal phase of existing changesets. A
30 30 child changeset can not be in a lower phase than its parents.
31 31
32 32 These phases share a hierarchy of traits:
33 33
34 34 immutable shared
35 35 public: X X
36 36 draft: X
37 37 secret:
38 38
39 39 Local commits are draft by default.
40 40
41 41 Phase Movement and Exchange
42 42 ===========================
43 43
44 44 Phase data is exchanged by pushkey on pull and push. Some servers have
45 45 a publish option set, we call such a server a "publishing server".
46 46 Pushing a draft changeset to a publishing server changes the phase to
47 47 public.
48 48
49 49 A small list of facts/rules defines the exchange of phases:
50 50
51 51 * old client never changes server states
52 52 * pull never changes server states
53 53 * publish and old server changesets are seen as public by client
54 54 * any secret changeset seen in another repository is lowered to at
55 55 least draft
56 56
57 57 Here is the final table summing up the 49 possible use cases of phase
58 58 exchange:
59 59
60 60 server
61 61 old publish non-publish
62 62 N X N D P N D P
63 63 old client
64 64 pull
65 65 N - X/X - X/D X/P - X/D X/P
66 66 X - X/X - X/D X/P - X/D X/P
67 67 push
68 68 X X/X X/X X/P X/P X/P X/D X/D X/P
69 69 new client
70 70 pull
71 71 N - P/X - P/D P/P - D/D P/P
72 72 D - P/X - P/D P/P - D/D P/P
73 73 P - P/X - P/D P/P - P/D P/P
74 74 push
75 75 D P/X P/X P/P P/P P/P D/D D/D P/P
76 76 P P/X P/X P/P P/P P/P P/P P/P P/P
77 77
78 78 Legend:
79 79
80 80 A/B = final state on client / state on server
81 81
82 82 * N = new/not present,
83 83 * P = public,
84 84 * D = draft,
85 85 * X = not tracked (i.e., the old client or server has no internal
86 86 way of recording the phase.)
87 87
88 88 passive = only pushes
89 89
90 90
91 91 A cell here can be read like this:
92 92
93 93 "When a new client pushes a draft changeset (D) to a publishing
94 94 server where it's not present (N), it's marked public on both
95 95 sides (P/P)."
96 96
97 97 Note: old clients behave as a publishing server with draft-only content
98 98 - other people see it as public
99 99 - content is pushed as draft
100 100
101 101 """
102 102
103 103 from __future__ import absolute_import
104 104
105 105 import errno
106 106 import struct
107 107
108 108 from .i18n import _
109 109 from .node import (
110 110 bin,
111 111 hex,
112 112 nullid,
113 113 nullrev,
114 114 short,
115 115 )
116 116 from . import (
117 117 error,
118 118 pycompat,
119 119 smartset,
120 120 txnutil,
121 121 util,
122 122 )
123 123
124 124 _fphasesentry = struct.Struct('>i20s')
125 125
126 126 allphases = public, draft, secret = range(3)
127 127 trackedphases = allphases[1:]
128 128 phasenames = ['public', 'draft', 'secret']
129 129 mutablephases = tuple(allphases[1:])
130 130 remotehiddenphases = tuple(allphases[2:])
131 131
132 132 def _readroots(repo, phasedefaults=None):
133 133 """Read phase roots from disk
134 134
135 135 phasedefaults is a list of fn(repo, roots) callable, which are
136 136 executed if the phase roots file does not exist. When phases are
137 137 being initialized on an existing repository, this could be used to
138 138     set selected changesets' phase to something other than public.
139 139
140 140 Return (roots, dirty) where dirty is true if roots differ from
141 141 what is being stored.
142 142 """
143 143 repo = repo.unfiltered()
144 144 dirty = False
145 145 roots = [set() for i in allphases]
146 146 try:
147 147 f, pending = txnutil.trypending(repo.root, repo.svfs, 'phaseroots')
148 148 try:
149 149 for line in f:
150 150 phase, nh = line.split()
151 151 roots[int(phase)].add(bin(nh))
152 152 finally:
153 153 f.close()
154 154 except IOError as inst:
155 155 if inst.errno != errno.ENOENT:
156 156 raise
157 157 if phasedefaults:
158 158 for f in phasedefaults:
159 159 roots = f(repo, roots)
160 160 dirty = True
161 161 return roots, dirty
162 162
163 163 def binaryencode(phasemapping):
164 164 """encode a 'phase -> nodes' mapping into a binary stream
165 165
166 166     Since phases are integers, the mapping is actually a python list:
167 167 [[PUBLIC_HEADS], [DRAFTS_HEADS], [SECRET_HEADS]]
168 168 """
169 169 binarydata = []
170 170 for phase, nodes in enumerate(phasemapping):
171 171 for head in nodes:
172 172 binarydata.append(_fphasesentry.pack(phase, head))
173 173 return ''.join(binarydata)
174 174
175 175 def binarydecode(stream):
176 176 """decode a binary stream into a 'phase -> nodes' mapping
177 177
178 178     Since phases are integers, the mapping is actually a python list.
179 179 headsbyphase = [[] for i in allphases]
180 180 entrysize = _fphasesentry.size
181 181 while True:
182 182 entry = stream.read(entrysize)
183 183 if len(entry) < entrysize:
184 184 if entry:
185 185 raise error.Abort(_('bad phase-heads stream'))
186 186 break
187 187 phase, node = _fphasesentry.unpack(entry)
188 188 headsbyphase[phase].append(node)
189 189 return headsbyphase
190 190
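# Illustrative round-trip sketch (hypothetical nodes; each entry is one
# _fphasesentry record, '>i20s': a big-endian int phase + a 20-byte node):
#
#   >>> import io
#   >>> mapping = [[b'\x11' * 20], [], [b'\x22' * 20]]  # public/draft/secret
#   >>> binarydecode(io.BytesIO(binaryencode(mapping))) == mapping
#   True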
191 191 def _trackphasechange(data, rev, old, new):
192 192 """add a phase move the <data> dictionnary
193 193
194 194 If data is None, nothing happens.
195 195 """
196 196 if data is None:
197 197 return
198 198 existing = data.get(rev)
199 199 if existing is not None:
200 200 old = existing[0]
201 201 data[rev] = (old, new)
202 202
203 203 class phasecache(object):
204 204 def __init__(self, repo, phasedefaults, _load=True):
205 205 if _load:
206 206 # Cheap trick to allow shallow-copy without copy module
207 207 self.phaseroots, self.dirty = _readroots(repo, phasedefaults)
208 208 self._loadedrevslen = 0
209 209 self._phasesets = None
210 210 self.filterunknown(repo)
211 211 self.opener = repo.svfs
212 212
213 213 def getrevset(self, repo, phases, subset=None):
214 214 """return a smartset for the given phases"""
215 215 self.loadphaserevs(repo) # ensure phase's sets are loaded
216 216 phases = set(phases)
217 217 if public not in phases:
218 218 # fast path: _phasesets contains the interesting sets,
219 219 # might only need a union and post-filtering.
220 220 if len(phases) == 1:
221 221 [p] = phases
222 222 revs = self._phasesets[p]
223 223 else:
224 224 revs = set.union(*[self._phasesets[p] for p in phases])
225 225 if repo.changelog.filteredrevs:
226 226 revs = revs - repo.changelog.filteredrevs
227 227 if subset is None:
228 228 return smartset.baseset(revs)
229 229 else:
230 230 return subset & smartset.baseset(revs)
231 231 else:
232 232 phases = set(allphases).difference(phases)
233 233 if not phases:
234 234 return smartset.fullreposet(repo)
235 235 if len(phases) == 1:
236 236 [p] = phases
237 237 revs = self._phasesets[p]
238 238 else:
239 239 revs = set.union(*[self._phasesets[p] for p in phases])
240 240 if subset is None:
241 241 subset = smartset.fullreposet(repo)
242 242 if not revs:
243 243 return subset
244 244 return subset.filter(lambda r: r not in revs)
245 245
246 246 def copy(self):
247 247 # Shallow copy meant to ensure isolation in
248 248 # advance/retractboundary(), nothing more.
249 249 ph = self.__class__(None, None, _load=False)
250 250 ph.phaseroots = self.phaseroots[:]
251 251 ph.dirty = self.dirty
252 252 ph.opener = self.opener
253 253 ph._loadedrevslen = self._loadedrevslen
254 254 ph._phasesets = self._phasesets
255 255 return ph
256 256
257 257 def replace(self, phcache):
258 258 """replace all values in 'self' with content of phcache"""
259 259 for a in ('phaseroots', 'dirty', 'opener', '_loadedrevslen',
260 260 '_phasesets'):
261 261 setattr(self, a, getattr(phcache, a))
262 262
263 263 def _getphaserevsnative(self, repo):
264 264 repo = repo.unfiltered()
265 265 nativeroots = []
266 266 for phase in trackedphases:
267 267 nativeroots.append(pycompat.maplist(repo.changelog.rev,
268 268 self.phaseroots[phase]))
269 269 return repo.changelog.computephases(nativeroots)
270 270
271 271 def _computephaserevspure(self, repo):
272 272 repo = repo.unfiltered()
273 273 cl = repo.changelog
274 274 self._phasesets = [set() for phase in allphases]
275 275 roots = pycompat.maplist(cl.rev, self.phaseroots[secret])
276 276 if roots:
277 277 ps = set(cl.descendants(roots))
278 278 for root in roots:
279 279 ps.add(root)
280 280 self._phasesets[secret] = ps
281 281 roots = pycompat.maplist(cl.rev, self.phaseroots[draft])
282 282 if roots:
283 283 ps = set(cl.descendants(roots))
284 284 for root in roots:
285 285 ps.add(root)
286 286 ps.difference_update(self._phasesets[secret])
287 287 self._phasesets[draft] = ps
288 288 self._loadedrevslen = len(cl)
289 289
290 290 def loadphaserevs(self, repo):
291 291 """ensure phase information is loaded in the object"""
292 292 if self._phasesets is None:
293 293 try:
294 294 res = self._getphaserevsnative(repo)
295 295 self._loadedrevslen, self._phasesets = res
296 296 except AttributeError:
297 297 self._computephaserevspure(repo)
298 298
299 299 def invalidate(self):
300 300 self._loadedrevslen = 0
301 301 self._phasesets = None
302 302
303 303 def phase(self, repo, rev):
304 304 # We need a repo argument here to be able to build _phasesets
305 305 # if necessary. The repository instance is not stored in
306 306 # phasecache to avoid reference cycles. The changelog instance
307 307 # is not stored because it is a filecache() property and can
308 308 # be replaced without us being notified.
309 309 if rev == nullrev:
310 310 return public
311 311 if rev < nullrev:
312 312 raise ValueError(_('cannot lookup negative revision'))
313 313 if rev >= self._loadedrevslen:
314 314 self.invalidate()
315 315 self.loadphaserevs(repo)
316 316 for phase in trackedphases:
317 317 if rev in self._phasesets[phase]:
318 318 return phase
319 319 return public
320 320
321 321 def write(self):
322 322 if not self.dirty:
323 323 return
324 324 f = self.opener('phaseroots', 'w', atomictemp=True, checkambig=True)
325 325 try:
326 326 self._write(f)
327 327 finally:
328 328 f.close()
329 329
330 330 def _write(self, fp):
331 331 for phase, roots in enumerate(self.phaseroots):
332 332 for h in sorted(roots):
333 333 fp.write('%i %s\n' % (phase, hex(h)))
334 334 self.dirty = False
335 335
336 336 def _updateroots(self, phase, newroots, tr):
337 337 self.phaseroots[phase] = newroots
338 338 self.invalidate()
339 339 self.dirty = True
340 340
341 341 tr.addfilegenerator('phase', ('phaseroots',), self._write)
342 342 tr.hookargs['phases_moved'] = '1'
343 343
344 344 def registernew(self, repo, tr, targetphase, nodes):
345 345 repo = repo.unfiltered()
346 346 self._retractboundary(repo, tr, targetphase, nodes)
347 347 if tr is not None and 'phases' in tr.changes:
348 348 phasetracking = tr.changes['phases']
349 349 torev = repo.changelog.rev
350 350 phase = self.phase
351 351 for n in nodes:
352 352 rev = torev(n)
353 353 revphase = phase(repo, rev)
354 354 _trackphasechange(phasetracking, rev, None, revphase)
355 355 repo.invalidatevolatilesets()
356 356
357 357 def advanceboundary(self, repo, tr, targetphase, nodes, dryrun=None):
358 358 """Set all 'nodes' to phase 'targetphase'
359 359
360 360 Nodes with a phase lower than 'targetphase' are not affected.
361 361
362 362 If dryrun is True, no actions will be performed
363 363
364 364 Returns a set of revs whose phase is changed or should be changed
365 365 """
366 366 # Be careful to preserve shallow-copied values: do not update
367 367 # phaseroots values, replace them.
368 368 if tr is None:
369 369 phasetracking = None
370 370 else:
371 371 phasetracking = tr.changes.get('phases')
372 372
373 373 repo = repo.unfiltered()
374 374
375 375 changes = set() # set of revisions to be changed
376 376         delroots = [] # list of roots deleted by this pass
377 377 for phase in pycompat.xrange(targetphase + 1, len(allphases)):
378 378 # filter nodes that are not in a compatible phase already
379 379 nodes = [n for n in nodes
380 380 if self.phase(repo, repo[n].rev()) >= phase]
381 381 if not nodes:
382 382 break # no roots to move anymore
383 383
384 384 olds = self.phaseroots[phase]
385 385
386 386 affected = repo.revs('%ln::%ln', olds, nodes)
387 387 changes.update(affected)
388 388 if dryrun:
389 389 continue
390 390 for r in affected:
391 391 _trackphasechange(phasetracking, r, self.phase(repo, r),
392 392 targetphase)
393 393
394 394 roots = set(ctx.node() for ctx in repo.set(
395 395 'roots((%ln::) - %ld)', olds, affected))
396 396 if olds != roots:
397 397 self._updateroots(phase, roots, tr)
398 398 # some roots may need to be declared for lower phases
399 399 delroots.extend(olds - roots)
400 400 if not dryrun:
401 401 # declare deleted root in the target phase
402 402 if targetphase != 0:
403 403 self._retractboundary(repo, tr, targetphase, delroots)
404 404 repo.invalidatevolatilesets()
405 405 return changes
406 406
407 407 def retractboundary(self, repo, tr, targetphase, nodes):
408 408 oldroots = self.phaseroots[:targetphase + 1]
409 409 if tr is None:
410 410 phasetracking = None
411 411 else:
412 412 phasetracking = tr.changes.get('phases')
413 413 repo = repo.unfiltered()
414 414 if (self._retractboundary(repo, tr, targetphase, nodes)
415 415 and phasetracking is not None):
416 416
417 417 # find the affected revisions
418 418 new = self.phaseroots[targetphase]
419 419 old = oldroots[targetphase]
420 420 affected = set(repo.revs('(%ln::) - (%ln::)', new, old))
421 421
422 422 # find the phase of the affected revision
423 423 for phase in pycompat.xrange(targetphase, -1, -1):
424 424 if phase:
425 425 roots = oldroots[phase]
426 426 revs = set(repo.revs('%ln::%ld', roots, affected))
427 427 affected -= revs
428 428 else: # public phase
429 429 revs = affected
430 430 for r in revs:
431 431 _trackphasechange(phasetracking, r, phase, targetphase)
432 432 repo.invalidatevolatilesets()
433 433
434 434 def _retractboundary(self, repo, tr, targetphase, nodes):
435 435 # Be careful to preserve shallow-copied values: do not update
436 436 # phaseroots values, replace them.
437 437
438 438 repo = repo.unfiltered()
439 439 currentroots = self.phaseroots[targetphase]
440 440 finalroots = oldroots = set(currentroots)
441 441 newroots = [n for n in nodes
442 442 if self.phase(repo, repo[n].rev()) < targetphase]
443 443 if newroots:
444 444
445 445 if nullid in newroots:
446 446 raise error.Abort(_('cannot change null revision phase'))
447 447 currentroots = currentroots.copy()
448 448 currentroots.update(newroots)
449 449
450 450 # Only compute new roots for revs above the roots that are being
451 451 # retracted.
452 452 minnewroot = min(repo[n].rev() for n in newroots)
453 453 aboveroots = [n for n in currentroots
454 454 if repo[n].rev() >= minnewroot]
455 455 updatedroots = repo.set('roots(%ln::)', aboveroots)
456 456
457 457 finalroots = set(n for n in currentroots if repo[n].rev() <
458 458 minnewroot)
459 459 finalroots.update(ctx.node() for ctx in updatedroots)
460 460 if finalroots != oldroots:
461 461 self._updateroots(targetphase, finalroots, tr)
462 462 return True
463 463 return False
464 464
465 465 def filterunknown(self, repo):
466 466 """remove unknown nodes from the phase boundary
467 467
468 468 Nothing is lost as unknown nodes only hold data for their descendants.
469 469 """
470 470 filtered = False
471 471 nodemap = repo.changelog.nodemap # to filter unknown nodes
472 472 for phase, nodes in enumerate(self.phaseroots):
473 473 missing = sorted(node for node in nodes if node not in nodemap)
474 474 if missing:
475 475 for mnode in missing:
476 476 repo.ui.debug(
477 477 'removing unknown node %s from %i-phase boundary\n'
478 478 % (short(mnode), phase))
479 479 nodes.symmetric_difference_update(missing)
480 480 filtered = True
481 481 if filtered:
482 482 self.dirty = True
483 483         # filterunknown is called by repo.destroyed; we may have no changes in
484 484         # roots but the _phasesets contents are certainly invalid (or at least we
485 485         # have no proper way to check that). Related to issue 3858.
486 486         #
487 487         # The other caller is __init__, which has no _phasesets initialized
488 488         # anyway. If this changes we should consider adding a dedicated
489 489 # "destroyed" function to phasecache or a proper cache key mechanism
490 490 # (see branchmap one)
491 491 self.invalidate()
492 492
493 493 def advanceboundary(repo, tr, targetphase, nodes, dryrun=None):
494 494 """Add nodes to a phase changing other nodes phases if necessary.
495 495
496 496 This function move boundary *forward* this means that all nodes
497 497 are set in the target phase or kept in a *lower* phase.
498 498
499 499 Simplify boundary to contains phase roots only.
500 500
501 501 If dryrun is True, no actions will be performed
502 502
503 503 Returns a set of revs whose phase is changed or should be changed
504 504 """
505 505 phcache = repo._phasecache.copy()
506 506 changes = phcache.advanceboundary(repo, tr, targetphase, nodes,
507 507 dryrun=dryrun)
508 508 if not dryrun:
509 509 repo._phasecache.replace(phcache)
510 510 return changes
511 511
512 512 def retractboundary(repo, tr, targetphase, nodes):
513 513 """Set nodes back to a phase changing other nodes phases if
514 514 necessary.
515 515
516 516 This function move boundary *backward* this means that all nodes
517 517 are set in the target phase or kept in a *higher* phase.
518 518
519 519 Simplify boundary to contains phase roots only."""
520 520 phcache = repo._phasecache.copy()
521 521 phcache.retractboundary(repo, tr, targetphase, nodes)
522 522 repo._phasecache.replace(phcache)
523 523
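# Illustrative sketch (hypothetical nodes node1/node2): publishing moves the
# boundary forward, making a draft changeset public, while retracting moves
# it backward, e.g. turning a draft changeset secret:
#
#   with repo.transaction('phase-demo') as tr:
#       advanceboundary(repo, tr, public, [node1])   # node1 becomes public
#       retractboundary(repo, tr, secret, [node2])   # node2 becomes secret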
524 524 def registernew(repo, tr, targetphase, nodes):
525 525 """register a new revision and its phase
526 526
527 527 Code adding revisions to the repository should use this function to
528 528     set new changesets in their target phase (or higher).
529 529 """
530 530 phcache = repo._phasecache.copy()
531 531 phcache.registernew(repo, tr, targetphase, nodes)
532 532 repo._phasecache.replace(phcache)
533 533
534 534 def listphases(repo):
535 535 """List phases root for serialization over pushkey"""
536 536 # Use ordered dictionary so behavior is deterministic.
537 537 keys = util.sortdict()
538 538 value = '%i' % draft
539 539 cl = repo.unfiltered().changelog
540 540 for root in repo._phasecache.phaseroots[draft]:
541 541 if repo._phasecache.phase(repo, cl.rev(root)) <= draft:
542 542 keys[hex(root)] = value
543 543
544 544 if repo.publishing():
545 545         # Add an extra data entry to let the remote know we are a publishing
546 546         # repo. Publishing repos can't just pretend they are old repos.
547 547         # When pushing to a publishing repo, the client still needs to
548 548         # push the phase boundary.
549 549         #
550 550         # A push does not only push changesets. It also pushes phase data.
551 551         # New phase data may apply to common changesets which won't be
552 552         # pushed (as they are common). Here is a very simple example:
553 553         #
554 554         # 1) repo A pushes changeset X as draft to repo B
555 555         # 2) repo B makes changeset X public
556 556         # 3) repo B pushes to repo A. X is not pushed but the data that
557 557         #    X is now public should be
558 558         #
559 559         # The server can't handle it on its own as it has no idea of the
560 560         # client's phase data.
561 561 keys['publishing'] = 'True'
562 562 return keys
563 563
564 564 def pushphase(repo, nhex, oldphasestr, newphasestr):
565 565 """List phases root for serialization over pushkey"""
566 566 repo = repo.unfiltered()
567 567 with repo.lock():
568 568 currentphase = repo[nhex].phase()
569 569 newphase = abs(int(newphasestr)) # let's avoid negative index surprise
570 570 oldphase = abs(int(oldphasestr)) # let's avoid negative index surprise
571 571 if currentphase == oldphase and newphase < oldphase:
572 572 with repo.transaction('pushkey-phase') as tr:
573 573 advanceboundary(repo, tr, newphase, [bin(nhex)])
574 574 return True
575 575 elif currentphase == newphase:
576 576 # raced, but got correct result
577 577 return True
578 578 else:
579 579 return False
580 580
581 581 def subsetphaseheads(repo, subset):
582 582 """Finds the phase heads for a subset of a history
583 583
584 584 Returns a list indexed by phase number where each item is a list of phase
585 585 head nodes.
586 586 """
587 587 cl = repo.changelog
588 588
589 589 headsbyphase = [[] for i in allphases]
590 590 # No need to keep track of secret phase; any heads in the subset that
591 591 # are not mentioned are implicitly secret.
592 592 for phase in allphases[:-1]:
593 593 revset = "heads(%%ln & %s())" % phasenames[phase]
594 594 headsbyphase[phase] = [cl.node(r) for r in repo.revs(revset, subset)]
595 595 return headsbyphase
596 596
597 597 def updatephases(repo, trgetter, headsbyphase):
598 598 """Updates the repo with the given phase heads"""
599 599 # Now advance phase boundaries of all but secret phase
600 600 #
601 601 # run the update (and fetch transaction) only if there are actually things
602 602     # to update. This avoids creating empty transactions during no-op operations.
603 603
604 604 for phase in allphases[:-1]:
605 605 revset = '%%ln - %s()' % phasenames[phase]
606 606 heads = [c.node() for c in repo.set(revset, headsbyphase[phase])]
607 607 if heads:
608 608 advanceboundary(repo, trgetter(), phase, heads)
609 609
610 610 def analyzeremotephases(repo, subset, roots):
611 611 """Compute phases heads and root in a subset of node from root dict
612 612
613 613 * subset is heads of the subset
614 614 * roots is {<nodeid> => phase} mapping. key and value are string.
615 615
616 616 Accept unknown element input
617 617 """
618 618 repo = repo.unfiltered()
619 619 # build list from dictionary
620 620 draftroots = []
621 621 nodemap = repo.changelog.nodemap # to filter unknown nodes
622 622 for nhex, phase in roots.iteritems():
623 623 if nhex == 'publishing': # ignore data related to publish option
624 624 continue
625 625 node = bin(nhex)
626 626 phase = int(phase)
627 627 if phase == public:
628 628 if node != nullid:
629 629 repo.ui.warn(_('ignoring inconsistent public root'
630 630 ' from remote: %s\n') % nhex)
631 631 elif phase == draft:
632 632 if node in nodemap:
633 633 draftroots.append(node)
634 634 else:
635 635 repo.ui.warn(_('ignoring unexpected root from remote: %i %s\n')
636 636 % (phase, nhex))
637 637 # compute heads
638 638 publicheads = newheads(repo, subset, draftroots)
639 639 return publicheads, draftroots
640 640
641 641 class remotephasessummary(object):
642 642 """summarize phase information on the remote side
643 643
644 644     :publishing: True if the remote is publishing
645 645 :publicheads: list of remote public phase heads (nodes)
646 646 :draftheads: list of remote draft phase heads (nodes)
647 647     :draftroots: list of remote draft phase roots (nodes)
648 648 """
649 649
650 650 def __init__(self, repo, remotesubset, remoteroots):
651 651 unfi = repo.unfiltered()
652 652 self._allremoteroots = remoteroots
653 653
654 654 self.publishing = remoteroots.get('publishing', False)
655 655
656 656 ana = analyzeremotephases(repo, remotesubset, remoteroots)
657 657 self.publicheads, self.draftroots = ana
658 658 # Get the list of all "heads" revs draft on remote
659 659 dheads = unfi.set('heads(%ln::%ln)', self.draftroots, remotesubset)
660 660 self.draftheads = [c.node() for c in dheads]
661 661
662 662 def newheads(repo, heads, roots):
663 663 """compute new head of a subset minus another
664 664
665 665     * `heads`: defines the first subset
666 666     * `roots`: defines the second subset, which we subtract from the first"""
667 # prevent an import cycle
668 # phases > dagop > patch > copies > scmutil > obsolete > obsutil > phases
669 from . import dagop
670
667 671 repo = repo.unfiltered()
668 revs = repo.revs('heads(::%ln - (%ln::%ln))', heads, roots, heads)
669 return pycompat.maplist(repo.changelog.node, revs)
672 cl = repo.changelog
673 rev = cl.nodemap.get
674 if not roots:
675 return heads
676 if not heads or heads == [nullrev]:
677 return []
678     # The logic operates on revisions; convert the arguments early for convenience
679 new_heads = set(rev(n) for n in heads if n != nullid)
680 roots = [rev(n) for n in roots]
681 if not heads or not roots:
682 return heads
683 # compute the area we need to remove
684 affected_zone = repo.revs("(%ld::%ld)", roots, new_heads)
685 # heads in the area are no longer heads
686 new_heads.difference_update(affected_zone)
687     # revisions in the area have children outside of it;
688     # they might be new heads
689 candidates = repo.revs("parents(%ld + (%ld and merge())) and not null",
690 roots, affected_zone)
691 candidates -= affected_zone
692 if new_heads or candidates:
693         # remove candidates that are ancestors of other heads
694 new_heads.update(candidates)
695 prunestart = repo.revs("parents(%ld) and not null", new_heads)
696 pruned = dagop.reachableroots(repo, candidates, prunestart)
697 new_heads.difference_update(pruned)
698
699 return pycompat.maplist(cl.node, sorted(new_heads))
670 700
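# Worked example for newheads() (illustrative linear repo 0-1-2-3):
# with heads=[node(3)] and roots=[node(2)], the affected zone is {2, 3};
# rev 3 stops being a head, rev 1 (a parent of the zone) becomes a
# candidate and survives pruning, so the result is [node(1)].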
671 701 def newcommitphase(ui):
672 702 """helper to get the target phase of new commit
673 703
674 704 Handle all possible values for the phases.new-commit options.
675 705
676 706 """
677 707 v = ui.config('phases', 'new-commit')
678 708 try:
679 709 return phasenames.index(v)
680 710 except ValueError:
681 711 try:
682 712 return int(v)
683 713 except ValueError:
684 714 msg = _("phases.new-commit: not a valid phase name ('%s')")
685 715 raise error.ConfigError(msg % v)
686 716
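# Illustrative sketch: with 'phases.new-commit=secret' (or the numeric
# form '2') in the configuration, newcommitphase(ui) returns 2, so newly
# created commits start out in the secret phase.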
687 717 def hassecret(repo):
688 718 """utility function that check if a repo have any secret changeset."""
689 719 return bool(repo._phasecache.phaseroots[2])
690 720
691 721 def preparehookargs(node, old, new):
692 722 if old is None:
693 723 old = ''
694 724 else:
695 725 old = phasenames[old]
696 726 return {'node': node,
697 727 'oldphase': old,
698 728 'phase': phasenames[new]}
@@ -1,3008 +1,3020 b''
1 1 # revlog.py - storage back-end for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Storage back-end for Mercurial.
9 9
10 10 This provides efficient delta storage with O(1) retrieve and append
11 11 and O(changes) merge between branches.
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import collections
17 17 import contextlib
18 18 import errno
19 19 import hashlib
20 20 import heapq
21 21 import os
22 22 import re
23 23 import struct
24 24 import zlib
25 25
26 26 # import stuff from node for others to import from revlog
27 27 from .node import (
28 28 bin,
29 29 hex,
30 30 nullid,
31 31 nullrev,
32 32 wdirfilenodeids,
33 33 wdirhex,
34 34 wdirid,
35 35 wdirrev,
36 36 )
37 37 from .i18n import _
38 38 from .thirdparty import (
39 39 attr,
40 40 )
41 41 from . import (
42 42 ancestor,
43 43 error,
44 44 mdiff,
45 45 policy,
46 46 pycompat,
47 47 templatefilters,
48 48 util,
49 49 )
50 50 from .utils import (
51 51 stringutil,
52 52 )
53 53
54 54 parsers = policy.importmod(r'parsers')
55 55
56 56 # Aliased for performance.
57 57 _zlibdecompress = zlib.decompress
58 58
59 59 # revlog header flags
60 60 REVLOGV0 = 0
61 61 REVLOGV1 = 1
62 62 # Dummy value until file format is finalized.
63 63 # Reminder: change the bounds check in revlog.__init__ when this is changed.
64 64 REVLOGV2 = 0xDEAD
65 65 FLAG_INLINE_DATA = (1 << 16)
66 66 FLAG_GENERALDELTA = (1 << 17)
67 67 REVLOG_DEFAULT_FLAGS = FLAG_INLINE_DATA
68 68 REVLOG_DEFAULT_FORMAT = REVLOGV1
69 69 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
70 70 REVLOGV1_FLAGS = FLAG_INLINE_DATA | FLAG_GENERALDELTA
71 71 REVLOGV2_FLAGS = REVLOGV1_FLAGS
72 72
73 73 # revlog index flags
74 74 REVIDX_ISCENSORED = (1 << 15) # revision has censor metadata, must be verified
75 75 REVIDX_ELLIPSIS = (1 << 14) # revision hash does not match data (narrowhg)
76 76 REVIDX_EXTSTORED = (1 << 13) # revision data is stored externally
77 77 REVIDX_DEFAULT_FLAGS = 0
78 78 # stable order in which flags need to be processed and their processors applied
79 79 REVIDX_FLAGS_ORDER = [
80 80 REVIDX_ISCENSORED,
81 81 REVIDX_ELLIPSIS,
82 82 REVIDX_EXTSTORED,
83 83 ]
84 84 REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER)
85 85 # bitmask for flags that could cause rawdata content change
86 86 REVIDX_RAWTEXT_CHANGING_FLAGS = REVIDX_ISCENSORED | REVIDX_EXTSTORED
87 87
88 88 # max size of revlog with inline data
89 89 _maxinline = 131072
90 90 _chunksize = 1048576
91 91
92 92 RevlogError = error.RevlogError
93 93 LookupError = error.LookupError
94 94 AmbiguousPrefixLookupError = error.AmbiguousPrefixLookupError
95 95 CensoredNodeError = error.CensoredNodeError
96 96 ProgrammingError = error.ProgrammingError
97 97
98 98 # Store flag processors (cf. 'addflagprocessor()' to register)
99 99 _flagprocessors = {
100 100 REVIDX_ISCENSORED: None,
101 101 }
102 102
103 103 _mdre = re.compile('\1\n')
104 104 def parsemeta(text):
105 105 """return (metadatadict, metadatasize)"""
106 106 # text can be buffer, so we can't use .startswith or .index
107 107 if text[:2] != '\1\n':
108 108 return None, None
109 109 s = _mdre.search(text, 2).start()
110 110 mtext = text[2:s]
111 111 meta = {}
112 112 for l in mtext.splitlines():
113 113 k, v = l.split(": ", 1)
114 114 meta[k] = v
115 115 return meta, (s + 2)
116 116
117 117 def packmeta(meta, text):
118 118 keys = sorted(meta)
119 119 metatext = "".join("%s: %s\n" % (k, meta[k]) for k in keys)
120 120 return "\1\n%s\1\n%s" % (metatext, text)
121 121
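# Illustrative round-trip sketch (hypothetical metadata):
#
#   >>> packmeta({'copy': 'a'}, 'data')
#   '\x01\ncopy: a\n\x01\ndata'
#   >>> parsemeta('\x01\ncopy: a\n\x01\ndata')
#   ({'copy': 'a'}, 12)
#
# i.e. the metadata dict plus the offset (12) where the payload starts.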
122 122 def _censoredtext(text):
123 123 m, offs = parsemeta(text)
124 124 return m and "censored" in m
125 125
126 126 def addflagprocessor(flag, processor):
127 127 """Register a flag processor on a revision data flag.
128 128
129 129 Invariant:
130 130 - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER,
131 131 and REVIDX_RAWTEXT_CHANGING_FLAGS if they can alter rawtext.
132 132 - Only one flag processor can be registered on a specific flag.
133 133 - flagprocessors must be 3-tuples of functions (read, write, raw) with the
134 134 following signatures:
135 135 - (read) f(self, rawtext) -> text, bool
136 136 - (write) f(self, text) -> rawtext, bool
137 137 - (raw) f(self, rawtext) -> bool
138 138 "text" is presented to the user. "rawtext" is stored in revlog data, not
139 139 directly visible to the user.
140 140 The boolean returned by these transforms is used to determine whether
141 141 the returned text can be used for hash integrity checking. For example,
142 142 if "write" returns False, then "text" is used to generate hash. If
143 143 "write" returns True, that basically means "rawtext" returned by "write"
144 144 should be used to generate hash. Usually, "write" and "read" return
145 145 different booleans. And "raw" returns a same boolean as "write".
146 146
147 147 Note: The 'raw' transform is used for changegroup generation and in some
148 148 debug commands. In this case the transform only indicates whether the
149 149 contents can be used for hash integrity checks.
150 150 """
151 151 if not flag & REVIDX_KNOWN_FLAGS:
152 152 msg = _("cannot register processor on unknown flag '%#x'.") % (flag)
153 153 raise ProgrammingError(msg)
154 154 if flag not in REVIDX_FLAGS_ORDER:
155 155 msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag)
156 156 raise ProgrammingError(msg)
157 157 if flag in _flagprocessors:
158 158 msg = _("cannot register multiple processors on flag '%#x'.") % (flag)
159 159 raise error.Abort(msg)
160 160 _flagprocessors[flag] = processor
161 161
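# Illustrative registration sketch (no-op transforms on REVIDX_EXTSTORED;
# each boolean says whether that transform's output is safe for hash
# verification, per the invariants documented above):
#
#   def _readext(self, rawtext):
#       return rawtext, True      # text presented to the user
#   def _writeext(self, text):
#       return text, True         # rawtext to store in the revlog
#   def _rawext(self, rawtext):
#       return True               # rawtext usable for hash checks
#   addflagprocessor(REVIDX_EXTSTORED, (_readext, _writeext, _rawext))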
162 162 def getoffset(q):
163 163 return int(q >> 16)
164 164
165 165 def gettype(q):
166 166 return int(q & 0xFFFF)
167 167
168 168 def offset_type(offset, type):
169 169 if (type & ~REVIDX_KNOWN_FLAGS) != 0:
170 170 raise ValueError('unknown revlog index flags')
171 171 return int(int(offset) << 16 | type)
172 172
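# Illustrative sketch of the packing above: offset_type(1024, REVIDX_ISCENSORED)
# stores the offset in the high bits and the flags in the low 16 bits, so
# getoffset() recovers 1024 and gettype() recovers REVIDX_ISCENSORED.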
173 173 _nullhash = hashlib.sha1(nullid)
174 174
175 175 def hash(text, p1, p2):
176 176 """generate a hash from the given text and its parent hashes
177 177
178 178 This hash combines both the current file contents and its history
179 179 in a manner that makes it easy to distinguish nodes with the same
180 180 content in the revision graph.
181 181 """
182 182     # As of now, if one of the parent nodes is null, p2 is null
183 183 if p2 == nullid:
184 184 # deep copy of a hash is faster than creating one
185 185 s = _nullhash.copy()
186 186 s.update(p1)
187 187 else:
188 188 # none of the parent nodes are nullid
189 189 if p1 < p2:
190 190 a = p1
191 191 b = p2
192 192 else:
193 193 a = p2
194 194 b = p1
195 195 s = hashlib.sha1(a)
196 196 s.update(b)
197 197 s.update(text)
198 198 return s.digest()
199 199
200 200 class _testrevlog(object):
201 201 """minimalist fake revlog to use in doctests"""
202 202
203 203 def __init__(self, data, density=0.5, mingap=0):
204 204 """data is an list of revision payload boundaries"""
205 205 self._data = data
206 206 self._srdensitythreshold = density
207 207 self._srmingapsize = mingap
208 208
209 209 def start(self, rev):
210 210 if rev == 0:
211 211 return 0
212 212 return self._data[rev - 1]
213 213
214 214 def end(self, rev):
215 215 return self._data[rev]
216 216
217 217 def length(self, rev):
218 218 return self.end(rev) - self.start(rev)
219 219
220 220 def __len__(self):
221 221 return len(self._data)
222 222
223 223 def _trimchunk(revlog, revs, startidx, endidx=None):
224 224 """returns revs[startidx:endidx] without empty trailing revs
225 225
226 226 Doctest Setup
227 227 >>> revlog = _testrevlog([
228 228 ... 5, #0
229 229 ... 10, #1
230 230 ... 12, #2
231 231 ... 12, #3 (empty)
232 232 ... 17, #4
233 233 ... 21, #5
234 234 ... 21, #6 (empty)
235 235 ... ])
236 236
237 237 Contiguous cases:
238 238 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0)
239 239 [0, 1, 2, 3, 4, 5]
240 240 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 5)
241 241 [0, 1, 2, 3, 4]
242 242 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 0, 4)
243 243 [0, 1, 2]
244 244 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 2, 4)
245 245 [2]
246 246 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3)
247 247 [3, 4, 5]
248 248 >>> _trimchunk(revlog, [0, 1, 2, 3, 4, 5, 6], 3, 5)
249 249 [3, 4]
250 250
251 251 Discontiguous cases:
252 252 >>> _trimchunk(revlog, [1, 3, 5, 6], 0)
253 253 [1, 3, 5]
254 254 >>> _trimchunk(revlog, [1, 3, 5, 6], 0, 2)
255 255 [1]
256 256 >>> _trimchunk(revlog, [1, 3, 5, 6], 1, 3)
257 257 [3, 5]
258 258 >>> _trimchunk(revlog, [1, 3, 5, 6], 1)
259 259 [3, 5]
260 260 """
261 261 length = revlog.length
262 262
263 263 if endidx is None:
264 264 endidx = len(revs)
265 265
266 # Trim empty revs at the end, but never the very first revision of a chain
267 while endidx > 1 and endidx > startidx and length(revs[endidx - 1]) == 0:
266     # If we have a non-empty delta candidate, there is nothing to trim
267 if revs[endidx - 1] < len(revlog):
268 # Trim empty revs at the end, except the very first revision of a chain
269 while (endidx > 1
270 and endidx > startidx
271 and length(revs[endidx - 1]) == 0):
268 272 endidx -= 1
269 273
270 274 return revs[startidx:endidx]
271 275
272 def _segmentspan(revlog, revs):
276 def _segmentspan(revlog, revs, deltainfo=None):
273 277 """Get the byte span of a segment of revisions
274 278
275 279 revs is a sorted array of revision numbers
276 280
277 281 >>> revlog = _testrevlog([
278 282 ... 5, #0
279 283 ... 10, #1
280 284 ... 12, #2
281 285 ... 12, #3 (empty)
282 286 ... 17, #4
283 287 ... ])
284 288
285 289 >>> _segmentspan(revlog, [0, 1, 2, 3, 4])
286 290 17
287 291 >>> _segmentspan(revlog, [0, 4])
288 292 17
289 293 >>> _segmentspan(revlog, [3, 4])
290 294 5
291 295 >>> _segmentspan(revlog, [1, 2, 3,])
292 296 7
293 297 >>> _segmentspan(revlog, [1, 3])
294 298 7
295 299 """
296 300 if not revs:
297 301 return 0
298 return revlog.end(revs[-1]) - revlog.start(revs[0])
302 if deltainfo is not None and len(revlog) <= revs[-1]:
303 if len(revs) == 1:
304 return deltainfo.deltalen
305 offset = revlog.end(len(revlog) - 1)
306 end = deltainfo.deltalen + offset
307 else:
308 end = revlog.end(revs[-1])
309 return end - revlog.start(revs[0])
299 310
300 311 def _slicechunk(revlog, revs, deltainfo=None, targetsize=None):
301 312 """slice revs to reduce the amount of unrelated data to be read from disk.
302 313
303 314 ``revs`` is sliced into groups that should be read in one time.
304 315 Assume that revs are sorted.
305 316
306 317 The initial chunk is sliced until the overall density (payload/chunks-span
307 318 ratio) is above `revlog._srdensitythreshold`. No gap smaller than
308 319 `revlog._srmingapsize` is skipped.
309 320
310 321     If `targetsize` is set, no chunk larger than `targetsize` will be yielded.
311 322     For consistency with other slicing choices, this limit won't go lower than
312 323     `revlog._srmingapsize`.
313 324
314 325     If individual revision chunks are larger than this limit, they will still
315 326     be yielded individually.
316 327
317 328 >>> revlog = _testrevlog([
318 329 ... 5, #00 (5)
319 330 ... 10, #01 (5)
320 331 ... 12, #02 (2)
321 332 ... 12, #03 (empty)
322 333 ... 27, #04 (15)
323 334 ... 31, #05 (4)
324 335 ... 31, #06 (empty)
325 336 ... 42, #07 (11)
326 337 ... 47, #08 (5)
327 338 ... 47, #09 (empty)
328 339 ... 48, #10 (1)
329 340 ... 51, #11 (3)
330 341 ... 74, #12 (23)
331 342 ... 85, #13 (11)
332 343 ... 86, #14 (1)
333 344 ... 91, #15 (5)
334 345 ... ])
335 346
336 347 >>> list(_slicechunk(revlog, list(range(16))))
337 348 [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
338 349 >>> list(_slicechunk(revlog, [0, 15]))
339 350 [[0], [15]]
340 351 >>> list(_slicechunk(revlog, [0, 11, 15]))
341 352 [[0], [11], [15]]
342 353 >>> list(_slicechunk(revlog, [0, 11, 13, 15]))
343 354 [[0], [11, 13, 15]]
344 355 >>> list(_slicechunk(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
345 356 [[1, 2], [5, 8, 10, 11], [14]]
346 357
347 358 Slicing with a maximum chunk size
348 359 >>> list(_slicechunk(revlog, [0, 11, 13, 15], targetsize=15))
349 360 [[0], [11], [13], [15]]
350 361 >>> list(_slicechunk(revlog, [0, 11, 13, 15], targetsize=20))
351 362 [[0], [11], [13, 15]]
352 363 """
353 364 if targetsize is not None:
354 365 targetsize = max(targetsize, revlog._srmingapsize)
355 366 # targetsize should not be specified when evaluating delta candidates:
356 367 # * targetsize is used to ensure we stay within specification when reading,
357 368     # * deltainfo is used to pick a good delta chain when writing.
358 369 if not (deltainfo is None or targetsize is None):
359 370 msg = 'cannot use `targetsize` with a `deltainfo`'
360 371 raise error.ProgrammingError(msg)
361 372 for chunk in _slicechunktodensity(revlog, revs,
362 373 deltainfo,
363 374 revlog._srdensitythreshold,
364 375 revlog._srmingapsize):
365 376 for subchunk in _slicechunktosize(revlog, chunk, targetsize):
366 377 yield subchunk
367 378
368 379 def _slicechunktosize(revlog, revs, targetsize=None):
369 380 """slice revs to match the target size
370 381
371 382     This is intended to be used on chunks that density slicing selected but that
372 383     are still too large compared to the read guarantee of the revlog. This might
373 384     happen when the "minimal gap size" interrupted the slicing or when chains are
374 385     built in a way that creates large blocks next to each other.
375 386
376 387 >>> revlog = _testrevlog([
377 388 ... 3, #0 (3)
378 389 ... 5, #1 (2)
379 390 ... 6, #2 (1)
380 391 ... 8, #3 (2)
381 392 ... 8, #4 (empty)
382 393 ... 11, #5 (3)
383 394 ... 12, #6 (1)
384 395 ... 13, #7 (1)
385 396 ... 14, #8 (1)
386 397 ... ])
387 398
388 399 Cases where chunk is already small enough
389 400 >>> list(_slicechunktosize(revlog, [0], 3))
390 401 [[0]]
391 402 >>> list(_slicechunktosize(revlog, [6, 7], 3))
392 403 [[6, 7]]
393 404 >>> list(_slicechunktosize(revlog, [0], None))
394 405 [[0]]
395 406 >>> list(_slicechunktosize(revlog, [6, 7], None))
396 407 [[6, 7]]
397 408
398 409 cases where we need actual slicing
399 410 >>> list(_slicechunktosize(revlog, [0, 1], 3))
400 411 [[0], [1]]
401 412 >>> list(_slicechunktosize(revlog, [1, 3], 3))
402 413 [[1], [3]]
403 414 >>> list(_slicechunktosize(revlog, [1, 2, 3], 3))
404 415 [[1, 2], [3]]
405 416 >>> list(_slicechunktosize(revlog, [3, 5], 3))
406 417 [[3], [5]]
407 418 >>> list(_slicechunktosize(revlog, [3, 4, 5], 3))
408 419 [[3], [5]]
409 420 >>> list(_slicechunktosize(revlog, [5, 6, 7, 8], 3))
410 421 [[5], [6, 7, 8]]
411 422 >>> list(_slicechunktosize(revlog, [0, 1, 2, 3, 4, 5, 6, 7, 8], 3))
412 423 [[0], [1, 2], [3], [5], [6, 7, 8]]
413 424
414 425 Case with too large individual chunk (must return valid chunk)
415 426 >>> list(_slicechunktosize(revlog, [0, 1], 2))
416 427 [[0], [1]]
417 428 >>> list(_slicechunktosize(revlog, [1, 3], 1))
418 429 [[1], [3]]
419 430 >>> list(_slicechunktosize(revlog, [3, 4, 5], 2))
420 431 [[3], [5]]
421 432 """
422 433 assert targetsize is None or 0 <= targetsize
423 434 if targetsize is None or _segmentspan(revlog, revs) <= targetsize:
424 435 yield revs
425 436 return
426 437
427 438 startrevidx = 0
428 439 startdata = revlog.start(revs[0])
429 440 endrevidx = 0
430 441 iterrevs = enumerate(revs)
431 442 next(iterrevs) # skip first rev.
432 443 for idx, r in iterrevs:
433 444 span = revlog.end(r) - startdata
434 445 if span <= targetsize:
435 446 endrevidx = idx
436 447 else:
437 448 chunk = _trimchunk(revlog, revs, startrevidx, endrevidx + 1)
438 449 if chunk:
439 450 yield chunk
440 451 startrevidx = idx
441 452 startdata = revlog.start(r)
442 453 endrevidx = idx
443 454 yield _trimchunk(revlog, revs, startrevidx)
444 455
445 456 def _slicechunktodensity(revlog, revs, deltainfo=None, targetdensity=0.5,
446 457 mingapsize=0):
447 458 """slice revs to reduce the amount of unrelated data to be read from disk.
448 459
449 460 ``revs`` is sliced into groups that should be read in one time.
450 461 Assume that revs are sorted.
451 462
452 463 ``deltainfo`` is a _deltainfo instance of a revision that we would append
453 464 to the top of the revlog.
454 465
455 466 The initial chunk is sliced until the overall density (payload/chunks-span
456 467 ratio) is above `targetdensity`. No gap smaller than `mingapsize` is
457 468 skipped.
458 469
459 470 >>> revlog = _testrevlog([
460 471 ... 5, #00 (5)
461 472 ... 10, #01 (5)
462 473 ... 12, #02 (2)
463 474 ... 12, #03 (empty)
464 475 ... 27, #04 (15)
465 476 ... 31, #05 (4)
466 477 ... 31, #06 (empty)
467 478 ... 42, #07 (11)
468 479 ... 47, #08 (5)
469 480 ... 47, #09 (empty)
470 481 ... 48, #10 (1)
471 482 ... 51, #11 (3)
472 483 ... 74, #12 (23)
473 484 ... 85, #13 (11)
474 485 ... 86, #14 (1)
475 486 ... 91, #15 (5)
476 487 ... ])
477 488
478 489 >>> list(_slicechunktodensity(revlog, list(range(16))))
479 490 [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
480 491 >>> list(_slicechunktodensity(revlog, [0, 15]))
481 492 [[0], [15]]
482 493 >>> list(_slicechunktodensity(revlog, [0, 11, 15]))
483 494 [[0], [11], [15]]
484 495 >>> list(_slicechunktodensity(revlog, [0, 11, 13, 15]))
485 496 [[0], [11, 13, 15]]
486 497 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14]))
487 498 [[1, 2], [5, 8, 10, 11], [14]]
488 499 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
489 500 ... mingapsize=20))
490 501 [[1, 2, 3, 5, 8, 10, 11], [14]]
491 502 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
492 503 ... targetdensity=0.95))
493 504 [[1, 2], [5], [8, 10, 11], [14]]
494 505 >>> list(_slicechunktodensity(revlog, [1, 2, 3, 5, 8, 10, 11, 14],
495 506 ... targetdensity=0.95, mingapsize=12))
496 507 [[1, 2], [5, 8, 10, 11], [14]]
497 508 """
498 509 start = revlog.start
499 510 length = revlog.length
500 511
501 512 if len(revs) <= 1:
502 513 yield revs
503 514 return
504 515
505 516 nextrev = len(revlog)
506 517 nextoffset = revlog.end(nextrev - 1)
507 518
508 519 if deltainfo is None:
509 520 deltachainspan = _segmentspan(revlog, revs)
510 521 chainpayload = sum(length(r) for r in revs)
511 522 else:
512 523 deltachainspan = deltainfo.distance
513 524 chainpayload = deltainfo.compresseddeltalen
514 525
515 526 if deltachainspan < mingapsize:
516 527 yield revs
517 528 return
518 529
519 530 readdata = deltachainspan
520 531
521 532 if deltachainspan:
522 533 density = chainpayload / float(deltachainspan)
523 534 else:
524 535 density = 1.0
525 536
526 537 if density >= targetdensity:
527 538 yield revs
528 539 return
529 540
530 if deltainfo is not None:
541 if deltainfo is not None and deltainfo.deltalen:
531 542 revs = list(revs)
532 543 revs.append(nextrev)
533 544
534 545 # Store the gaps in a heap to have them sorted by decreasing size
535 546 gapsheap = []
536 547 heapq.heapify(gapsheap)
537 548 prevend = None
538 549 for i, rev in enumerate(revs):
539 550 if rev < nextrev:
540 551 revstart = start(rev)
541 552 revlen = length(rev)
542 553 else:
543 554 revstart = nextoffset
544 555 revlen = deltainfo.deltalen
545 556
546 557 # Skip empty revisions to form larger holes
547 558 if revlen == 0:
548 559 continue
549 560
550 561 if prevend is not None:
551 562 gapsize = revstart - prevend
552 563 # only consider holes that are large enough
553 564 if gapsize > mingapsize:
554 565 heapq.heappush(gapsheap, (-gapsize, i))
555 566
556 567 prevend = revstart + revlen
557 568
558 569 # Collect the indices of the largest holes until the density is acceptable
559 570 indicesheap = []
560 571 heapq.heapify(indicesheap)
561 572 while gapsheap and density < targetdensity:
562 573 oppgapsize, gapidx = heapq.heappop(gapsheap)
563 574
564 575 heapq.heappush(indicesheap, gapidx)
565 576
566 577 # the gap sizes are stored as negatives so that the heap keeps them
567 578 # sorted in decreasing order
568 579 readdata -= (-oppgapsize)
569 580 if readdata > 0:
570 581 density = chainpayload / float(readdata)
571 582 else:
572 583 density = 1.0
573 584
574 585 # Cut the revs at collected indices
575 586 previdx = 0
576 587 while indicesheap:
577 588 idx = heapq.heappop(indicesheap)
578 589
579 590 chunk = _trimchunk(revlog, revs, previdx, idx)
580 591 if chunk:
581 592 yield chunk
582 593
583 594 previdx = idx
584 595
585 596 chunk = _trimchunk(revlog, revs, previdx)
586 597 if chunk:
587 598 yield chunk
588 599
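# A worked example of the density rule above (a sketch with made-up
# numbers, not values from a real revlog): a chain whose payload is 10
# bytes spread over a 40-byte span has density 10/40 = 0.25, below the
# default target of 0.5, so the largest qualifying gap is dropped first.
# If that gap is 25 bytes, the remaining read shrinks to 15 bytes and the
# density rises to 10/15, which passes:
#
# >>> payload, span, gap = 10, 40, 25
# >>> payload / float(span) < 0.5 < payload / float(span - gap)
# True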
589 600 @attr.s(slots=True, frozen=True)
590 601 class _deltainfo(object):
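    # Field meanings, as used by _deltacomputer._builddeltainfo below:
    # - distance: span in bytes from the start of the delta chain's base
    #   to the end of this delta once appended
    # - deltalen: compressed size of this delta alone (header + data)
    # - data: the (header, compressed payload) pair
    # - base / chainbase: the delta parent rev and its chain's base rev
    # - chainlen / compresseddeltalen: length and total compressed size of
    #   the chain after appending this delta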
591 602 distance = attr.ib()
592 603 deltalen = attr.ib()
593 604 data = attr.ib()
594 605 base = attr.ib()
595 606 chainbase = attr.ib()
596 607 chainlen = attr.ib()
597 608 compresseddeltalen = attr.ib()
598 609
599 610 class _deltacomputer(object):
600 611 def __init__(self, revlog):
601 612 self.revlog = revlog
602 613
603 614 def _getcandidaterevs(self, p1, p2, cachedelta):
604 615 """
605 616 Provides revisions that are of interest to be diffed against,
606 617 grouped by level of ease.
607 618 """
608 619 revlog = self.revlog
609 620 gdelta = revlog._generaldelta
610 621 curr = len(revlog)
611 622 prev = curr - 1
612 623 p1r, p2r = revlog.rev(p1), revlog.rev(p2)
613 624
614 625 # should we try to build a delta?
615 626 if prev != nullrev and revlog.storedeltachains:
616 627 tested = set()
617 628 # This condition is true most of the time when processing
618 629 # changegroup data into a generaldelta repo. The only time it
619 630 # isn't true is if this is the first revision in a delta chain
620 631 # or if ``format.generaldelta=true`` disabled ``lazydeltabase``.
621 632 if cachedelta and gdelta and revlog._lazydeltabase:
622 633 # Assume what we received from the server is a good choice
623 634 # building the delta will reuse the cache
624 635 yield (cachedelta[0],)
625 636 tested.add(cachedelta[0])
626 637
627 638 if gdelta:
628 639 # exclude already lazy tested base if any
629 640 parents = [p for p in (p1r, p2r)
630 641 if p != nullrev and p not in tested]
631 642
632 643 if not revlog._deltabothparents and len(parents) == 2:
633 644 parents.sort()
634 645 # To minimize the chance of having to build a fulltext,
635 646 # pick first whichever parent is closest to us (max rev)
636 647 yield (parents[1],)
637 648 # then the other one (min rev) if the first did not fit
638 649 yield (parents[0],)
639 650 tested.update(parents)
640 651 elif len(parents) > 0:
641 652 # Test all parents (1 or 2), and keep the best candidate
642 653 yield parents
643 654 tested.update(parents)
644 655
645 656 if prev not in tested:
646 657 # other approaches failed; try against prev to hopefully save us a
647 658 # fulltext.
648 659 yield (prev,)
649 660 tested.add(prev)
650 661
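    # The candidate groups yielded above come out in this order for a
    # typical generaldelta revlog (a sketch; whether each step applies
    # depends on the configuration checks in the code):
    # 1. (cachedelta base,) when a delta came over the wire and
    #    ``revlog._lazydeltabase`` is set
    # 2. (closest parent,) then (other parent,) -- or both parents in a
    #    single group when ``revlog._deltabothparents`` is set
    # 3. (prev,) as a last resort before falling back to a fulltext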
651 662 def buildtext(self, revinfo, fh):
652 663 """Builds a fulltext version of a revision
653 664
654 665 revinfo: _revisioninfo instance that contains all needed info
655 666 fh: file handle to either the .i or the .d revlog file,
656 667 depending on whether it is inlined or not
657 668 """
658 669 btext = revinfo.btext
659 670 if btext[0] is not None:
660 671 return btext[0]
661 672
662 673 revlog = self.revlog
663 674 cachedelta = revinfo.cachedelta
664 675 flags = revinfo.flags
665 676 node = revinfo.node
666 677
667 678 baserev = cachedelta[0]
668 679 delta = cachedelta[1]
669 680 # special case deltas which replace entire base; no need to decode
670 681 # base revision. this neatly avoids censored bases, which throw when
671 682 # they're decoded.
672 683 hlen = struct.calcsize(">lll")
673 684 if delta[:hlen] == mdiff.replacediffheader(revlog.rawsize(baserev),
674 685 len(delta) - hlen):
675 686 btext[0] = delta[hlen:]
676 687 else:
677 688 # deltabase is rawtext before changed by flag processors, which is
678 689 # equivalent to non-raw text
679 690 basetext = revlog.revision(baserev, _df=fh, raw=False)
680 691 btext[0] = mdiff.patch(basetext, delta)
681 692
682 693 try:
683 694 res = revlog._processflags(btext[0], flags, 'read', raw=True)
684 695 btext[0], validatehash = res
685 696 if validatehash:
686 697 revlog.checkhash(btext[0], node, p1=revinfo.p1, p2=revinfo.p2)
687 698 if flags & REVIDX_ISCENSORED:
688 699 raise RevlogError(_('node %s is not censored') % node)
689 700 except CensoredNodeError:
690 701 # must pass the censored index flag to add censored revisions
691 702 if not flags & REVIDX_ISCENSORED:
692 703 raise
693 704 return btext[0]
694 705
695 706 def _builddeltadiff(self, base, revinfo, fh):
696 707 revlog = self.revlog
697 708 t = self.buildtext(revinfo, fh)
698 709 if revlog.iscensored(base):
699 710 # deltas based on a censored revision must replace the
700 711 # full content in one patch, so delta works everywhere
701 712 header = mdiff.replacediffheader(revlog.rawsize(base), len(t))
702 713 delta = header + t
703 714 else:
704 715 ptext = revlog.revision(base, _df=fh, raw=True)
705 716 delta = mdiff.textdiff(ptext, t)
706 717
707 718 return delta
708 719
709 720 def _builddeltainfo(self, revinfo, base, fh):
710 721 # can we use the cached delta?
711 722 if revinfo.cachedelta and revinfo.cachedelta[0] == base:
712 723 delta = revinfo.cachedelta[1]
713 724 else:
714 725 delta = self._builddeltadiff(base, revinfo, fh)
715 726 revlog = self.revlog
716 727 header, data = revlog.compress(delta)
717 728 deltalen = len(header) + len(data)
718 729 chainbase = revlog.chainbase(base)
719 730 offset = revlog.end(len(revlog) - 1)
720 731 dist = deltalen + offset - revlog.start(chainbase)
721 732 if revlog._generaldelta:
722 733 deltabase = base
723 734 else:
724 735 deltabase = chainbase
725 736 chainlen, compresseddeltalen = revlog._chaininfo(base)
726 737 chainlen += 1
727 738 compresseddeltalen += deltalen
728 739 return _deltainfo(dist, deltalen, (header, data), deltabase,
729 740 chainbase, chainlen, compresseddeltalen)
730 741
731 742 def finddeltainfo(self, revinfo, fh):
732 743 """Find an acceptable delta against a candidate revision
733 744
734 745 revinfo: information about the revision (instance of _revisioninfo)
735 746 fh: file handle to either the .i or the .d revlog file,
736 747 depending on whether it is inlined or not
737 748
738 749 Returns the first acceptable candidate revision, as ordered by
739 750 _getcandidaterevs
740 751 """
741 752 if not revinfo.textlen:
742 753 return None # empty files do not need a delta
743 754
744 755 cachedelta = revinfo.cachedelta
745 756 p1 = revinfo.p1
746 757 p2 = revinfo.p2
747 758 revlog = self.revlog
748 759
749 760 deltalength = self.revlog.length
750 761 deltaparent = self.revlog.deltaparent
751 762
752 763 deltainfo = None
753 764 deltas_limit = revinfo.textlen * LIMIT_DELTA2TEXT
754 765 for candidaterevs in self._getcandidaterevs(p1, p2, cachedelta):
755 766 # filter out delta bases that will never produce a good delta
756 767 candidaterevs = [r for r in candidaterevs
757 768 if self.revlog.length(r) <= deltas_limit]
758 769 nominateddeltas = []
759 770 for candidaterev in candidaterevs:
760 771 # skip over empty deltas (no need to include them in a chain)
761 772 while candidaterev != nullrev and not deltalength(candidaterev):
762 773 candidaterev = deltaparent(candidaterev)
763 774 # no need to try a delta against nullid, this will be handled
764 775 # by fulltext later.
765 776 if candidaterev == nullrev:
766 777 continue
767 778 # no delta for rawtext-changing revs (see "candelta" for why)
768 779 if revlog.flags(candidaterev) & REVIDX_RAWTEXT_CHANGING_FLAGS:
769 780 continue
770 781 candidatedelta = self._builddeltainfo(revinfo, candidaterev, fh)
771 782 if revlog._isgooddeltainfo(candidatedelta, revinfo):
772 783 nominateddeltas.append(candidatedelta)
773 784 if nominateddeltas:
774 785 deltainfo = min(nominateddeltas, key=lambda x: x.deltalen)
775 786 break
776 787
777 788 return deltainfo
778 789
779 790 @attr.s(slots=True, frozen=True)
780 791 class _revisioninfo(object):
781 792 """Information about a revision that allows building its fulltext
782 793 node: expected hash of the revision
783 794 p1, p2: parent revs of the revision
784 795 btext: built text cache consisting of a one-element list
785 796 cachedelta: (baserev, uncompressed_delta) or None
786 797 flags: flags associated to the revision storage
787 798
788 799 One of btext[0] or cachedelta must be set.
789 800 """
790 801 node = attr.ib()
791 802 p1 = attr.ib()
792 803 p2 = attr.ib()
793 804 btext = attr.ib()
794 805 textlen = attr.ib()
795 806 cachedelta = attr.ib()
796 807 flags = attr.ib()
797 808
798 809 # index v0:
799 810 # 4 bytes: offset
800 811 # 4 bytes: compressed length
801 812 # 4 bytes: base rev
802 813 # 4 bytes: link rev
803 814 # 20 bytes: parent 1 nodeid
804 815 # 20 bytes: parent 2 nodeid
805 816 # 20 bytes: nodeid
806 817 indexformatv0 = struct.Struct(">4l20s20s20s")
807 818 indexformatv0_pack = indexformatv0.pack
808 819 indexformatv0_unpack = indexformatv0.unpack
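# Layout sanity check (doctest-style sketch; the entry values are made
# up): four 4-byte ints plus three 20-byte hashes give 76-byte records.
#
# >>> indexformatv0.size
# 76
# >>> e = (0, 11, 0, 0, b'\0' * 20, b'\0' * 20, b'\xaa' * 20)
# >>> indexformatv0_unpack(indexformatv0_pack(*e)) == e
# True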
809 820
810 821 class revlogoldindex(list):
811 822 def __getitem__(self, i):
812 823 if i == -1:
813 824 return (0, 0, 0, -1, -1, -1, -1, nullid)
814 825 return list.__getitem__(self, i)
815 826
816 827 # maximum <delta-chain-data>/<revision-text-length> ratio
817 828 LIMIT_DELTA2TEXT = 2
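# e.g. a candidate base whose own stored delta is already larger than
# twice the new revision's text length can never yield a good chain;
# finddeltainfo above uses this constant to filter such candidates early.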
818 829
819 830 class revlogoldio(object):
820 831 def __init__(self):
821 832 self.size = indexformatv0.size
822 833
823 834 def parseindex(self, data, inline):
824 835 s = self.size
825 836 index = []
826 837 nodemap = {nullid: nullrev}
827 838 n = off = 0
828 839 l = len(data)
829 840 while off + s <= l:
830 841 cur = data[off:off + s]
831 842 off += s
832 843 e = indexformatv0_unpack(cur)
833 844 # transform to revlogv1 format
834 845 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
835 846 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
836 847 index.append(e2)
837 848 nodemap[e[6]] = n
838 849 n += 1
839 850
840 851 return revlogoldindex(index), nodemap, None
841 852
842 853 def packentry(self, entry, node, version, rev):
843 854 if gettype(entry[0]):
844 855 raise RevlogError(_('index entry flags need revlog version 1'))
845 856 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
846 857 node(entry[5]), node(entry[6]), entry[7])
847 858 return indexformatv0_pack(*e2)
848 859
849 860 # index ng:
850 861 # 6 bytes: offset
851 862 # 2 bytes: flags
852 863 # 4 bytes: compressed length
853 864 # 4 bytes: uncompressed length
854 865 # 4 bytes: base rev
855 866 # 4 bytes: link rev
856 867 # 4 bytes: parent 1 rev
857 868 # 4 bytes: parent 2 rev
858 869 # 32 bytes: nodeid
859 870 indexformatng = struct.Struct(">Qiiiiii20s12x")
860 871 indexformatng_pack = indexformatng.pack
861 872 versionformat = struct.Struct(">I")
862 873 versionformat_pack = versionformat.pack
863 874 versionformat_unpack = versionformat.unpack
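# Layout sanity check (illustrative): each "ng" entry is 64 bytes
# (8 + 6 * 4 + 20 + 12 bytes of padding). For rev 0, packentry below
# overwrites the first 4 bytes -- the high half of its always-zero
# offset field -- with the 4-byte version header.
#
# >>> indexformatng.size
# 64
# >>> versionformat.size
# 4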
864 875
865 876 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
866 877 # signed integer)
867 878 _maxentrysize = 0x7fffffff
868 879
869 880 class revlogio(object):
870 881 def __init__(self):
871 882 self.size = indexformatng.size
872 883
873 884 def parseindex(self, data, inline):
874 885 # call the C implementation to parse the index data
875 886 index, cache = parsers.parse_index2(data, inline)
876 887 return index, getattr(index, 'nodemap', None), cache
877 888
878 889 def packentry(self, entry, node, version, rev):
879 890 p = indexformatng_pack(*entry)
880 891 if rev == 0:
881 892 p = versionformat_pack(version) + p[4:]
882 893 return p
883 894
884 895 class revlog(object):
885 896 """
886 897 the underlying revision storage object
887 898
888 899 A revlog consists of two parts, an index and the revision data.
889 900
890 901 The index is a file with a fixed record size containing
891 902 information on each revision, including its nodeid (hash), the
892 903 nodeids of its parents, the position and offset of its data within
893 904 the data file, and the revision it's based on. Finally, each entry
894 905 contains a linkrev entry that can serve as a pointer to external
895 906 data.
896 907
897 908 The revision data itself is a linear collection of data chunks.
898 909 Each chunk represents a revision and is usually represented as a
899 910 delta against the previous chunk. To bound lookup time, runs of
900 911 deltas are limited to about 2 times the length of the original
901 912 version data. This makes retrieval of a version proportional to
902 913 its size, or O(1) relative to the number of revisions.
903 914
904 915 Both pieces of the revlog are written to in an append-only
905 916 fashion, which means we never need to rewrite a file to insert or
906 917 remove data, and can use some simple techniques to avoid the need
907 918 for locking while reading.
908 919
909 920 If checkambig, indexfile is opened with checkambig=True at
910 921 writing, to avoid file stat ambiguity.
911 922
912 923 If mmaplargeindex is True, and an mmapindexthreshold is set, the
913 924 index will be mmapped rather than read if it is larger than the
914 925 configured threshold.
915 926
916 927 If censorable is True, the revlog can have censored revisions.
917 928 """
918 929 def __init__(self, opener, indexfile, datafile=None, checkambig=False,
919 930 mmaplargeindex=False, censorable=False):
920 931 """
921 932 create a revlog object
922 933
923 934 opener is a function that abstracts the file opening operation
924 935 and can be used to implement COW semantics or the like.
925 936 """
926 937 self.indexfile = indexfile
927 938 self.datafile = datafile or (indexfile[:-2] + ".d")
928 939 self.opener = opener
929 940 # When True, indexfile is opened with checkambig=True at writing, to
930 941 # avoid file stat ambiguity.
931 942 self._checkambig = checkambig
932 943 self._censorable = censorable
933 944 # 3-tuple of (node, rev, text) for a raw revision.
934 945 self._cache = None
935 946 # Maps rev to chain base rev.
936 947 self._chainbasecache = util.lrucachedict(100)
937 948 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
938 949 self._chunkcache = (0, '')
939 950 # How much data to read and cache into the raw revlog data cache.
940 951 self._chunkcachesize = 65536
941 952 self._maxchainlen = None
942 953 self._deltabothparents = True
943 954 self.index = []
944 955 # Mapping of partial identifiers to full nodes.
945 956 self._pcache = {}
946 957 # Mapping of revision integer to full node.
947 958 self._nodecache = {nullid: nullrev}
948 959 self._nodepos = None
949 960 self._compengine = 'zlib'
950 961 self._maxdeltachainspan = -1
951 962 self._withsparseread = False
952 963 self._sparserevlog = False
953 964 self._srdensitythreshold = 0.50
954 965 self._srmingapsize = 262144
955 966
956 967 mmapindexthreshold = None
957 968 v = REVLOG_DEFAULT_VERSION
958 969 opts = getattr(opener, 'options', None)
959 970 if opts is not None:
960 971 if 'revlogv2' in opts:
961 972 # version 2 revlogs always use generaldelta.
962 973 v = REVLOGV2 | FLAG_GENERALDELTA | FLAG_INLINE_DATA
963 974 elif 'revlogv1' in opts:
964 975 if 'generaldelta' in opts:
965 976 v |= FLAG_GENERALDELTA
966 977 else:
967 978 v = 0
968 979 if 'chunkcachesize' in opts:
969 980 self._chunkcachesize = opts['chunkcachesize']
970 981 if 'maxchainlen' in opts:
971 982 self._maxchainlen = opts['maxchainlen']
972 983 if 'deltabothparents' in opts:
973 984 self._deltabothparents = opts['deltabothparents']
974 985 self._lazydeltabase = bool(opts.get('lazydeltabase', False))
975 986 if 'compengine' in opts:
976 987 self._compengine = opts['compengine']
977 988 if 'maxdeltachainspan' in opts:
978 989 self._maxdeltachainspan = opts['maxdeltachainspan']
979 990 if mmaplargeindex and 'mmapindexthreshold' in opts:
980 991 mmapindexthreshold = opts['mmapindexthreshold']
981 992 self._sparserevlog = bool(opts.get('sparse-revlog', False))
982 993 withsparseread = bool(opts.get('with-sparse-read', False))
983 994 # sparse-revlog forces sparse-read
984 995 self._withsparseread = self._sparserevlog or withsparseread
985 996 if 'sparse-read-density-threshold' in opts:
986 997 self._srdensitythreshold = opts['sparse-read-density-threshold']
987 998 if 'sparse-read-min-gap-size' in opts:
988 999 self._srmingapsize = opts['sparse-read-min-gap-size']
989 1000
990 1001 if self._chunkcachesize <= 0:
991 1002 raise RevlogError(_('revlog chunk cache size %r is not greater '
992 1003 'than 0') % self._chunkcachesize)
993 1004 elif self._chunkcachesize & (self._chunkcachesize - 1):
994 1005 raise RevlogError(_('revlog chunk cache size %r is not a power '
995 1006 'of 2') % self._chunkcachesize)
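        # (``size & (size - 1)`` is the usual power-of-two test: it is
        # zero only when exactly one bit is set, e.g. 65536 & 65535 == 0.)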
996 1007
997 1008 indexdata = ''
998 1009 self._initempty = True
999 1010 try:
1000 1011 with self._indexfp() as f:
1001 1012 if (mmapindexthreshold is not None and
1002 1013 self.opener.fstat(f).st_size >= mmapindexthreshold):
1003 1014 indexdata = util.buffer(util.mmapread(f))
1004 1015 else:
1005 1016 indexdata = f.read()
1006 1017 if len(indexdata) > 0:
1007 1018 v = versionformat_unpack(indexdata[:4])[0]
1008 1019 self._initempty = False
1009 1020 except IOError as inst:
1010 1021 if inst.errno != errno.ENOENT:
1011 1022 raise
1012 1023
1013 1024 self.version = v
1014 1025 self._inline = v & FLAG_INLINE_DATA
1015 1026 self._generaldelta = v & FLAG_GENERALDELTA
1016 1027 flags = v & ~0xFFFF
1017 1028 fmt = v & 0xFFFF
1018 1029 if fmt == REVLOGV0:
1019 1030 if flags:
1020 1031 raise RevlogError(_('unknown flags (%#04x) in version %d '
1021 1032 'revlog %s') %
1022 1033 (flags >> 16, fmt, self.indexfile))
1023 1034 elif fmt == REVLOGV1:
1024 1035 if flags & ~REVLOGV1_FLAGS:
1025 1036 raise RevlogError(_('unknown flags (%#04x) in version %d '
1026 1037 'revlog %s') %
1027 1038 (flags >> 16, fmt, self.indexfile))
1028 1039 elif fmt == REVLOGV2:
1029 1040 if flags & ~REVLOGV2_FLAGS:
1030 1041 raise RevlogError(_('unknown flags (%#04x) in version %d '
1031 1042 'revlog %s') %
1032 1043 (flags >> 16, fmt, self.indexfile))
1033 1044 else:
1034 1045 raise RevlogError(_('unknown version (%d) in revlog %s') %
1035 1046 (fmt, self.indexfile))
1036 1047
1037 1048 self.storedeltachains = True
1038 1049
1039 1050 self._io = revlogio()
1040 1051 if self.version == REVLOGV0:
1041 1052 self._io = revlogoldio()
1042 1053 try:
1043 1054 d = self._io.parseindex(indexdata, self._inline)
1044 1055 except (ValueError, IndexError):
1045 1056 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
1046 1057 self.index, nodemap, self._chunkcache = d
1047 1058 if nodemap is not None:
1048 1059 self.nodemap = self._nodecache = nodemap
1049 1060 if not self._chunkcache:
1050 1061 self._chunkclear()
1051 1062 # revnum -> (chain-length, sum-delta-length)
1052 1063 self._chaininfocache = {}
1053 1064 # revlog header -> revlog compressor
1054 1065 self._decompressors = {}
1055 1066
1056 1067 @util.propertycache
1057 1068 def _compressor(self):
1058 1069 return util.compengines[self._compengine].revlogcompressor()
1059 1070
1060 1071 def _indexfp(self, mode='r'):
1061 1072 """file object for the revlog's index file"""
1062 1073 args = {r'mode': mode}
1063 1074 if mode != 'r':
1064 1075 args[r'checkambig'] = self._checkambig
1065 1076 if mode == 'w':
1066 1077 args[r'atomictemp'] = True
1067 1078 return self.opener(self.indexfile, **args)
1068 1079
1069 1080 def _datafp(self, mode='r'):
1070 1081 """file object for the revlog's data file"""
1071 1082 return self.opener(self.datafile, mode=mode)
1072 1083
1073 1084 @contextlib.contextmanager
1074 1085 def _datareadfp(self, existingfp=None):
1075 1086 """file object suitable to read data"""
1076 1087 if existingfp is not None:
1077 1088 yield existingfp
1078 1089 else:
1079 1090 if self._inline:
1080 1091 func = self._indexfp
1081 1092 else:
1082 1093 func = self._datafp
1083 1094 with func() as fp:
1084 1095 yield fp
1085 1096
1086 1097 def tip(self):
1087 1098 return self.node(len(self.index) - 1)
1088 1099 def __contains__(self, rev):
1089 1100 return 0 <= rev < len(self)
1090 1101 def __len__(self):
1091 1102 return len(self.index)
1092 1103 def __iter__(self):
1093 1104 return iter(pycompat.xrange(len(self)))
1094 1105 def revs(self, start=0, stop=None):
1095 1106 """iterate over all rev in this revlog (from start to stop)"""
1096 1107 step = 1
1097 1108 length = len(self)
1098 1109 if stop is not None:
1099 1110 if start > stop:
1100 1111 step = -1
1101 1112 stop += step
1102 1113 if stop > length:
1103 1114 stop = length
1104 1115 else:
1105 1116 stop = length
1106 1117 return pycompat.xrange(start, stop, step)
1107 1118
1108 1119 @util.propertycache
1109 1120 def nodemap(self):
1110 1121 if self.index:
1111 1122 # populate mapping down to the initial node
1112 1123 node0 = self.index[0][7] # get around changelog filtering
1113 1124 self.rev(node0)
1114 1125 return self._nodecache
1115 1126
1116 1127 def hasnode(self, node):
1117 1128 try:
1118 1129 self.rev(node)
1119 1130 return True
1120 1131 except KeyError:
1121 1132 return False
1122 1133
1123 1134 def candelta(self, baserev, rev):
1124 1135 """whether two revisions (baserev, rev) can be delta-ed or not"""
1125 1136 # Disable delta if either rev requires a content-changing flag
1126 1137 # processor (ex. LFS). This is because such a flag processor can alter
1127 1138 # the rawtext content that the delta will be based on, and two clients
1128 1139 # could have the same revlog node with different flags (i.e. different
1129 1140 # rawtext contents) and the delta could be incompatible.
1130 1141 if ((self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS)
1131 1142 or (self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS)):
1132 1143 return False
1133 1144 return True
1134 1145
1135 1146 def clearcaches(self):
1136 1147 self._cache = None
1137 1148 self._chainbasecache.clear()
1138 1149 self._chunkcache = (0, '')
1139 1150 self._pcache = {}
1140 1151
1141 1152 try:
1142 1153 self._nodecache.clearcaches()
1143 1154 except AttributeError:
1144 1155 self._nodecache = {nullid: nullrev}
1145 1156 self._nodepos = None
1146 1157
1147 1158 def rev(self, node):
1148 1159 try:
1149 1160 return self._nodecache[node]
1150 1161 except TypeError:
1151 1162 raise
1152 1163 except RevlogError:
1153 1164 # parsers.c radix tree lookup failed
1154 1165 if node == wdirid or node in wdirfilenodeids:
1155 1166 raise error.WdirUnsupported
1156 1167 raise LookupError(node, self.indexfile, _('no node'))
1157 1168 except KeyError:
1158 1169 # pure python cache lookup failed
1159 1170 n = self._nodecache
1160 1171 i = self.index
1161 1172 p = self._nodepos
1162 1173 if p is None:
1163 1174 p = len(i) - 1
1164 1175 else:
1165 1176 assert p < len(i)
1166 1177 for r in pycompat.xrange(p, -1, -1):
1167 1178 v = i[r][7]
1168 1179 n[v] = r
1169 1180 if v == node:
1170 1181 self._nodepos = r - 1
1171 1182 return r
1172 1183 if node == wdirid or node in wdirfilenodeids:
1173 1184 raise error.WdirUnsupported
1174 1185 raise LookupError(node, self.indexfile, _('no node'))
1175 1186
1176 1187 # Accessors for index entries.
1177 1188
1178 1189 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
1179 1190 # are flags.
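    # For example (illustrative values): data starting at byte 4096 with
    # flag 0x1 is stored as (4096 << 16) | 0x1 == 0x10000001; start()
    # recovers 4096 with a 16-bit right shift and flags() masks with 0xFFFF.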
1180 1191 def start(self, rev):
1181 1192 return int(self.index[rev][0] >> 16)
1182 1193
1183 1194 def flags(self, rev):
1184 1195 return self.index[rev][0] & 0xFFFF
1185 1196
1186 1197 def length(self, rev):
1187 1198 return self.index[rev][1]
1188 1199
1189 1200 def rawsize(self, rev):
1190 1201 """return the length of the uncompressed text for a given revision"""
1191 1202 l = self.index[rev][2]
1192 1203 if l >= 0:
1193 1204 return l
1194 1205
1195 1206 t = self.revision(rev, raw=True)
1196 1207 return len(t)
1197 1208
1198 1209 def size(self, rev):
1199 1210 """length of non-raw text (processed by a "read" flag processor)"""
1200 1211 # fast path: if no "read" flag processor could change the content,
1201 1212 # size is rawsize. note: ELLIPSIS is known to not change the content.
1202 1213 flags = self.flags(rev)
1203 1214 if flags & (REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
1204 1215 return self.rawsize(rev)
1205 1216
1206 1217 return len(self.revision(rev, raw=False))
1207 1218
1208 1219 def chainbase(self, rev):
1209 1220 base = self._chainbasecache.get(rev)
1210 1221 if base is not None:
1211 1222 return base
1212 1223
1213 1224 index = self.index
1214 1225 iterrev = rev
1215 1226 base = index[iterrev][3]
1216 1227 while base != iterrev:
1217 1228 iterrev = base
1218 1229 base = index[iterrev][3]
1219 1230
1220 1231 self._chainbasecache[rev] = base
1221 1232 return base
1222 1233
1223 1234 def linkrev(self, rev):
1224 1235 return self.index[rev][4]
1225 1236
1226 1237 def parentrevs(self, rev):
1227 1238 try:
1228 1239 entry = self.index[rev]
1229 1240 except IndexError:
1230 1241 if rev == wdirrev:
1231 1242 raise error.WdirUnsupported
1232 1243 raise
1233 1244
1234 1245 return entry[5], entry[6]
1235 1246
1236 1247 def node(self, rev):
1237 1248 try:
1238 1249 return self.index[rev][7]
1239 1250 except IndexError:
1240 1251 if rev == wdirrev:
1241 1252 raise error.WdirUnsupported
1242 1253 raise
1243 1254
1244 1255 # Derived from index values.
1245 1256
1246 1257 def end(self, rev):
1247 1258 return self.start(rev) + self.length(rev)
1248 1259
1249 1260 def parents(self, node):
1250 1261 i = self.index
1251 1262 d = i[self.rev(node)]
1252 1263 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
1253 1264
1254 1265 def chainlen(self, rev):
1255 1266 return self._chaininfo(rev)[0]
1256 1267
1257 1268 def _chaininfo(self, rev):
1258 1269 chaininfocache = self._chaininfocache
1259 1270 if rev in chaininfocache:
1260 1271 return chaininfocache[rev]
1261 1272 index = self.index
1262 1273 generaldelta = self._generaldelta
1263 1274 iterrev = rev
1264 1275 e = index[iterrev]
1265 1276 clen = 0
1266 1277 compresseddeltalen = 0
1267 1278 while iterrev != e[3]:
1268 1279 clen += 1
1269 1280 compresseddeltalen += e[1]
1270 1281 if generaldelta:
1271 1282 iterrev = e[3]
1272 1283 else:
1273 1284 iterrev -= 1
1274 1285 if iterrev in chaininfocache:
1275 1286 t = chaininfocache[iterrev]
1276 1287 clen += t[0]
1277 1288 compresseddeltalen += t[1]
1278 1289 break
1279 1290 e = index[iterrev]
1280 1291 else:
1281 1292 # Add text length of base since decompressing that also takes
1282 1293 # work. For cache hits the length is already included.
1283 1294 compresseddeltalen += e[1]
1284 1295 r = (clen, compresseddeltalen)
1285 1296 chaininfocache[rev] = r
1286 1297 return r
1287 1298
1288 1299 def _deltachain(self, rev, stoprev=None):
1289 1300 """Obtain the delta chain for a revision.
1290 1301
1291 1302 ``stoprev`` specifies a revision to stop at. If not specified, we
1292 1303 stop at the base of the chain.
1293 1304
1294 1305 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
1295 1306 revs in ascending order and ``stopped`` is a bool indicating whether
1296 1307 ``stoprev`` was hit.
1297 1308 """
1298 1309 # Try C implementation.
1299 1310 try:
1300 1311 return self.index.deltachain(rev, stoprev, self._generaldelta)
1301 1312 except AttributeError:
1302 1313 pass
1303 1314
1304 1315 chain = []
1305 1316
1306 1317 # Alias to prevent attribute lookup in tight loop.
1307 1318 index = self.index
1308 1319 generaldelta = self._generaldelta
1309 1320
1310 1321 iterrev = rev
1311 1322 e = index[iterrev]
1312 1323 while iterrev != e[3] and iterrev != stoprev:
1313 1324 chain.append(iterrev)
1314 1325 if generaldelta:
1315 1326 iterrev = e[3]
1316 1327 else:
1317 1328 iterrev -= 1
1318 1329 e = index[iterrev]
1319 1330
1320 1331 if iterrev == stoprev:
1321 1332 stopped = True
1322 1333 else:
1323 1334 chain.append(iterrev)
1324 1335 stopped = False
1325 1336
1326 1337 chain.reverse()
1327 1338 return chain, stopped
1328 1339
1329 1340 def ancestors(self, revs, stoprev=0, inclusive=False):
1330 1341 """Generate the ancestors of 'revs' in reverse topological order.
1331 1342 Does not generate revs lower than stoprev.
1332 1343
1333 1344 See the documentation for ancestor.lazyancestors for more details."""
1334 1345
1335 1346 return ancestor.lazyancestors(self.parentrevs, revs, stoprev=stoprev,
1336 1347 inclusive=inclusive)
1337 1348
1338 1349 def descendants(self, revs):
1339 1350 """Generate the descendants of 'revs' in revision order.
1340 1351
1341 1352 Yield a sequence of revision numbers starting with a child of
1342 1353 some rev in revs, i.e., each revision is *not* considered a
1343 1354 descendant of itself. Results are ordered by revision number (a
1344 1355 topological sort)."""
1345 1356 first = min(revs)
1346 1357 if first == nullrev:
1347 1358 for i in self:
1348 1359 yield i
1349 1360 return
1350 1361
1351 1362 seen = set(revs)
1352 1363 for i in self.revs(start=first + 1):
1353 1364 for x in self.parentrevs(i):
1354 1365 if x != nullrev and x in seen:
1355 1366 seen.add(i)
1356 1367 yield i
1357 1368 break
1358 1369
1359 1370 def findcommonmissing(self, common=None, heads=None):
1360 1371 """Return a tuple of the ancestors of common and the ancestors of heads
1361 1372 that are not ancestors of common. In revset terminology, we return the
1362 1373 tuple:
1363 1374
1364 1375 ::common, (::heads) - (::common)
1365 1376
1366 1377 The list is sorted by revision number, meaning it is
1367 1378 topologically sorted.
1368 1379
1369 1380 'heads' and 'common' are both lists of node IDs. If heads is
1370 1381 not supplied, uses all of the revlog's heads. If common is not
1371 1382 supplied, uses nullid."""
1372 1383 if common is None:
1373 1384 common = [nullid]
1374 1385 if heads is None:
1375 1386 heads = self.heads()
1376 1387
1377 1388 common = [self.rev(n) for n in common]
1378 1389 heads = [self.rev(n) for n in heads]
1379 1390
1380 1391 # we want the ancestors, but inclusive
1381 1392 class lazyset(object):
1382 1393 def __init__(self, lazyvalues):
1383 1394 self.addedvalues = set()
1384 1395 self.lazyvalues = lazyvalues
1385 1396
1386 1397 def __contains__(self, value):
1387 1398 return value in self.addedvalues or value in self.lazyvalues
1388 1399
1389 1400 def __iter__(self):
1390 1401 added = self.addedvalues
1391 1402 for r in added:
1392 1403 yield r
1393 1404 for r in self.lazyvalues:
1394 1405 if r not in added:
1395 1406 yield r
1396 1407
1397 1408 def add(self, value):
1398 1409 self.addedvalues.add(value)
1399 1410
1400 1411 def update(self, values):
1401 1412 self.addedvalues.update(values)
1402 1413
1403 1414 has = lazyset(self.ancestors(common))
1404 1415 has.add(nullrev)
1405 1416 has.update(common)
1406 1417
1407 1418 # take all ancestors from heads that aren't in has
1408 1419 missing = set()
1409 1420 visit = collections.deque(r for r in heads if r not in has)
1410 1421 while visit:
1411 1422 r = visit.popleft()
1412 1423 if r in missing:
1413 1424 continue
1414 1425 else:
1415 1426 missing.add(r)
1416 1427 for p in self.parentrevs(r):
1417 1428 if p not in has:
1418 1429 visit.append(p)
1419 1430 missing = list(missing)
1420 1431 missing.sort()
1421 1432 return has, [self.node(miss) for miss in missing]
1422 1433
1423 1434 def incrementalmissingrevs(self, common=None):
1424 1435 """Return an object that can be used to incrementally compute the
1425 1436 revision numbers of the ancestors of arbitrary sets that are not
1426 1437 ancestors of common. This is an ancestor.incrementalmissingancestors
1427 1438 object.
1428 1439
1429 1440 'common' is a list of revision numbers. If common is not supplied, uses
1430 1441 nullrev.
1431 1442 """
1432 1443 if common is None:
1433 1444 common = [nullrev]
1434 1445
1435 1446 return ancestor.incrementalmissingancestors(self.parentrevs, common)
1436 1447
1437 1448 def findmissingrevs(self, common=None, heads=None):
1438 1449 """Return the revision numbers of the ancestors of heads that
1439 1450 are not ancestors of common.
1440 1451
1441 1452 More specifically, return a list of revision numbers corresponding to
1442 1453 nodes N such that every N satisfies the following constraints:
1443 1454
1444 1455 1. N is an ancestor of some node in 'heads'
1445 1456 2. N is not an ancestor of any node in 'common'
1446 1457
1447 1458 The list is sorted by revision number, meaning it is
1448 1459 topologically sorted.
1449 1460
1450 1461 'heads' and 'common' are both lists of revision numbers. If heads is
1451 1462 not supplied, uses all of the revlog's heads. If common is not
1452 1463 supplied, uses nullid."""
1453 1464 if common is None:
1454 1465 common = [nullrev]
1455 1466 if heads is None:
1456 1467 heads = self.headrevs()
1457 1468
1458 1469 inc = self.incrementalmissingrevs(common=common)
1459 1470 return inc.missingancestors(heads)
1460 1471
1461 1472 def findmissing(self, common=None, heads=None):
1462 1473 """Return the ancestors of heads that are not ancestors of common.
1463 1474
1464 1475 More specifically, return a list of nodes N such that every N
1465 1476 satisfies the following constraints:
1466 1477
1467 1478 1. N is an ancestor of some node in 'heads'
1468 1479 2. N is not an ancestor of any node in 'common'
1469 1480
1470 1481 The list is sorted by revision number, meaning it is
1471 1482 topologically sorted.
1472 1483
1473 1484 'heads' and 'common' are both lists of node IDs. If heads is
1474 1485 not supplied, uses all of the revlog's heads. If common is not
1475 1486 supplied, uses nullid."""
1476 1487 if common is None:
1477 1488 common = [nullid]
1478 1489 if heads is None:
1479 1490 heads = self.heads()
1480 1491
1481 1492 common = [self.rev(n) for n in common]
1482 1493 heads = [self.rev(n) for n in heads]
1483 1494
1484 1495 inc = self.incrementalmissingrevs(common=common)
1485 1496 return [self.node(r) for r in inc.missingancestors(heads)]
1486 1497
1487 1498 def nodesbetween(self, roots=None, heads=None):
1488 1499 """Return a topological path from 'roots' to 'heads'.
1489 1500
1490 1501 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
1491 1502 topologically sorted list of all nodes N that satisfy both of
1492 1503 these constraints:
1493 1504
1494 1505 1. N is a descendant of some node in 'roots'
1495 1506 2. N is an ancestor of some node in 'heads'
1496 1507
1497 1508 Every node is considered to be both a descendant and an ancestor
1498 1509 of itself, so every reachable node in 'roots' and 'heads' will be
1499 1510 included in 'nodes'.
1500 1511
1501 1512 'outroots' is the list of reachable nodes in 'roots', i.e., the
1502 1513 subset of 'roots' that is returned in 'nodes'. Likewise,
1503 1514 'outheads' is the subset of 'heads' that is also in 'nodes'.
1504 1515
1505 1516 'roots' and 'heads' are both lists of node IDs. If 'roots' is
1506 1517 unspecified, uses nullid as the only root. If 'heads' is
1507 1518 unspecified, uses list of all of the revlog's heads."""
1508 1519 nonodes = ([], [], [])
1509 1520 if roots is not None:
1510 1521 roots = list(roots)
1511 1522 if not roots:
1512 1523 return nonodes
1513 1524 lowestrev = min([self.rev(n) for n in roots])
1514 1525 else:
1515 1526 roots = [nullid] # Everybody's a descendant of nullid
1516 1527 lowestrev = nullrev
1517 1528 if (lowestrev == nullrev) and (heads is None):
1518 1529 # We want _all_ the nodes!
1519 1530 return ([self.node(r) for r in self], [nullid], list(self.heads()))
1520 1531 if heads is None:
1521 1532 # All nodes are ancestors, so the latest ancestor is the last
1522 1533 # node.
1523 1534 highestrev = len(self) - 1
1524 1535 # Set ancestors to None to signal that every node is an ancestor.
1525 1536 ancestors = None
1526 1537 # Set heads to an empty dictionary for later discovery of heads
1527 1538 heads = {}
1528 1539 else:
1529 1540 heads = list(heads)
1530 1541 if not heads:
1531 1542 return nonodes
1532 1543 ancestors = set()
1533 1544 # Turn heads into a dictionary so we can remove 'fake' heads.
1534 1545 # Also, later we will be using it to filter out the heads we can't
1535 1546 # find from roots.
1536 1547 heads = dict.fromkeys(heads, False)
1537 1548 # Start at the top and keep marking parents until we're done.
1538 1549 nodestotag = set(heads)
1539 1550 # Remember where the top was so we can use it as a limit later.
1540 1551 highestrev = max([self.rev(n) for n in nodestotag])
1541 1552 while nodestotag:
1542 1553 # grab a node to tag
1543 1554 n = nodestotag.pop()
1544 1555 # Never tag nullid
1545 1556 if n == nullid:
1546 1557 continue
1547 1558 # A node's revision number represents its place in a
1548 1559 # topologically sorted list of nodes.
1549 1560 r = self.rev(n)
1550 1561 if r >= lowestrev:
1551 1562 if n not in ancestors:
1552 1563 # If we are possibly a descendant of one of the roots
1553 1564 # and we haven't already been marked as an ancestor
1554 1565 ancestors.add(n) # Mark as ancestor
1555 1566 # Add non-nullid parents to list of nodes to tag.
1556 1567 nodestotag.update([p for p in self.parents(n) if
1557 1568 p != nullid])
1558 1569 elif n in heads: # We've seen it before, is it a fake head?
1559 1570 # So it is, real heads should not be the ancestors of
1560 1571 # any other heads.
1561 1572 heads.pop(n)
1562 1573 if not ancestors:
1563 1574 return nonodes
1564 1575 # Now that we have our set of ancestors, we want to remove any
1565 1576 # roots that are not ancestors.
1566 1577
1567 1578 # If one of the roots was nullid, everything is included anyway.
1568 1579 if lowestrev > nullrev:
1569 1580 # But, since we weren't, let's recompute the lowest rev to not
1570 1581 # include roots that aren't ancestors.
1571 1582
1572 1583 # Filter out roots that aren't ancestors of heads
1573 1584 roots = [root for root in roots if root in ancestors]
1574 1585 # Recompute the lowest revision
1575 1586 if roots:
1576 1587 lowestrev = min([self.rev(root) for root in roots])
1577 1588 else:
1578 1589 # No more roots? Return empty list
1579 1590 return nonodes
1580 1591 else:
1581 1592 # We are descending from nullid, and don't need to care about
1582 1593 # any other roots.
1583 1594 lowestrev = nullrev
1584 1595 roots = [nullid]
1585 1596 # Transform our roots list into a set.
1586 1597 descendants = set(roots)
1587 1598 # Also, keep the original roots so we can filter out roots that aren't
1588 1599 # 'real' roots (i.e. are descended from other roots).
1589 1600 roots = descendants.copy()
1590 1601 # Our topologically sorted list of output nodes.
1591 1602 orderedout = []
1592 1603 # Don't start at nullid since we don't want nullid in our output list,
1593 1604 # and if nullid shows up in descendants, empty parents will look like
1594 1605 # they're descendants.
1595 1606 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1596 1607 n = self.node(r)
1597 1608 isdescendant = False
1598 1609 if lowestrev == nullrev: # Everybody is a descendant of nullid
1599 1610 isdescendant = True
1600 1611 elif n in descendants:
1601 1612 # n is already a descendant
1602 1613 isdescendant = True
1603 1614 # This check only needs to be done here because all the roots
1604 1615 # will start being marked as descendants before the loop.
1605 1616 if n in roots:
1606 1617 # If n was a root, check if it's a 'real' root.
1607 1618 p = tuple(self.parents(n))
1608 1619 # If any of its parents are descendants, it's not a root.
1609 1620 if (p[0] in descendants) or (p[1] in descendants):
1610 1621 roots.remove(n)
1611 1622 else:
1612 1623 p = tuple(self.parents(n))
1613 1624 # A node is a descendant if either of its parents are
1614 1625 # descendants. (We seeded the descendants set with the roots
1615 1626 # up there, remember?)
1616 1627 if (p[0] in descendants) or (p[1] in descendants):
1617 1628 descendants.add(n)
1618 1629 isdescendant = True
1619 1630 if isdescendant and ((ancestors is None) or (n in ancestors)):
1620 1631 # Only include nodes that are both descendants and ancestors.
1621 1632 orderedout.append(n)
1622 1633 if (ancestors is not None) and (n in heads):
1623 1634 # We're trying to figure out which heads are reachable
1624 1635 # from roots.
1625 1636 # Mark this head as having been reached
1626 1637 heads[n] = True
1627 1638 elif ancestors is None:
1628 1639 # Otherwise, we're trying to discover the heads.
1629 1640 # Assume this is a head because if it isn't, the next step
1630 1641 # will eventually remove it.
1631 1642 heads[n] = True
1632 1643 # But, obviously its parents aren't.
1633 1644 for p in self.parents(n):
1634 1645 heads.pop(p, None)
1635 1646 heads = [head for head, flag in heads.iteritems() if flag]
1636 1647 roots = list(roots)
1637 1648 assert orderedout
1638 1649 assert roots
1639 1650 assert heads
1640 1651 return (orderedout, roots, heads)
1641 1652
1642 1653 def headrevs(self):
1643 1654 try:
1644 1655 return self.index.headrevs()
1645 1656 except AttributeError:
1646 1657 return self._headrevs()
1647 1658
1648 1659 def computephases(self, roots):
1649 1660 return self.index.computephasesmapsets(roots)
1650 1661
1651 1662 def _headrevs(self):
1652 1663 count = len(self)
1653 1664 if not count:
1654 1665 return [nullrev]
1655 1666 # we won't iterate over filtered revs so nobody is a head at the start
1656 1667 ishead = [0] * (count + 1)
1657 1668 index = self.index
1658 1669 for r in self:
1659 1670 ishead[r] = 1 # I may be a head
1660 1671 e = index[r]
1661 1672 ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
1662 1673 return [r for r, val in enumerate(ishead) if val]
1663 1674
1664 1675 def heads(self, start=None, stop=None):
1665 1676 """return the list of all nodes that have no children
1666 1677
1667 1678 if start is specified, only heads that are descendants of
1668 1679 start will be returned
1669 1680 if stop is specified, it will consider all the revs from stop
1670 1681 as if they had no children
1671 1682 """
1672 1683 if start is None and stop is None:
1673 1684 if not len(self):
1674 1685 return [nullid]
1675 1686 return [self.node(r) for r in self.headrevs()]
1676 1687
1677 1688 if start is None:
1678 1689 start = nullid
1679 1690 if stop is None:
1680 1691 stop = []
1681 1692 stoprevs = set([self.rev(n) for n in stop])
1682 1693 startrev = self.rev(start)
1683 1694 reachable = {startrev}
1684 1695 heads = {startrev}
1685 1696
1686 1697 parentrevs = self.parentrevs
1687 1698 for r in self.revs(start=startrev + 1):
1688 1699 for p in parentrevs(r):
1689 1700 if p in reachable:
1690 1701 if r not in stoprevs:
1691 1702 reachable.add(r)
1692 1703 heads.add(r)
1693 1704 if p in heads and p not in stoprevs:
1694 1705 heads.remove(p)
1695 1706
1696 1707 return [self.node(r) for r in heads]
1697 1708
1698 1709 def children(self, node):
1699 1710 """find the children of a given node"""
1700 1711 c = []
1701 1712 p = self.rev(node)
1702 1713 for r in self.revs(start=p + 1):
1703 1714 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1704 1715 if prevs:
1705 1716 for pr in prevs:
1706 1717 if pr == p:
1707 1718 c.append(self.node(r))
1708 1719 elif p == nullrev:
1709 1720 c.append(self.node(r))
1710 1721 return c
1711 1722
1712 1723 def commonancestorsheads(self, a, b):
1713 1724 """calculate all the heads of the common ancestors of nodes a and b"""
1714 1725 a, b = self.rev(a), self.rev(b)
1715 1726 ancs = self._commonancestorsheads(a, b)
1716 1727 return pycompat.maplist(self.node, ancs)
1717 1728
1718 1729 def _commonancestorsheads(self, *revs):
1719 1730 """calculate all the heads of the common ancestors of revs"""
1720 1731 try:
1721 1732 ancs = self.index.commonancestorsheads(*revs)
1722 1733 except (AttributeError, OverflowError): # C implementation failed
1723 1734 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1724 1735 return ancs
1725 1736
1726 1737 def isancestor(self, a, b):
1727 1738 """return True if node a is an ancestor of node b
1728 1739
1729 1740 A revision is considered an ancestor of itself."""
1730 1741 a, b = self.rev(a), self.rev(b)
1731 1742 return self.isancestorrev(a, b)
1732 1743
1733 1744 def isancestorrev(self, a, b):
1734 1745 """return True if revision a is an ancestor of revision b
1735 1746
1736 1747 A revision is considered an ancestor of itself.
1737 1748
1738 1749 The implementation of this is trivial but the use of
1739 1750 commonancestorsheads is not."""
1740 1751 if a == nullrev:
1741 1752 return True
1742 1753 elif a == b:
1743 1754 return True
1744 1755 elif a > b:
1745 1756 return False
1746 1757 return a in self._commonancestorsheads(a, b)
1747 1758
1748 1759 def ancestor(self, a, b):
1749 1760 """calculate the "best" common ancestor of nodes a and b"""
1750 1761
1751 1762 a, b = self.rev(a), self.rev(b)
1752 1763 try:
1753 1764 ancs = self.index.ancestors(a, b)
1754 1765 except (AttributeError, OverflowError):
1755 1766 ancs = ancestor.ancestors(self.parentrevs, a, b)
1756 1767 if ancs:
1757 1768 # choose a consistent winner when there's a tie
1758 1769 return min(map(self.node, ancs))
1759 1770 return nullid
1760 1771
1761 1772 def _match(self, id):
1762 1773 if isinstance(id, int):
1763 1774 # rev
1764 1775 return self.node(id)
1765 1776 if len(id) == 20:
1766 1777 # possibly a binary node
1767 1778 # odds of a binary node being all hex in ASCII are 1 in 10**25
1768 1779 try:
1769 1780 node = id
1770 1781 self.rev(node) # quick search the index
1771 1782 return node
1772 1783 except LookupError:
1773 1784 pass # may be partial hex id
1774 1785 try:
1775 1786 # str(rev)
1776 1787 rev = int(id)
1777 1788 if "%d" % rev != id:
1778 1789 raise ValueError
1779 1790 if rev < 0:
1780 1791 rev = len(self) + rev
1781 1792 if rev < 0 or rev >= len(self):
1782 1793 raise ValueError
1783 1794 return self.node(rev)
1784 1795 except (ValueError, OverflowError):
1785 1796 pass
1786 1797 if len(id) == 40:
1787 1798 try:
1788 1799 # a full hex nodeid?
1789 1800 node = bin(id)
1790 1801 self.rev(node)
1791 1802 return node
1792 1803 except (TypeError, LookupError):
1793 1804 pass
1794 1805
1795 1806 def _partialmatch(self, id):
1796 1807 # we don't care about wdirfilenodeids as they should always be full hashes
1797 1808 maybewdir = wdirhex.startswith(id)
1798 1809 try:
1799 1810 partial = self.index.partialmatch(id)
1800 1811 if partial and self.hasnode(partial):
1801 1812 if maybewdir:
1802 1813 # single 'ff...' match in radix tree, ambiguous with wdir
1803 1814 raise RevlogError
1804 1815 return partial
1805 1816 if maybewdir:
1806 1817 # no 'ff...' match in radix tree, wdir identified
1807 1818 raise error.WdirUnsupported
1808 1819 return None
1809 1820 except RevlogError:
1810 1821 # parsers.c radix tree lookup gave multiple matches
1811 1822 # fast path: for unfiltered changelog, radix tree is accurate
1812 1823 if not getattr(self, 'filteredrevs', None):
1813 1824 raise AmbiguousPrefixLookupError(id, self.indexfile,
1814 1825 _('ambiguous identifier'))
1815 1826 # fall through to slow path that filters hidden revisions
1816 1827 except (AttributeError, ValueError):
1817 1828 # we are pure python, or key was too short to search radix tree
1818 1829 pass
1819 1830
1820 1831 if id in self._pcache:
1821 1832 return self._pcache[id]
1822 1833
1823 1834 if len(id) <= 40:
1824 1835 try:
1825 1836 # hex(node)[:...]
1826 1837 l = len(id) // 2 # grab an even number of digits
1827 1838 prefix = bin(id[:l * 2])
1828 1839 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1829 1840 nl = [n for n in nl if hex(n).startswith(id) and
1830 1841 self.hasnode(n)]
1831 1842 if len(nl) > 0:
1832 1843 if len(nl) == 1 and not maybewdir:
1833 1844 self._pcache[id] = nl[0]
1834 1845 return nl[0]
1835 1846 raise AmbiguousPrefixLookupError(id, self.indexfile,
1836 1847 _('ambiguous identifier'))
1837 1848 if maybewdir:
1838 1849 raise error.WdirUnsupported
1839 1850 return None
1840 1851 except TypeError:
1841 1852 pass
1842 1853
1843 1854 def lookup(self, id):
1844 1855 """locate a node based on:
1845 1856 - revision number or str(revision number)
1846 1857 - nodeid or subset of hex nodeid
1847 1858 """
1848 1859 n = self._match(id)
1849 1860 if n is not None:
1850 1861 return n
1851 1862 n = self._partialmatch(id)
1852 1863 if n:
1853 1864 return n
1854 1865
1855 1866 raise LookupError(id, self.indexfile, _('no match found'))
1856 1867
1857 1868 def shortest(self, node, minlength=1):
1858 1869 """Find the shortest unambiguous prefix that matches node."""
1859 1870 def isvalid(prefix):
1860 1871 try:
1861 1872 node = self._partialmatch(prefix)
1862 1873 except error.RevlogError:
1863 1874 return False
1864 1875 except error.WdirUnsupported:
1865 1876 # single 'ff...' match
1866 1877 return True
1867 1878 if node is None:
1868 1879 raise LookupError(node, self.indexfile, _('no node'))
1869 1880 return True
1870 1881
1871 1882 def maybewdir(prefix):
1872 1883 return all(c == 'f' for c in prefix)
1873 1884
1874 1885 hexnode = hex(node)
1875 1886
1876 1887 def disambiguate(hexnode, minlength):
1877 1888 """Disambiguate against wdirid."""
1878 1889 for length in range(minlength, 41):
1879 1890 prefix = hexnode[:length]
1880 1891 if not maybewdir(prefix):
1881 1892 return prefix
1882 1893
1883 1894 if not getattr(self, 'filteredrevs', None):
1884 1895 try:
1885 1896 length = max(self.index.shortest(node), minlength)
1886 1897 return disambiguate(hexnode, length)
1887 1898 except RevlogError:
1888 1899 if node != wdirid:
1889 1900 raise LookupError(node, self.indexfile, _('no node'))
1890 1901 except AttributeError:
1891 1902 # Fall through to pure code
1892 1903 pass
1893 1904
1894 1905 if node == wdirid:
1895 1906 for length in range(minlength, 41):
1896 1907 prefix = hexnode[:length]
1897 1908 if isvalid(prefix):
1898 1909 return prefix
1899 1910
1900 1911 for length in range(minlength, 41):
1901 1912 prefix = hexnode[:length]
1902 1913 if isvalid(prefix):
1903 1914 return disambiguate(hexnode, length)
1904 1915
1905 1916 def cmp(self, node, text):
1906 1917 """compare text with a given file revision
1907 1918
1908 1919 returns True if text is different from what is stored.
1909 1920 """
1910 1921 p1, p2 = self.parents(node)
1911 1922 return hash(text, p1, p2) != node
1912 1923
1913 1924 def _cachesegment(self, offset, data):
1914 1925 """Add a segment to the revlog cache.
1915 1926
1916 1927 Accepts an absolute offset and the data that is at that location.
1917 1928 """
1918 1929 o, d = self._chunkcache
1919 1930 # try to add to existing cache
1920 1931 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1921 1932 self._chunkcache = o, d + data
1922 1933 else:
1923 1934 self._chunkcache = offset, data
1924 1935
1925 1936 def _readsegment(self, offset, length, df=None):
1926 1937 """Load a segment of raw data from the revlog.
1927 1938
1928 1939 Accepts an absolute offset, length to read, and an optional existing
1929 1940 file handle to read from.
1930 1941
1931 1942 If an existing file handle is passed, it will be seeked and the
1932 1943 original seek position will NOT be restored.
1933 1944
1934 1945 Returns a str or buffer of raw byte data.
1935 1946 """
1936 1947 # Cache data both forward and backward around the requested
1937 1948 # data, in a fixed size window. This helps speed up operations
1938 1949 # involving reading the revlog backwards.
1939 1950 cachesize = self._chunkcachesize
1940 1951 realoffset = offset & ~(cachesize - 1)
1941 1952 reallength = (((offset + length + cachesize) & ~(cachesize - 1))
1942 1953 - realoffset)
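        # e.g. (illustrative, with the default 64k cache size): offset=70000,
        # length=100 rounds to realoffset=65536 and reallength=65536, so one
        # aligned 64k window covering the requested bytes is read and cached.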
1943 1954 with self._datareadfp(df) as df:
1944 1955 df.seek(realoffset)
1945 1956 d = df.read(reallength)
1946 1957 self._cachesegment(realoffset, d)
1947 1958 if offset != realoffset or reallength != length:
1948 1959 return util.buffer(d, offset - realoffset, length)
1949 1960 return d
1950 1961
1951 1962 def _getsegment(self, offset, length, df=None):
1952 1963 """Obtain a segment of raw data from the revlog.
1953 1964
1954 1965 Accepts an absolute offset, length of bytes to obtain, and an
1955 1966 optional file handle to the already-opened revlog. If the file
1956 1967 handle is used, its original seek position will not be preserved.
1957 1968
1958 1969 Requests for data may be returned from a cache.
1959 1970
1960 1971 Returns a str or a buffer instance of raw byte data.
1961 1972 """
1962 1973 o, d = self._chunkcache
1963 1974 l = len(d)
1964 1975
1965 1976 # is it in the cache?
1966 1977 cachestart = offset - o
1967 1978 cacheend = cachestart + length
1968 1979 if cachestart >= 0 and cacheend <= l:
1969 1980 if cachestart == 0 and cacheend == l:
1970 1981 return d # avoid a copy
1971 1982 return util.buffer(d, cachestart, cacheend - cachestart)
1972 1983
1973 1984 return self._readsegment(offset, length, df=df)
1974 1985
1975 1986 def _getsegmentforrevs(self, startrev, endrev, df=None):
1976 1987 """Obtain a segment of raw data corresponding to a range of revisions.
1977 1988
1978 1989 Accepts the start and end revisions and an optional already-open
1979 1990 file handle to be used for reading. If the file handle is read, its
1980 1991 seek position will not be preserved.
1981 1992
1982 1993 Requests for data may be satisfied by a cache.
1983 1994
1984 1995 Returns a 2-tuple of (offset, data) for the requested range of
1985 1996 revisions. Offset is the integer offset from the beginning of the
1986 1997 revlog and data is a str or buffer of the raw byte data.
1987 1998
1988 1999 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1989 2000 to determine where each revision's data begins and ends.
1990 2001 """
1991 2002 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1992 2003 # (functions are expensive).
1993 2004 index = self.index
1994 2005 istart = index[startrev]
1995 2006 start = int(istart[0] >> 16)
1996 2007 if startrev == endrev:
1997 2008 end = start + istart[1]
1998 2009 else:
1999 2010 iend = index[endrev]
2000 2011 end = int(iend[0] >> 16) + iend[1]
2001 2012
2002 2013 if self._inline:
2003 2014 start += (startrev + 1) * self._io.size
2004 2015 end += (endrev + 1) * self._io.size
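            # e.g. (illustrative): in an inline v1 revlog the 64-byte index
            # entries are interleaved with the data, so rev 0's data actually
            # begins at start(0) + 64, just past its own index entry.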
2005 2016 length = end - start
2006 2017
2007 2018 return start, self._getsegment(start, length, df=df)
2008 2019
2009 2020 def _chunk(self, rev, df=None):
2010 2021 """Obtain a single decompressed chunk for a revision.
2011 2022
2012 2023 Accepts an integer revision and an optional already-open file handle
2013 2024 to be used for reading. If used, the seek position of the file will not
2014 2025 be preserved.
2015 2026
2016 2027 Returns a str holding uncompressed data for the requested revision.
2017 2028 """
2018 2029 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
2019 2030
2020 2031 def _chunks(self, revs, df=None, targetsize=None):
2021 2032 """Obtain decompressed chunks for the specified revisions.
2022 2033
2023 2034 Accepts an iterable of numeric revisions that are assumed to be in
2024 2035 ascending order. Also accepts an optional already-open file handle
2025 2036 to be used for reading. If used, the seek position of the file will
2026 2037 not be preserved.
2027 2038
2028 2039 This function is similar to calling ``self._chunk()`` multiple times,
2029 2040 but is faster.
2030 2041
2031 2042 Returns a list with decompressed data for each requested revision.
2032 2043 """
2033 2044 if not revs:
2034 2045 return []
2035 2046 start = self.start
2036 2047 length = self.length
2037 2048 inline = self._inline
2038 2049 iosize = self._io.size
2039 2050 buffer = util.buffer
2040 2051
2041 2052 l = []
2042 2053 ladd = l.append
2043 2054
2044 2055 if not self._withsparseread:
2045 2056 slicedchunks = (revs,)
2046 2057 else:
2047 2058 slicedchunks = _slicechunk(self, revs, targetsize=targetsize)
2048 2059
2049 2060 for revschunk in slicedchunks:
2050 2061 firstrev = revschunk[0]
2051 2062 # Skip trailing revisions with empty diff
2052 2063 for lastrev in revschunk[::-1]:
2053 2064 if length(lastrev) != 0:
2054 2065 break
2055 2066
2056 2067 try:
2057 2068 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
2058 2069 except OverflowError:
2059 2070 # issue4215 - we can't cache a run of chunks greater than
2060 2071 # 2G on Windows
2061 2072 return [self._chunk(rev, df=df) for rev in revschunk]
2062 2073
2063 2074 decomp = self.decompress
2064 2075 for rev in revschunk:
2065 2076 chunkstart = start(rev)
2066 2077 if inline:
2067 2078 chunkstart += (rev + 1) * iosize
2068 2079 chunklength = length(rev)
2069 2080 ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
2070 2081
2071 2082 return l
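    # Illustrative sketch (reviewer annotation, not part of this changeset):
    # rebuilding a revision from its delta chain with one batched read,
    # which is exactly the pattern revision() uses further down:
    #
    #   chain, _stopped = rl._deltachain(rev)
    #   bins = rl._chunks(chain)          # one sliced read + decompress
    #   rawtext = mdiff.patches(bytes(bins[0]), bins[1:])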
2072 2083
2073 2084 def _chunkclear(self):
2074 2085 """Clear the raw chunk cache."""
2075 2086 self._chunkcache = (0, '')
2076 2087
2077 2088 def deltaparent(self, rev):
2078 2089 """return deltaparent of the given revision"""
2079 2090 base = self.index[rev][3]
2080 2091 if base == rev:
2081 2092 return nullrev
2082 2093 elif self._generaldelta:
2083 2094 return base
2084 2095 else:
2085 2096 return rev - 1
2086 2097
2087 2098 def revdiff(self, rev1, rev2):
2088 2099 """return or calculate a delta between two revisions
2089 2100
2090 2101 The delta calculated is in binary form and is intended to be written to
2091 2102 revlog data directly. So this function needs raw revision data.
2092 2103 """
2093 2104 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
2094 2105 return bytes(self._chunk(rev2))
2095 2106
2096 2107 return mdiff.textdiff(self.revision(rev1, raw=True),
2097 2108 self.revision(rev2, raw=True))
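    # Illustrative sketch (reviewer annotation, not part of this changeset):
    # when rev2 is already stored as a delta against rev1, the stored chunk
    # is returned as-is; otherwise a fresh binary diff is computed. Either
    # way the result patches cleanly onto the raw text of rev1:
    #
    #   delta = rl.revdiff(rev1, rev2)
    #   assert mdiff.patches(rl.revision(rev1, raw=True), [delta]) == \
    #          rl.revision(rev2, raw=True)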
2098 2109
2099 2110 def revision(self, nodeorrev, _df=None, raw=False):
2100 2111 """return an uncompressed revision of a given node or revision
2101 2112 number.
2102 2113
2103 2114 _df - an existing file handle to read from. (internal-only)
2104 2115 raw - an optional argument specifying if the revision data is to be
2105 2116 treated as raw data when applying flag transforms. 'raw' should be set
2106 2117 to True when generating changegroups or in debug commands.
2107 2118 """
2108 2119 if isinstance(nodeorrev, int):
2109 2120 rev = nodeorrev
2110 2121 node = self.node(rev)
2111 2122 else:
2112 2123 node = nodeorrev
2113 2124 rev = None
2114 2125
2115 2126 cachedrev = None
2116 2127 flags = None
2117 2128 rawtext = None
2118 2129 if node == nullid:
2119 2130 return ""
2120 2131 if self._cache:
2121 2132 if self._cache[0] == node:
2122 2133 # _cache only stores rawtext
2123 2134 if raw:
2124 2135 return self._cache[2]
2125 2136 # duplicated, but good for perf
2126 2137 if rev is None:
2127 2138 rev = self.rev(node)
2128 2139 if flags is None:
2129 2140 flags = self.flags(rev)
2130 2141 # no extra flags set, no flag processor runs, text = rawtext
2131 2142 if flags == REVIDX_DEFAULT_FLAGS:
2132 2143 return self._cache[2]
2133 2144 # rawtext is reusable. need to run flag processor
2134 2145 rawtext = self._cache[2]
2135 2146
2136 2147 cachedrev = self._cache[1]
2137 2148
2138 2149 # look up what we need to read
2139 2150 if rawtext is None:
2140 2151 if rev is None:
2141 2152 rev = self.rev(node)
2142 2153
2143 2154 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
2144 2155 if stopped:
2145 2156 rawtext = self._cache[2]
2146 2157
2147 2158 # drop cache to save memory
2148 2159 self._cache = None
2149 2160
2150 2161 targetsize = None
2151 2162 rawsize = self.index[rev][2]
2152 2163 if 0 <= rawsize:
2153 2164 targetsize = 4 * rawsize
2154 2165
2155 2166 bins = self._chunks(chain, df=_df, targetsize=targetsize)
2156 2167 if rawtext is None:
2157 2168 rawtext = bytes(bins[0])
2158 2169 bins = bins[1:]
2159 2170
2160 2171 rawtext = mdiff.patches(rawtext, bins)
2161 2172 self._cache = (node, rev, rawtext)
2162 2173
2163 2174 if flags is None:
2164 2175 if rev is None:
2165 2176 rev = self.rev(node)
2166 2177 flags = self.flags(rev)
2167 2178
2168 2179 text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
2169 2180 if validatehash:
2170 2181 self.checkhash(text, node, rev=rev)
2171 2182
2172 2183 return text
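    # Illustrative sketch (reviewer annotation, not part of this changeset):
    # the ``raw`` flag decides whether flag processors run on the result:
    #
    #   rawtext = rl.revision(node, raw=True)  # stored bytes, for
    #                                           # changegroups/debug
    #   text = rl.revision(node)                # after 'read' transforms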
2173 2184
2174 2185 def hash(self, text, p1, p2):
2175 2186 """Compute a node hash.
2176 2187
2177 2188 Available as a function so that subclasses can replace the hash
2178 2189 as needed.
2179 2190 """
2180 2191 return hash(text, p1, p2)
2181 2192
2182 2193 def _processflags(self, text, flags, operation, raw=False):
2183 2194 """Inspect revision data flags and applies transforms defined by
2184 2195 registered flag processors.
2185 2196
2186 2197 ``text`` - the revision data to process
2187 2198 ``flags`` - the revision flags
2188 2199 ``operation`` - the operation being performed (read or write)
2189 2200 ``raw`` - an optional argument describing if the raw transform should be
2190 2201 applied.
2191 2202
2192 2203 This method processes the flags in the order (or reverse order if
2193 2204 ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
2194 2205 flag processors registered for present flags. The order of flags defined
2195 2206 in REVIDX_FLAGS_ORDER needs to be stable to allow non-commutativity.
2196 2207
2197 2208 Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
2198 2209 processed text and ``validatehash`` is a bool indicating whether the
2199 2210 returned text should be checked for hash integrity.
2200 2211
2201 2212 Note: If the ``raw`` argument is set, it has precedence over the
2202 2213 operation and will only update the value of ``validatehash``.
2203 2214 """
2204 2215 # fast path: no flag processors will run
2205 2216 if flags == 0:
2206 2217 return text, True
2207 2218 if operation not in ('read', 'write'):

2208 2219 raise ProgrammingError(_("invalid '%s' operation ") % (operation))
2209 2220 # Check all flags are known.
2210 2221 if flags & ~REVIDX_KNOWN_FLAGS:
2211 2222 raise RevlogError(_("incompatible revision flag '%#x'") %
2212 2223 (flags & ~REVIDX_KNOWN_FLAGS))
2213 2224 validatehash = True
2214 2225 # Depending on the operation (read or write), the order might be
2215 2226 # reversed due to non-commutative transforms.
2216 2227 orderedflags = REVIDX_FLAGS_ORDER
2217 2228 if operation == 'write':
2218 2229 orderedflags = reversed(orderedflags)
2219 2230
2220 2231 for flag in orderedflags:
2221 2232 # If a flagprocessor has been registered for a known flag, apply the
2222 2233 # related operation transform and update result tuple.
2223 2234 if flag & flags:
2224 2235 vhash = True
2225 2236
2226 2237 if flag not in _flagprocessors:
2227 2238 message = _("missing processor for flag '%#x'") % (flag)
2228 2239 raise RevlogError(message)
2229 2240
2230 2241 processor = _flagprocessors[flag]
2231 2242 if processor is not None:
2232 2243 readtransform, writetransform, rawtransform = processor
2233 2244
2234 2245 if raw:
2235 2246 vhash = rawtransform(self, text)
2236 2247 elif operation == 'read':
2237 2248 text, vhash = readtransform(self, text)
2238 2249 else: # write operation
2239 2250 text, vhash = writetransform(self, text)
2240 2251 validatehash = validatehash and vhash
2241 2252
2242 2253 return text, validatehash
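    # Illustrative sketch (reviewer annotation, not part of this changeset):
    # a processor is a (read, write, raw) triple registered per flag bit,
    # roughly what the lfs extension does. ``fromstorage``/``tostorage``
    # are hypothetical helpers:
    #
    #   def readtransform(rl, text):
    #       return fromstorage(text), False    # False: skip hash check
    #   def writetransform(rl, text):
    #       return tostorage(text), False
    #   def rawtransform(rl, text):
    #       return False                       # raw text is not hashable
    #   addflagprocessor(REVIDX_EXTSTORED,
    #                    (readtransform, writetransform, rawtransform))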
2243 2254
2244 2255 def checkhash(self, text, node, p1=None, p2=None, rev=None):
2245 2256 """Check node hash integrity.
2246 2257
2247 2258 Available as a function so that subclasses can extend hash mismatch
2248 2259 behaviors as needed.
2249 2260 """
2250 2261 try:
2251 2262 if p1 is None and p2 is None:
2252 2263 p1, p2 = self.parents(node)
2253 2264 if node != self.hash(text, p1, p2):
2254 2265 revornode = rev
2255 2266 if revornode is None:
2256 2267 revornode = templatefilters.short(hex(node))
2257 2268 raise RevlogError(_("integrity check failed on %s:%s")
2258 2269 % (self.indexfile, pycompat.bytestr(revornode)))
2259 2270 except RevlogError:
2260 2271 if self._censorable and _censoredtext(text):
2261 2272 raise error.CensoredNodeError(self.indexfile, node, text)
2262 2273 raise
2263 2274
2264 2275 def _enforceinlinesize(self, tr, fp=None):
2265 2276 """Check if the revlog is too big for inline and convert if so.
2266 2277
2267 2278 This should be called after revisions are added to the revlog. If the
2268 2279 revlog has grown too large to be an inline revlog, it will convert it
2269 2280 to use multiple index and data files.
2270 2281 """
2271 2282 tiprev = len(self) - 1
2272 2283 if (not self._inline or
2273 2284 (self.start(tiprev) + self.length(tiprev)) < _maxinline):
2274 2285 return
2275 2286
2276 2287 trinfo = tr.find(self.indexfile)
2277 2288 if trinfo is None:
2278 2289 raise RevlogError(_("%s not found in the transaction")
2279 2290 % self.indexfile)
2280 2291
2281 2292 trindex = trinfo[2]
2282 2293 if trindex is not None:
2283 2294 dataoff = self.start(trindex)
2284 2295 else:
2285 2296 # revlog was stripped at start of transaction, use all leftover data
2286 2297 trindex = len(self) - 1
2287 2298 dataoff = self.end(tiprev)
2288 2299
2289 2300 tr.add(self.datafile, dataoff)
2290 2301
2291 2302 if fp:
2292 2303 fp.flush()
2293 2304 fp.close()
2294 2305
2295 2306 with self._datafp('w') as df:
2296 2307 for r in self:
2297 2308 df.write(self._getsegmentforrevs(r, r)[1])
2298 2309
2299 2310 with self._indexfp('w') as fp:
2300 2311 self.version &= ~FLAG_INLINE_DATA
2301 2312 self._inline = False
2302 2313 io = self._io
2303 2314 for i in self:
2304 2315 e = io.packentry(self.index[i], self.node, self.version, i)
2305 2316 fp.write(e)
2306 2317
2307 2318 # the temp file replaces the real index when we exit the context
2308 2319 # manager
2309 2320
2310 2321 tr.replace(self.indexfile, trindex * self._io.size)
2311 2322 self._chunkclear()
2312 2323
2313 2324 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
2314 2325 node=None, flags=REVIDX_DEFAULT_FLAGS, deltacomputer=None):
2315 2326 """add a revision to the log
2316 2327
2317 2328 text - the revision data to add
2318 2329 transaction - the transaction object used for rollback
2319 2330 link - the linkrev data to add
2320 2331 p1, p2 - the parent nodeids of the revision
2321 2332 cachedelta - an optional precomputed delta
2322 2333 node - nodeid of revision; typically node is not specified, and it is
2323 2334 computed by default as hash(text, p1, p2), however subclasses might
2324 2335 use different hashing method (and override checkhash() in such case)
2325 2336 flags - the known flags to set on the revision
2326 2337 deltacomputer - an optional _deltacomputer instance shared between
2327 2338 multiple calls
2328 2339 """
2329 2340 if link == nullrev:
2330 2341 raise RevlogError(_("attempted to add linkrev -1 to %s")
2331 2342 % self.indexfile)
2332 2343
2333 2344 if flags:
2334 2345 node = node or self.hash(text, p1, p2)
2335 2346
2336 2347 rawtext, validatehash = self._processflags(text, flags, 'write')
2337 2348
2338 2349 # If the flag processor modifies the revision data, ignore any provided
2339 2350 # cachedelta.
2340 2351 if rawtext != text:
2341 2352 cachedelta = None
2342 2353
2343 2354 if len(rawtext) > _maxentrysize:
2344 2355 raise RevlogError(
2345 2356 _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
2346 2357 % (self.indexfile, len(rawtext)))
2347 2358
2348 2359 node = node or self.hash(rawtext, p1, p2)
2349 2360 if node in self.nodemap:
2350 2361 return node
2351 2362
2352 2363 if validatehash:
2353 2364 self.checkhash(rawtext, node, p1=p1, p2=p2)
2354 2365
2355 2366 return self.addrawrevision(rawtext, transaction, link, p1, p2, node,
2356 2367 flags, cachedelta=cachedelta,
2357 2368 deltacomputer=deltacomputer)
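    # Illustrative sketch (reviewer annotation, not part of this changeset):
    # roughly the caller pattern used by filelog and changelog inside an
    # open transaction ``tr``:
    #
    #   node = rl.addrevision(text, tr, linkrev, p1node, p2node)
    #   assert node in rl.nodemap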
2358 2369
2359 2370 def addrawrevision(self, rawtext, transaction, link, p1, p2, node, flags,
2360 2371 cachedelta=None, deltacomputer=None):
2361 2372 """add a raw revision with known flags, node and parents
2362 2373 useful when reusing a revision not stored in this revlog (ex: received
2363 2374 over wire, or read from an external bundle).
2364 2375 """
2365 2376 dfh = None
2366 2377 if not self._inline:
2367 2378 dfh = self._datafp("a+")
2368 2379 ifh = self._indexfp("a+")
2369 2380 try:
2370 2381 return self._addrevision(node, rawtext, transaction, link, p1, p2,
2371 2382 flags, cachedelta, ifh, dfh,
2372 2383 deltacomputer=deltacomputer)
2373 2384 finally:
2374 2385 if dfh:
2375 2386 dfh.close()
2376 2387 ifh.close()
2377 2388
2378 2389 def compress(self, data):
2379 2390 """Generate a possibly-compressed representation of data."""
2380 2391 if not data:
2381 2392 return '', data
2382 2393
2383 2394 compressed = self._compressor.compress(data)
2384 2395
2385 2396 if compressed:
2386 2397 # The revlog compressor added the header in the returned data.
2387 2398 return '', compressed
2388 2399
2389 2400 if data[0:1] == '\0':
2390 2401 return '', data
2391 2402 return 'u', data
2392 2403
2393 2404 def decompress(self, data):
2394 2405 """Decompress a revlog chunk.
2395 2406
2396 2407 The chunk is expected to begin with a header identifying the
2397 2408 format type so it can be routed to an appropriate decompressor.
2398 2409 """
2399 2410 if not data:
2400 2411 return data
2401 2412
2402 2413 # Revlogs are read much more frequently than they are written and many
2403 2414 # chunks only take microseconds to decompress, so performance is
2404 2415 # important here.
2405 2416 #
2406 2417 # We can make a few assumptions about revlogs:
2407 2418 #
2408 2419 # 1) the majority of chunks will be compressed (as opposed to inline
2409 2420 # raw data).
2410 2421 # 2) decompressing *any* data will likely be at least 10x slower than
2411 2422 # returning raw inline data.
2412 2423 # 3) we want to prioritize common and officially supported compression
2413 2424 # engines
2414 2425 #
2415 2426 # It follows that we want to optimize for "decompress compressed data
2416 2427 # when encoded with common and officially supported compression engines"
2417 2428 # case over "raw data" and "data encoded by less common or non-official
2418 2429 # compression engines." That is why we have the inline lookup first
2419 2430 # followed by the compengines lookup.
2420 2431 #
2421 2432 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
2422 2433 # compressed chunks. And this matters for changelog and manifest reads.
2423 2434 t = data[0:1]
2424 2435
2425 2436 if t == 'x':
2426 2437 try:
2427 2438 return _zlibdecompress(data)
2428 2439 except zlib.error as e:
2429 2440 raise RevlogError(_('revlog decompress error: %s') %
2430 2441 stringutil.forcebytestr(e))
2431 2442 # '\0' is more common than 'u' so it goes first.
2432 2443 elif t == '\0':
2433 2444 return data
2434 2445 elif t == 'u':
2435 2446 return util.buffer(data, 1)
2436 2447
2437 2448 try:
2438 2449 compressor = self._decompressors[t]
2439 2450 except KeyError:
2440 2451 try:
2441 2452 engine = util.compengines.forrevlogheader(t)
2442 2453 compressor = engine.revlogcompressor()
2443 2454 self._decompressors[t] = compressor
2444 2455 except KeyError:
2445 2456 raise RevlogError(_('unknown compression type %r') % t)
2446 2457
2447 2458 return compressor.decompress(data)
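    # Illustrative sketch (reviewer annotation, not part of this changeset):
    # the one-byte header produced by compress() routes decompress(), so
    # the two round-trip:
    #
    #   header, packed = rl.compress(b'some revision text')
    #   assert bytes(rl.decompress(header + packed)) == b'some revision text'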
2448 2459
2449 2460 def _isgooddeltainfo(self, deltainfo, revinfo):
2450 2461 """Returns True if the given delta is good. Good means that it is within
2451 2462 the disk span, disk size, and chain length bounds that we know to be
2452 2463 performant."""
2453 2464 if deltainfo is None:
2454 2465 return False
2455 2466
2456 2467 # - 'deltainfo.distance' is the distance from the base revision --
2457 2468 # bounding it limits the amount of I/O we need to do.
2458 2469 # - 'deltainfo.compresseddeltalen' is the sum of the total size of
2459 2470 # deltas we need to apply -- bounding it limits the amount of CPU
2460 2471 # we consume.
2461 2472
2462 2473 if self._sparserevlog:
2463 2474 # As sparse-read will be used, we can consider that the distance,
2464 2475 # instead of being the span of the whole chunk,
2465 2476 # is the span of the largest read chunk
2466 2477 base = deltainfo.base
2467 2478
2468 2479 if base != nullrev:
2469 2480 deltachain = self._deltachain(base)[0]
2470 2481 else:
2471 2482 deltachain = []
2472 2483
2473 2484 chunks = _slicechunk(self, deltachain, deltainfo)
2474 distance = max(map(lambda revs:_segmentspan(self, revs), chunks))
2485 all_span = [_segmentspan(self, revs, deltainfo) for revs in chunks]
2486 distance = max(all_span)
2475 2487 else:
2476 2488 distance = deltainfo.distance
2477 2489
2478 2490 textlen = revinfo.textlen
2479 2491 defaultmax = textlen * 4
2480 2492 maxdist = self._maxdeltachainspan
2481 2493 if not maxdist:
2482 2494 maxdist = distance # ensure the conditional passes
2483 2495 maxdist = max(maxdist, defaultmax)
2484 2496 if self._sparserevlog and maxdist < self._srmingapsize:
2485 2497 # In multiple places, we ignore irrelevant data ranges below a
2486 2498 # certain size. We also apply that tradeoff here and relax the
2487 2499 # span constraint for small enough content.
2488 2500 maxdist = self._srmingapsize
2489 2501
2490 2502 # Bad delta from read span:
2491 2503 #
2492 2504 # If the span of data read is larger than the maximum allowed.
2493 2505 if maxdist < distance:
2494 2506 return False
2495 2507
2496 2508 # Bad delta from new delta size:
2497 2509 #
2498 2510 # If the delta size is larger than the target text, storing the
2499 2511 # delta will be inefficient.
2500 2512 if textlen < deltainfo.deltalen:
2501 2513 return False
2502 2514
2503 2515 # Bad delta from cumulative payload size:
2504 2516 #
2505 2517 # If the sum of the deltas gets larger than K * the target text length.
2506 2518 if textlen * LIMIT_DELTA2TEXT < deltainfo.compresseddeltalen:
2507 2519 return False
2508 2520
2509 2521 # Bad delta from chain length:
2510 2522 #
2511 2523 # If the number of deltas in the chain gets too high.
2512 2524 if self._maxchainlen and self._maxchainlen < deltainfo.chainlen:
2513 2525 return False
2514 2526
2515 2527 return True
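    # Reviewer annotation (not part of this changeset), summarizing the
    # checks above: modulo the sparse-revlog small-gap relaxation, a
    # candidate delta is kept only when
    #
    #   distance           <= max(_maxdeltachainspan, 4 * textlen)
    #   deltalen           <= textlen
    #   compresseddeltalen <= LIMIT_DELTA2TEXT * textlen
    #   chainlen           <= _maxchainlen (when configured)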
2516 2528
2517 2529 def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
2518 2530 cachedelta, ifh, dfh, alwayscache=False,
2519 2531 deltacomputer=None):
2520 2532 """internal function to add revisions to the log
2521 2533
2522 2534 see addrevision for argument descriptions.
2523 2535
2524 2536 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2525 2537
2526 2538 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2527 2539 be used.
2528 2540
2529 2541 invariants:
2530 2542 - rawtext is optional (can be None); if not set, cachedelta must be set.
2531 2543 if both are set, they must correspond to each other.
2532 2544 """
2533 2545 if node == nullid:
2534 2546 raise RevlogError(_("%s: attempt to add null revision") %
2535 2547 (self.indexfile))
2536 2548 if node == wdirid or node in wdirfilenodeids:
2537 2549 raise RevlogError(_("%s: attempt to add wdir revision") %
2538 2550 (self.indexfile))
2539 2551
2540 2552 if self._inline:
2541 2553 fh = ifh
2542 2554 else:
2543 2555 fh = dfh
2544 2556
2545 2557 btext = [rawtext]
2546 2558
2547 2559 curr = len(self)
2548 2560 prev = curr - 1
2549 2561 offset = self.end(prev)
2550 2562 p1r, p2r = self.rev(p1), self.rev(p2)
2551 2563
2552 2564 # full versions are inserted when the needed deltas
2553 2565 # become comparable to the uncompressed text
2554 2566 if rawtext is None:
2555 2567 # need rawtext size, before changed by flag processors, which is
2556 2568 # the non-raw size. use revlog explicitly to avoid filelog's extra
2557 2569 # logic that might remove metadata size.
2558 2570 textlen = mdiff.patchedsize(revlog.size(self, cachedelta[0]),
2559 2571 cachedelta[1])
2560 2572 else:
2561 2573 textlen = len(rawtext)
2562 2574
2563 2575 if deltacomputer is None:
2564 2576 deltacomputer = _deltacomputer(self)
2565 2577
2566 2578 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2567 2579
2568 2580 # no delta for flag processor revision (see "candelta" for why)
2569 2581 # not calling candelta since only one revision needs testing, also to
2570 2582 # avoid overhead fetching flags again.
2571 2583 if flags & REVIDX_RAWTEXT_CHANGING_FLAGS:
2572 2584 deltainfo = None
2573 2585 else:
2574 2586 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2575 2587
2576 2588 if deltainfo is not None:
2577 2589 base = deltainfo.base
2578 2590 chainbase = deltainfo.chainbase
2579 2591 data = deltainfo.data
2580 2592 l = deltainfo.deltalen
2581 2593 else:
2582 2594 rawtext = deltacomputer.buildtext(revinfo, fh)
2583 2595 data = self.compress(rawtext)
2584 2596 l = len(data[1]) + len(data[0])
2585 2597 base = chainbase = curr
2586 2598
2587 2599 e = (offset_type(offset, flags), l, textlen,
2588 2600 base, link, p1r, p2r, node)
2589 2601 self.index.append(e)
2590 2602 self.nodemap[node] = curr
2591 2603
2592 2604 entry = self._io.packentry(e, self.node, self.version, curr)
2593 2605 self._writeentry(transaction, ifh, dfh, entry, data, link, offset)
2594 2606
2595 2607 if alwayscache and rawtext is None:
2596 2608 rawtext = deltacomputer.buildtext(revinfo, fh)
2597 2609
2598 2610 if type(rawtext) == bytes: # only accept immutable objects
2599 2611 self._cache = (node, curr, rawtext)
2600 2612 self._chainbasecache[curr] = chainbase
2601 2613 return node
2602 2614
2603 2615 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
2604 2616 # Files opened in a+ mode have inconsistent behavior on various
2605 2617 # platforms. Windows requires that a file positioning call be made
2606 2618 # when the file handle transitions between reads and writes. See
2607 2619 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2608 2620 # platforms, Python or the platform itself can be buggy. Some versions
2609 2621 # of Solaris have been observed to not append at the end of the file
2610 2622 # if the file was seeked to before the end. See issue4943 for more.
2611 2623 #
2612 2624 # We work around this issue by inserting a seek() before writing.
2613 2625 # Note: This is likely not necessary on Python 3.
2614 2626 ifh.seek(0, os.SEEK_END)
2615 2627 if dfh:
2616 2628 dfh.seek(0, os.SEEK_END)
2617 2629
2618 2630 curr = len(self) - 1
2619 2631 if not self._inline:
2620 2632 transaction.add(self.datafile, offset)
2621 2633 transaction.add(self.indexfile, curr * len(entry))
2622 2634 if data[0]:
2623 2635 dfh.write(data[0])
2624 2636 dfh.write(data[1])
2625 2637 ifh.write(entry)
2626 2638 else:
2627 2639 offset += curr * self._io.size
2628 2640 transaction.add(self.indexfile, offset, curr)
2629 2641 ifh.write(entry)
2630 2642 ifh.write(data[0])
2631 2643 ifh.write(data[1])
2632 2644 self._enforceinlinesize(transaction, ifh)
2633 2645
2634 2646 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
2635 2647 """
2636 2648 add a delta group
2637 2649
2638 2650 given a set of deltas, add them to the revision log. the
2639 2651 first delta is against its parent, which should be in our
2640 2652 log, the rest are against the previous delta.
2641 2653
2642 2654 If ``addrevisioncb`` is defined, it will be called with arguments of
2643 2655 this revlog and the node that was added.
2644 2656 """
2645 2657
2646 2658 nodes = []
2647 2659
2648 2660 r = len(self)
2649 2661 end = 0
2650 2662 if r:
2651 2663 end = self.end(r - 1)
2652 2664 ifh = self._indexfp("a+")
2653 2665 isize = r * self._io.size
2654 2666 if self._inline:
2655 2667 transaction.add(self.indexfile, end + isize, r)
2656 2668 dfh = None
2657 2669 else:
2658 2670 transaction.add(self.indexfile, isize, r)
2659 2671 transaction.add(self.datafile, end)
2660 2672 dfh = self._datafp("a+")
2661 2673 def flush():
2662 2674 if dfh:
2663 2675 dfh.flush()
2664 2676 ifh.flush()
2665 2677 try:
2666 2678 deltacomputer = _deltacomputer(self)
2667 2679 # loop through our set of deltas
2668 2680 for data in deltas:
2669 2681 node, p1, p2, linknode, deltabase, delta, flags = data
2670 2682 link = linkmapper(linknode)
2671 2683 flags = flags or REVIDX_DEFAULT_FLAGS
2672 2684
2673 2685 nodes.append(node)
2674 2686
2675 2687 if node in self.nodemap:
2676 2688 # this can happen if two branches make the same change
2677 2689 continue
2678 2690
2679 2691 for p in (p1, p2):
2680 2692 if p not in self.nodemap:
2681 2693 raise LookupError(p, self.indexfile,
2682 2694 _('unknown parent'))
2683 2695
2684 2696 if deltabase not in self.nodemap:
2685 2697 raise LookupError(deltabase, self.indexfile,
2686 2698 _('unknown delta base'))
2687 2699
2688 2700 baserev = self.rev(deltabase)
2689 2701
2690 2702 if baserev != nullrev and self.iscensored(baserev):
2691 2703 # if base is censored, delta must be full replacement in a
2692 2704 # single patch operation
2693 2705 hlen = struct.calcsize(">lll")
2694 2706 oldlen = self.rawsize(baserev)
2695 2707 newlen = len(delta) - hlen
2696 2708 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
2697 2709 raise error.CensoredBaseError(self.indexfile,
2698 2710 self.node(baserev))
2699 2711
2700 2712 if not flags and self._peek_iscensored(baserev, delta, flush):
2701 2713 flags |= REVIDX_ISCENSORED
2702 2714
2703 2715 # We assume consumers of addrevisioncb will want to retrieve
2704 2716 # the added revision, which will require a call to
2705 2717 # revision(). revision() will fast path if there is a cache
2706 2718 # hit. So, we tell _addrevision() to always cache in this case.
2707 2719 # We're only using addgroup() in the context of changegroup
2708 2720 # generation so the revision data can always be handled as raw
2709 2721 # by the flagprocessor.
2710 2722 self._addrevision(node, None, transaction, link,
2711 2723 p1, p2, flags, (baserev, delta),
2712 2724 ifh, dfh,
2713 2725 alwayscache=bool(addrevisioncb),
2714 2726 deltacomputer=deltacomputer)
2715 2727
2716 2728 if addrevisioncb:
2717 2729 addrevisioncb(self, node)
2718 2730
2719 2731 if not dfh and not self._inline:
2720 2732 # addrevision switched from inline to conventional
2721 2733 # reopen the index
2722 2734 ifh.close()
2723 2735 dfh = self._datafp("a+")
2724 2736 ifh = self._indexfp("a+")
2725 2737 finally:
2726 2738 if dfh:
2727 2739 dfh.close()
2728 2740 ifh.close()
2729 2741
2730 2742 return nodes
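    # Illustrative sketch (reviewer annotation, not part of this changeset):
    # each element of ``deltas`` is the 7-tuple unpacked in the loop above,
    # and ``linkmapper`` maps a linknode to a linkrev (for the changelog
    # itself, something like ``cl.rev``):
    #
    #   deltas = [(node, p1, p2, linknode, deltabase, delta, flags), ...]
    #   added = rl.addgroup(deltas, linkmapper, tr)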
2731 2743
2732 2744 def iscensored(self, rev):
2733 2745 """Check if a file revision is censored."""
2734 2746 if not self._censorable:
2735 2747 return False
2736 2748
2737 2749 return self.flags(rev) & REVIDX_ISCENSORED
2738 2750
2739 2751 def _peek_iscensored(self, baserev, delta, flush):
2740 2752 """Quickly check if a delta produces a censored revision."""
2741 2753 if not self._censorable:
2742 2754 return False
2743 2755
2744 2756 # Fragile heuristic: unless new file meta keys are added alphabetically
2745 2757 # preceding "censored", all censored revisions are prefixed by
2746 2758 # "\1\ncensored:". A delta producing such a censored revision must be a
2747 2759 # full-replacement delta, so we inspect the first and only patch in the
2748 2760 # delta for this prefix.
2749 2761 hlen = struct.calcsize(">lll")
2750 2762 if len(delta) <= hlen:
2751 2763 return False
2752 2764
2753 2765 oldlen = self.rawsize(baserev)
2754 2766 newlen = len(delta) - hlen
2755 2767 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
2756 2768 return False
2757 2769
2758 2770 add = "\1\ncensored:"
2759 2771 addlen = len(add)
2760 2772 return newlen >= addlen and delta[hlen:hlen + addlen] == add
2761 2773
2762 2774 def getstrippoint(self, minlink):
2763 2775 """find the minimum rev that must be stripped to strip the linkrev
2764 2776
2765 2777 Returns a tuple containing the minimum rev and a set of all revs that
2766 2778 have linkrevs that will be broken by this strip.
2767 2779 """
2768 2780 brokenrevs = set()
2769 2781 strippoint = len(self)
2770 2782
2771 2783 heads = {}
2772 2784 futurelargelinkrevs = set()
2773 2785 for head in self.headrevs():
2774 2786 headlinkrev = self.linkrev(head)
2775 2787 heads[head] = headlinkrev
2776 2788 if headlinkrev >= minlink:
2777 2789 futurelargelinkrevs.add(headlinkrev)
2778 2790
2779 2791 # This algorithm involves walking down the rev graph, starting at the
2780 2792 # heads. Since the revs are topologically sorted according to linkrev,
2781 2793 # once all head linkrevs are below the minlink, we know there are
2782 2794 # no more revs that could have a linkrev greater than minlink.
2783 2795 # So we can stop walking.
2784 2796 while futurelargelinkrevs:
2785 2797 strippoint -= 1
2786 2798 linkrev = heads.pop(strippoint)
2787 2799
2788 2800 if linkrev < minlink:
2789 2801 brokenrevs.add(strippoint)
2790 2802 else:
2791 2803 futurelargelinkrevs.remove(linkrev)
2792 2804
2793 2805 for p in self.parentrevs(strippoint):
2794 2806 if p != nullrev:
2795 2807 plinkrev = self.linkrev(p)
2796 2808 heads[p] = plinkrev
2797 2809 if plinkrev >= minlink:
2798 2810 futurelargelinkrevs.add(plinkrev)
2799 2811
2800 2812 return strippoint, brokenrevs
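    # Illustrative sketch (reviewer annotation, not part of this changeset):
    # strip() below is the main consumer:
    #
    #   rev, broken = rl.getstrippoint(minlink)
    #   # truncating at ``rev`` also discards the revs in ``broken``, which
    #   # link to changesets that survive the strip; callers typically save
    #   # and re-add them afterwards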
2801 2813
2802 2814 def strip(self, minlink, transaction):
2803 2815 """truncate the revlog on the first revision with a linkrev >= minlink
2804 2816
2805 2817 This function is called when we're stripping revision minlink and
2806 2818 its descendants from the repository.
2807 2819
2808 2820 We have to remove all revisions with linkrev >= minlink, because
2809 2821 the equivalent changelog revisions will be renumbered after the
2810 2822 strip.
2811 2823
2812 2824 So we truncate the revlog on the first of these revisions, and
2813 2825 trust that the caller has saved the revisions that shouldn't be
2814 2826 removed and that it'll re-add them after this truncation.
2815 2827 """
2816 2828 if len(self) == 0:
2817 2829 return
2818 2830
2819 2831 rev, _ = self.getstrippoint(minlink)
2820 2832 if rev == len(self):
2821 2833 return
2822 2834
2823 2835 # first truncate the files on disk
2824 2836 end = self.start(rev)
2825 2837 if not self._inline:
2826 2838 transaction.add(self.datafile, end)
2827 2839 end = rev * self._io.size
2828 2840 else:
2829 2841 end += rev * self._io.size
2830 2842
2831 2843 transaction.add(self.indexfile, end)
2832 2844
2833 2845 # then reset internal state in memory to forget those revisions
2834 2846 self._cache = None
2835 2847 self._chaininfocache = {}
2836 2848 self._chunkclear()
2837 2849 for x in pycompat.xrange(rev, len(self)):
2838 2850 del self.nodemap[self.node(x)]
2839 2851
2840 2852 del self.index[rev:-1]
2841 2853 self._nodepos = None
2842 2854
2843 2855 def checksize(self):
2844 2856 expected = 0
2845 2857 if len(self):
2846 2858 expected = max(0, self.end(len(self) - 1))
2847 2859
2848 2860 try:
2849 2861 with self._datafp() as f:
2850 2862 f.seek(0, 2)
2851 2863 actual = f.tell()
2852 2864 dd = actual - expected
2853 2865 except IOError as inst:
2854 2866 if inst.errno != errno.ENOENT:
2855 2867 raise
2856 2868 dd = 0
2857 2869
2858 2870 try:
2859 2871 f = self.opener(self.indexfile)
2860 2872 f.seek(0, 2)
2861 2873 actual = f.tell()
2862 2874 f.close()
2863 2875 s = self._io.size
2864 2876 i = max(0, actual // s)
2865 2877 di = actual - (i * s)
2866 2878 if self._inline:
2867 2879 databytes = 0
2868 2880 for r in self:
2869 2881 databytes += max(0, self.length(r))
2870 2882 dd = 0
2871 2883 di = actual - len(self) * s - databytes
2872 2884 except IOError as inst:
2873 2885 if inst.errno != errno.ENOENT:
2874 2886 raise
2875 2887 di = 0
2876 2888
2877 2889 return (dd, di)
2878 2890
2879 2891 def files(self):
2880 2892 res = [self.indexfile]
2881 2893 if not self._inline:
2882 2894 res.append(self.datafile)
2883 2895 return res
2884 2896
2885 2897 DELTAREUSEALWAYS = 'always'
2886 2898 DELTAREUSESAMEREVS = 'samerevs'
2887 2899 DELTAREUSENEVER = 'never'
2888 2900
2889 2901 DELTAREUSEFULLADD = 'fulladd'
2890 2902
2891 2903 DELTAREUSEALL = {'always', 'samerevs', 'never', 'fulladd'}
2892 2904
2893 2905 def clone(self, tr, destrevlog, addrevisioncb=None,
2894 2906 deltareuse=DELTAREUSESAMEREVS, deltabothparents=None):
2895 2907 """Copy this revlog to another, possibly with format changes.
2896 2908
2897 2909 The destination revlog will contain the same revisions and nodes.
2898 2910 However, it may not be bit-for-bit identical due to e.g. delta encoding
2899 2911 differences.
2900 2912
2901 2913 The ``deltareuse`` argument controls how deltas from the existing revlog
2902 2914 are preserved in the destination revlog. The argument can have the
2903 2915 following values:
2904 2916
2905 2917 DELTAREUSEALWAYS
2906 2918 Deltas will always be reused (if possible), even if the destination
2907 2919 revlog would not select the same revisions for the delta. This is the
2908 2920 fastest mode of operation.
2909 2921 DELTAREUSESAMEREVS
2910 2922 Deltas will be reused if the destination revlog would pick the same
2911 2923 revisions for the delta. This mode strikes a balance between speed
2912 2924 and optimization.
2913 2925 DELTAREUSENEVER
2914 2926 Deltas will never be reused. This is the slowest mode of execution.
2915 2927 This mode can be used to recompute deltas (e.g. if the diff/delta
2916 2928 algorithm changes).
2917 2929
2918 2930 Delta computation can be slow, so the choice of delta reuse policy can
2919 2931 significantly affect run time.
2920 2932
2921 2933 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2922 2934 two extremes. Deltas will be reused if they are appropriate. But if a
2923 2935 better delta base is available, it will be used instead. This means if you
2924 2936 are converting a non-generaldelta revlog to a generaldelta revlog,
2925 2937 deltas will be recomputed if the delta's parent isn't a parent of the
2926 2938 revision.
2927 2939
2928 2940 In addition to the delta policy, the ``deltabothparents`` argument
2929 2941 controls whether to compute deltas against both parents for merges.
2930 2942 If unset, the destination revlog's current setting is used.
2931 2943 """
2932 2944 if deltareuse not in self.DELTAREUSEALL:
2933 2945 raise ValueError(_('value for deltareuse invalid: %s') % deltareuse)
2934 2946
2935 2947 if len(destrevlog):
2936 2948 raise ValueError(_('destination revlog is not empty'))
2937 2949
2938 2950 if getattr(self, 'filteredrevs', None):
2939 2951 raise ValueError(_('source revlog has filtered revisions'))
2940 2952 if getattr(destrevlog, 'filteredrevs', None):
2941 2953 raise ValueError(_('destination revlog has filtered revisions'))
2942 2954
2943 2955 # lazydeltabase controls whether to reuse a cached delta, if possible.
2944 2956 oldlazydeltabase = destrevlog._lazydeltabase
2945 2957 oldamd = destrevlog._deltabothparents
2946 2958
2947 2959 try:
2948 2960 if deltareuse == self.DELTAREUSEALWAYS:
2949 2961 destrevlog._lazydeltabase = True
2950 2962 elif deltareuse == self.DELTAREUSESAMEREVS:
2951 2963 destrevlog._lazydeltabase = False
2952 2964
2953 2965 destrevlog._deltabothparents = deltabothparents or oldamd
2954 2966
2955 2967 populatecachedelta = deltareuse in (self.DELTAREUSEALWAYS,
2956 2968 self.DELTAREUSESAMEREVS)
2957 2969
2958 2970 deltacomputer = _deltacomputer(destrevlog)
2959 2971 index = self.index
2960 2972 for rev in self:
2961 2973 entry = index[rev]
2962 2974
2963 2975 # Some classes override linkrev to take filtered revs into
2964 2976 # account. Use raw entry from index.
2965 2977 flags = entry[0] & 0xffff
2966 2978 linkrev = entry[4]
2967 2979 p1 = index[entry[5]][7]
2968 2980 p2 = index[entry[6]][7]
2969 2981 node = entry[7]
2970 2982
2971 2983 # (Possibly) reuse the delta from the revlog if allowed and
2972 2984 # the revlog chunk is a delta.
2973 2985 cachedelta = None
2974 2986 rawtext = None
2975 2987 if populatecachedelta:
2976 2988 dp = self.deltaparent(rev)
2977 2989 if dp != nullrev:
2978 2990 cachedelta = (dp, bytes(self._chunk(rev)))
2979 2991
2980 2992 if not cachedelta:
2981 2993 rawtext = self.revision(rev, raw=True)
2982 2994
2983 2995
2984 2996 if deltareuse == self.DELTAREUSEFULLADD:
2985 2997 destrevlog.addrevision(rawtext, tr, linkrev, p1, p2,
2986 2998 cachedelta=cachedelta,
2987 2999 node=node, flags=flags,
2988 3000 deltacomputer=deltacomputer)
2989 3001 else:
2990 3002 ifh = destrevlog.opener(destrevlog.indexfile, 'a+',
2991 3003 checkambig=False)
2992 3004 dfh = None
2993 3005 if not destrevlog._inline:
2994 3006 dfh = destrevlog.opener(destrevlog.datafile, 'a+')
2995 3007 try:
2996 3008 destrevlog._addrevision(node, rawtext, tr, linkrev, p1,
2997 3009 p2, flags, cachedelta, ifh, dfh,
2998 3010 deltacomputer=deltacomputer)
2999 3011 finally:
3000 3012 if dfh:
3001 3013 dfh.close()
3002 3014 ifh.close()
3003 3015
3004 3016 if addrevisioncb:
3005 3017 addrevisioncb(self, rev, node)
3006 3018 finally:
3007 3019 destrevlog._lazydeltabase = oldlazydeltabase
3008 3020 destrevlog._deltabothparents = oldamd
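    # Illustrative sketch (reviewer annotation, not part of this changeset):
    # repository upgrades drive clone() roughly like this, rewriting deltas
    # only where the destination would pick different bases:
    #
    #   with repo.transaction('upgrade') as tr:
    #       oldrl.clone(tr, newrl, deltareuse=oldrl.DELTAREUSESAMEREVS)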
@@ -1,1277 +1,1277 b''
1 1 #testcases b2-pushkey b2-binary
2 2
3 3 #if b2-pushkey
4 4 $ cat << EOF >> $HGRCPATH
5 5 > [devel]
6 6 > legacy.exchange=bookmarks
7 7 > EOF
8 8 #endif
9 9
10 10 #require serve
11 11
12 12 $ cat << EOF >> $HGRCPATH
13 13 > [ui]
14 14 > logtemplate={rev}:{node|short} {desc|firstline}
15 15 > [phases]
16 16 > publish=False
17 17 > [experimental]
18 18 > evolution.createmarkers=True
19 19 > evolution.exchange=True
20 20 > EOF
21 21
22 22 $ cat > $TESTTMP/hook.sh <<'EOF'
23 23 > echo "test-hook-bookmark: $HG_BOOKMARK: $HG_OLDNODE -> $HG_NODE"
24 24 > EOF
25 25 $ TESTHOOK="hooks.txnclose-bookmark.test=sh $TESTTMP/hook.sh"
26 26
27 27 initialize
28 28
29 29 $ hg init a
30 30 $ cd a
31 31 $ echo 'test' > test
32 32 $ hg commit -Am'test'
33 33 adding test
34 34
35 35 set bookmarks
36 36
37 37 $ hg bookmark X
38 38 $ hg bookmark Y
39 39 $ hg bookmark Z
40 40
41 41 import bookmark by name
42 42
43 43 $ hg init ../b
44 44 $ cd ../b
45 45 $ hg book Y
46 46 $ hg book
47 47 * Y -1:000000000000
48 48 $ hg pull ../a --config "$TESTHOOK"
49 49 pulling from ../a
50 50 requesting all changes
51 51 adding changesets
52 52 adding manifests
53 53 adding file changes
54 54 added 1 changesets with 1 changes to 1 files
55 55 adding remote bookmark X
56 56 updating bookmark Y
57 57 adding remote bookmark Z
58 58 new changesets 4e3505fd9583
59 59 test-hook-bookmark: X: -> 4e3505fd95835d721066b76e75dbb8cc554d7f77
60 60 test-hook-bookmark: Y: 0000000000000000000000000000000000000000 -> 4e3505fd95835d721066b76e75dbb8cc554d7f77
61 61 test-hook-bookmark: Z: -> 4e3505fd95835d721066b76e75dbb8cc554d7f77
62 62 (run 'hg update' to get a working copy)
63 63 $ hg bookmarks
64 64 X 0:4e3505fd9583
65 65 * Y 0:4e3505fd9583
66 66 Z 0:4e3505fd9583
67 67 $ hg debugpushkey ../a namespaces
68 68 bookmarks
69 69 namespaces
70 70 obsolete
71 71 phases
72 72 $ hg debugpushkey ../a bookmarks
73 73 X 4e3505fd95835d721066b76e75dbb8cc554d7f77
74 74 Y 4e3505fd95835d721066b76e75dbb8cc554d7f77
75 75 Z 4e3505fd95835d721066b76e75dbb8cc554d7f77
76 76
77 77 delete the bookmark to re-pull it
78 78
79 79 $ hg book -d X
80 80 $ hg pull -B X ../a
81 81 pulling from ../a
82 82 no changes found
83 83 adding remote bookmark X
84 84
85 85 finally no-op pull
86 86
87 87 $ hg pull -B X ../a
88 88 pulling from ../a
89 89 no changes found
90 90 $ hg bookmark
91 91 X 0:4e3505fd9583
92 92 * Y 0:4e3505fd9583
93 93 Z 0:4e3505fd9583
94 94
95 95 export bookmark by name
96 96
97 97 $ hg bookmark W
98 98 $ hg bookmark foo
99 99 $ hg bookmark foobar
100 100 $ hg push -B W ../a
101 101 pushing to ../a
102 102 searching for changes
103 103 no changes found
104 104 exporting bookmark W
105 105 [1]
106 106 $ hg -R ../a bookmarks
107 107 W -1:000000000000
108 108 X 0:4e3505fd9583
109 109 Y 0:4e3505fd9583
110 110 * Z 0:4e3505fd9583
111 111
112 112 delete a remote bookmark
113 113
114 114 $ hg book -d W
115 115
116 116 #if b2-pushkey
117 117
118 118 $ hg push -B W ../a --config "$TESTHOOK" --debug --config devel.bundle2.debug=yes
119 119 pushing to ../a
120 120 query 1; heads
121 121 searching for changes
122 122 all remote heads known locally
123 123 listing keys for "phases"
124 124 checking for updated bookmarks
125 125 listing keys for "bookmarks"
126 126 no changes found
127 127 bundle2-output-bundle: "HG20", 4 parts total
128 128 bundle2-output: start emission of HG20 stream
129 129 bundle2-output: bundle parameter:
130 130 bundle2-output: start of parts
131 131 bundle2-output: bundle part: "replycaps"
132 132 bundle2-output-part: "replycaps" 222 bytes payload
133 133 bundle2-output: part 0: "REPLYCAPS"
134 134 bundle2-output: header chunk size: 16
135 135 bundle2-output: payload chunk size: 222
136 136 bundle2-output: closing payload chunk
137 137 bundle2-output: bundle part: "check:bookmarks"
138 138 bundle2-output-part: "check:bookmarks" 23 bytes payload
139 139 bundle2-output: part 1: "CHECK:BOOKMARKS"
140 140 bundle2-output: header chunk size: 22
141 141 bundle2-output: payload chunk size: 23
142 142 bundle2-output: closing payload chunk
143 143 bundle2-output: bundle part: "check:phases"
144 bundle2-output-part: "check:phases" 48 bytes payload
144 bundle2-output-part: "check:phases" 24 bytes payload
145 145 bundle2-output: part 2: "CHECK:PHASES"
146 146 bundle2-output: header chunk size: 19
147 bundle2-output: payload chunk size: 48
147 bundle2-output: payload chunk size: 24
148 148 bundle2-output: closing payload chunk
149 149 bundle2-output: bundle part: "pushkey"
150 150 bundle2-output-part: "pushkey" (params: 4 mandatory) empty payload
151 151 bundle2-output: part 3: "PUSHKEY"
152 152 bundle2-output: header chunk size: 90
153 153 bundle2-output: closing payload chunk
154 154 bundle2-output: end of bundle
155 155 bundle2-input: start processing of HG20 stream
156 156 bundle2-input: reading bundle2 stream parameters
157 157 bundle2-input-bundle: with-transaction
158 158 bundle2-input: start extraction of bundle2 parts
159 159 bundle2-input: part header size: 16
160 160 bundle2-input: part type: "REPLYCAPS"
161 161 bundle2-input: part id: "0"
162 162 bundle2-input: part parameters: 0
163 163 bundle2-input: found a handler for part replycaps
164 164 bundle2-input-part: "replycaps" supported
165 165 bundle2-input: payload chunk size: 222
166 166 bundle2-input: payload chunk size: 0
167 167 bundle2-input-part: total payload size 222
168 168 bundle2-input: part header size: 22
169 169 bundle2-input: part type: "CHECK:BOOKMARKS"
170 170 bundle2-input: part id: "1"
171 171 bundle2-input: part parameters: 0
172 172 bundle2-input: found a handler for part check:bookmarks
173 173 bundle2-input-part: "check:bookmarks" supported
174 174 bundle2-input: payload chunk size: 23
175 175 bundle2-input: payload chunk size: 0
176 176 bundle2-input-part: total payload size 23
177 177 bundle2-input: part header size: 19
178 178 bundle2-input: part type: "CHECK:PHASES"
179 179 bundle2-input: part id: "2"
180 180 bundle2-input: part parameters: 0
181 181 bundle2-input: found a handler for part check:phases
182 182 bundle2-input-part: "check:phases" supported
183 bundle2-input: payload chunk size: 48
183 bundle2-input: payload chunk size: 24
184 184 bundle2-input: payload chunk size: 0
185 bundle2-input-part: total payload size 48
185 bundle2-input-part: total payload size 24
186 186 bundle2-input: part header size: 90
187 187 bundle2-input: part type: "PUSHKEY"
188 188 bundle2-input: part id: "3"
189 189 bundle2-input: part parameters: 4
190 190 bundle2-input: found a handler for part pushkey
191 191 bundle2-input-part: "pushkey" (params: 4 mandatory) supported
192 192 pushing key for "bookmarks:W"
193 193 bundle2-input: payload chunk size: 0
194 194 bundle2-input: part header size: 0
195 195 bundle2-input: end of bundle2 stream
196 196 bundle2-input-bundle: 3 parts total
197 197 running hook txnclose-bookmark.test: sh $TESTTMP/hook.sh
198 198 test-hook-bookmark: W: 0000000000000000000000000000000000000000 ->
199 199 bundle2-output-bundle: "HG20", 1 parts total
200 200 bundle2-output: start emission of HG20 stream
201 201 bundle2-output: bundle parameter:
202 202 bundle2-output: start of parts
203 203 bundle2-output: bundle part: "reply:pushkey"
204 204 bundle2-output-part: "reply:pushkey" (params: 0 advisory) empty payload
205 205 bundle2-output: part 0: "REPLY:PUSHKEY"
206 206 bundle2-output: header chunk size: 43
207 207 bundle2-output: closing payload chunk
208 208 bundle2-output: end of bundle
209 209 bundle2-input: start processing of HG20 stream
210 210 bundle2-input: reading bundle2 stream parameters
211 211 bundle2-input-bundle: no-transaction
212 212 bundle2-input: start extraction of bundle2 parts
213 213 bundle2-input: part header size: 43
214 214 bundle2-input: part type: "REPLY:PUSHKEY"
215 215 bundle2-input: part id: "0"
216 216 bundle2-input: part parameters: 2
217 217 bundle2-input: found a handler for part reply:pushkey
218 218 bundle2-input-part: "reply:pushkey" (params: 0 advisory) supported
219 219 bundle2-input: payload chunk size: 0
220 220 bundle2-input: part header size: 0
221 221 bundle2-input: end of bundle2 stream
222 222 bundle2-input-bundle: 0 parts total
223 223 deleting remote bookmark W
224 224 listing keys for "phases"
225 225 [1]
226 226
227 227 #endif
228 228 #if b2-binary
229 229
230 230 $ hg push -B W ../a --config "$TESTHOOK" --debug --config devel.bundle2.debug=yes
231 231 pushing to ../a
232 232 query 1; heads
233 233 searching for changes
234 234 all remote heads known locally
235 235 listing keys for "phases"
236 236 checking for updated bookmarks
237 237 listing keys for "bookmarks"
238 238 no changes found
239 239 bundle2-output-bundle: "HG20", 4 parts total
240 240 bundle2-output: start emission of HG20 stream
241 241 bundle2-output: bundle parameter:
242 242 bundle2-output: start of parts
243 243 bundle2-output: bundle part: "replycaps"
244 244 bundle2-output-part: "replycaps" 222 bytes payload
245 245 bundle2-output: part 0: "REPLYCAPS"
246 246 bundle2-output: header chunk size: 16
247 247 bundle2-output: payload chunk size: 222
248 248 bundle2-output: closing payload chunk
249 249 bundle2-output: bundle part: "check:bookmarks"
250 250 bundle2-output-part: "check:bookmarks" 23 bytes payload
251 251 bundle2-output: part 1: "CHECK:BOOKMARKS"
252 252 bundle2-output: header chunk size: 22
253 253 bundle2-output: payload chunk size: 23
254 254 bundle2-output: closing payload chunk
255 255 bundle2-output: bundle part: "check:phases"
256 bundle2-output-part: "check:phases" 48 bytes payload
256 bundle2-output-part: "check:phases" 24 bytes payload
257 257 bundle2-output: part 2: "CHECK:PHASES"
258 258 bundle2-output: header chunk size: 19
259 bundle2-output: payload chunk size: 48
259 bundle2-output: payload chunk size: 24
260 260 bundle2-output: closing payload chunk
261 261 bundle2-output: bundle part: "bookmarks"
262 262 bundle2-output-part: "bookmarks" 23 bytes payload
263 263 bundle2-output: part 3: "BOOKMARKS"
264 264 bundle2-output: header chunk size: 16
265 265 bundle2-output: payload chunk size: 23
266 266 bundle2-output: closing payload chunk
267 267 bundle2-output: end of bundle
268 268 bundle2-input: start processing of HG20 stream
269 269 bundle2-input: reading bundle2 stream parameters
270 270 bundle2-input-bundle: with-transaction
271 271 bundle2-input: start extraction of bundle2 parts
272 272 bundle2-input: part header size: 16
273 273 bundle2-input: part type: "REPLYCAPS"
274 274 bundle2-input: part id: "0"
275 275 bundle2-input: part parameters: 0
276 276 bundle2-input: found a handler for part replycaps
277 277 bundle2-input-part: "replycaps" supported
278 278 bundle2-input: payload chunk size: 222
279 279 bundle2-input: payload chunk size: 0
280 280 bundle2-input-part: total payload size 222
281 281 bundle2-input: part header size: 22
282 282 bundle2-input: part type: "CHECK:BOOKMARKS"
283 283 bundle2-input: part id: "1"
284 284 bundle2-input: part parameters: 0
285 285 bundle2-input: found a handler for part check:bookmarks
286 286 bundle2-input-part: "check:bookmarks" supported
287 287 bundle2-input: payload chunk size: 23
288 288 bundle2-input: payload chunk size: 0
289 289 bundle2-input-part: total payload size 23
290 290 bundle2-input: part header size: 19
291 291 bundle2-input: part type: "CHECK:PHASES"
292 292 bundle2-input: part id: "2"
293 293 bundle2-input: part parameters: 0
294 294 bundle2-input: found a handler for part check:phases
295 295 bundle2-input-part: "check:phases" supported
296 bundle2-input: payload chunk size: 48
296 bundle2-input: payload chunk size: 24
297 297 bundle2-input: payload chunk size: 0
298 bundle2-input-part: total payload size 48
298 bundle2-input-part: total payload size 24
299 299 bundle2-input: part header size: 16
300 300 bundle2-input: part type: "BOOKMARKS"
301 301 bundle2-input: part id: "3"
302 302 bundle2-input: part parameters: 0
303 303 bundle2-input: found a handler for part bookmarks
304 304 bundle2-input-part: "bookmarks" supported
305 305 bundle2-input: payload chunk size: 23
306 306 bundle2-input: payload chunk size: 0
307 307 bundle2-input-part: total payload size 23
308 308 bundle2-input: part header size: 0
309 309 bundle2-input: end of bundle2 stream
310 310 bundle2-input-bundle: 3 parts total
311 311 running hook txnclose-bookmark.test: sh $TESTTMP/hook.sh
312 312 test-hook-bookmark: W: 0000000000000000000000000000000000000000 ->
313 313 bundle2-output-bundle: "HG20", 0 parts total
314 314 bundle2-output: start emission of HG20 stream
315 315 bundle2-output: bundle parameter:
316 316 bundle2-output: start of parts
317 317 bundle2-output: end of bundle
318 318 bundle2-input: start processing of HG20 stream
319 319 bundle2-input: reading bundle2 stream parameters
320 320 bundle2-input-bundle: no-transaction
321 321 bundle2-input: start extraction of bundle2 parts
322 322 bundle2-input: part header size: 0
323 323 bundle2-input: end of bundle2 stream
324 324 bundle2-input-bundle: 0 parts total
325 325 deleting remote bookmark W
326 326 listing keys for "phases"
327 327 [1]
328 328
329 329 #endif
330 330
331 331 export the active bookmark
332 332
333 333 $ hg bookmark V
334 334 $ hg push -B . ../a
335 335 pushing to ../a
336 336 searching for changes
337 337 no changes found
338 338 exporting bookmark V
339 339 [1]
340 340
341 341 exporting the active bookmark with 'push -B .'
342 342 demands that one of the bookmarks be active
343 343
344 344 $ hg update -r default
345 345 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
346 346 (leaving bookmark V)
347 347 $ hg push -B . ../a
348 348 abort: no active bookmark
349 349 [255]
350 350 $ hg update -r V
351 351 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
352 352 (activating bookmark V)
353 353
354 354 delete the bookmark
355 355
356 356 $ hg book -d V
357 357 $ hg push -B V ../a
358 358 pushing to ../a
359 359 searching for changes
360 360 no changes found
361 361 deleting remote bookmark V
362 362 [1]
363 363 $ hg up foobar
364 364 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
365 365 (activating bookmark foobar)
366 366
367 367 push/pull name that doesn't exist
368 368
369 369 $ hg push -B badname ../a
370 370 pushing to ../a
371 371 searching for changes
372 372 bookmark badname does not exist on the local or remote repository!
373 373 no changes found
374 374 [2]
375 375 $ hg pull -B anotherbadname ../a
376 376 pulling from ../a
377 377 abort: remote bookmark anotherbadname not found!
378 378 [255]
379 379
380 380 divergent bookmarks
381 381
382 382 $ cd ../a
383 383 $ echo c1 > f1
384 384 $ hg ci -Am1
385 385 adding f1
386 386 $ hg book -f @
387 387 $ hg book -f X
388 388 $ hg book
389 389 @ 1:0d2164f0ce0d
390 390 * X 1:0d2164f0ce0d
391 391 Y 0:4e3505fd9583
392 392 Z 1:0d2164f0ce0d
393 393
394 394 $ cd ../b
395 395 $ hg up
396 396 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
397 397 updating bookmark foobar
398 398 $ echo c2 > f2
399 399 $ hg ci -Am2
400 400 adding f2
401 401 $ hg book -if @
402 402 $ hg book -if X
403 403 $ hg book
404 404 @ 1:9b140be10808
405 405 X 1:9b140be10808
406 406 Y 0:4e3505fd9583
407 407 Z 0:4e3505fd9583
408 408 foo -1:000000000000
409 409 * foobar 1:9b140be10808
410 410
411 411 $ hg pull --config paths.foo=../a foo --config "$TESTHOOK"
412 412 pulling from $TESTTMP/a
413 413 searching for changes
414 414 adding changesets
415 415 adding manifests
416 416 adding file changes
417 417 added 1 changesets with 1 changes to 1 files (+1 heads)
418 418 divergent bookmark @ stored as @foo
419 419 divergent bookmark X stored as X@foo
420 420 updating bookmark Z
421 421 new changesets 0d2164f0ce0d
422 422 test-hook-bookmark: @foo: -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c
423 423 test-hook-bookmark: X@foo: -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c
424 424 test-hook-bookmark: Z: 4e3505fd95835d721066b76e75dbb8cc554d7f77 -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c
425 425 (run 'hg heads' to see heads, 'hg merge' to merge)
426 426 $ hg book
427 427 @ 1:9b140be10808
428 428 @foo 2:0d2164f0ce0d
429 429 X 1:9b140be10808
430 430 X@foo 2:0d2164f0ce0d
431 431 Y 0:4e3505fd9583
432 432 Z 2:0d2164f0ce0d
433 433 foo -1:000000000000
434 434 * foobar 1:9b140be10808
435 435
436 436 (test handling of too many divergent bookmarks)
437 437
438 438 $ $PYTHON $TESTDIR/seq.py 1 100 | while read i; do hg bookmarks -r 000000000000 "X@${i}"; done
439 439 $ hg pull ../a
440 440 pulling from ../a
441 441 searching for changes
442 442 no changes found
443 443 warning: failed to assign numbered name to divergent bookmark X
444 444 divergent bookmark @ stored as @1
445 445 $ hg bookmarks | grep '^ X' | grep -v ':000000000000'
446 446 X 1:9b140be10808
447 447 X@foo 2:0d2164f0ce0d
448 448
449 449 (test that remotely diverged bookmarks are reused if they aren't changed)
450 450
451 451 $ hg bookmarks | grep '^ @'
452 452 @ 1:9b140be10808
453 453 @1 2:0d2164f0ce0d
454 454 @foo 2:0d2164f0ce0d
455 455 $ hg pull ../a
456 456 pulling from ../a
457 457 searching for changes
458 458 no changes found
459 459 warning: failed to assign numbered name to divergent bookmark X
460 460 divergent bookmark @ stored as @1
461 461 $ hg bookmarks | grep '^ @'
462 462 @ 1:9b140be10808
463 463 @1 2:0d2164f0ce0d
464 464 @foo 2:0d2164f0ce0d
465 465
466 466 $ $PYTHON $TESTDIR/seq.py 1 100 | while read i; do hg bookmarks -d "X@${i}"; done
467 467 $ hg bookmarks -d "@1"
468 468
469 469 $ hg push -f ../a
470 470 pushing to ../a
471 471 searching for changes
472 472 adding changesets
473 473 adding manifests
474 474 adding file changes
475 475 added 1 changesets with 1 changes to 1 files (+1 heads)
476 476 $ hg -R ../a book
477 477 @ 1:0d2164f0ce0d
478 478 * X 1:0d2164f0ce0d
479 479 Y 0:4e3505fd9583
480 480 Z 1:0d2164f0ce0d
481 481
482 482 explicit pull should overwrite the local version (issue4439)
483 483
484 484 $ hg update -r X
485 485 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
486 486 (activating bookmark X)
487 487 $ hg pull --config paths.foo=../a foo -B . --config "$TESTHOOK"
488 488 pulling from $TESTTMP/a
489 489 no changes found
490 490 divergent bookmark @ stored as @foo
491 491 importing bookmark X
492 492 test-hook-bookmark: @foo: 0d2164f0ce0d8f1d6f94351eba04b794909be66c -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c
493 493 test-hook-bookmark: X: 9b140be1080824d768c5a4691a564088eede71f9 -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c
494 494
495 495 reinstall state for further testing:
496 496
497 497 $ hg book -fr 9b140be10808 X
498 498
499 499 revsets should not ignore divergent bookmarks
500 500
501 501 $ hg bookmark -fr 1 Z
502 502 $ hg log -r 'bookmark()' --template '{rev}:{node|short} {bookmarks}\n'
503 503 0:4e3505fd9583 Y
504 504 1:9b140be10808 @ X Z foobar
505 505 2:0d2164f0ce0d @foo X@foo
506 506 $ hg log -r 'bookmark("X@foo")' --template '{rev}:{node|short} {bookmarks}\n'
507 507 2:0d2164f0ce0d @foo X@foo
508 508 $ hg log -r 'bookmark("re:X@foo")' --template '{rev}:{node|short} {bookmarks}\n'
509 509 2:0d2164f0ce0d @foo X@foo
510 510
511 511 update a remote bookmark from a non-head to a head
512 512
513 513 $ hg up -q Y
514 514 $ echo c3 > f2
515 515 $ hg ci -Am3
516 516 adding f2
517 517 created new head
518 518 $ hg push ../a --config "$TESTHOOK"
519 519 pushing to ../a
520 520 searching for changes
521 521 adding changesets
522 522 adding manifests
523 523 adding file changes
524 524 added 1 changesets with 1 changes to 1 files (+1 heads)
525 525 test-hook-bookmark: Y: 4e3505fd95835d721066b76e75dbb8cc554d7f77 -> f6fc62dde3c0771e29704af56ba4d8af77abcc2f
526 526 updating bookmark Y
527 527 $ hg -R ../a book
528 528 @ 1:0d2164f0ce0d
529 529 * X 1:0d2164f0ce0d
530 530 Y 3:f6fc62dde3c0
531 531 Z 1:0d2164f0ce0d
532 532
533 533 update a bookmark in the middle of a client pulling changes
534 534
535 535 $ cd ..
536 536 $ hg clone -q a pull-race
537 537
538 538 We want to use http because it is stateless and therefore more susceptible to
539 539 race conditions
540 540
541 541 $ hg serve -R pull-race -p $HGPORT -d --pid-file=pull-race.pid -E main-error.log
542 542 $ cat pull-race.pid >> $DAEMON_PIDS
543 543
544 544 $ cat <<EOF > $TESTTMP/out_makecommit.sh
545 545 > #!/bin/sh
546 546 > hg ci -Am5
547 547 > echo committed in pull-race
548 548 > EOF
549 549
550 550 $ hg clone -q http://localhost:$HGPORT/ pull-race2 --config "$TESTHOOK"
551 551 test-hook-bookmark: @: -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c
552 552 test-hook-bookmark: X: -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c
553 553 test-hook-bookmark: Y: -> f6fc62dde3c0771e29704af56ba4d8af77abcc2f
554 554 test-hook-bookmark: Z: -> 0d2164f0ce0d8f1d6f94351eba04b794909be66c
555 555 $ cd pull-race
556 556 $ hg up -q Y
557 557 $ echo c4 > f2
558 558 $ hg ci -Am4
559 559 $ echo c5 > f3
560 560 $ cat <<EOF > .hg/hgrc
561 561 > [hooks]
562 562 > outgoing.makecommit = sh $TESTTMP/out_makecommit.sh
563 563 > EOF
564 564
565 565 (new config needs a server restart)
566 566
567 567 $ cd ..
568 568 $ killdaemons.py
569 569 $ hg serve -R pull-race -p $HGPORT -d --pid-file=pull-race.pid -E main-error.log
570 570 $ cat pull-race.pid >> $DAEMON_PIDS
571 571 $ cd pull-race2
572 572 $ hg -R $TESTTMP/pull-race book
573 573 @ 1:0d2164f0ce0d
574 574 X 1:0d2164f0ce0d
575 575 * Y 4:b0a5eff05604
576 576 Z 1:0d2164f0ce0d
577 577 $ hg pull
578 578 pulling from http://localhost:$HGPORT/
579 579 searching for changes
580 580 adding changesets
581 581 adding manifests
582 582 adding file changes
583 583 added 1 changesets with 1 changes to 1 files
584 584 updating bookmark Y
585 585 new changesets b0a5eff05604
586 586 (run 'hg update' to get a working copy)
587 587 $ hg book
588 588 * @ 1:0d2164f0ce0d
589 589 X 1:0d2164f0ce0d
590 590 Y 4:b0a5eff05604
591 591 Z 1:0d2164f0ce0d
592 592
593 593 Update a bookmark right after the initial lookup done by 'pull -B' (issue4689)
594 594
595 595 $ echo c6 > ../pull-race/f3 # to be committed during the race
596 596 $ cat <<EOF > $TESTTMP/listkeys_makecommit.sh
597 597 > #!/bin/sh
598 598 > if hg st | grep -q M; then
599 599 > hg commit -m race
600 600 > echo committed in pull-race
601 601 > else
602 602 > exit 0
603 603 > fi
604 604 > EOF
605 605 $ cat <<EOF > ../pull-race/.hg/hgrc
606 606 > [hooks]
607 607 > # If anything to commit, commit it right after the first key listing used
608 608 > # during lookup. This makes the commit appear before the actual getbundle
609 609 > # call.
610 610 > listkeys.makecommit= sh $TESTTMP/listkeys_makecommit.sh
611 611 > EOF
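
(a sketch of this second race, issue4689, as shell comments:)

  # 1. 'hg pull -B .' first lists the remote bookmarks to resolve '.' to Y
  # 2. the listkeys hook fires and commits, moving Y forward on the server
  # 3. the later getbundle call then already includes the racing commit, and
  #    the client must update Y to it rather than pin Y to the stale lookup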
612 612
613 613 (new config needs a server restart)
614 614
615 615 $ killdaemons.py
616 616 $ hg serve -R ../pull-race -p $HGPORT -d --pid-file=../pull-race.pid -E main-error.log
617 617 $ cat ../pull-race.pid >> $DAEMON_PIDS
618 618
619 619 $ hg -R $TESTTMP/pull-race book
620 620 @ 1:0d2164f0ce0d
621 621 X 1:0d2164f0ce0d
622 622 * Y 5:35d1ef0a8d1b
623 623 Z 1:0d2164f0ce0d
624 624 $ hg update -r Y
625 625 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
626 626 (activating bookmark Y)
627 627 $ hg pull -B .
628 628 pulling from http://localhost:$HGPORT/
629 629 searching for changes
630 630 adding changesets
631 631 adding manifests
632 632 adding file changes
633 633 added 1 changesets with 1 changes to 1 files
634 634 updating bookmark Y
635 635 new changesets 35d1ef0a8d1b
636 636 (run 'hg update' to get a working copy)
637 637 $ hg book
638 638 @ 1:0d2164f0ce0d
639 639 X 1:0d2164f0ce0d
640 640 * Y 5:35d1ef0a8d1b
641 641 Z 1:0d2164f0ce0d
642 642
643 643 (done with this section of the test)
644 644
645 645 $ killdaemons.py
646 646 $ cd ../b
647 647
648 648 diverging a remote bookmark fails
649 649
650 650 $ hg up -q 4e3505fd9583
651 651 $ echo c4 > f2
652 652 $ hg ci -Am4
653 653 adding f2
654 654 created new head
655 655 $ echo c5 > f2
656 656 $ hg ci -Am5
657 657 $ hg log -G
658 658 @ 5:c922c0139ca0 5
659 659 |
660 660 o 4:4efff6d98829 4
661 661 |
662 662 | o 3:f6fc62dde3c0 3
663 663 |/
664 664 | o 2:0d2164f0ce0d 1
665 665 |/
666 666 | o 1:9b140be10808 2
667 667 |/
668 668 o 0:4e3505fd9583 test
669 669
670 670
671 671 $ hg book -f Y
672 672
673 673 $ cat <<EOF > ../a/.hg/hgrc
674 674 > [web]
675 675 > push_ssl = false
676 676 > allow_push = *
677 677 > EOF
678 678
679 679 $ hg serve -R ../a -p $HGPORT2 -d --pid-file=../hg2.pid
680 680 $ cat ../hg2.pid >> $DAEMON_PIDS
681 681
682 682 $ hg push http://localhost:$HGPORT2/
683 683 pushing to http://localhost:$HGPORT2/
684 684 searching for changes
685 685 abort: push creates new remote head c922c0139ca0 with bookmark 'Y'!
686 686 (merge or see 'hg help push' for details about pushing new heads)
687 687 [255]
688 688 $ hg -R ../a book
689 689 @ 1:0d2164f0ce0d
690 690 * X 1:0d2164f0ce0d
691 691 Y 3:f6fc62dde3c0
692 692 Z 1:0d2164f0ce0d
693 693
694 694
695 695 An unrelated obsolescence marker (between nodes unknown to either repository) does not alter the push decision
696 696
697 697 $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
698 698 $ hg push http://localhost:$HGPORT2/
699 699 pushing to http://localhost:$HGPORT2/
700 700 searching for changes
701 701 abort: push creates new remote head c922c0139ca0 with bookmark 'Y'!
702 702 (merge or see 'hg help push' for details about pushing new heads)
703 703 [255]
704 704 $ hg -R ../a book
705 705 @ 1:0d2164f0ce0d
706 706 * X 1:0d2164f0ce0d
707 707 Y 3:f6fc62dde3c0
708 708 Z 1:0d2164f0ce0d
709 709
710 710 Update to a successor works: once f6fc62dde3c0 is obsoleted with (transitive) successor 4efff6d98829, pushing bookmark Y to the new head is allowed
711 711
712 712 $ hg id --debug -r 3
713 713 f6fc62dde3c0771e29704af56ba4d8af77abcc2f
714 714 $ hg id --debug -r 4
715 715 4efff6d98829d9c824c621afd6e3f01865f5439f
716 716 $ hg id --debug -r 5
717 717 c922c0139ca03858f655e4a2af4dd02796a63969 tip Y
718 718 $ hg debugobsolete f6fc62dde3c0771e29704af56ba4d8af77abcc2f cccccccccccccccccccccccccccccccccccccccc
719 719 obsoleted 1 changesets
720 720 $ hg debugobsolete cccccccccccccccccccccccccccccccccccccccc 4efff6d98829d9c824c621afd6e3f01865f5439f
721 721 $ hg push http://localhost:$HGPORT2/
722 722 pushing to http://localhost:$HGPORT2/
723 723 searching for changes
724 724 remote: adding changesets
725 725 remote: adding manifests
726 726 remote: adding file changes
727 727 remote: added 2 changesets with 2 changes to 1 files (+1 heads)
728 728 remote: 2 new obsolescence markers
729 729 remote: obsoleted 1 changesets
730 730 updating bookmark Y
731 731 $ hg -R ../a book
732 732 @ 1:0d2164f0ce0d
733 733 * X 1:0d2164f0ce0d
734 734 Y 5:c922c0139ca0
735 735 Z 1:0d2164f0ce0d
736 736
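(the pattern exercised above, sketched with placeholders OLDNODE, NEWNODE,
BOOKMARK and DEST, none of which are values from this test: obsolete the old
remote head with a marker chain ending in a successor contained in the push,
and the new-head check passes)

  $ hg debugobsolete OLDNODE NEWNODE    # mark OLDNODE as superseded by NEWNODE
  $ hg push -B BOOKMARK DEST            # no longer aborts with 'creates new remote head'
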
737 737 hgweb
738 738
739 739 $ cat <<EOF > .hg/hgrc
740 740 > [web]
741 741 > push_ssl = false
742 742 > allow_push = *
743 743 > EOF
744 744
745 745 $ hg serve -p $HGPORT -d --pid-file=../hg.pid -E errors.log
746 746 $ cat ../hg.pid >> $DAEMON_PIDS
747 747 $ cd ../a
748 748
749 749 $ hg debugpushkey http://localhost:$HGPORT/ namespaces
750 750 bookmarks
751 751 namespaces
752 752 obsolete
753 753 phases
754 754 $ hg debugpushkey http://localhost:$HGPORT/ bookmarks
755 755 @ 9b140be1080824d768c5a4691a564088eede71f9
756 756 X 9b140be1080824d768c5a4691a564088eede71f9
757 757 Y c922c0139ca03858f655e4a2af4dd02796a63969
758 758 Z 9b140be1080824d768c5a4691a564088eede71f9
759 759 foo 0000000000000000000000000000000000000000
760 760 foobar 9b140be1080824d768c5a4691a564088eede71f9
761 761 $ hg out -B http://localhost:$HGPORT/
762 762 comparing with http://localhost:$HGPORT/
763 763 searching for changed bookmarks
764 764 @ 0d2164f0ce0d
765 765 X 0d2164f0ce0d
766 766 Z 0d2164f0ce0d
767 767 foo
768 768 foobar
769 769 $ hg push -B Z http://localhost:$HGPORT/
770 770 pushing to http://localhost:$HGPORT/
771 771 searching for changes
772 772 no changes found
773 773 updating bookmark Z
774 774 [1]
775 775 $ hg book -d Z
776 776 $ hg in -B http://localhost:$HGPORT/
777 777 comparing with http://localhost:$HGPORT/
778 778 searching for changed bookmarks
779 779 @ 9b140be10808
780 780 X 9b140be10808
781 781 Z 0d2164f0ce0d
782 782 foo 000000000000
783 783 foobar 9b140be10808
784 784 $ hg pull -B Z http://localhost:$HGPORT/
785 785 pulling from http://localhost:$HGPORT/
786 786 no changes found
787 787 divergent bookmark @ stored as @1
788 788 divergent bookmark X stored as X@1
789 789 adding remote bookmark Z
790 790 adding remote bookmark foo
791 791 adding remote bookmark foobar
792 792 $ hg clone http://localhost:$HGPORT/ cloned-bookmarks
793 793 requesting all changes
794 794 adding changesets
795 795 adding manifests
796 796 adding file changes
797 797 added 5 changesets with 5 changes to 3 files (+2 heads)
798 798 2 new obsolescence markers
799 799 new changesets 4e3505fd9583:c922c0139ca0
800 800 updating to bookmark @
801 801 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
802 802 $ hg -R cloned-bookmarks bookmarks
803 803 * @ 1:9b140be10808
804 804 X 1:9b140be10808
805 805 Y 4:c922c0139ca0
806 806 Z 2:0d2164f0ce0d
807 807 foo -1:000000000000
808 808 foobar 1:9b140be10808
809 809
810 810 $ cd ..
811 811
812 812 Test to show the result of bookmark comparison (with -v, each changed bookmark is reported as added, deleted, advanced, diverged, or changed)
813 813
814 814 $ mkdir bmcomparison
815 815 $ cd bmcomparison
816 816
817 817 $ hg init source
818 818 $ hg -R source debugbuilddag '+2*2*3*4'
819 819 $ hg -R source log -G --template '{rev}:{node|short}'
820 820 o 4:e7bd5218ca15
821 821 |
822 822 | o 3:6100d3090acf
823 823 |/
824 824 | o 2:fa942426a6fd
825 825 |/
826 826 | o 1:66f7d451a68b
827 827 |/
828 828 o 0:1ea73414a91b
829 829
830 830 $ hg -R source bookmarks -r 0 SAME
831 831 $ hg -R source bookmarks -r 0 ADV_ON_REPO1
832 832 $ hg -R source bookmarks -r 0 ADV_ON_REPO2
833 833 $ hg -R source bookmarks -r 0 DIFF_ADV_ON_REPO1
834 834 $ hg -R source bookmarks -r 0 DIFF_ADV_ON_REPO2
835 835 $ hg -R source bookmarks -r 1 DIVERGED
836 836
837 837 $ hg clone -U source repo1
838 838
839 839 (test that incoming/outgoing exit with 1 if there are no bookmarks to
840 840 be exchanged)
841 841
842 842 $ hg -R repo1 incoming -B
843 843 comparing with $TESTTMP/bmcomparison/source
844 844 searching for changed bookmarks
845 845 no changed bookmarks found
846 846 [1]
847 847 $ hg -R repo1 outgoing -B
848 848 comparing with $TESTTMP/bmcomparison/source
849 849 searching for changed bookmarks
850 850 no changed bookmarks found
851 851 [1]
852 852
853 853 $ hg -R repo1 bookmarks -f -r 1 ADD_ON_REPO1
854 854 $ hg -R repo1 bookmarks -f -r 2 ADV_ON_REPO1
855 855 $ hg -R repo1 bookmarks -f -r 3 DIFF_ADV_ON_REPO1
856 856 $ hg -R repo1 bookmarks -f -r 3 DIFF_DIVERGED
857 857 $ hg -R repo1 -q --config extensions.mq= strip 4
858 858 $ hg -R repo1 log -G --template '{node|short} ({bookmarks})'
859 859 o 6100d3090acf (DIFF_ADV_ON_REPO1 DIFF_DIVERGED)
860 860 |
861 861 | o fa942426a6fd (ADV_ON_REPO1)
862 862 |/
863 863 | o 66f7d451a68b (ADD_ON_REPO1 DIVERGED)
864 864 |/
865 865 o 1ea73414a91b (ADV_ON_REPO2 DIFF_ADV_ON_REPO2 SAME)
866 866
867 867
868 868 $ hg clone -U source repo2
869 869 $ hg -R repo2 bookmarks -f -r 1 ADD_ON_REPO2
870 870 $ hg -R repo2 bookmarks -f -r 1 ADV_ON_REPO2
871 871 $ hg -R repo2 bookmarks -f -r 2 DIVERGED
872 872 $ hg -R repo2 bookmarks -f -r 4 DIFF_ADV_ON_REPO2
873 873 $ hg -R repo2 bookmarks -f -r 4 DIFF_DIVERGED
874 874 $ hg -R repo2 -q --config extensions.mq= strip 3
875 875 $ hg -R repo2 log -G --template '{node|short} ({bookmarks})'
876 876 o e7bd5218ca15 (DIFF_ADV_ON_REPO2 DIFF_DIVERGED)
877 877 |
878 878 | o fa942426a6fd (DIVERGED)
879 879 |/
880 880 | o 66f7d451a68b (ADD_ON_REPO2 ADV_ON_REPO2)
881 881 |/
882 882 o 1ea73414a91b (ADV_ON_REPO1 DIFF_ADV_ON_REPO1 SAME)
883 883
884 884
885 885 (test that differences of bookmarks between repositories are fully shown)
886 886
887 887 $ hg -R repo1 incoming -B repo2 -v
888 888 comparing with repo2
889 889 searching for changed bookmarks
890 890 ADD_ON_REPO2 66f7d451a68b added
891 891 ADV_ON_REPO2 66f7d451a68b advanced
892 892 DIFF_ADV_ON_REPO2 e7bd5218ca15 changed
893 893 DIFF_DIVERGED e7bd5218ca15 changed
894 894 DIVERGED fa942426a6fd diverged
895 895 $ hg -R repo1 outgoing -B repo2 -v
896 896 comparing with repo2
897 897 searching for changed bookmarks
898 898 ADD_ON_REPO1 66f7d451a68b added
899 899 ADD_ON_REPO2 deleted
900 900 ADV_ON_REPO1 fa942426a6fd advanced
901 901 DIFF_ADV_ON_REPO1 6100d3090acf advanced
902 902 DIFF_ADV_ON_REPO2 1ea73414a91b changed
903 903 DIFF_DIVERGED 6100d3090acf changed
904 904 DIVERGED 66f7d451a68b diverged
905 905
906 906 $ hg -R repo2 incoming -B repo1 -v
907 907 comparing with repo1
908 908 searching for changed bookmarks
909 909 ADD_ON_REPO1 66f7d451a68b added
910 910 ADV_ON_REPO1 fa942426a6fd advanced
911 911 DIFF_ADV_ON_REPO1 6100d3090acf changed
912 912 DIFF_DIVERGED 6100d3090acf changed
913 913 DIVERGED 66f7d451a68b diverged
914 914 $ hg -R repo2 outgoing -B repo1 -v
915 915 comparing with repo1
916 916 searching for changed bookmarks
917 917 ADD_ON_REPO1 deleted
918 918 ADD_ON_REPO2 66f7d451a68b added
919 919 ADV_ON_REPO2 66f7d451a68b advanced
920 920 DIFF_ADV_ON_REPO1 1ea73414a91b changed
921 921 DIFF_ADV_ON_REPO2 e7bd5218ca15 advanced
922 922 DIFF_DIVERGED e7bd5218ca15 changed
923 923 DIVERGED fa942426a6fd diverged
924 924
925 925 $ cd ..
926 926
927 927 Pushing a bookmark should only push the changes required by that
928 928 bookmark, not all outgoing changes:
929 929 $ hg clone http://localhost:$HGPORT/ addmarks
930 930 requesting all changes
931 931 adding changesets
932 932 adding manifests
933 933 adding file changes
934 934 added 5 changesets with 5 changes to 3 files (+2 heads)
935 935 2 new obsolescence markers
936 936 new changesets 4e3505fd9583:c922c0139ca0
937 937 updating to bookmark @
938 938 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
939 939 $ cd addmarks
940 940 $ echo foo > foo
941 941 $ hg add foo
942 942 $ hg commit -m 'add foo'
943 943 $ echo bar > bar
944 944 $ hg add bar
945 945 $ hg commit -m 'add bar'
946 946 $ hg co "tip^"
947 947 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
948 948 (leaving bookmark @)
949 949 $ hg book add-foo
950 950 $ hg book -r tip add-bar
951 951 Note: this push *must* push only a single changeset, as that's the point
952 952 of this test.
953 953 $ hg push -B add-foo --traceback
954 954 pushing to http://localhost:$HGPORT/
955 955 searching for changes
956 956 remote: adding changesets
957 957 remote: adding manifests
958 958 remote: adding file changes
959 959 remote: added 1 changesets with 1 changes to 1 files
960 960 exporting bookmark add-foo
961 961
962 962 pushing a new bookmark on a new head does not require -f if -B is specified
963 963
964 964 $ hg up -q X
965 965 $ hg book W
966 966 $ echo c5 > f2
967 967 $ hg ci -Am5
968 968 created new head
969 969 $ hg push -B .
970 970 pushing to http://localhost:$HGPORT/
971 971 searching for changes
972 972 remote: adding changesets
973 973 remote: adding manifests
974 974 remote: adding file changes
975 975 remote: added 1 changesets with 1 changes to 1 files (+1 heads)
976 976 exporting bookmark W
977 977 $ hg -R ../b id -r W
978 978 cc978a373a53 tip W
979 979
980 980 pushing an existing but divergent bookmark with -B still requires -f
981 981
982 982 $ hg clone -q . ../r
983 983 $ hg up -q X
984 984 $ echo 1 > f2
985 985 $ hg ci -qAml
986 986
987 987 $ cd ../r
988 988 $ hg up -q X
989 989 $ echo 2 > f2
990 990 $ hg ci -qAmr
991 991 $ hg push -B X
992 992 pushing to $TESTTMP/addmarks
993 993 searching for changes
994 994 remote has heads on branch 'default' that are not known locally: a2a606d9ff1b
995 995 abort: push creates new remote head 54694f811df9 with bookmark 'X'!
996 996 (pull and merge or see 'hg help push' for details about pushing new heads)
997 997 [255]
998 998 $ cd ../addmarks
999 999
1000 1000 Check summary output for incoming/outgoing bookmarks
1001 1001
1002 1002 $ hg bookmarks -d X
1003 1003 $ hg bookmarks -d Y
1004 1004 $ hg summary --remote | grep '^remote:'
1005 1005 remote: *, 2 incoming bookmarks, 1 outgoing bookmarks (glob)
1006 1006
1007 1007 $ cd ..
1008 1008
1009 1009 pushing an unchanged bookmark should result in no changes
1010 1010
1011 1011 $ hg init unchanged-a
1012 1012 $ hg init unchanged-b
1013 1013 $ cd unchanged-a
1014 1014 $ echo initial > foo
1015 1015 $ hg commit -A -m initial
1016 1016 adding foo
1017 1017 $ hg bookmark @
1018 1018 $ hg push -B @ ../unchanged-b
1019 1019 pushing to ../unchanged-b
1020 1020 searching for changes
1021 1021 adding changesets
1022 1022 adding manifests
1023 1023 adding file changes
1024 1024 added 1 changesets with 1 changes to 1 files
1025 1025 exporting bookmark @
1026 1026
1027 1027 $ hg push -B @ ../unchanged-b
1028 1028 pushing to ../unchanged-b
1029 1029 searching for changes
1030 1030 no changes found
1031 1031 [1]
1032 1032
1033 1033 Pushing a really long bookmark should work fine (issue5165)
1034 1034 ===========================================================
1035 1035
1036 1036 #if b2-binary
1037 1037 >>> with open('longname', 'w') as f:
1038 1038 ... f.write('wat' * 100) and None
1039 1039 $ hg book `cat longname`
1040 1040 $ hg push -B `cat longname` ../unchanged-b
1041 1041 pushing to ../unchanged-b
1042 1042 searching for changes
1043 1043 no changes found
1044 1044 exporting bookmark (wat){100} (re)
1045 1045 [1]
1046 1046 $ hg -R ../unchanged-b book --delete `cat longname`
1047 1047
1048 1048 Test again but forcing bundle1 exchange to make sure that doesn't regress.
1049 1049
1050 1050 $ hg push -B `cat longname` ../unchanged-b --config devel.legacy.exchange=bundle1
1051 1051 pushing to ../unchanged-b
1052 1052 searching for changes
1053 1053 no changes found
1054 1054 exporting bookmark (wat){100} (re)
1055 1055 [1]
1056 1056 $ hg -R ../unchanged-b book --delete `cat longname`
1057 1057 $ hg book --delete `cat longname`
1058 1058 $ hg co @
1059 1059 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
1060 1060 (activating bookmark @)
1061 1061 #endif
1062 1062
1063 1063 Check hook preventing push (issue4455)
1064 1064 ======================================
1065 1065
1066 1066 $ hg bookmarks
1067 1067 * @ 0:55482a6fb4b1
1068 1068 $ hg log -G
1069 1069 @ 0:55482a6fb4b1 initial
1070 1070
1071 1071 $ hg init ../issue4455-dest
1072 1072 $ hg push ../issue4455-dest # changesets only
1073 1073 pushing to ../issue4455-dest
1074 1074 searching for changes
1075 1075 adding changesets
1076 1076 adding manifests
1077 1077 adding file changes
1078 1078 added 1 changesets with 1 changes to 1 files
1079 1079 $ cat >> .hg/hgrc << EOF
1080 1080 > [paths]
1081 1081 > local=../issue4455-dest/
1082 1082 > ssh=ssh://user@dummy/issue4455-dest
1083 1083 > http=http://localhost:$HGPORT/
1084 1084 > [ui]
1085 1085 > ssh=$PYTHON "$TESTDIR/dummyssh"
1086 1086 > EOF
1087 1087 $ cat >> ../issue4455-dest/.hg/hgrc << EOF
1088 1088 > [hooks]
1089 1089 > prepushkey=false
1090 1090 > [web]
1091 1091 > push_ssl = false
1092 1092 > allow_push = *
1093 1093 > EOF
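
(the hook command 'false' always exits with status 1, so every pushkey
transaction, here the export of bookmark @, is refused while plain changeset
pushes still succeed; the sections below check that this failure is reported
sensibly over local, ssh and http transports, for both the pushkey-based and
the binary bookmark parts of bundle2)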
1094 1094 $ killdaemons.py
1095 1095 $ hg serve -R ../issue4455-dest -p $HGPORT -d --pid-file=../issue4455.pid -E ../issue4455-error.log
1096 1096 $ cat ../issue4455.pid >> $DAEMON_PIDS
1097 1097
1098 1098 Local push
1099 1099 ----------
1100 1100
1101 1101 #if b2-pushkey
1102 1102
1103 1103 $ hg push -B @ local
1104 1104 pushing to $TESTTMP/issue4455-dest
1105 1105 searching for changes
1106 1106 no changes found
1107 1107 pushkey-abort: prepushkey hook exited with status 1
1108 1108 abort: exporting bookmark @ failed!
1109 1109 [255]
1110 1110
1111 1111 #endif
1112 1112 #if b2-binary
1113 1113
1114 1114 $ hg push -B @ local
1115 1115 pushing to $TESTTMP/issue4455-dest
1116 1116 searching for changes
1117 1117 no changes found
1118 1118 abort: prepushkey hook exited with status 1
1119 1119 [255]
1120 1120
1121 1121 #endif
1122 1122
1123 1123 $ hg -R ../issue4455-dest/ bookmarks
1124 1124 no bookmarks set
1125 1125
1126 1126 Using ssh
1127 1127 ---------
1128 1128
1129 1129 #if b2-pushkey
1130 1130
1131 1131 $ hg push -B @ ssh # bundle2+
1132 1132 pushing to ssh://user@dummy/issue4455-dest
1133 1133 searching for changes
1134 1134 no changes found
1135 1135 remote: pushkey-abort: prepushkey hook exited with status 1
1136 1136 abort: exporting bookmark @ failed!
1137 1137 [255]
1138 1138
1139 1139 $ hg -R ../issue4455-dest/ bookmarks
1140 1140 no bookmarks set
1141 1141
1142 1142 $ hg push -B @ ssh --config devel.legacy.exchange=bundle1
1143 1143 pushing to ssh://user@dummy/issue4455-dest
1144 1144 searching for changes
1145 1145 no changes found
1146 1146 remote: pushkey-abort: prepushkey hook exited with status 1
1147 1147 exporting bookmark @ failed!
1148 1148 [1]
1149 1149
1150 1150 #endif
1151 1151 #if b2-binary
1152 1152
1153 1153 $ hg push -B @ ssh # bundle2+
1154 1154 pushing to ssh://user@dummy/issue4455-dest
1155 1155 searching for changes
1156 1156 no changes found
1157 1157 remote: prepushkey hook exited with status 1
1158 1158 abort: push failed on remote
1159 1159 [255]
1160 1160
1161 1161 #endif
1162 1162
1163 1163 $ hg -R ../issue4455-dest/ bookmarks
1164 1164 no bookmarks set
1165 1165
1166 1166 Using http
1167 1167 ----------
1168 1168
1169 1169 #if b2-pushkey
1170 1170 $ hg push -B @ http # bundle2+
1171 1171 pushing to http://localhost:$HGPORT/
1172 1172 searching for changes
1173 1173 no changes found
1174 1174 remote: pushkey-abort: prepushkey hook exited with status 1
1175 1175 abort: exporting bookmark @ failed!
1176 1176 [255]
1177 1177
1178 1178 $ hg -R ../issue4455-dest/ bookmarks
1179 1179 no bookmarks set
1180 1180
1181 1181 $ hg push -B @ http --config devel.legacy.exchange=bundle1
1182 1182 pushing to http://localhost:$HGPORT/
1183 1183 searching for changes
1184 1184 no changes found
1185 1185 remote: pushkey-abort: prepushkey hook exited with status 1
1186 1186 exporting bookmark @ failed!
1187 1187 [1]
1188 1188
1189 1189 #endif
1190 1190
1191 1191 #if b2-binary
1192 1192
1193 1193 $ hg push -B @ ssh # bundle2+
1194 1194 pushing to ssh://user@dummy/issue4455-dest
1195 1195 searching for changes
1196 1196 no changes found
1197 1197 remote: prepushkey hook exited with status 1
1198 1198 abort: push failed on remote
1199 1199 [255]
1200 1200
1201 1201 #endif
1202 1202
1203 1203 $ hg -R ../issue4455-dest/ bookmarks
1204 1204 no bookmarks set
1205 1205
1206 1206 $ cd ..
1207 1207
1208 1208 Test that the pushkey compatibility layer for bookmarks works as expected (issue5777): with server.bookmarks-pushkey-compat enabled, bookmark pushes received over bundle2 still fire the legacy pushkey hooks
1209 1209
1210 1210 $ cat << EOF >> $HGRCPATH
1211 1211 > [ui]
1212 1212 > ssh="$PYTHON" "$TESTDIR/dummyssh"
1213 1213 > [server]
1214 1214 > bookmarks-pushkey-compat = yes
1215 1215 > EOF
1216 1216
1217 1217 $ hg init server
1218 1218 $ echo foo > server/a
1219 1219 $ hg -R server book foo
1220 1220 $ hg -R server commit -Am a
1221 1221 adding a
1222 1222 $ hg clone ssh://user@dummy/server client
1223 1223 requesting all changes
1224 1224 adding changesets
1225 1225 adding manifests
1226 1226 adding file changes
1227 1227 added 1 changesets with 1 changes to 1 files
1228 1228 new changesets 79513d0d7716
1229 1229 updating to branch default
1230 1230 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1231 1231
1232 1232 Forbid bookmark moves on the server: the prepushkey hook below exits non-zero exactly when HG_NAMESPACE is 'bookmarks' (grep -v then prints nothing and fails), so other pushkey namespaces still pass
1233 1233
1234 1234 $ cat << EOF >> $TESTDIR/no-bm-move.sh
1235 1235 > #!/bin/sh
1236 1236 > echo \$HG_NAMESPACE | grep -v bookmarks
1237 1237 > EOF
1238 1238 $ cat << EOF >> server/.hg/hgrc
1239 1239 > [hooks]
1240 1240 > prepushkey.no-bm-move= sh $TESTDIR/no-bm-move.sh
1241 1241 > EOF
1242 1242
1243 1243 pushing a changeset is okay
1244 1244
1245 1245 $ echo bar >> client/a
1246 1246 $ hg -R client commit -m b
1247 1247 $ hg -R client push
1248 1248 pushing to ssh://user@dummy/server
1249 1249 searching for changes
1250 1250 remote: adding changesets
1251 1251 remote: adding manifests
1252 1252 remote: adding file changes
1253 1253 remote: added 1 changesets with 1 changes to 1 files
1254 1254
1255 1255 an attempt to move the bookmark is rejected
1256 1256
1257 1257 $ hg -R client book foo -r .
1258 1258 moving bookmark 'foo' forward from 79513d0d7716
1259 1259
1260 1260 #if b2-pushkey
1261 1261 $ hg -R client push
1262 1262 pushing to ssh://user@dummy/server
1263 1263 searching for changes
1264 1264 no changes found
1265 1265 remote: pushkey-abort: prepushkey.no-bm-move hook exited with status 1
1266 1266 abort: updating bookmark foo failed!
1267 1267 [255]
1268 1268 #endif
1269 1269 #if b2-binary
1270 1270 $ hg -R client push
1271 1271 pushing to ssh://user@dummy/server
1272 1272 searching for changes
1273 1273 no changes found
1274 1274 remote: prepushkey.no-bm-move hook exited with status 1
1275 1275 abort: push failed on remote
1276 1276 [255]
1277 1277 #endif
@@ -1,215 +1,220 b''
1 1 #require test-repo
2 2
3 3 Set vars:
4 4
5 5 $ . "$TESTDIR/helpers-testrepo.sh"
6 6 $ CONTRIBDIR="$TESTDIR/../contrib"
7 7
8 8 Prepare repo:
9 9
10 10 $ hg init
11 11
12 12 $ echo this is file a > a
13 13 $ hg add a
14 14 $ hg commit -m first
15 15
16 16 $ echo adding to file a >> a
17 17 $ hg commit -m second
18 18
19 19 $ echo adding more to file a >> a
20 20 $ hg commit -m third
21 21
22 22 $ hg up -r 0
23 23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 24 $ echo merge-this >> a
25 25 $ hg commit -m merge-able
26 26 created new head
27 27
28 28 $ hg up -r 2
29 29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30 30
31 31 perfstatus
32 32
33 33 $ cat >> $HGRCPATH << EOF
34 34 > [extensions]
35 35 > perfstatusext=$CONTRIBDIR/perf.py
36 36 > [perf]
37 37 > presleep=0
38 38 > stub=on
39 39 > parentscount=1
40 40 > EOF
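
(perf.stub=on makes each perf* command run only a minimal iteration and
suppress its timing line, so every command can be smoke-tested cheaply;
presleep=0 skips the pre-run sleep. A sketch of a real measurement with the
stub disabled; the output shape matches the 'test actual output' section near
the end of this file:)

  $ hg perfstatus --config perf.stub=no
  ! wall * comb * user * sys * (best of *) (glob)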
41 41 $ hg help perfstatusext
42 42 perfstatusext extension - helper extension to measure performance
43 43
44 44 list of commands:
45 45
46 46 perfaddremove
47 47 (no help text available)
48 48 perfancestors
49 49 (no help text available)
50 50 perfancestorset
51 51 (no help text available)
52 52 perfannotate (no help text available)
53 53 perfbdiff benchmark a bdiff between revisions
54 54 perfbookmarks
55 55 benchmark parsing bookmarks from disk to memory
56 56 perfbranchmap
57 57 benchmark the update of a branchmap
58 58 perfbranchmapload
59 59 benchmark reading the branchmap
60 60 perfbundleread
61 61 Benchmark reading of bundle files.
62 62 perfcca (no help text available)
63 63 perfchangegroupchangelog
64 64 Benchmark producing a changelog group for a changegroup.
65 65 perfchangeset
66 66 (no help text available)
67 67 perfctxfiles (no help text available)
68 68 perfdiffwd Profile diff of working directory changes
69 69 perfdirfoldmap
70 70 (no help text available)
71 71 perfdirs (no help text available)
72 72 perfdirstate (no help text available)
73 73 perfdirstatedirs
74 74 (no help text available)
75 75 perfdirstatefoldmap
76 76 (no help text available)
77 77 perfdirstatewrite
78 78 (no help text available)
79 79 perffncacheencode
80 80 (no help text available)
81 81 perffncacheload
82 82 (no help text available)
83 83 perffncachewrite
84 84 (no help text available)
85 85 perfheads (no help text available)
86 86 perfindex (no help text available)
87 87 perflinelogedits
88 88 (no help text available)
89 89 perfloadmarkers
90 90 benchmark the time to parse the on-disk markers for a repo
91 91 perflog (no help text available)
92 92 perflookup (no help text available)
93 93 perflrucachedict
94 94 (no help text available)
95 95 perfmanifest benchmark the time to read a manifest from disk and return a
96 96 usable
97 97 perfmergecalculate
98 98 (no help text available)
99 99 perfmoonwalk benchmark walking the changelog backwards
100 100 perfnodelookup
101 101 (no help text available)
102 102 perfparents (no help text available)
103 103 perfpathcopies
104 104 (no help text available)
105 105 perfphases benchmark phasesets computation
106 perfphasesremote
107 benchmark time needed to analyse phases of the remote server
106 108 perfrawfiles (no help text available)
107 109 perfrevlogchunks
108 110 Benchmark operations on revlog chunks.
109 111 perfrevlogindex
110 112 Benchmark operations against a revlog index.
111 113 perfrevlogrevision
112 114 Benchmark obtaining a revlog revision.
113 115 perfrevlogrevisions
114 116 Benchmark reading a series of revisions from a revlog.
115 117 perfrevrange (no help text available)
116 118 perfrevset benchmark the execution time of a revset
117 119 perfstartup (no help text available)
118 120 perfstatus (no help text available)
119 121 perftags (no help text available)
120 122 perftemplating
121 123 test the rendering time of a given template
122 124 perfunidiff benchmark a unified diff between revisions
123 125 perfvolatilesets
124 126 benchmark the computation of various volatile set
125 127 perfwalk (no help text available)
126 128 perfwrite microbenchmark ui.write
127 129
128 130 (use 'hg help -v perfstatusext' to show built-in aliases and global options)
129 131 $ hg perfaddremove
130 132 $ hg perfancestors
131 133 $ hg perfancestorset 2
132 134 $ hg perfannotate a
133 135 $ hg perfbdiff -c 1
134 136 $ hg perfbdiff --alldata 1
135 137 $ hg perfunidiff -c 1
136 138 $ hg perfunidiff --alldata 1
137 139 $ hg perfbookmarks
138 140 $ hg perfbranchmap
139 141 $ hg perfcca
140 142 $ hg perfchangegroupchangelog
141 143 $ hg perfchangeset 2
142 144 $ hg perfctxfiles 2
143 145 $ hg perfdiffwd
144 146 $ hg perfdirfoldmap
145 147 $ hg perfdirs
146 148 $ hg perfdirstate
147 149 $ hg perfdirstatedirs
148 150 $ hg perfdirstatefoldmap
149 151 $ hg perfdirstatewrite
150 152 #if repofncache
151 153 $ hg perffncacheencode
152 154 $ hg perffncacheload
153 155 $ hg debugrebuildfncache
154 156 fncache already up to date
155 157 $ hg perffncachewrite
156 158 $ hg debugrebuildfncache
157 159 fncache already up to date
158 160 #endif
159 161 $ hg perfheads
160 162 $ hg perfindex
161 163 $ hg perflinelogedits -n 1
162 164 $ hg perfloadmarkers
163 165 $ hg perflog
164 166 $ hg perflookup 2
165 167 $ hg perflrucache
166 168 $ hg perfmanifest 2
167 169 $ hg perfmergecalculate -r 3
168 170 $ hg perfmoonwalk
169 171 $ hg perfnodelookup 2
170 172 $ hg perfpathcopies 1 2
171 173 $ hg perfrawfiles 2
172 174 $ hg perfrevlogindex -c
173 175 #if reporevlogstore
174 176 $ hg perfrevlogrevisions .hg/store/data/a.i
175 177 #endif
176 178 $ hg perfrevlogrevision -m 0
177 179 $ hg perfrevlogchunks -c
178 180 $ hg perfrevrange
179 181 $ hg perfrevset 'all()'
180 182 $ hg perfstartup
181 183 $ hg perfstatus
182 184 $ hg perftags
183 185 $ hg perftemplating
184 186 $ hg perfvolatilesets
185 187 $ hg perfwalk
186 188 $ hg perfparents
187 189
188 190 test actual output
189 191 ------------------
190 192
191 193 normal output:
192 194
193 195 $ hg perfheads --config perf.stub=no
194 196 ! wall * comb * user * sys * (best of *) (glob)
195 197
196 198 detailed output:
197 199
198 200 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
199 201 ! wall * comb * user * sys * (best of *) (glob)
200 202 ! wall * comb * user * sys * (max of *) (glob)
201 203 ! wall * comb * user * sys * (avg of *) (glob)
202 204 ! wall * comb * user * sys * (median of *) (glob)
203 205
204 206 Check perf.py for historical portability
205 207 ----------------------------------------
206 208
207 209 $ cd "$TESTDIR/.."
208 210
209 211 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
210 212 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
211 213 > "$TESTDIR"/check-perf-code.py contrib/perf.py
212 214 contrib/perf.py:\d+: (re)
213 215 > from mercurial import (
214 216 import newer module separately in try clause for early Mercurial
217 contrib/perf.py:\d+: (re)
218 > from mercurial import (
219 import newer module separately in try clause for early Mercurial
215 220 [1]