config: add experimental argument to the config registrar...
Navaneeth Suresh
r42987:e67d9b6b default draft
@@ -1,3063 +1,3068 @@
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
12 12 worst, median, average. If not set, only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
16 16 number of seconds to wait before any group of runs (default: 1)
17 17
18 18 ``pre-run``
19 19 number of runs to perform before starting measurement.
20 20
21 21 ``profile-benchmark``
22 22 Enable profiling for the benchmarked section.
23 23 (The first iteration is benchmarked)
24 24
25 25 ``run-limits``
26 26 Control the number of runs each benchmark will perform. The option value
27 27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 28 conditions are considered in order with the following logic:
29 29
30 30 If the benchmark has been running for <time> seconds and we have performed
31 31 <numberofrun> iterations, stop the benchmark.
32 32
33 33 The default value is: `3.0-100, 10.0-3`
34 34
35 35 ``stub``
36 36 When set, benchmarks will only be run once, useful for testing
37 37 (default: off)
38 38 '''
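# Illustrative hgrc snippet exercising the options documented above; the
# values are arbitrary examples, not recommendations:
#
#   [perf]
#   all-timing = yes
#   presleep = 2
#   run-limits = 3.0-100, 10.0-3
#   stub = no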
39 39
40 40 # "historical portability" policy of perf.py:
41 41 #
42 42 # We have to do:
43 43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 46 # - make historical perf command work correctly with as wide Mercurial
47 47 # version as possible
48 48 #
49 49 # We have to do, if possible with reasonable cost:
50 50 # - make recent perf command for historical feature work correctly
51 51 # with early Mercurial
52 52 #
53 53 # We don't have to do:
54 54 # - make perf command for recent feature work correctly with early
55 55 # Mercurial
56 56
57 57 from __future__ import absolute_import
58 58 import contextlib
59 59 import functools
60 60 import gc
61 61 import os
62 62 import random
63 63 import shutil
64 64 import struct
65 65 import sys
66 66 import tempfile
67 67 import threading
68 68 import time
69 69 from mercurial import (
70 70 changegroup,
71 71 cmdutil,
72 72 commands,
73 73 copies,
74 74 error,
75 75 extensions,
76 76 hg,
77 77 mdiff,
78 78 merge,
79 79 revlog,
80 80 util,
81 81 )
82 82
83 83 # for "historical portability":
84 84 # try to import modules separately (in dict order), and ignore
85 85 # failure, because these aren't available with early Mercurial
86 86 try:
87 87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 88 except ImportError:
89 89 pass
90 90 try:
91 91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 92 except ImportError:
93 93 pass
94 94 try:
95 95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 96 dir(registrar) # forcibly load it
97 97 except ImportError:
98 98 registrar = None
99 99 try:
100 100 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
101 101 except ImportError:
102 102 pass
103 103 try:
104 104 from mercurial.utils import repoviewutil # since 5.0
105 105 except ImportError:
106 106 repoviewutil = None
107 107 try:
108 108 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
109 109 except ImportError:
110 110 pass
111 111 try:
112 112 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
113 113 except ImportError:
114 114 pass
115 115
116 116 try:
117 117 from mercurial import profiling
118 118 except ImportError:
119 119 profiling = None
120 120
121 121 def identity(a):
122 122 return a
123 123
124 124 try:
125 125 from mercurial import pycompat
126 126 getargspec = pycompat.getargspec # added to module after 4.5
127 127 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
128 128 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
129 129 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
130 130 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
131 131 if pycompat.ispy3:
132 132 _maxint = sys.maxsize # per py3 docs for replacing maxint
133 133 else:
134 134 _maxint = sys.maxint
135 135 except (ImportError, AttributeError):
136 136 import inspect
137 137 getargspec = inspect.getargspec
138 138 _byteskwargs = identity
139 139 fsencode = identity # no py3 support
140 140 _maxint = sys.maxint # no py3 support
141 141 _sysstr = lambda x: x # no py3 support
142 142 _xrange = xrange
143 143
144 144 try:
145 145 # 4.7+
146 146 queue = pycompat.queue.Queue
147 147 except (AttributeError, ImportError):
148 148 # <4.7.
149 149 try:
150 150 queue = pycompat.queue
151 151 except (AttributeError, ImportError):
152 152 queue = util.queue
153 153
154 154 try:
155 155 from mercurial import logcmdutil
156 156 makelogtemplater = logcmdutil.maketemplater
157 157 except (AttributeError, ImportError):
158 158 try:
159 159 makelogtemplater = cmdutil.makelogtemplater
160 160 except (AttributeError, ImportError):
161 161 makelogtemplater = None
162 162
163 163 # for "historical portability":
164 164 # define util.safehasattr forcibly, because util.safehasattr has been
165 165 # available since 1.9.3 (or 94b200a11cf7)
166 166 _undefined = object()
167 167 def safehasattr(thing, attr):
168 168 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
169 169 setattr(util, 'safehasattr', safehasattr)
170 170
171 171 # for "historical portability":
172 172 # define util.timer forcibly, because util.timer has been available
173 173 # since ae5d60bb70c9
174 174 if safehasattr(time, 'perf_counter'):
175 175 util.timer = time.perf_counter
176 176 elif os.name == b'nt':
177 177 util.timer = time.clock
178 178 else:
179 179 util.timer = time.time
180 180
181 181 # for "historical portability":
182 182 # use locally defined empty option list, if formatteropts isn't
183 183 # available, because commands.formatteropts has been available since
184 184 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
185 185 # available since 2.2 (or ae5f92e154d3)
186 186 formatteropts = getattr(cmdutil, "formatteropts",
187 187 getattr(commands, "formatteropts", []))
188 188
189 189 # for "historical portability":
190 190 # use locally defined option list, if debugrevlogopts isn't available,
191 191 # because commands.debugrevlogopts has been available since 3.7 (or
192 192 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
193 193 # since 1.9 (or a79fea6b3e77).
194 194 revlogopts = getattr(cmdutil, "debugrevlogopts",
195 195 getattr(commands, "debugrevlogopts", [
196 196 (b'c', b'changelog', False, (b'open changelog')),
197 197 (b'm', b'manifest', False, (b'open manifest')),
198 198 (b'', b'dir', False, (b'open directory manifest')),
199 199 ]))
200 200
201 201 cmdtable = {}
202 202
203 203 # for "historical portability":
204 204 # define parsealiases locally, because cmdutil.parsealiases has been
205 205 # available since 1.5 (or 6252852b4332)
206 206 def parsealiases(cmd):
207 207 return cmd.split(b"|")
208 208
209 209 if safehasattr(registrar, 'command'):
210 210 command = registrar.command(cmdtable)
211 211 elif safehasattr(cmdutil, 'command'):
212 212 command = cmdutil.command(cmdtable)
213 213 if b'norepo' not in getargspec(command).args:
214 214 # for "historical portability":
215 215 # wrap original cmdutil.command, because "norepo" option has
216 216 # been available since 3.1 (or 75a96326cecb)
217 217 _command = command
218 218 def command(name, options=(), synopsis=None, norepo=False):
219 219 if norepo:
220 220 commands.norepo += b' %s' % b' '.join(parsealiases(name))
221 221 return _command(name, list(options), synopsis)
222 222 else:
223 223 # for "historical portability":
224 224 # define "@command" annotation locally, because cmdutil.command
225 225 # has been available since 1.9 (or 2daa5179e73f)
226 226 def command(name, options=(), synopsis=None, norepo=False):
227 227 def decorator(func):
228 228 if synopsis:
229 229 cmdtable[name] = func, list(options), synopsis
230 230 else:
231 231 cmdtable[name] = func, list(options)
232 232 if norepo:
233 233 commands.norepo += b' %s' % b' '.join(parsealiases(name))
234 234 return func
235 235 return decorator
236 236
237 237 try:
238 238 import mercurial.registrar
239 239 import mercurial.configitems
240 240 configtable = {}
241 241 configitem = mercurial.registrar.configitem(configtable)
242 242 configitem(b'perf', b'presleep',
243 243 default=mercurial.configitems.dynamicdefault,
244 experimental=True,
244 245 )
245 246 configitem(b'perf', b'stub',
246 247 default=mercurial.configitems.dynamicdefault,
248 experimental=True,
247 249 )
248 250 configitem(b'perf', b'parentscount',
249 251 default=mercurial.configitems.dynamicdefault,
252 experimental=True,
250 253 )
251 254 configitem(b'perf', b'all-timing',
252 255 default=mercurial.configitems.dynamicdefault,
256 experimental=True,
253 257 )
254 258 configitem(b'perf', b'pre-run',
255 259 default=mercurial.configitems.dynamicdefault,
256 260 )
257 261 configitem(b'perf', b'profile-benchmark',
258 262 default=mercurial.configitems.dynamicdefault,
259 263 )
260 264 configitem(b'perf', b'run-limits',
261 265 default=mercurial.configitems.dynamicdefault,
266 experimental=True,
262 267 )
263 268 except (ImportError, AttributeError):
264 269 pass
265 270
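# Note: the `experimental=True` argument passed to configitem() above is what
# this change adds; it flags the corresponding perf options as experimental in
# the config registrar.  Registering one more option would follow the same
# pattern (hypothetical name, shown only as a sketch):
#
#   configitem(b'perf', b'example-knob',
#       default=mercurial.configitems.dynamicdefault,
#       experimental=True,
#   )
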
266 271 def getlen(ui):
267 272 if ui.configbool(b"perf", b"stub", False):
268 273 return lambda x: 1
269 274 return len
270 275
271 276 class noop(object):
272 277 """dummy context manager"""
273 278 def __enter__(self):
274 279 pass
275 280 def __exit__(self, *args):
276 281 pass
277 282
278 283 NOOPCTX = noop()
279 284
280 285 def gettimer(ui, opts=None):
281 286 """return a timer function and formatter: (timer, formatter)
282 287
283 288 This function exists to gather the creation of formatter in a single
284 289 place instead of duplicating it in all performance commands."""
285 290
286 291 # enforce an idle period before execution to counteract power management
287 292 # experimental config: perf.presleep
288 293 time.sleep(getint(ui, b"perf", b"presleep", 1))
289 294
290 295 if opts is None:
291 296 opts = {}
292 297 # redirect all to stderr unless buffer api is in use
293 298 if not ui._buffers:
294 299 ui = ui.copy()
295 300 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
296 301 if uifout:
297 302 # for "historical portability":
298 303 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
299 304 uifout.set(ui.ferr)
300 305
301 306 # get a formatter
302 307 uiformatter = getattr(ui, 'formatter', None)
303 308 if uiformatter:
304 309 fm = uiformatter(b'perf', opts)
305 310 else:
306 311 # for "historical portability":
307 312 # define formatter locally, because ui.formatter has been
308 313 # available since 2.2 (or ae5f92e154d3)
309 314 from mercurial import node
310 315 class defaultformatter(object):
311 316 """Minimized composition of baseformatter and plainformatter
312 317 """
313 318 def __init__(self, ui, topic, opts):
314 319 self._ui = ui
315 320 if ui.debugflag:
316 321 self.hexfunc = node.hex
317 322 else:
318 323 self.hexfunc = node.short
319 324 def __nonzero__(self):
320 325 return False
321 326 __bool__ = __nonzero__
322 327 def startitem(self):
323 328 pass
324 329 def data(self, **data):
325 330 pass
326 331 def write(self, fields, deftext, *fielddata, **opts):
327 332 self._ui.write(deftext % fielddata, **opts)
328 333 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
329 334 if cond:
330 335 self._ui.write(deftext % fielddata, **opts)
331 336 def plain(self, text, **opts):
332 337 self._ui.write(text, **opts)
333 338 def end(self):
334 339 pass
335 340 fm = defaultformatter(ui, b'perf', opts)
336 341
337 342 # stub function, runs code only once instead of in a loop
338 343 # experimental config: perf.stub
339 344 if ui.configbool(b"perf", b"stub", False):
340 345 return functools.partial(stub_timer, fm), fm
341 346
342 347 # experimental config: perf.all-timing
343 348 displayall = ui.configbool(b"perf", b"all-timing", False)
344 349
345 350 # experimental config: perf.run-limits
346 351 limitspec = ui.configlist(b"perf", b"run-limits", [])
347 352 limits = []
348 353 for item in limitspec:
349 354 parts = item.split(b'-', 1)
350 355 if len(parts) < 2:
351 356 ui.warn((b'malformatted run limit entry, missing "-": %s\n'
352 357 % item))
353 358 continue
354 359 try:
355 360 time_limit = float(pycompat.sysstr(parts[0]))
356 361 except ValueError as e:
357 362 ui.warn((b'malformatted run limit entry, %s: %s\n'
358 363 % (pycompat.bytestr(e), item)))
359 364 continue
360 365 try:
361 366 run_limit = int(pycompat.sysstr(parts[1]))
362 367 except ValueError as e:
363 368 ui.warn((b'malformatted run limit entry, %s: %s\n'
364 369 % (pycompat.bytestr(e), item)))
365 370 continue
366 371 limits.append((time_limit, run_limit))
367 372 if not limits:
368 373 limits = DEFAULTLIMITS
369 374
370 375 profiler = None
371 376 if profiling is not None:
372 377 if ui.configbool(b"perf", b"profile-benchmark", False):
373 378 profiler = profiling.profile(ui)
374 379
375 380 prerun = getint(ui, b"perf", b"pre-run", 0)
376 381 t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
377 382 prerun=prerun, profiler=profiler)
378 383 return t, fm
379 384
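# Typical use of the (timer, formatter) pair returned by gettimer(), as seen
# in the perf commands below (sketch; `expensive_call` stands in for whatever
# operation is being measured):
#
#   timer, fm = gettimer(ui, opts)
#   timer(lambda: expensive_call())
#   fm.end()
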
380 385 def stub_timer(fm, func, setup=None, title=None):
381 386 if setup is not None:
382 387 setup()
383 388 func()
384 389
385 390 @contextlib.contextmanager
386 391 def timeone():
387 392 r = []
388 393 ostart = os.times()
389 394 cstart = util.timer()
390 395 yield r
391 396 cstop = util.timer()
392 397 ostop = os.times()
393 398 a, b = ostart, ostop
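    # record (wall clock, user CPU, system CPU) for the timed block;
    # os.times() reports user time in slot 0 and system time in slot 1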
394 399 r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
395 400
396 401
397 402 # list of stop conditions (elapsed time, minimal run count)
398 403 DEFAULTLIMITS = (
399 404 (3.0, 100),
400 405 (10.0, 3),
401 406 )
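# With these defaults a benchmark keeps running until it has lasted at least
# 3.0 seconds and completed 100 runs, or lasted at least 10.0 seconds and
# completed 3 runs, whichever condition is satisfied first (see _timer below).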
402 407
403 408 def _timer(fm, func, setup=None, title=None, displayall=False,
404 409 limits=DEFAULTLIMITS, prerun=0, profiler=None):
405 410 gc.collect()
406 411 results = []
407 412 begin = util.timer()
408 413 count = 0
409 414 if profiler is None:
410 415 profiler = NOOPCTX
411 416 for i in range(prerun):
412 417 if setup is not None:
413 418 setup()
414 419 func()
415 420 keepgoing = True
416 421 while keepgoing:
417 422 if setup is not None:
418 423 setup()
419 424 with profiler:
420 425 with timeone() as item:
421 426 r = func()
422 427 profiler = NOOPCTX
423 428 count += 1
424 429 results.append(item[0])
425 430 cstop = util.timer()
426 431 # Look for a stop condition.
427 432 elapsed = cstop - begin
428 433 for t, mincount in limits:
429 434 if elapsed >= t and count >= mincount:
430 435 keepgoing = False
431 436 break
432 437
433 438 formatone(fm, results, title=title, result=r,
434 439 displayall=displayall)
435 440
436 441 def formatone(fm, timings, title=None, result=None, displayall=False):
437 442
438 443 count = len(timings)
439 444
440 445 fm.startitem()
441 446
442 447 if title:
443 448 fm.write(b'title', b'! %s\n', title)
444 449 if result:
445 450 fm.write(b'result', b'! result: %s\n', result)
446 451 def display(role, entry):
447 452 prefix = b''
448 453 if role != b'best':
449 454 prefix = b'%s.' % role
450 455 fm.plain(b'!')
451 456 fm.write(prefix + b'wall', b' wall %f', entry[0])
452 457 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
453 458 fm.write(prefix + b'user', b' user %f', entry[1])
454 459 fm.write(prefix + b'sys', b' sys %f', entry[2])
455 460 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
456 461 fm.plain(b'\n')
457 462 timings.sort()
458 463 min_val = timings[0]
459 464 display(b'best', min_val)
460 465 if displayall:
461 466 max_val = timings[-1]
462 467 display(b'max', max_val)
463 468 avg = tuple([sum(x) / count for x in zip(*timings)])
464 469 display(b'avg', avg)
465 470 median = timings[len(timings) // 2]
466 471 display(b'median', median)
467 472
468 473 # utilities for historical portability
469 474
470 475 def getint(ui, section, name, default):
471 476 # for "historical portability":
472 477 # ui.configint has been available since 1.9 (or fa2b596db182)
473 478 v = ui.config(section, name, None)
474 479 if v is None:
475 480 return default
476 481 try:
477 482 return int(v)
478 483 except ValueError:
479 484 raise error.ConfigError((b"%s.%s is not an integer ('%s')")
480 485 % (section, name, v))
481 486
482 487 def safeattrsetter(obj, name, ignoremissing=False):
483 488 """Ensure that 'obj' has 'name' attribute before subsequent setattr
484 489
485 490 This function aborts if 'obj' doesn't have the 'name' attribute
486 491 at runtime. This avoids overlooking a future removal of an attribute,
487 492 which would break assumptions of the performance measurement.
488 493
489 494 This function returns the object to (1) assign a new value, and
490 495 (2) restore an original value to the attribute.
491 496
492 497 If 'ignoremissing' is true, a missing 'name' attribute doesn't cause
493 498 an abort, and this function returns None. This is useful for
494 499 examining an attribute which isn't guaranteed to exist in all
495 500 Mercurial versions.
496 501 """
497 502 if not util.safehasattr(obj, name):
498 503 if ignoremissing:
499 504 return None
500 505 raise error.Abort((b"missing attribute %s of %s might break assumption"
501 506 b" of performance measurement") % (name, obj))
502 507
503 508 origvalue = getattr(obj, _sysstr(name))
504 509 class attrutil(object):
505 510 def set(self, newvalue):
506 511 setattr(obj, _sysstr(name), newvalue)
507 512 def restore(self):
508 513 setattr(obj, _sysstr(name), origvalue)
509 514
510 515 return attrutil()
511 516
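# Sketch of how safeattrsetter() is typically used (this mirrors the ui.fout
# redirection in gettimer() above):
#
#   uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
#   if uifout:
#       uifout.set(ui.ferr)   # assign a new value
#       ...                   # run the measurement
#       uifout.restore()      # put the original value back
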
512 517 # utilities to examine internal API changes
513 518
514 519 def getbranchmapsubsettable():
515 520 # for "historical portability":
516 521 # subsettable is defined in:
517 522 # - branchmap since 2.9 (or 175c6fd8cacc)
518 523 # - repoview since 2.5 (or 59a9f18d4587)
519 524 # - repoviewutil since 5.0
520 525 for mod in (branchmap, repoview, repoviewutil):
521 526 subsettable = getattr(mod, 'subsettable', None)
522 527 if subsettable:
523 528 return subsettable
524 529
525 530 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
526 531 # branchmap and repoview modules exist, but subsettable attribute
527 532 # doesn't)
528 533 raise error.Abort((b"perfbranchmap not available with this Mercurial"),
529 534 hint=b"use 2.5 or later")
530 535
531 536 def getsvfs(repo):
532 537 """Return appropriate object to access files under .hg/store
533 538 """
534 539 # for "historical portability":
535 540 # repo.svfs has been available since 2.3 (or 7034365089bf)
536 541 svfs = getattr(repo, 'svfs', None)
537 542 if svfs:
538 543 return svfs
539 544 else:
540 545 return getattr(repo, 'sopener')
541 546
542 547 def getvfs(repo):
543 548 """Return appropriate object to access files under .hg
544 549 """
545 550 # for "historical portability":
546 551 # repo.vfs has been available since 2.3 (or 7034365089bf)
547 552 vfs = getattr(repo, 'vfs', None)
548 553 if vfs:
549 554 return vfs
550 555 else:
551 556 return getattr(repo, 'opener')
552 557
553 558 def repocleartagscachefunc(repo):
554 559 """Return the function to clear tags cache according to repo internal API
555 560 """
556 561 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
557 562 # in this case, setattr(repo, '_tagscache', None) or so isn't
558 563 # the correct way to clear the tags cache, because existing code paths
559 564 # expect _tagscache to be a structured object.
560 565 def clearcache():
561 566 # _tagscache has been filteredpropertycache since 2.5 (or
562 567 # 98c867ac1330), and delattr() can't work in such case
563 568 if b'_tagscache' in vars(repo):
564 569 del repo.__dict__[b'_tagscache']
565 570 return clearcache
566 571
567 572 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
568 573 if repotags: # since 1.4 (or 5614a628d173)
569 574 return lambda : repotags.set(None)
570 575
571 576 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
572 577 if repotagscache: # since 0.6 (or d7df759d0e97)
573 578 return lambda : repotagscache.set(None)
574 579
575 580 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
576 581 # this point, but it isn't so problematic, because:
577 582 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
578 583 # in perftags() causes failure soon
579 584 # - perf.py itself has been available since 1.1 (or eb240755386d)
580 585 raise error.Abort((b"tags API of this hg command is unknown"))
581 586
582 587 # utilities to clear cache
583 588
584 589 def clearfilecache(obj, attrname):
585 590 unfiltered = getattr(obj, 'unfiltered', None)
586 591 if unfiltered is not None:
587 592 obj = obj.unfiltered()
588 593 if attrname in vars(obj):
589 594 delattr(obj, attrname)
590 595 obj._filecache.pop(attrname, None)
591 596
592 597 def clearchangelog(repo):
593 598 if repo is not repo.unfiltered():
594 599 object.__setattr__(repo, r'_clcachekey', None)
595 600 object.__setattr__(repo, r'_clcache', None)
596 601 clearfilecache(repo.unfiltered(), 'changelog')
597 602
598 603 # perf commands
599 604
600 605 @command(b'perfwalk', formatteropts)
601 606 def perfwalk(ui, repo, *pats, **opts):
602 607 opts = _byteskwargs(opts)
603 608 timer, fm = gettimer(ui, opts)
604 609 m = scmutil.match(repo[None], pats, {})
605 610 timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
606 611 ignored=False))))
607 612 fm.end()
608 613
609 614 @command(b'perfannotate', formatteropts)
610 615 def perfannotate(ui, repo, f, **opts):
611 616 opts = _byteskwargs(opts)
612 617 timer, fm = gettimer(ui, opts)
613 618 fc = repo[b'.'][f]
614 619 timer(lambda: len(fc.annotate(True)))
615 620 fm.end()
616 621
617 622 @command(b'perfstatus',
618 623 [(b'u', b'unknown', False,
619 624 b'ask status to look for unknown files')] + formatteropts)
620 625 def perfstatus(ui, repo, **opts):
621 626 opts = _byteskwargs(opts)
622 627 #m = match.always(repo.root, repo.getcwd())
623 628 #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
624 629 # False))))
625 630 timer, fm = gettimer(ui, opts)
626 631 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
627 632 fm.end()
628 633
629 634 @command(b'perfaddremove', formatteropts)
630 635 def perfaddremove(ui, repo, **opts):
631 636 opts = _byteskwargs(opts)
632 637 timer, fm = gettimer(ui, opts)
633 638 try:
634 639 oldquiet = repo.ui.quiet
635 640 repo.ui.quiet = True
636 641 matcher = scmutil.match(repo[None])
637 642 opts[b'dry_run'] = True
638 643 if b'uipathfn' in getargspec(scmutil.addremove).args:
639 644 uipathfn = scmutil.getuipathfn(repo)
640 645 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
641 646 else:
642 647 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
643 648 finally:
644 649 repo.ui.quiet = oldquiet
645 650 fm.end()
646 651
647 652 def clearcaches(cl):
648 653 # behave somewhat consistently across internal API changes
649 654 if util.safehasattr(cl, b'clearcaches'):
650 655 cl.clearcaches()
651 656 elif util.safehasattr(cl, b'_nodecache'):
652 657 from mercurial.node import nullid, nullrev
653 658 cl._nodecache = {nullid: nullrev}
654 659 cl._nodepos = None
655 660
656 661 @command(b'perfheads', formatteropts)
657 662 def perfheads(ui, repo, **opts):
658 663 """benchmark the computation of a changelog heads"""
659 664 opts = _byteskwargs(opts)
660 665 timer, fm = gettimer(ui, opts)
661 666 cl = repo.changelog
662 667 def s():
663 668 clearcaches(cl)
664 669 def d():
665 670 len(cl.headrevs())
666 671 timer(d, setup=s)
667 672 fm.end()
668 673
669 674 @command(b'perftags', formatteropts+
670 675 [
671 676 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
672 677 ])
673 678 def perftags(ui, repo, **opts):
674 679 opts = _byteskwargs(opts)
675 680 timer, fm = gettimer(ui, opts)
676 681 repocleartagscache = repocleartagscachefunc(repo)
677 682 clearrevlogs = opts[b'clear_revlogs']
678 683 def s():
679 684 if clearrevlogs:
680 685 clearchangelog(repo)
681 686 clearfilecache(repo.unfiltered(), 'manifest')
682 687 repocleartagscache()
683 688 def t():
684 689 return len(repo.tags())
685 690 timer(t, setup=s)
686 691 fm.end()
687 692
688 693 @command(b'perfancestors', formatteropts)
689 694 def perfancestors(ui, repo, **opts):
690 695 opts = _byteskwargs(opts)
691 696 timer, fm = gettimer(ui, opts)
692 697 heads = repo.changelog.headrevs()
693 698 def d():
694 699 for a in repo.changelog.ancestors(heads):
695 700 pass
696 701 timer(d)
697 702 fm.end()
698 703
699 704 @command(b'perfancestorset', formatteropts)
700 705 def perfancestorset(ui, repo, revset, **opts):
701 706 opts = _byteskwargs(opts)
702 707 timer, fm = gettimer(ui, opts)
703 708 revs = repo.revs(revset)
704 709 heads = repo.changelog.headrevs()
705 710 def d():
706 711 s = repo.changelog.ancestors(heads)
707 712 for rev in revs:
708 713 rev in s
709 714 timer(d)
710 715 fm.end()
711 716
712 717 @command(b'perfdiscovery', formatteropts, b'PATH')
713 718 def perfdiscovery(ui, repo, path, **opts):
714 719 """benchmark discovery between local repo and the peer at given path
715 720 """
716 721 repos = [repo, None]
717 722 timer, fm = gettimer(ui, opts)
718 723 path = ui.expandpath(path)
719 724
720 725 def s():
721 726 repos[1] = hg.peer(ui, opts, path)
722 727 def d():
723 728 setdiscovery.findcommonheads(ui, *repos)
724 729 timer(d, setup=s)
725 730 fm.end()
726 731
727 732 @command(b'perfbookmarks', formatteropts +
728 733 [
729 734 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
730 735 ])
731 736 def perfbookmarks(ui, repo, **opts):
732 737 """benchmark parsing bookmarks from disk to memory"""
733 738 opts = _byteskwargs(opts)
734 739 timer, fm = gettimer(ui, opts)
735 740
736 741 clearrevlogs = opts[b'clear_revlogs']
737 742 def s():
738 743 if clearrevlogs:
739 744 clearchangelog(repo)
740 745 clearfilecache(repo, b'_bookmarks')
741 746 def d():
742 747 repo._bookmarks
743 748 timer(d, setup=s)
744 749 fm.end()
745 750
746 751 @command(b'perfbundleread', formatteropts, b'BUNDLE')
747 752 def perfbundleread(ui, repo, bundlepath, **opts):
748 753 """Benchmark reading of bundle files.
749 754
750 755 This command is meant to isolate the I/O part of bundle reading as
751 756 much as possible.
752 757 """
753 758 from mercurial import (
754 759 bundle2,
755 760 exchange,
756 761 streamclone,
757 762 )
758 763
759 764 opts = _byteskwargs(opts)
760 765
761 766 def makebench(fn):
762 767 def run():
763 768 with open(bundlepath, b'rb') as fh:
764 769 bundle = exchange.readbundle(ui, fh, bundlepath)
765 770 fn(bundle)
766 771
767 772 return run
768 773
769 774 def makereadnbytes(size):
770 775 def run():
771 776 with open(bundlepath, b'rb') as fh:
772 777 bundle = exchange.readbundle(ui, fh, bundlepath)
773 778 while bundle.read(size):
774 779 pass
775 780
776 781 return run
777 782
778 783 def makestdioread(size):
779 784 def run():
780 785 with open(bundlepath, b'rb') as fh:
781 786 while fh.read(size):
782 787 pass
783 788
784 789 return run
785 790
786 791 # bundle1
787 792
788 793 def deltaiter(bundle):
789 794 for delta in bundle.deltaiter():
790 795 pass
791 796
792 797 def iterchunks(bundle):
793 798 for chunk in bundle.getchunks():
794 799 pass
795 800
796 801 # bundle2
797 802
798 803 def forwardchunks(bundle):
799 804 for chunk in bundle._forwardchunks():
800 805 pass
801 806
802 807 def iterparts(bundle):
803 808 for part in bundle.iterparts():
804 809 pass
805 810
806 811 def iterpartsseekable(bundle):
807 812 for part in bundle.iterparts(seekable=True):
808 813 pass
809 814
810 815 def seek(bundle):
811 816 for part in bundle.iterparts(seekable=True):
812 817 part.seek(0, os.SEEK_END)
813 818
814 819 def makepartreadnbytes(size):
815 820 def run():
816 821 with open(bundlepath, b'rb') as fh:
817 822 bundle = exchange.readbundle(ui, fh, bundlepath)
818 823 for part in bundle.iterparts():
819 824 while part.read(size):
820 825 pass
821 826
822 827 return run
823 828
824 829 benches = [
825 830 (makestdioread(8192), b'read(8k)'),
826 831 (makestdioread(16384), b'read(16k)'),
827 832 (makestdioread(32768), b'read(32k)'),
828 833 (makestdioread(131072), b'read(128k)'),
829 834 ]
830 835
831 836 with open(bundlepath, b'rb') as fh:
832 837 bundle = exchange.readbundle(ui, fh, bundlepath)
833 838
834 839 if isinstance(bundle, changegroup.cg1unpacker):
835 840 benches.extend([
836 841 (makebench(deltaiter), b'cg1 deltaiter()'),
837 842 (makebench(iterchunks), b'cg1 getchunks()'),
838 843 (makereadnbytes(8192), b'cg1 read(8k)'),
839 844 (makereadnbytes(16384), b'cg1 read(16k)'),
840 845 (makereadnbytes(32768), b'cg1 read(32k)'),
841 846 (makereadnbytes(131072), b'cg1 read(128k)'),
842 847 ])
843 848 elif isinstance(bundle, bundle2.unbundle20):
844 849 benches.extend([
845 850 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
846 851 (makebench(iterparts), b'bundle2 iterparts()'),
847 852 (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
848 853 (makebench(seek), b'bundle2 part seek()'),
849 854 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
850 855 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
851 856 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
852 857 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
853 858 ])
854 859 elif isinstance(bundle, streamclone.streamcloneapplier):
855 860 raise error.Abort(b'stream clone bundles not supported')
856 861 else:
857 862 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
858 863
859 864 for fn, title in benches:
860 865 timer, fm = gettimer(ui, opts)
861 866 timer(fn, title=title)
862 867 fm.end()
863 868
864 869 @command(b'perfchangegroupchangelog', formatteropts +
865 870 [(b'', b'cgversion', b'02', b'changegroup version'),
866 871 (b'r', b'rev', b'', b'revisions to add to changegroup')])
867 872 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
868 873 """Benchmark producing a changelog group for a changegroup.
869 874
870 875 This measures the time spent processing the changelog during a
871 876 bundle operation. This occurs during `hg bundle` and on a server
872 877 processing a `getbundle` wire protocol request (handles clones
873 878 and pull requests).
874 879
875 880 By default, all revisions are added to the changegroup.
876 881 """
877 882 opts = _byteskwargs(opts)
878 883 cl = repo.changelog
879 884 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
880 885 bundler = changegroup.getbundler(cgversion, repo)
881 886
882 887 def d():
883 888 state, chunks = bundler._generatechangelog(cl, nodes)
884 889 for chunk in chunks:
885 890 pass
886 891
887 892 timer, fm = gettimer(ui, opts)
888 893
889 894 # Terminal printing can interfere with timing. So disable it.
890 895 with ui.configoverride({(b'progress', b'disable'): True}):
891 896 timer(d)
892 897
893 898 fm.end()
894 899
895 900 @command(b'perfdirs', formatteropts)
896 901 def perfdirs(ui, repo, **opts):
897 902 opts = _byteskwargs(opts)
898 903 timer, fm = gettimer(ui, opts)
899 904 dirstate = repo.dirstate
900 905 b'a' in dirstate
901 906 def d():
902 907 dirstate.hasdir(b'a')
903 908 del dirstate._map._dirs
904 909 timer(d)
905 910 fm.end()
906 911
907 912 @command(b'perfdirstate', formatteropts)
908 913 def perfdirstate(ui, repo, **opts):
909 914 opts = _byteskwargs(opts)
910 915 timer, fm = gettimer(ui, opts)
911 916 b"a" in repo.dirstate
912 917 def d():
913 918 repo.dirstate.invalidate()
914 919 b"a" in repo.dirstate
915 920 timer(d)
916 921 fm.end()
917 922
918 923 @command(b'perfdirstatedirs', formatteropts)
919 924 def perfdirstatedirs(ui, repo, **opts):
920 925 opts = _byteskwargs(opts)
921 926 timer, fm = gettimer(ui, opts)
922 927 b"a" in repo.dirstate
923 928 def d():
924 929 repo.dirstate.hasdir(b"a")
925 930 del repo.dirstate._map._dirs
926 931 timer(d)
927 932 fm.end()
928 933
929 934 @command(b'perfdirstatefoldmap', formatteropts)
930 935 def perfdirstatefoldmap(ui, repo, **opts):
931 936 opts = _byteskwargs(opts)
932 937 timer, fm = gettimer(ui, opts)
933 938 dirstate = repo.dirstate
934 939 b'a' in dirstate
935 940 def d():
936 941 dirstate._map.filefoldmap.get(b'a')
937 942 del dirstate._map.filefoldmap
938 943 timer(d)
939 944 fm.end()
940 945
941 946 @command(b'perfdirfoldmap', formatteropts)
942 947 def perfdirfoldmap(ui, repo, **opts):
943 948 opts = _byteskwargs(opts)
944 949 timer, fm = gettimer(ui, opts)
945 950 dirstate = repo.dirstate
946 951 b'a' in dirstate
947 952 def d():
948 953 dirstate._map.dirfoldmap.get(b'a')
949 954 del dirstate._map.dirfoldmap
950 955 del dirstate._map._dirs
951 956 timer(d)
952 957 fm.end()
953 958
954 959 @command(b'perfdirstatewrite', formatteropts)
955 960 def perfdirstatewrite(ui, repo, **opts):
956 961 opts = _byteskwargs(opts)
957 962 timer, fm = gettimer(ui, opts)
958 963 ds = repo.dirstate
959 964 b"a" in ds
960 965 def d():
961 966 ds._dirty = True
962 967 ds.write(repo.currenttransaction())
963 968 timer(d)
964 969 fm.end()
965 970
966 971 def _getmergerevs(repo, opts):
967 972 """parse command argument to return rev involved in merge
968 973
969 974 input: options dictionnary with `rev`, `from` and `bse`
970 975 output: (localctx, otherctx, basectx)
971 976 """
972 977 if opts[b'from']:
973 978 fromrev = scmutil.revsingle(repo, opts[b'from'])
974 979 wctx = repo[fromrev]
975 980 else:
976 981 wctx = repo[None]
977 982 # we don't want working dir files to be stat'd in the benchmark, so
978 983 # prime that cache
979 984 wctx.dirty()
980 985 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
981 986 if opts[b'base']:
982 987 fromrev = scmutil.revsingle(repo, opts[b'base'])
983 988 ancestor = repo[fromrev]
984 989 else:
985 990 ancestor = wctx.ancestor(rctx)
986 991 return (wctx, rctx, ancestor)
987 992
988 993 @command(b'perfmergecalculate',
989 994 [
990 995 (b'r', b'rev', b'.', b'rev to merge against'),
991 996 (b'', b'from', b'', b'rev to merge from'),
992 997 (b'', b'base', b'', b'the revision to use as base'),
993 998 ] + formatteropts)
994 999 def perfmergecalculate(ui, repo, **opts):
995 1000 opts = _byteskwargs(opts)
996 1001 timer, fm = gettimer(ui, opts)
997 1002
998 1003 wctx, rctx, ancestor = _getmergerevs(repo, opts)
999 1004 def d():
1000 1005 # acceptremote is True because we don't want prompts in the middle of
1001 1006 # our benchmark
1002 1007 merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
1003 1008 acceptremote=True, followcopies=True)
1004 1009 timer(d)
1005 1010 fm.end()
1006 1011
1007 1012 @command(b'perfmergecopies',
1008 1013 [
1009 1014 (b'r', b'rev', b'.', b'rev to merge against'),
1010 1015 (b'', b'from', b'', b'rev to merge from'),
1011 1016 (b'', b'base', b'', b'the revision to use as base'),
1012 1017 ] + formatteropts)
1013 1018 def perfmergecopies(ui, repo, **opts):
1014 1019 """measure runtime of `copies.mergecopies`"""
1015 1020 opts = _byteskwargs(opts)
1016 1021 timer, fm = gettimer(ui, opts)
1017 1022 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1018 1023 def d():
1019 1024 # acceptremote is True because we don't want prompts in the middle of
1020 1025 # our benchmark
1021 1026 copies.mergecopies(repo, wctx, rctx, ancestor)
1022 1027 timer(d)
1023 1028 fm.end()
1024 1029
1025 1030 @command(b'perfpathcopies', [], b"REV REV")
1026 1031 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1027 1032 """benchmark the copy tracing logic"""
1028 1033 opts = _byteskwargs(opts)
1029 1034 timer, fm = gettimer(ui, opts)
1030 1035 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1031 1036 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1032 1037 def d():
1033 1038 copies.pathcopies(ctx1, ctx2)
1034 1039 timer(d)
1035 1040 fm.end()
1036 1041
1037 1042 @command(b'perfphases',
1038 1043 [(b'', b'full', False, b'include file reading time too'),
1039 1044 ], b"")
1040 1045 def perfphases(ui, repo, **opts):
1041 1046 """benchmark phasesets computation"""
1042 1047 opts = _byteskwargs(opts)
1043 1048 timer, fm = gettimer(ui, opts)
1044 1049 _phases = repo._phasecache
1045 1050 full = opts.get(b'full')
1046 1051 def d():
1047 1052 phases = _phases
1048 1053 if full:
1049 1054 clearfilecache(repo, b'_phasecache')
1050 1055 phases = repo._phasecache
1051 1056 phases.invalidate()
1052 1057 phases.loadphaserevs(repo)
1053 1058 timer(d)
1054 1059 fm.end()
1055 1060
1056 1061 @command(b'perfphasesremote',
1057 1062 [], b"[DEST]")
1058 1063 def perfphasesremote(ui, repo, dest=None, **opts):
1059 1064 """benchmark time needed to analyse phases of the remote server"""
1060 1065 from mercurial.node import (
1061 1066 bin,
1062 1067 )
1063 1068 from mercurial import (
1064 1069 exchange,
1065 1070 hg,
1066 1071 phases,
1067 1072 )
1068 1073 opts = _byteskwargs(opts)
1069 1074 timer, fm = gettimer(ui, opts)
1070 1075
1071 1076 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
1072 1077 if not path:
1073 1078 raise error.Abort((b'default repository not configured!'),
1074 1079 hint=(b"see 'hg help config.paths'"))
1075 1080 dest = path.pushloc or path.loc
1076 1081 ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
1077 1082 other = hg.peer(repo, opts, dest)
1078 1083
1079 1084 # easier to perform discovery through the operation
1080 1085 op = exchange.pushoperation(repo, other)
1081 1086 exchange._pushdiscoverychangeset(op)
1082 1087
1083 1088 remotesubset = op.fallbackheads
1084 1089
1085 1090 with other.commandexecutor() as e:
1086 1091 remotephases = e.callcommand(b'listkeys',
1087 1092 {b'namespace': b'phases'}).result()
1088 1093 del other
1089 1094 publishing = remotephases.get(b'publishing', False)
1090 1095 if publishing:
1091 1096 ui.status((b'publishing: yes\n'))
1092 1097 else:
1093 1098 ui.status((b'publishing: no\n'))
1094 1099
1095 1100 nodemap = repo.changelog.nodemap
1096 1101 nonpublishroots = 0
1097 1102 for nhex, phase in remotephases.iteritems():
1098 1103 if nhex == b'publishing': # ignore data related to publish option
1099 1104 continue
1100 1105 node = bin(nhex)
1101 1106 if node in nodemap and int(phase):
1102 1107 nonpublishroots += 1
1103 1108 ui.status((b'number of roots: %d\n') % len(remotephases))
1104 1109 ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
1105 1110 def d():
1106 1111 phases.remotephasessummary(repo,
1107 1112 remotesubset,
1108 1113 remotephases)
1109 1114 timer(d)
1110 1115 fm.end()
1111 1116
1112 1117 @command(b'perfmanifest',[
1113 1118 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1114 1119 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1115 1120 ] + formatteropts, b'REV|NODE')
1116 1121 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1117 1122 """benchmark the time to read a manifest from disk and return a usable
1118 1123 dict-like object
1119 1124
1120 1125 Manifest caches are cleared before retrieval."""
1121 1126 opts = _byteskwargs(opts)
1122 1127 timer, fm = gettimer(ui, opts)
1123 1128 if not manifest_rev:
1124 1129 ctx = scmutil.revsingle(repo, rev, rev)
1125 1130 t = ctx.manifestnode()
1126 1131 else:
1127 1132 from mercurial.node import bin
1128 1133
1129 1134 if len(rev) == 40:
1130 1135 t = bin(rev)
1131 1136 else:
1132 1137 try:
1133 1138 rev = int(rev)
1134 1139
1135 1140 if util.safehasattr(repo.manifestlog, b'getstorage'):
1136 1141 t = repo.manifestlog.getstorage(b'').node(rev)
1137 1142 else:
1138 1143 t = repo.manifestlog._revlog.lookup(rev)
1139 1144 except ValueError:
1140 1145 raise error.Abort(b'manifest revision must be integer or full '
1141 1146 b'node')
1142 1147 def d():
1143 1148 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1144 1149 repo.manifestlog[t].read()
1145 1150 timer(d)
1146 1151 fm.end()
1147 1152
1148 1153 @command(b'perfchangeset', formatteropts)
1149 1154 def perfchangeset(ui, repo, rev, **opts):
1150 1155 opts = _byteskwargs(opts)
1151 1156 timer, fm = gettimer(ui, opts)
1152 1157 n = scmutil.revsingle(repo, rev).node()
1153 1158 def d():
1154 1159 repo.changelog.read(n)
1155 1160 #repo.changelog._cache = None
1156 1161 timer(d)
1157 1162 fm.end()
1158 1163
1159 1164 @command(b'perfignore', formatteropts)
1160 1165 def perfignore(ui, repo, **opts):
1161 1166 """benchmark operation related to computing ignore"""
1162 1167 opts = _byteskwargs(opts)
1163 1168 timer, fm = gettimer(ui, opts)
1164 1169 dirstate = repo.dirstate
1165 1170
1166 1171 def setupone():
1167 1172 dirstate.invalidate()
1168 1173 clearfilecache(dirstate, b'_ignore')
1169 1174
1170 1175 def runone():
1171 1176 dirstate._ignore
1172 1177
1173 1178 timer(runone, setup=setupone, title=b"load")
1174 1179 fm.end()
1175 1180
1176 1181 @command(b'perfindex', [
1177 1182 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1178 1183 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1179 1184 ] + formatteropts)
1180 1185 def perfindex(ui, repo, **opts):
1181 1186 """benchmark index creation time followed by a lookup
1182 1187
1183 1188 The default is to look `tip` up. Depending on the index implementation,
1184 1189 the revision looked up can matter. For example, an implementation
1185 1190 scanning the index will have a faster lookup time for `--rev tip` than for
1186 1191 `--rev 0`. The number of looked up revisions and their order can also
1187 1192 matter.
1188 1193
1189 1194 Examples of useful sets to test:
1190 1195 * tip
1191 1196 * 0
1192 1197 * -10:
1193 1198 * :10
1194 1199 * -10: + :10
1195 1200 * :10: + -10:
1196 1201 * -10000:
1197 1202 * -10000: + 0
1198 1203
1199 1204 It is not currently possible to check for lookup of a missing node. For
1200 1205 deeper lookup benchmarking, check out the `perfnodemap` command."""
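    # illustrative invocations using revsets from the list above:
    #   hg perfindex --rev tip
    #   hg perfindex --rev 0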
1201 1206 import mercurial.revlog
1202 1207 opts = _byteskwargs(opts)
1203 1208 timer, fm = gettimer(ui, opts)
1204 1209 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1205 1210 if opts[b'no_lookup']:
1206 1211 if opts['rev']:
1207 1212 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1208 1213 nodes = []
1209 1214 elif not opts[b'rev']:
1210 1215 nodes = [repo[b"tip"].node()]
1211 1216 else:
1212 1217 revs = scmutil.revrange(repo, opts[b'rev'])
1213 1218 cl = repo.changelog
1214 1219 nodes = [cl.node(r) for r in revs]
1215 1220
1216 1221 unfi = repo.unfiltered()
1217 1222 # find the filecache func directly
1218 1223 # This avoids polluting the benchmark with the filecache logic
1219 1224 makecl = unfi.__class__.changelog.func
1220 1225 def setup():
1221 1226 # probably not necessary, but for good measure
1222 1227 clearchangelog(unfi)
1223 1228 def d():
1224 1229 cl = makecl(unfi)
1225 1230 for n in nodes:
1226 1231 cl.rev(n)
1227 1232 timer(d, setup=setup)
1228 1233 fm.end()
1229 1234
1230 1235 @command(b'perfnodemap', [
1231 1236 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1232 1237 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1233 1238 ] + formatteropts)
1234 1239 def perfnodemap(ui, repo, **opts):
1235 1240 """benchmark the time necessary to look up revision from a cold nodemap
1236 1241
1237 1242 Depending on the implementation, the number and order of revisions we look
1238 1243 up can vary. Examples of useful sets to test:
1239 1244 * tip
1240 1245 * 0
1241 1246 * -10:
1242 1247 * :10
1243 1248 * -10: + :10
1244 1249 * :10: + -10:
1245 1250 * -10000:
1246 1251 * -10000: + 0
1247 1252
1248 1253 The command currently focuses on valid binary lookup. Benchmarking for
1249 1254 hexlookup, prefix lookup and missing lookup would also be valuable.
1250 1255 """
1251 1256 import mercurial.revlog
1252 1257 opts = _byteskwargs(opts)
1253 1258 timer, fm = gettimer(ui, opts)
1254 1259 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1255 1260
1256 1261 unfi = repo.unfiltered()
1257 1262 clearcaches = opts['clear_caches']
1258 1263 # find the filecache func directly
1259 1264 # This avoids polluting the benchmark with the filecache logic
1260 1265 makecl = unfi.__class__.changelog.func
1261 1266 if not opts[b'rev']:
1262 1267 raise error.Abort('use --rev to specify revisions to look up')
1263 1268 revs = scmutil.revrange(repo, opts[b'rev'])
1264 1269 cl = repo.changelog
1265 1270 nodes = [cl.node(r) for r in revs]
1266 1271
1267 1272 # use a list to pass reference to a nodemap from one closure to the next
1268 1273 nodeget = [None]
1269 1274 def setnodeget():
1270 1275 # probably not necessary, but for good measure
1271 1276 clearchangelog(unfi)
1272 1277 nodeget[0] = makecl(unfi).nodemap.get
1273 1278
1274 1279 def d():
1275 1280 get = nodeget[0]
1276 1281 for n in nodes:
1277 1282 get(n)
1278 1283
1279 1284 setup = None
1280 1285 if clearcaches:
1281 1286 def setup():
1282 1287 setnodeget()
1283 1288 else:
1284 1289 setnodeget()
1285 1290 d() # prewarm the data structure
1286 1291 timer(d, setup=setup)
1287 1292 fm.end()
1288 1293
1289 1294 @command(b'perfstartup', formatteropts)
1290 1295 def perfstartup(ui, repo, **opts):
1291 1296 opts = _byteskwargs(opts)
1292 1297 timer, fm = gettimer(ui, opts)
1293 1298 def d():
1294 1299 if os.name != r'nt':
1295 1300 os.system(b"HGRCPATH= %s version -q > /dev/null" %
1296 1301 fsencode(sys.argv[0]))
1297 1302 else:
1298 1303 os.environ[r'HGRCPATH'] = r' '
1299 1304 os.system(r"%s version -q > NUL" % sys.argv[0])
1300 1305 timer(d)
1301 1306 fm.end()
1302 1307
1303 1308 @command(b'perfparents', formatteropts)
1304 1309 def perfparents(ui, repo, **opts):
1305 1310 """benchmark the time necessary to fetch one changeset's parents.
1306 1311
1307 1312 The fetch is done using the `node identifier`, traversing all object layers
1308 1313 from the repository object. The first N revisions will be used for this
1309 1314 benchmark. N is controlled by the ``perf.parentscount`` config option
1310 1315 (default: 1000).
1311 1316 """
1312 1317 opts = _byteskwargs(opts)
1313 1318 timer, fm = gettimer(ui, opts)
1314 1319 # control the number of commits perfparents iterates over
1315 1320 # experimental config: perf.parentscount
1316 1321 count = getint(ui, b"perf", b"parentscount", 1000)
1317 1322 if len(repo.changelog) < count:
1318 1323 raise error.Abort(b"repo needs %d commits for this test" % count)
1319 1324 repo = repo.unfiltered()
1320 1325 nl = [repo.changelog.node(i) for i in _xrange(count)]
1321 1326 def d():
1322 1327 for n in nl:
1323 1328 repo.changelog.parents(n)
1324 1329 timer(d)
1325 1330 fm.end()
1326 1331
1327 1332 @command(b'perfctxfiles', formatteropts)
1328 1333 def perfctxfiles(ui, repo, x, **opts):
1329 1334 opts = _byteskwargs(opts)
1330 1335 x = int(x)
1331 1336 timer, fm = gettimer(ui, opts)
1332 1337 def d():
1333 1338 len(repo[x].files())
1334 1339 timer(d)
1335 1340 fm.end()
1336 1341
1337 1342 @command(b'perfrawfiles', formatteropts)
1338 1343 def perfrawfiles(ui, repo, x, **opts):
1339 1344 opts = _byteskwargs(opts)
1340 1345 x = int(x)
1341 1346 timer, fm = gettimer(ui, opts)
1342 1347 cl = repo.changelog
1343 1348 def d():
1344 1349 len(cl.read(x)[3])
1345 1350 timer(d)
1346 1351 fm.end()
1347 1352
1348 1353 @command(b'perflookup', formatteropts)
1349 1354 def perflookup(ui, repo, rev, **opts):
1350 1355 opts = _byteskwargs(opts)
1351 1356 timer, fm = gettimer(ui, opts)
1352 1357 timer(lambda: len(repo.lookup(rev)))
1353 1358 fm.end()
1354 1359
1355 1360 @command(b'perflinelogedits',
1356 1361 [(b'n', b'edits', 10000, b'number of edits'),
1357 1362 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1358 1363 ], norepo=True)
1359 1364 def perflinelogedits(ui, **opts):
1360 1365 from mercurial import linelog
1361 1366
1362 1367 opts = _byteskwargs(opts)
1363 1368
1364 1369 edits = opts[b'edits']
1365 1370 maxhunklines = opts[b'max_hunk_lines']
1366 1371
1367 1372 maxb1 = 100000
1368 1373 random.seed(0)
1369 1374 randint = random.randint
1370 1375 currentlines = 0
1371 1376 arglist = []
1372 1377 for rev in _xrange(edits):
1373 1378 a1 = randint(0, currentlines)
1374 1379 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1375 1380 b1 = randint(0, maxb1)
1376 1381 b2 = randint(b1, b1 + maxhunklines)
1377 1382 currentlines += (b2 - b1) - (a2 - a1)
1378 1383 arglist.append((rev, a1, a2, b1, b2))
1379 1384
1380 1385 def d():
1381 1386 ll = linelog.linelog()
1382 1387 for args in arglist:
1383 1388 ll.replacelines(*args)
1384 1389
1385 1390 timer, fm = gettimer(ui, opts)
1386 1391 timer(d)
1387 1392 fm.end()
1388 1393
1389 1394 @command(b'perfrevrange', formatteropts)
1390 1395 def perfrevrange(ui, repo, *specs, **opts):
1391 1396 opts = _byteskwargs(opts)
1392 1397 timer, fm = gettimer(ui, opts)
1393 1398 revrange = scmutil.revrange
1394 1399 timer(lambda: len(revrange(repo, specs)))
1395 1400 fm.end()
1396 1401
1397 1402 @command(b'perfnodelookup', formatteropts)
1398 1403 def perfnodelookup(ui, repo, rev, **opts):
1399 1404 opts = _byteskwargs(opts)
1400 1405 timer, fm = gettimer(ui, opts)
1401 1406 import mercurial.revlog
1402 1407 mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
1403 1408 n = scmutil.revsingle(repo, rev).node()
1404 1409 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1405 1410 def d():
1406 1411 cl.rev(n)
1407 1412 clearcaches(cl)
1408 1413 timer(d)
1409 1414 fm.end()
1410 1415
1411 1416 @command(b'perflog',
1412 1417 [(b'', b'rename', False, b'ask log to follow renames')
1413 1418 ] + formatteropts)
1414 1419 def perflog(ui, repo, rev=None, **opts):
1415 1420 opts = _byteskwargs(opts)
1416 1421 if rev is None:
1417 1422 rev=[]
1418 1423 timer, fm = gettimer(ui, opts)
1419 1424 ui.pushbuffer()
1420 1425 timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
1421 1426 copies=opts.get(b'rename')))
1422 1427 ui.popbuffer()
1423 1428 fm.end()
1424 1429
1425 1430 @command(b'perfmoonwalk', formatteropts)
1426 1431 def perfmoonwalk(ui, repo, **opts):
1427 1432 """benchmark walking the changelog backwards
1428 1433
1429 1434 This also loads the changelog data for each revision in the changelog.
1430 1435 """
1431 1436 opts = _byteskwargs(opts)
1432 1437 timer, fm = gettimer(ui, opts)
1433 1438 def moonwalk():
1434 1439 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1435 1440 ctx = repo[i]
1436 1441 ctx.branch() # read changelog data (in addition to the index)
1437 1442 timer(moonwalk)
1438 1443 fm.end()
1439 1444
1440 1445 @command(b'perftemplating',
1441 1446 [(b'r', b'rev', [], b'revisions to run the template on'),
1442 1447 ] + formatteropts)
1443 1448 def perftemplating(ui, repo, testedtemplate=None, **opts):
1444 1449 """test the rendering time of a given template"""
1445 1450 if makelogtemplater is None:
1446 1451 raise error.Abort((b"perftemplating not available with this Mercurial"),
1447 1452 hint=b"use 4.3 or later")
1448 1453
1449 1454 opts = _byteskwargs(opts)
1450 1455
1451 1456 nullui = ui.copy()
1452 1457 nullui.fout = open(os.devnull, r'wb')
1453 1458 nullui.disablepager()
1454 1459 revs = opts.get(b'rev')
1455 1460 if not revs:
1456 1461 revs = [b'all()']
1457 1462 revs = list(scmutil.revrange(repo, revs))
1458 1463
1459 1464 defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]'
1460 1465 b' {author|person}: {desc|firstline}\n')
1461 1466 if testedtemplate is None:
1462 1467 testedtemplate = defaulttemplate
1463 1468 displayer = makelogtemplater(nullui, repo, testedtemplate)
1464 1469 def format():
1465 1470 for r in revs:
1466 1471 ctx = repo[r]
1467 1472 displayer.show(ctx)
1468 1473 displayer.flush(ctx)
1469 1474
1470 1475 timer, fm = gettimer(ui, opts)
1471 1476 timer(format)
1472 1477 fm.end()
1473 1478
1474 1479 @command(b'perfhelper-mergecopies', formatteropts +
1475 1480 [
1476 1481 (b'r', b'revs', [], b'restrict search to these revisions'),
1477 1482 (b'', b'timing', False, b'provides extra data (costly)'),
1478 1483 ])
1479 1484 def perfhelpermergecopies(ui, repo, revs=[], **opts):
1480 1485 """find statistics about potential parameters for `perfmergecopies`
1481 1486
1482 1487 This command finds (base, p1, p2) triplets relevant for copytracing
1483 1488 benchmarking in the context of a merge. It reports values for some of the
1484 1489 parameters that impact merge copy tracing time during merge.
1485 1490
1486 1491 If `--timing` is set, rename detection is run and the associated timing
1487 1492 will be reported. The extra details come at the cost of slower command
1488 1493 execution.
1489 1494
1490 1495 Since rename detection is only run once, other factors might easily
1491 1496 affect the precision of the timing. However, it should give a good
1492 1497 approximation of which revision triplets are very costly.
1493 1498 """
1494 1499 opts = _byteskwargs(opts)
1495 1500 fm = ui.formatter(b'perf', opts)
1496 1501 dotiming = opts[b'timing']
1497 1502
1498 1503 output_template = [
1499 1504 ("base", "%(base)12s"),
1500 1505 ("p1", "%(p1.node)12s"),
1501 1506 ("p2", "%(p2.node)12s"),
1502 1507 ("p1.nb-revs", "%(p1.nbrevs)12d"),
1503 1508 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
1504 1509 ("p1.renames", "%(p1.renamedfiles)12d"),
1505 1510 ("p1.time", "%(p1.time)12.3f"),
1506 1511 ("p2.nb-revs", "%(p2.nbrevs)12d"),
1507 1512 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
1508 1513 ("p2.renames", "%(p2.renamedfiles)12d"),
1509 1514 ("p2.time", "%(p2.time)12.3f"),
1510 1515 ("renames", "%(nbrenamedfiles)12d"),
1511 1516 ("total.time", "%(time)12.3f"),
1512 1517 ]
1513 1518 if not dotiming:
1514 1519 output_template = [i for i in output_template
1515 1520 if not ('time' in i[0] or 'renames' in i[0])]
1516 1521 header_names = [h for (h, v) in output_template]
1517 1522 output = ' '.join([v for (h, v) in output_template]) + '\n'
1518 1523 header = ' '.join(['%12s'] * len(header_names)) + '\n'
1519 1524 fm.plain(header % tuple(header_names))
1520 1525
1521 1526 if not revs:
1522 1527 revs = ['all()']
1523 1528 revs = scmutil.revrange(repo, revs)
1524 1529
1525 1530 roi = repo.revs('merge() and %ld', revs)
1526 1531 for r in roi:
1527 1532 ctx = repo[r]
1528 1533 p1 = ctx.p1()
1529 1534 p2 = ctx.p2()
1530 1535 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
1531 1536 for b in bases:
1532 1537 b = repo[b]
1533 1538 p1missing = copies._computeforwardmissing(b, p1)
1534 1539 p2missing = copies._computeforwardmissing(b, p2)
1535 1540 data = {
1536 1541 b'base': b.hex(),
1537 1542 b'p1.node': p1.hex(),
1538 1543 b'p1.nbrevs': len(repo.revs('%d::%d', b.rev(), p1.rev())),
1539 1544 b'p1.nbmissingfiles': len(p1missing),
1540 1545 b'p2.node': p2.hex(),
1541 1546 b'p2.nbrevs': len(repo.revs('%d::%d', b.rev(), p2.rev())),
1542 1547 b'p2.nbmissingfiles': len(p2missing),
1543 1548 }
1544 1549 if dotiming:
1545 1550 begin = util.timer()
1546 1551 mergedata = copies.mergecopies(repo, p1, p2, b)
1547 1552 end = util.timer()
1548 1553 # not very stable timing since we did only one run
1549 1554 data['time'] = end - begin
1550 1555 # mergedata contains five dicts: "copy", "movewithdir",
1551 1556 # "diverge", "renamedelete" and "dirmove".
1552 1557 # The first 4 are about renamed files, so let's count them.
1553 1558 renames = len(mergedata[0])
1554 1559 renames += len(mergedata[1])
1555 1560 renames += len(mergedata[2])
1556 1561 renames += len(mergedata[3])
1557 1562 data['nbrenamedfiles'] = renames
1558 1563 begin = util.timer()
1559 1564 p1renames = copies.pathcopies(b, p1)
1560 1565 end = util.timer()
1561 1566 data['p1.time'] = end - begin
1562 1567 begin = util.timer()
1563 1568 p2renames = copies.pathcopies(b, p2)
1564 1569 end = util.timer()
1565 1570 data['p2.time'] = end - begin
1566 1571 data['p1.renamedfiles'] = len(p1renames)
1567 1572 data['p2.renamedfiles'] = len(p2renames)
1568 1573 fm.startitem()
1569 1574 fm.data(**data)
1570 1575 # make node pretty for the human output
1571 1576 out = data.copy()
1572 1577 out['base'] = fm.hexfunc(b.node())
1573 1578 out['p1.node'] = fm.hexfunc(p1.node())
1574 1579 out['p2.node'] = fm.hexfunc(p2.node())
1575 1580 fm.plain(output % out)
1576 1581
1577 1582 fm.end()
1578 1583
1579 1584 @command(b'perfhelper-pathcopies', formatteropts +
1580 1585 [
1581 1586 (b'r', b'revs', [], b'restrict search to these revisions'),
1582 1587 (b'', b'timing', False, b'provides extra data (costly)'),
1583 1588 ])
1584 1589 def perfhelperpathcopies(ui, repo, revs=[], **opts):
1585 1590 """find statistic about potential parameters for the `perftracecopies`
1586 1591
1587 1592 This command finds source-destination pairs relevant for copytracing testing.
1588 1593 It reports values for some of the parameters that impact copy tracing time.
1589 1594
1590 1595 If `--timing` is set, rename detection is run and the associated timing
1591 1596 will be reported. The extra details comes at the cost of a slower command
1592 1597 execution.
1593 1598
1594 1599 Since the rename detection is only run once, other factors might easily
1595 1600 affect the precision of the timing. However it should give a good
1596 1601 approximation of which revision pairs are very costly.
1597 1602 """
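# An illustrative invocation (scanning every merge in the repository, with
# the costly timing columns enabled):
#   $ hg perfhelper-pathcopies --timing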
1598 1603 opts = _byteskwargs(opts)
1599 1604 fm = ui.formatter(b'perf', opts)
1600 1605 dotiming = opts[b'timing']
1601 1606
1602 1607 if dotiming:
1603 1608 header = '%12s %12s %12s %12s %12s %12s\n'
1604 1609 output = ("%(source)12s %(destination)12s "
1605 1610 "%(nbrevs)12d %(nbmissingfiles)12d "
1606 1611 "%(nbrenamedfiles)12d %(time)18.5f\n")
1607 1612 header_names = ("source", "destination", "nb-revs", "nb-files",
1608 1613 "nb-renames", "time")
1609 1614 fm.plain(header % header_names)
1610 1615 else:
1611 1616 header = '%12s %12s %12s %12s\n'
1612 1617 output = ("%(source)12s %(destination)12s "
1613 1618 "%(nbrevs)12d %(nbmissingfiles)12d\n")
1614 1619 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
1615 1620
1616 1621 if not revs:
1617 1622 revs = ['all()']
1618 1623 revs = scmutil.revrange(repo, revs)
1619 1624
1620 1625 roi = repo.revs('merge() and %ld', revs)
1621 1626 for r in roi:
1622 1627 ctx = repo[r]
1623 1628 p1 = ctx.p1().rev()
1624 1629 p2 = ctx.p2().rev()
1625 1630 bases = repo.changelog._commonancestorsheads(p1, p2)
1626 1631 for p in (p1, p2):
1627 1632 for b in bases:
1628 1633 base = repo[b]
1629 1634 parent = repo[p]
1630 1635 missing = copies._computeforwardmissing(base, parent)
1631 1636 if not missing:
1632 1637 continue
1633 1638 data = {
1634 1639 b'source': base.hex(),
1635 1640 b'destination': parent.hex(),
1636 1641 b'nbrevs': len(repo.revs('%d::%d', b, p)),
1637 1642 b'nbmissingfiles': len(missing),
1638 1643 }
1639 1644 if dotiming:
1640 1645 begin = util.timer()
1641 1646 renames = copies.pathcopies(base, parent)
1642 1647 end = util.timer()
1643 1648 # not very stable timing since we did only one run
1644 1649 data['time'] = end - begin
1645 1650 data['nbrenamedfiles'] = len(renames)
1646 1651 fm.startitem()
1647 1652 fm.data(**data)
1648 1653 out = data.copy()
1649 1654 out['source'] = fm.hexfunc(base.node())
1650 1655 out['destination'] = fm.hexfunc(parent.node())
1651 1656 fm.plain(output % out)
1652 1657
1653 1658 fm.end()
1654 1659
1655 1660 @command(b'perfcca', formatteropts)
1656 1661 def perfcca(ui, repo, **opts):
1657 1662 opts = _byteskwargs(opts)
1658 1663 timer, fm = gettimer(ui, opts)
1659 1664 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
1660 1665 fm.end()
1661 1666
1662 1667 @command(b'perffncacheload', formatteropts)
1663 1668 def perffncacheload(ui, repo, **opts):
1664 1669 opts = _byteskwargs(opts)
1665 1670 timer, fm = gettimer(ui, opts)
1666 1671 s = repo.store
1667 1672 def d():
1668 1673 s.fncache._load()
1669 1674 timer(d)
1670 1675 fm.end()
1671 1676
1672 1677 @command(b'perffncachewrite', formatteropts)
1673 1678 def perffncachewrite(ui, repo, **opts):
1674 1679 opts = _byteskwargs(opts)
1675 1680 timer, fm = gettimer(ui, opts)
1676 1681 s = repo.store
1677 1682 lock = repo.lock()
1678 1683 s.fncache._load()
1679 1684 tr = repo.transaction(b'perffncachewrite')
1680 1685 tr.addbackup(b'fncache')
1681 1686 def d():
1682 1687 s.fncache._dirty = True
1683 1688 s.fncache.write(tr)
1684 1689 timer(d)
1685 1690 tr.close()
1686 1691 lock.release()
1687 1692 fm.end()
1688 1693
1689 1694 @command(b'perffncacheencode', formatteropts)
1690 1695 def perffncacheencode(ui, repo, **opts):
1691 1696 opts = _byteskwargs(opts)
1692 1697 timer, fm = gettimer(ui, opts)
1693 1698 s = repo.store
1694 1699 s.fncache._load()
1695 1700 def d():
1696 1701 for p in s.fncache.entries:
1697 1702 s.encode(p)
1698 1703 timer(d)
1699 1704 fm.end()
1700 1705
1701 1706 def _bdiffworker(q, blocks, xdiff, ready, done):
1702 1707 while not done.is_set():
1703 1708 pair = q.get()
1704 1709 while pair is not None:
1705 1710 if xdiff:
1706 1711 mdiff.bdiff.xdiffblocks(*pair)
1707 1712 elif blocks:
1708 1713 mdiff.bdiff.blocks(*pair)
1709 1714 else:
1710 1715 mdiff.textdiff(*pair)
1711 1716 q.task_done()
1712 1717 pair = q.get()
1713 1718 q.task_done() # for the None one
1714 1719 with ready:
1715 1720 ready.wait()
1716 1721
1717 1722 def _manifestrevision(repo, mnode):
1718 1723 ml = repo.manifestlog
1719 1724
1720 1725 if util.safehasattr(ml, b'getstorage'):
1721 1726 store = ml.getstorage(b'')
1722 1727 else:
1723 1728 store = ml._revlog
1724 1729
1725 1730 return store.revision(mnode)
1726 1731
1727 1732 @command(b'perfbdiff', revlogopts + formatteropts + [
1728 1733 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1729 1734 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
1730 1735 (b'', b'threads', 0, b'number of threads to use (disable with 0)'),
1731 1736 (b'', b'blocks', False, b'test computing diffs into blocks'),
1732 1737 (b'', b'xdiff', False, b'use xdiff algorithm'),
1733 1738 ],
1734 1739
1735 1740 b'-c|-m|FILE REV')
1736 1741 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
1737 1742 """benchmark a bdiff between revisions
1738 1743
1739 1744 By default, benchmark a bdiff between its delta parent and itself.
1740 1745
1741 1746 With ``--count``, benchmark bdiffs between delta parents and self for N
1742 1747 revisions starting at the specified revision.
1743 1748
1744 1749 With ``--alldata``, assume the requested revision is a changeset and
1745 1750 measure bdiffs for all changes related to that changeset (manifest
1746 1751 and filelogs).
1747 1752 """
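# Illustrative invocations (revision numbers are arbitrary examples):
#   $ hg perfbdiff -m 1000 --count 50    # manifest bdiffs for 50 revisions
#   $ hg perfbdiff --alldata 1000        # every text tied to changeset 1000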
1748 1753 opts = _byteskwargs(opts)
1749 1754
1750 1755 if opts[b'xdiff'] and not opts[b'blocks']:
1751 1756 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
1752 1757
1753 1758 if opts[b'alldata']:
1754 1759 opts[b'changelog'] = True
1755 1760
1756 1761 if opts.get(b'changelog') or opts.get(b'manifest'):
1757 1762 file_, rev = None, file_
1758 1763 elif rev is None:
1759 1764 raise error.CommandError(b'perfbdiff', b'invalid arguments')
1760 1765
1761 1766 blocks = opts[b'blocks']
1762 1767 xdiff = opts[b'xdiff']
1763 1768 textpairs = []
1764 1769
1765 1770 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
1766 1771
1767 1772 startrev = r.rev(r.lookup(rev))
1768 1773 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1769 1774 if opts[b'alldata']:
1770 1775 # Load revisions associated with changeset.
1771 1776 ctx = repo[rev]
1772 1777 mtext = _manifestrevision(repo, ctx.manifestnode())
1773 1778 for pctx in ctx.parents():
1774 1779 pman = _manifestrevision(repo, pctx.manifestnode())
1775 1780 textpairs.append((pman, mtext))
1776 1781
1777 1782 # Load filelog revisions by iterating manifest delta.
1778 1783 man = ctx.manifest()
1779 1784 pman = ctx.p1().manifest()
1780 1785 for filename, change in pman.diff(man).items():
1781 1786 fctx = repo.file(filename)
1782 1787 f1 = fctx.revision(change[0][0] or -1)
1783 1788 f2 = fctx.revision(change[1][0] or -1)
1784 1789 textpairs.append((f1, f2))
1785 1790 else:
1786 1791 dp = r.deltaparent(rev)
1787 1792 textpairs.append((r.revision(dp), r.revision(rev)))
1788 1793
1789 1794 withthreads = threads > 0
1790 1795 if not withthreads:
1791 1796 def d():
1792 1797 for pair in textpairs:
1793 1798 if xdiff:
1794 1799 mdiff.bdiff.xdiffblocks(*pair)
1795 1800 elif blocks:
1796 1801 mdiff.bdiff.blocks(*pair)
1797 1802 else:
1798 1803 mdiff.textdiff(*pair)
1799 1804 else:
1800 1805 q = queue()
1801 1806 for i in _xrange(threads):
1802 1807 q.put(None)
1803 1808 ready = threading.Condition()
1804 1809 done = threading.Event()
1805 1810 for i in _xrange(threads):
1806 1811 threading.Thread(target=_bdiffworker,
1807 1812 args=(q, blocks, xdiff, ready, done)).start()
1808 1813 q.join()
1809 1814 def d():
1810 1815 for pair in textpairs:
1811 1816 q.put(pair)
1812 1817 for i in _xrange(threads):
1813 1818 q.put(None)
1814 1819 with ready:
1815 1820 ready.notify_all()
1816 1821 q.join()
1817 1822 timer, fm = gettimer(ui, opts)
1818 1823 timer(d)
1819 1824 fm.end()
1820 1825
1821 1826 if withthreads:
1822 1827 done.set()
1823 1828 for i in _xrange(threads):
1824 1829 q.put(None)
1825 1830 with ready:
1826 1831 ready.notify_all()
1827 1832
1828 1833 @command(b'perfunidiff', revlogopts + formatteropts + [
1829 1834 (b'', b'count', 1, b'number of revisions to test (when using --startrev)'),
1830 1835 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
1831 1836 ], b'-c|-m|FILE REV')
1832 1837 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
1833 1838 """benchmark a unified diff between revisions
1834 1839
1835 1840 This doesn't include any copy tracing - it's just a unified diff
1836 1841 of the texts.
1837 1842
1838 1843 By default, benchmark a diff between its delta parent and itself.
1839 1844
1840 1845 With ``--count``, benchmark diffs between delta parents and self for N
1841 1846 revisions starting at the specified revision.
1842 1847
1843 1848 With ``--alldata``, assume the requested revision is a changeset and
1844 1849 measure diffs for all changes related to that changeset (manifest
1845 1850 and filelogs).
1846 1851 """
1847 1852 opts = _byteskwargs(opts)
1848 1853 if opts[b'alldata']:
1849 1854 opts[b'changelog'] = True
1850 1855
1851 1856 if opts.get(b'changelog') or opts.get(b'manifest'):
1852 1857 file_, rev = None, file_
1853 1858 elif rev is None:
1854 1859 raise error.CommandError(b'perfunidiff', b'invalid arguments')
1855 1860
1856 1861 textpairs = []
1857 1862
1858 1863 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
1859 1864
1860 1865 startrev = r.rev(r.lookup(rev))
1861 1866 for rev in range(startrev, min(startrev + count, len(r) - 1)):
1862 1867 if opts[b'alldata']:
1863 1868 # Load revisions associated with changeset.
1864 1869 ctx = repo[rev]
1865 1870 mtext = _manifestrevision(repo, ctx.manifestnode())
1866 1871 for pctx in ctx.parents():
1867 1872 pman = _manifestrevision(repo, pctx.manifestnode())
1868 1873 textpairs.append((pman, mtext))
1869 1874
1870 1875 # Load filelog revisions by iterating manifest delta.
1871 1876 man = ctx.manifest()
1872 1877 pman = ctx.p1().manifest()
1873 1878 for filename, change in pman.diff(man).items():
1874 1879 fctx = repo.file(filename)
1875 1880 f1 = fctx.revision(change[0][0] or -1)
1876 1881 f2 = fctx.revision(change[1][0] or -1)
1877 1882 textpairs.append((f1, f2))
1878 1883 else:
1879 1884 dp = r.deltaparent(rev)
1880 1885 textpairs.append((r.revision(dp), r.revision(rev)))
1881 1886
1882 1887 def d():
1883 1888 for left, right in textpairs:
1884 1889 # The date strings don't matter, so we pass empty strings.
1885 1890 headerlines, hunks = mdiff.unidiff(
1886 1891 left, b'', right, b'', b'left', b'right', binary=False)
1887 1892 # consume iterators in roughly the way patch.py does
1888 1893 b'\n'.join(headerlines)
1889 1894 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
1890 1895 timer, fm = gettimer(ui, opts)
1891 1896 timer(d)
1892 1897 fm.end()
1893 1898
1894 1899 @command(b'perfdiffwd', formatteropts)
1895 1900 def perfdiffwd(ui, repo, **opts):
1896 1901 """Profile diff of working directory changes"""
1897 1902 opts = _byteskwargs(opts)
1898 1903 timer, fm = gettimer(ui, opts)
1899 1904 options = {
1900 1905 'w': 'ignore_all_space',
1901 1906 'b': 'ignore_space_change',
1902 1907 'B': 'ignore_blank_lines',
1903 1908 }
1904 1909
1905 1910 for diffopt in ('', 'w', 'b', 'B', 'wB'):
1906 1911 opts = dict((options[c], b'1') for c in diffopt)
1907 1912 def d():
1908 1913 ui.pushbuffer()
1909 1914 commands.diff(ui, repo, **opts)
1910 1915 ui.popbuffer()
1911 1916 diffopt = diffopt.encode('ascii')
1912 1917 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
1913 1918 timer(d, title=title)
1914 1919 fm.end()
1915 1920
1916 1921 @command(b'perfrevlogindex', revlogopts + formatteropts,
1917 1922 b'-c|-m|FILE')
1918 1923 def perfrevlogindex(ui, repo, file_=None, **opts):
1919 1924 """Benchmark operations against a revlog index.
1920 1925
1921 1926 This tests constructing a revlog instance, reading index data,
1922 1927 parsing index data, and performing various operations related to
1923 1928 index data.
1924 1929 """
1925 1930
1926 1931 opts = _byteskwargs(opts)
1927 1932
1928 1933 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
1929 1934
1930 1935 opener = getattr(rl, 'opener') # trick linter
1931 1936 indexfile = rl.indexfile
1932 1937 data = opener.read(indexfile)
1933 1938
1934 1939 header = struct.unpack(b'>I', data[0:4])[0]
1935 1940 version = header & 0xFFFF
1936 1941 if version == 1:
1937 1942 revlogio = revlog.revlogio()
1938 1943 inline = header & (1 << 16)
1939 1944 else:
1940 1945 raise error.Abort((b'unsupported revlog version: %d') % version)
1941 1946
1942 1947 rllen = len(rl)
1943 1948
1944 1949 node0 = rl.node(0)
1945 1950 node25 = rl.node(rllen // 4)
1946 1951 node50 = rl.node(rllen // 2)
1947 1952 node75 = rl.node(rllen // 4 * 3)
1948 1953 node100 = rl.node(rllen - 1)
1949 1954
1950 1955 allrevs = range(rllen)
1951 1956 allrevsrev = list(reversed(allrevs))
1952 1957 allnodes = [rl.node(rev) for rev in range(rllen)]
1953 1958 allnodesrev = list(reversed(allnodes))
1954 1959
1955 1960 def constructor():
1956 1961 revlog.revlog(opener, indexfile)
1957 1962
1958 1963 def read():
1959 1964 with opener(indexfile) as fh:
1960 1965 fh.read()
1961 1966
1962 1967 def parseindex():
1963 1968 revlogio.parseindex(data, inline)
1964 1969
1965 1970 def getentry(revornode):
1966 1971 index = revlogio.parseindex(data, inline)[0]
1967 1972 index[revornode]
1968 1973
1969 1974 def getentries(revs, count=1):
1970 1975 index = revlogio.parseindex(data, inline)[0]
1971 1976
1972 1977 for i in range(count):
1973 1978 for rev in revs:
1974 1979 index[rev]
1975 1980
1976 1981 def resolvenode(node):
1977 1982 nodemap = revlogio.parseindex(data, inline)[1]
1978 1983 # This only works for the C code.
1979 1984 if nodemap is None:
1980 1985 return
1981 1986
1982 1987 try:
1983 1988 nodemap[node]
1984 1989 except error.RevlogError:
1985 1990 pass
1986 1991
1987 1992 def resolvenodes(nodes, count=1):
1988 1993 nodemap = revlogio.parseindex(data, inline)[1]
1989 1994 if nodemap is None:
1990 1995 return
1991 1996
1992 1997 for i in range(count):
1993 1998 for node in nodes:
1994 1999 try:
1995 2000 nodemap[node]
1996 2001 except error.RevlogError:
1997 2002 pass
1998 2003
1999 2004 benches = [
2000 2005 (constructor, b'revlog constructor'),
2001 2006 (read, b'read'),
2002 2007 (parseindex, b'create index object'),
2003 2008 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2004 2009 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2005 2010 (lambda: resolvenode(node0), b'look up node at rev 0'),
2006 2011 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2007 2012 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2008 2013 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2009 2014 (lambda: resolvenode(node100), b'look up node at tip'),
2010 2015 # 2x variation is to measure caching impact.
2011 2016 (lambda: resolvenodes(allnodes),
2012 2017 b'look up all nodes (forward)'),
2013 2018 (lambda: resolvenodes(allnodes, 2),
2014 2019 b'look up all nodes 2x (forward)'),
2015 2020 (lambda: resolvenodes(allnodesrev),
2016 2021 b'look up all nodes (reverse)'),
2017 2022 (lambda: resolvenodes(allnodesrev, 2),
2018 2023 b'look up all nodes 2x (reverse)'),
2019 2024 (lambda: getentries(allrevs),
2020 2025 b'retrieve all index entries (forward)'),
2021 2026 (lambda: getentries(allrevs, 2),
2022 2027 b'retrieve all index entries 2x (forward)'),
2023 2028 (lambda: getentries(allrevsrev),
2024 2029 b'retrieve all index entries (reverse)'),
2025 2030 (lambda: getentries(allrevsrev, 2),
2026 2031 b'retrieve all index entries 2x (reverse)'),
2027 2032 ]
2028 2033
2029 2034 for fn, title in benches:
2030 2035 timer, fm = gettimer(ui, opts)
2031 2036 timer(fn, title=title)
2032 2037 fm.end()
2033 2038
2034 2039 @command(b'perfrevlogrevisions', revlogopts + formatteropts +
2035 2040 [(b'd', b'dist', 100, b'distance between the revisions'),
2036 2041 (b's', b'startrev', 0, b'revision to start reading at'),
2037 2042 (b'', b'reverse', False, b'read in reverse')],
2038 2043 b'-c|-m|FILE')
2039 2044 def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False,
2040 2045 **opts):
2041 2046 """Benchmark reading a series of revisions from a revlog.
2042 2047
2043 2048 By default, we read every ``-d/--dist`` revision from 0 to tip of
2044 2049 the specified revlog.
2045 2050
2046 2051 The start revision can be defined via ``-s/--startrev``.
2047 2052 """
2048 2053 opts = _byteskwargs(opts)
2049 2054
2050 2055 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
2051 2056 rllen = getlen(ui)(rl)
2052 2057
2053 2058 if startrev < 0:
2054 2059 startrev = rllen + startrev
2055 2060
2056 2061 def d():
2057 2062 rl.clearcaches()
2058 2063
2059 2064 beginrev = startrev
2060 2065 endrev = rllen
2061 2066 dist = opts[b'dist']
2062 2067
2063 2068 if reverse:
2064 2069 beginrev, endrev = endrev - 1, beginrev - 1
2065 2070 dist = -1 * dist
2066 2071
2067 2072 for x in _xrange(beginrev, endrev, dist):
2068 2073 # Old revisions don't support passing int.
2069 2074 n = rl.node(x)
2070 2075 rl.revision(n)
2071 2076
2072 2077 timer, fm = gettimer(ui, opts)
2073 2078 timer(d)
2074 2079 fm.end()
2075 2080
2076 2081 @command(b'perfrevlogwrite', revlogopts + formatteropts +
2077 2082 [(b's', b'startrev', 1000, b'revision to start writing at'),
2078 2083 (b'', b'stoprev', -1, b'last revision to write'),
2079 2084 (b'', b'count', 3, b'number of passes to perform'),
2080 2085 (b'', b'details', False, b'print timing for every revision tested'),
2081 2086 (b'', b'source', b'full', b'the kind of data fed into the revlog'),
2082 2087 (b'', b'lazydeltabase', True, b'try the provided delta first'),
2083 2088 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
2084 2089 ],
2085 2090 b'-c|-m|FILE')
2086 2091 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
2087 2092 """Benchmark writing a series of revisions to a revlog.
2088 2093
2089 2094 Possible source values are:
2090 2095 * `full`: add from a full text (default).
2091 2096 * `parent-1`: add from a delta to the first parent
2092 2097 * `parent-2`: add from a delta to the second parent if it exists
2093 2098 (use a delta from the first parent otherwise)
2094 2099 * `parent-smallest`: add from the smallest delta (either p1 or p2)
2095 2100 * `storage`: add from the existing precomputed deltas
2096 2101
2097 2102 Note: This performance command measures performance in a custom way. As a
2098 2103 result some of the global configuration of the 'perf' command does not
2099 2104 apply to it:
2100 2105
2101 2106 * ``pre-run``: disabled
2102 2107
2103 2108 * ``profile-benchmark``: disabled
2104 2109
2105 2110 * ``run-limits``: disabled, use --count instead
2106 2111 """
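# An illustrative invocation (the revision number is an arbitrary example):
#   $ hg perfrevlogwrite -c --startrev 1000 --source parent-smallest
# rewrites the changelog from revision 1000 onward, feeding each revision in
# as the smaller of its two parent deltas.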
2107 2112 opts = _byteskwargs(opts)
2108 2113
2109 2114 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
2110 2115 rllen = getlen(ui)(rl)
2111 2116 if startrev < 0:
2112 2117 startrev = rllen + startrev
2113 2118 if stoprev < 0:
2114 2119 stoprev = rllen + stoprev
2115 2120
2116 2121 lazydeltabase = opts['lazydeltabase']
2117 2122 source = opts['source']
2118 2123 clearcaches = opts['clear_caches']
2119 2124 validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
2120 2125 b'storage')
2121 2126 if source not in validsource:
2122 2127 raise error.Abort('invalid source type: %s' % source)
2123 2128
2124 2129 ### actually gather results
2125 2130 count = opts['count']
2126 2131 if count <= 0:
2127 2132 raise error.Abort('invalid run count: %d' % count)
2128 2133 allresults = []
2129 2134 for c in range(count):
2130 2135 timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
2131 2136 lazydeltabase=lazydeltabase,
2132 2137 clearcaches=clearcaches)
2133 2138 allresults.append(timing)
2134 2139
2135 2140 ### consolidate the results in a single list
2136 2141 results = []
2137 2142 for idx, (rev, t) in enumerate(allresults[0]):
2138 2143 ts = [t]
2139 2144 for other in allresults[1:]:
2140 2145 orev, ot = other[idx]
2141 2146 assert orev == rev
2142 2147 ts.append(ot)
2143 2148 results.append((rev, ts))
2144 2149 resultcount = len(results)
2145 2150
2146 2151 ### Compute and display relevant statistics
2147 2152
2148 2153 # get a formatter
2149 2154 fm = ui.formatter(b'perf', opts)
2150 2155 displayall = ui.configbool(b"perf", b"all-timing", False)
2151 2156
2152 2157 # print individual details if requested
2153 2158 if opts['details']:
2154 2159 for idx, item in enumerate(results, 1):
2155 2160 rev, data = item
2156 2161 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
2157 2162 formatone(fm, data, title=title, displayall=displayall)
2158 2163
2159 2164 # sorts results by median time
2160 2165 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
2161 2166 # list of (name, index) to display
2162 2167 relevants = [
2163 2168 ("min", 0),
2164 2169 ("10%", resultcount * 10 // 100),
2165 2170 ("25%", resultcount * 25 // 100),
2166 2171 ("50%", resultcount * 50 // 100),
2167 2172 ("75%", resultcount * 75 // 100),
2168 2173 ("90%", resultcount * 90 // 100),
2169 2174 ("95%", resultcount * 95 // 100),
2170 2175 ("99%", resultcount * 99 // 100),
2171 2176 ("99.9%", resultcount * 999 // 1000),
2172 2177 ("99.99%", resultcount * 9999 // 10000),
2173 2178 ("99.999%", resultcount * 99999 // 100000),
2174 2179 ("max", -1),
2175 2180 ]
2176 2181 if not ui.quiet:
2177 2182 for name, idx in relevants:
2178 2183 data = results[idx]
2179 2184 title = '%s of %d, rev %d' % (name, resultcount, data[0])
2180 2185 formatone(fm, data[1], title=title, displayall=displayall)
2181 2186
2182 2187 # XXX summing that many floats will not be very precise; we ignore this
2183 2188 # fact for now
2184 2189 totaltime = []
2185 2190 for item in allresults:
2186 2191 totaltime.append((sum(x[1][0] for x in item),
2187 2192 sum(x[1][1] for x in item),
2188 2193 sum(x[1][2] for x in item),)
2189 2194 )
2190 2195 formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
2191 2196 displayall=displayall)
2192 2197 fm.end()
2193 2198
2194 2199 class _faketr(object):
2195 2200 def add(s, x, y, z=None):
2196 2201 return None
2197 2202
2198 2203 def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
2199 2204 lazydeltabase=True, clearcaches=True):
2200 2205 timings = []
2201 2206 tr = _faketr()
2202 2207 with _temprevlog(ui, orig, startrev) as dest:
2203 2208 dest._lazydeltabase = lazydeltabase
2204 2209 revs = list(orig.revs(startrev, stoprev))
2205 2210 total = len(revs)
2206 2211 topic = 'adding'
2207 2212 if runidx is not None:
2208 2213 topic += ' (run #%d)' % runidx
2209 2214 # Support both old and new progress API
2210 2215 if util.safehasattr(ui, 'makeprogress'):
2211 2216 progress = ui.makeprogress(topic, unit='revs', total=total)
2212 2217 def updateprogress(pos):
2213 2218 progress.update(pos)
2214 2219 def completeprogress():
2215 2220 progress.complete()
2216 2221 else:
2217 2222 def updateprogress(pos):
2218 2223 ui.progress(topic, pos, unit='revs', total=total)
2219 2224 def completeprogress():
2220 2225 ui.progress(topic, None, unit='revs', total=total)
2221 2226
2222 2227 for idx, rev in enumerate(revs):
2223 2228 updateprogress(idx)
2224 2229 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
2225 2230 if clearcaches:
2226 2231 dest.index.clearcaches()
2227 2232 dest.clearcaches()
2228 2233 with timeone() as r:
2229 2234 dest.addrawrevision(*addargs, **addkwargs)
2230 2235 timings.append((rev, r[0]))
2231 2236 updateprogress(total)
2232 2237 completeprogress()
2233 2238 return timings
2234 2239
2235 2240 def _getrevisionseed(orig, rev, tr, source):
2236 2241 from mercurial.node import nullid
2237 2242
2238 2243 linkrev = orig.linkrev(rev)
2239 2244 node = orig.node(rev)
2240 2245 p1, p2 = orig.parents(node)
2241 2246 flags = orig.flags(rev)
2242 2247 cachedelta = None
2243 2248 text = None
2244 2249
2245 2250 if source == b'full':
2246 2251 text = orig.revision(rev)
2247 2252 elif source == b'parent-1':
2248 2253 baserev = orig.rev(p1)
2249 2254 cachedelta = (baserev, orig.revdiff(p1, rev))
2250 2255 elif source == b'parent-2':
2251 2256 parent = p2
2252 2257 if p2 == nullid:
2253 2258 parent = p1
2254 2259 baserev = orig.rev(parent)
2255 2260 cachedelta = (baserev, orig.revdiff(parent, rev))
2256 2261 elif source == b'parent-smallest':
2257 2262 p1diff = orig.revdiff(p1, rev)
2258 2263 parent = p1
2259 2264 diff = p1diff
2260 2265 if p2 != nullid:
2261 2266 p2diff = orig.revdiff(p2, rev)
2262 2267 if len(p1diff) > len(p2diff):
2263 2268 parent = p2
2264 2269 diff = p2diff
2265 2270 baserev = orig.rev(parent)
2266 2271 cachedelta = (baserev, diff)
2267 2272 elif source == b'storage':
2268 2273 baserev = orig.deltaparent(rev)
2269 2274 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
2270 2275
2271 2276 return ((text, tr, linkrev, p1, p2),
2272 2277 {'node': node, 'flags': flags, 'cachedelta': cachedelta})
2273 2278
2274 2279 @contextlib.contextmanager
2275 2280 def _temprevlog(ui, orig, truncaterev):
2276 2281 from mercurial import vfs as vfsmod
2277 2282
2278 2283 if orig._inline:
2279 2284 raise error.Abort('not supporting inline revlog (yet)')
2280 2285 revlogkwargs = {}
2281 2286 k = 'upperboundcomp'
2282 2287 if util.safehasattr(orig, k):
2283 2288 revlogkwargs[k] = getattr(orig, k)
2284 2289
2285 2290 origindexpath = orig.opener.join(orig.indexfile)
2286 2291 origdatapath = orig.opener.join(orig.datafile)
2287 2292 indexname = 'revlog.i'
2288 2293 dataname = 'revlog.d'
2289 2294
2290 2295 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
2291 2296 try:
2292 2297 # copy the data file in a temporary directory
2293 2298 ui.debug('copying data in %s\n' % tmpdir)
2294 2299 destindexpath = os.path.join(tmpdir, 'revlog.i')
2295 2300 destdatapath = os.path.join(tmpdir, 'revlog.d')
2296 2301 shutil.copyfile(origindexpath, destindexpath)
2297 2302 shutil.copyfile(origdatapath, destdatapath)
2298 2303
2299 2304 # remove the data we want to add again
2300 2305 ui.debug('truncating data to be rewritten\n')
2301 2306 with open(destindexpath, 'ab') as index:
2302 2307 index.seek(0)
2303 2308 index.truncate(truncaterev * orig._io.size)
2304 2309 with open(destdatapath, 'ab') as data:
2305 2310 data.seek(0)
2306 2311 data.truncate(orig.start(truncaterev))
2307 2312
2308 2313 # instantiate a new revlog from the temporary copy
2309 2314 ui.debug('instantiating revlog from the truncated copy\n')
2310 2315 vfs = vfsmod.vfs(tmpdir)
2311 2316 vfs.options = getattr(orig.opener, 'options', None)
2312 2317
2313 2318 dest = revlog.revlog(vfs,
2314 2319 indexfile=indexname,
2315 2320 datafile=dataname, **revlogkwargs)
2316 2321 if dest._inline:
2317 2322 raise error.Abort('not supporting inline revlog (yet)')
2318 2323 # make sure internals are initialized
2319 2324 dest.revision(len(dest) - 1)
2320 2325 yield dest
2321 2326 del dest, vfs
2322 2327 finally:
2323 2328 shutil.rmtree(tmpdir, True)
2324 2329
2325 2330 @command(b'perfrevlogchunks', revlogopts + formatteropts +
2326 2331 [(b'e', b'engines', b'', b'compression engines to use'),
2327 2332 (b's', b'startrev', 0, b'revision to start at')],
2328 2333 b'-c|-m|FILE')
2329 2334 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
2330 2335 """Benchmark operations on revlog chunks.
2331 2336
2332 2337 Logically, each revlog is a collection of fulltext revisions. However,
2333 2338 stored within each revlog are "chunks" of possibly compressed data. This
2334 2339 data needs to be read and decompressed or compressed and written.
2335 2340
2336 2341 This command measures the time it takes to read+decompress and recompress
2337 2342 chunks in a revlog. It effectively isolates I/O and compression performance.
2338 2343 For measurements of higher-level operations like resolving revisions,
2339 2344 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
2340 2345 """
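# An illustrative invocation, assuming both engines are compiled into this
# build of Mercurial:
#   $ hg perfrevlogchunks -m --engines 'zlib,zstd'
# compares chunk read/decompression with recompression under each engine.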
2341 2346 opts = _byteskwargs(opts)
2342 2347
2343 2348 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
2344 2349
2345 2350 # _chunkraw was renamed to _getsegmentforrevs.
2346 2351 try:
2347 2352 segmentforrevs = rl._getsegmentforrevs
2348 2353 except AttributeError:
2349 2354 segmentforrevs = rl._chunkraw
2350 2355
2351 2356 # Verify engines argument.
2352 2357 if engines:
2353 2358 engines = set(e.strip() for e in engines.split(b','))
2354 2359 for engine in engines:
2355 2360 try:
2356 2361 util.compressionengines[engine]
2357 2362 except KeyError:
2358 2363 raise error.Abort(b'unknown compression engine: %s' % engine)
2359 2364 else:
2360 2365 engines = []
2361 2366 for e in util.compengines:
2362 2367 engine = util.compengines[e]
2363 2368 try:
2364 2369 if engine.available():
2365 2370 engine.revlogcompressor().compress(b'dummy')
2366 2371 engines.append(e)
2367 2372 except NotImplementedError:
2368 2373 pass
2369 2374
2370 2375 revs = list(rl.revs(startrev, len(rl) - 1))
2371 2376
2372 2377 def rlfh(rl):
2373 2378 if rl._inline:
2374 2379 return getsvfs(repo)(rl.indexfile)
2375 2380 else:
2376 2381 return getsvfs(repo)(rl.datafile)
2377 2382
2378 2383 def doread():
2379 2384 rl.clearcaches()
2380 2385 for rev in revs:
2381 2386 segmentforrevs(rev, rev)
2382 2387
2383 2388 def doreadcachedfh():
2384 2389 rl.clearcaches()
2385 2390 fh = rlfh(rl)
2386 2391 for rev in revs:
2387 2392 segmentforrevs(rev, rev, df=fh)
2388 2393
2389 2394 def doreadbatch():
2390 2395 rl.clearcaches()
2391 2396 segmentforrevs(revs[0], revs[-1])
2392 2397
2393 2398 def doreadbatchcachedfh():
2394 2399 rl.clearcaches()
2395 2400 fh = rlfh(rl)
2396 2401 segmentforrevs(revs[0], revs[-1], df=fh)
2397 2402
2398 2403 def dochunk():
2399 2404 rl.clearcaches()
2400 2405 fh = rlfh(rl)
2401 2406 for rev in revs:
2402 2407 rl._chunk(rev, df=fh)
2403 2408
2404 2409 chunks = [None]
2405 2410
2406 2411 def dochunkbatch():
2407 2412 rl.clearcaches()
2408 2413 fh = rlfh(rl)
2409 2414 # Save chunks as a side-effect.
2410 2415 chunks[0] = rl._chunks(revs, df=fh)
2411 2416
2412 2417 def docompress(compressor):
2413 2418 rl.clearcaches()
2414 2419
2415 2420 try:
2416 2421 # Swap in the requested compression engine.
2417 2422 oldcompressor = rl._compressor
2418 2423 rl._compressor = compressor
2419 2424 for chunk in chunks[0]:
2420 2425 rl.compress(chunk)
2421 2426 finally:
2422 2427 rl._compressor = oldcompressor
2423 2428
2424 2429 benches = [
2425 2430 (lambda: doread(), b'read'),
2426 2431 (lambda: doreadcachedfh(), b'read w/ reused fd'),
2427 2432 (lambda: doreadbatch(), b'read batch'),
2428 2433 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
2429 2434 (lambda: dochunk(), b'chunk'),
2430 2435 (lambda: dochunkbatch(), b'chunk batch'),
2431 2436 ]
2432 2437
2433 2438 for engine in sorted(engines):
2434 2439 compressor = util.compengines[engine].revlogcompressor()
2435 2440 benches.append((functools.partial(docompress, compressor),
2436 2441 b'compress w/ %s' % engine))
2437 2442
2438 2443 for fn, title in benches:
2439 2444 timer, fm = gettimer(ui, opts)
2440 2445 timer(fn, title=title)
2441 2446 fm.end()
2442 2447
2443 2448 @command(b'perfrevlogrevision', revlogopts + formatteropts +
2444 2449 [(b'', b'cache', False, b'use caches instead of clearing')],
2445 2450 b'-c|-m|FILE REV')
2446 2451 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
2447 2452 """Benchmark obtaining a revlog revision.
2448 2453
2449 2454 Obtaining a revlog revision consists of roughly the following steps:
2450 2455
2451 2456 1. Compute the delta chain
2452 2457 2. Slice the delta chain if applicable
2453 2458 3. Obtain the raw chunks for that delta chain
2454 2459 4. Decompress each raw chunk
2455 2460 5. Apply binary patches to obtain fulltext
2456 2461 6. Verify hash of fulltext
2457 2462
2458 2463 This command measures the time spent in each of these phases.
2459 2464 """
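# An illustrative invocation (the revision number is an arbitrary example):
#   $ hg perfrevlogrevision -m 1000 --cache
# times each phase of rebuilding the manifest text for revision 1000 while
# keeping revlog caches warm between phases.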
2460 2465 opts = _byteskwargs(opts)
2461 2466
2462 2467 if opts.get(b'changelog') or opts.get(b'manifest'):
2463 2468 file_, rev = None, file_
2464 2469 elif rev is None:
2465 2470 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
2466 2471
2467 2472 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
2468 2473
2469 2474 # _chunkraw was renamed to _getsegmentforrevs.
2470 2475 try:
2471 2476 segmentforrevs = r._getsegmentforrevs
2472 2477 except AttributeError:
2473 2478 segmentforrevs = r._chunkraw
2474 2479
2475 2480 node = r.lookup(rev)
2476 2481 rev = r.rev(node)
2477 2482
2478 2483 def getrawchunks(data, chain):
2479 2484 start = r.start
2480 2485 length = r.length
2481 2486 inline = r._inline
2482 2487 iosize = r._io.size
2483 2488 buffer = util.buffer
2484 2489
2485 2490 chunks = []
2486 2491 ladd = chunks.append
2487 2492 for idx, item in enumerate(chain):
2488 2493 offset = start(item[0])
2489 2494 bits = data[idx]
2490 2495 for rev in item:
2491 2496 chunkstart = start(rev)
2492 2497 if inline:
2493 2498 chunkstart += (rev + 1) * iosize
2494 2499 chunklength = length(rev)
2495 2500 ladd(buffer(bits, chunkstart - offset, chunklength))
2496 2501
2497 2502 return chunks
2498 2503
2499 2504 def dodeltachain(rev):
2500 2505 if not cache:
2501 2506 r.clearcaches()
2502 2507 r._deltachain(rev)
2503 2508
2504 2509 def doread(chain):
2505 2510 if not cache:
2506 2511 r.clearcaches()
2507 2512 for item in slicedchain:
2508 2513 segmentforrevs(item[0], item[-1])
2509 2514
2510 2515 def doslice(r, chain, size):
2511 2516 for s in slicechunk(r, chain, targetsize=size):
2512 2517 pass
2513 2518
2514 2519 def dorawchunks(data, chain):
2515 2520 if not cache:
2516 2521 r.clearcaches()
2517 2522 getrawchunks(data, chain)
2518 2523
2519 2524 def dodecompress(chunks):
2520 2525 decomp = r.decompress
2521 2526 for chunk in chunks:
2522 2527 decomp(chunk)
2523 2528
2524 2529 def dopatch(text, bins):
2525 2530 if not cache:
2526 2531 r.clearcaches()
2527 2532 mdiff.patches(text, bins)
2528 2533
2529 2534 def dohash(text):
2530 2535 if not cache:
2531 2536 r.clearcaches()
2532 2537 r.checkhash(text, node, rev=rev)
2533 2538
2534 2539 def dorevision():
2535 2540 if not cache:
2536 2541 r.clearcaches()
2537 2542 r.revision(node)
2538 2543
2539 2544 try:
2540 2545 from mercurial.revlogutils.deltas import slicechunk
2541 2546 except ImportError:
2542 2547 slicechunk = getattr(revlog, '_slicechunk', None)
2543 2548
2544 2549 size = r.length(rev)
2545 2550 chain = r._deltachain(rev)[0]
2546 2551 if not getattr(r, '_withsparseread', False):
2547 2552 slicedchain = (chain,)
2548 2553 else:
2549 2554 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
2550 2555 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
2551 2556 rawchunks = getrawchunks(data, slicedchain)
2552 2557 bins = r._chunks(chain)
2553 2558 text = bytes(bins[0])
2554 2559 bins = bins[1:]
2555 2560 text = mdiff.patches(text, bins)
2556 2561
2557 2562 benches = [
2558 2563 (lambda: dorevision(), b'full'),
2559 2564 (lambda: dodeltachain(rev), b'deltachain'),
2560 2565 (lambda: doread(chain), b'read'),
2561 2566 ]
2562 2567
2563 2568 if getattr(r, '_withsparseread', False):
2564 2569 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
2565 2570 benches.append(slicing)
2566 2571
2567 2572 benches.extend([
2568 2573 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
2569 2574 (lambda: dodecompress(rawchunks), b'decompress'),
2570 2575 (lambda: dopatch(text, bins), b'patch'),
2571 2576 (lambda: dohash(text), b'hash'),
2572 2577 ])
2573 2578
2574 2579 timer, fm = gettimer(ui, opts)
2575 2580 for fn, title in benches:
2576 2581 timer(fn, title=title)
2577 2582 fm.end()
2578 2583
2579 2584 @command(b'perfrevset',
2580 2585 [(b'C', b'clear', False, b'clear volatile cache between each call.'),
2581 2586 (b'', b'contexts', False, b'obtain changectx for each revision')]
2582 2587 + formatteropts, b"REVSET")
2583 2588 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
2584 2589 """benchmark the execution time of a revset
2585 2590
2586 2591 Use the --clear option if you need to evaluate the impact of building the
2587 2592 volatile revision set caches on revset execution. Volatile caches hold
2588 2593 filtering and obsolescence related data."""
2589 2594 opts = _byteskwargs(opts)
2590 2595
2591 2596 timer, fm = gettimer(ui, opts)
2592 2597 def d():
2593 2598 if clear:
2594 2599 repo.invalidatevolatilesets()
2595 2600 if contexts:
2596 2601 for ctx in repo.set(expr): pass
2597 2602 else:
2598 2603 for r in repo.revs(expr): pass
2599 2604 timer(d)
2600 2605 fm.end()
2601 2606
2602 2607 @command(b'perfvolatilesets',
2603 2608 [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
2604 2609 ] + formatteropts)
2605 2610 def perfvolatilesets(ui, repo, *names, **opts):
2606 2611 """benchmark the computation of various volatile sets
2607 2612 
2608 2613 Volatile sets compute elements related to filtering and obsolescence."""
2609 2614 opts = _byteskwargs(opts)
2610 2615 timer, fm = gettimer(ui, opts)
2611 2616 repo = repo.unfiltered()
2612 2617
2613 2618 def getobs(name):
2614 2619 def d():
2615 2620 repo.invalidatevolatilesets()
2616 2621 if opts[b'clear_obsstore']:
2617 2622 clearfilecache(repo, b'obsstore')
2618 2623 obsolete.getrevs(repo, name)
2619 2624 return d
2620 2625
2621 2626 allobs = sorted(obsolete.cachefuncs)
2622 2627 if names:
2623 2628 allobs = [n for n in allobs if n in names]
2624 2629
2625 2630 for name in allobs:
2626 2631 timer(getobs(name), title=name)
2627 2632
2628 2633 def getfiltered(name):
2629 2634 def d():
2630 2635 repo.invalidatevolatilesets()
2631 2636 if opts[b'clear_obsstore']:
2632 2637 clearfilecache(repo, b'obsstore')
2633 2638 repoview.filterrevs(repo, name)
2634 2639 return d
2635 2640
2636 2641 allfilter = sorted(repoview.filtertable)
2637 2642 if names:
2638 2643 allfilter = [n for n in allfilter if n in names]
2639 2644
2640 2645 for name in allfilter:
2641 2646 timer(getfiltered(name), title=name)
2642 2647 fm.end()
2643 2648
2644 2649 @command(b'perfbranchmap',
2645 2650 [(b'f', b'full', False,
2646 2651 b'Includes build time of subset'),
2647 2652 (b'', b'clear-revbranch', False,
2648 2653 b'purge the revbranch cache between computation'),
2649 2654 ] + formatteropts)
2650 2655 def perfbranchmap(ui, repo, *filternames, **opts):
2651 2656 """benchmark the update of a branchmap
2652 2657
2653 2658 This benchmarks the full repo.branchmap() call with read and write disabled
2654 2659 """
2655 2660 opts = _byteskwargs(opts)
2656 2661 full = opts.get(b"full", False)
2657 2662 clear_revbranch = opts.get(b"clear_revbranch", False)
2658 2663 timer, fm = gettimer(ui, opts)
2659 2664 def getbranchmap(filtername):
2660 2665 """generate a benchmark function for the filtername"""
2661 2666 if filtername is None:
2662 2667 view = repo
2663 2668 else:
2664 2669 view = repo.filtered(filtername)
2665 2670 if util.safehasattr(view._branchcaches, '_per_filter'):
2666 2671 filtered = view._branchcaches._per_filter
2667 2672 else:
2668 2673 # older versions
2669 2674 filtered = view._branchcaches
2670 2675 def d():
2671 2676 if clear_revbranch:
2672 2677 repo.revbranchcache()._clear()
2673 2678 if full:
2674 2679 view._branchcaches.clear()
2675 2680 else:
2676 2681 filtered.pop(filtername, None)
2677 2682 view.branchmap()
2678 2683 return d
2679 2684 # add filter in smaller subset to bigger subset
2680 2685 possiblefilters = set(repoview.filtertable)
2681 2686 if filternames:
2682 2687 possiblefilters &= set(filternames)
2683 2688 subsettable = getbranchmapsubsettable()
2684 2689 allfilters = []
2685 2690 while possiblefilters:
2686 2691 for name in possiblefilters:
2687 2692 subset = subsettable.get(name)
2688 2693 if subset not in possiblefilters:
2689 2694 break
2690 2695 else:
2691 2696 assert False, b'subset cycle %s!' % possiblefilters
2692 2697 allfilters.append(name)
2693 2698 possiblefilters.remove(name)
2694 2699
2695 2700 # warm the cache
2696 2701 if not full:
2697 2702 for name in allfilters:
2698 2703 repo.filtered(name).branchmap()
2699 2704 if not filternames or b'unfiltered' in filternames:
2700 2705 # add unfiltered
2701 2706 allfilters.append(None)
2702 2707
2703 2708 if util.safehasattr(branchmap.branchcache, 'fromfile'):
2704 2709 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
2705 2710 branchcacheread.set(classmethod(lambda *args: None))
2706 2711 else:
2707 2712 # older versions
2708 2713 branchcacheread = safeattrsetter(branchmap, b'read')
2709 2714 branchcacheread.set(lambda *args: None)
2710 2715 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
2711 2716 branchcachewrite.set(lambda *args: None)
2712 2717 try:
2713 2718 for name in allfilters:
2714 2719 printname = name
2715 2720 if name is None:
2716 2721 printname = b'unfiltered'
2717 2722 timer(getbranchmap(name), title=str(printname))
2718 2723 finally:
2719 2724 branchcacheread.restore()
2720 2725 branchcachewrite.restore()
2721 2726 fm.end()
2722 2727
2723 2728 @command(b'perfbranchmapupdate', [
2724 2729 (b'', b'base', [], b'subset of revisions to start from'),
2725 2730 (b'', b'target', [], b'subset of revisions to end with'),
2726 2731 (b'', b'clear-caches', False, b'clear caches between each run')
2727 2732 ] + formatteropts)
2728 2733 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
2729 2734 """benchmark branchmap update from <base> revs to <target> revs
2730 2735
2731 2736 If `--clear-caches` is passed, the following items will be reset before
2732 2737 each update:
2733 2738 * the changelog instance and associated indexes
2734 2739 * the rev-branch-cache instance
2735 2740
2736 2741 Examples:
2737 2742
2738 2743 # update for the last revision only
2739 2744 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
2740 2745
2741 2746 # update for a change coming with a new branch
2742 2747 $ hg perfbranchmapupdate --base 'stable' --target 'default'
2743 2748 """
2744 2749 from mercurial import branchmap
2745 2750 from mercurial import repoview
2746 2751 opts = _byteskwargs(opts)
2747 2752 timer, fm = gettimer(ui, opts)
2748 2753 clearcaches = opts[b'clear_caches']
2749 2754 unfi = repo.unfiltered()
2750 2755 x = [None] # used to pass data between closure
2751 2756
2752 2757 # we use a `list` here to avoid possible side effect from smartset
2753 2758 baserevs = list(scmutil.revrange(repo, base))
2754 2759 targetrevs = list(scmutil.revrange(repo, target))
2755 2760 if not baserevs:
2756 2761 raise error.Abort(b'no revisions selected for --base')
2757 2762 if not targetrevs:
2758 2763 raise error.Abort(b'no revisions selected for --target')
2759 2764
2760 2765 # make sure the target branchmap also contains the one in the base
2761 2766 targetrevs = list(set(baserevs) | set(targetrevs))
2762 2767 targetrevs.sort()
2763 2768
2764 2769 cl = repo.changelog
2765 2770 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
2766 2771 allbaserevs.sort()
2767 2772 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
2768 2773
2769 2774 newrevs = list(alltargetrevs.difference(allbaserevs))
2770 2775 newrevs.sort()
2771 2776
2772 2777 allrevs = frozenset(unfi.changelog.revs())
2773 2778 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
2774 2779 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
2775 2780
2776 2781 def basefilter(repo, visibilityexceptions=None):
2777 2782 return basefilterrevs
2778 2783
2779 2784 def targetfilter(repo, visibilityexceptions=None):
2780 2785 return targetfilterrevs
2781 2786
2782 2787 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
2783 2788 ui.status(msg % (len(allbaserevs), len(newrevs)))
2784 2789 if targetfilterrevs:
2785 2790 msg = b'(%d revisions still filtered)\n'
2786 2791 ui.status(msg % len(targetfilterrevs))
2787 2792
2788 2793 try:
2789 2794 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
2790 2795 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
2791 2796
2792 2797 baserepo = repo.filtered(b'__perf_branchmap_update_base')
2793 2798 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
2794 2799
2795 2800 # try to find an existing branchmap to reuse
2796 2801 subsettable = getbranchmapsubsettable()
2797 2802 candidatefilter = subsettable.get(None)
2798 2803 while candidatefilter is not None:
2799 2804 candidatebm = repo.filtered(candidatefilter).branchmap()
2800 2805 if candidatebm.validfor(baserepo):
2801 2806 filtered = repoview.filterrevs(repo, candidatefilter)
2802 2807 missing = [r for r in allbaserevs if r in filtered]
2803 2808 base = candidatebm.copy()
2804 2809 base.update(baserepo, missing)
2805 2810 break
2806 2811 candidatefilter = subsettable.get(candidatefilter)
2807 2812 else:
2808 2813 # no suitable subset was found
2809 2814 base = branchmap.branchcache()
2810 2815 base.update(baserepo, allbaserevs)
2811 2816
2812 2817 def setup():
2813 2818 x[0] = base.copy()
2814 2819 if clearcaches:
2815 2820 unfi._revbranchcache = None
2816 2821 clearchangelog(repo)
2817 2822
2818 2823 def bench():
2819 2824 x[0].update(targetrepo, newrevs)
2820 2825
2821 2826 timer(bench, setup=setup)
2822 2827 fm.end()
2823 2828 finally:
2824 2829 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
2825 2830 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
2826 2831
2827 2832 @command(b'perfbranchmapload', [
2828 2833 (b'f', b'filter', b'', b'Specify repoview filter'),
2829 2834 (b'', b'list', False, b'List branchmap filter caches'),
2830 2835 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
2831 2836
2832 2837 ] + formatteropts)
2833 2838 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
2834 2839 """benchmark reading the branchmap"""
2835 2840 opts = _byteskwargs(opts)
2836 2841 clearrevlogs = opts[b'clear_revlogs']
2837 2842
2838 2843 if list:
2839 2844 for name, kind, st in repo.cachevfs.readdir(stat=True):
2840 2845 if name.startswith(b'branch2'):
2841 2846 filtername = name.partition(b'-')[2] or b'unfiltered'
2842 2847 ui.status(b'%s - %s\n'
2843 2848 % (filtername, util.bytecount(st.st_size)))
2844 2849 return
2845 2850 if not filter:
2846 2851 filter = None
2847 2852 subsettable = getbranchmapsubsettable()
2848 2853 if filter is None:
2849 2854 repo = repo.unfiltered()
2850 2855 else:
2851 2856 repo = repoview.repoview(repo, filter)
2852 2857
2853 2858 repo.branchmap() # make sure we have a relevant, up to date branchmap
2854 2859
2855 2860 try:
2856 2861 fromfile = branchmap.branchcache.fromfile
2857 2862 except AttributeError:
2858 2863 # older versions
2859 2864 fromfile = branchmap.read
2860 2865
2861 2866 currentfilter = filter
2862 2867 # try once without timer, the filter may not be cached
2863 2868 while fromfile(repo) is None:
2864 2869 currentfilter = subsettable.get(currentfilter)
2865 2870 if currentfilter is None:
2866 2871 raise error.Abort(b'No branchmap cached for %s repo'
2867 2872 % (filter or b'unfiltered'))
2868 2873 repo = repo.filtered(currentfilter)
2869 2874 timer, fm = gettimer(ui, opts)
2870 2875 def setup():
2871 2876 if clearrevlogs:
2872 2877 clearchangelog(repo)
2873 2878 def bench():
2874 2879 fromfile(repo)
2875 2880 timer(bench, setup=setup)
2876 2881 fm.end()
2877 2882
2878 2883 @command(b'perfloadmarkers')
2879 2884 def perfloadmarkers(ui, repo):
2880 2885 """benchmark the time to parse the on-disk markers for a repo
2881 2886
2882 2887 Result is the number of markers in the repo."""
2883 2888 timer, fm = gettimer(ui)
2884 2889 svfs = getsvfs(repo)
2885 2890 timer(lambda: len(obsolete.obsstore(svfs)))
2886 2891 fm.end()
2887 2892
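# The benchmark below exercises util.lrucachedict in three modes (pure gets,
# pure inserts/sets, and a randomized mix), optionally with a total cost
# limit. An illustrative invocation:
#   $ hg perflrucachedict --size 100 --gets 100000 --costlimit 500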
2888 2893 @command(b'perflrucachedict', formatteropts +
2889 2894 [(b'', b'costlimit', 0, b'maximum total cost of items in cache'),
2890 2895 (b'', b'mincost', 0, b'smallest cost of items in cache'),
2891 2896 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
2892 2897 (b'', b'size', 4, b'size of cache'),
2893 2898 (b'', b'gets', 10000, b'number of key lookups'),
2894 2899 (b'', b'sets', 10000, b'number of key sets'),
2895 2900 (b'', b'mixed', 10000, b'number of mixed mode operations'),
2896 2901 (b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')],
2897 2902 norepo=True)
2898 2903 def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4,
2899 2904 gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts):
2900 2905 opts = _byteskwargs(opts)
2901 2906
2902 2907 def doinit():
2903 2908 for i in _xrange(10000):
2904 2909 util.lrucachedict(size)
2905 2910
2906 2911 costrange = list(range(mincost, maxcost + 1))
2907 2912
2908 2913 values = []
2909 2914 for i in _xrange(size):
2910 2915 values.append(random.randint(0, _maxint))
2911 2916
2912 2917 # Get mode fills the cache and tests raw lookup performance with no
2913 2918 # eviction.
2914 2919 getseq = []
2915 2920 for i in _xrange(gets):
2916 2921 getseq.append(random.choice(values))
2917 2922
2918 2923 def dogets():
2919 2924 d = util.lrucachedict(size)
2920 2925 for v in values:
2921 2926 d[v] = v
2922 2927 for key in getseq:
2923 2928 value = d[key]
2924 2929 value # silence pyflakes warning
2925 2930
2926 2931 def dogetscost():
2927 2932 d = util.lrucachedict(size, maxcost=costlimit)
2928 2933 for i, v in enumerate(values):
2929 2934 d.insert(v, v, cost=costs[i])
2930 2935 for key in getseq:
2931 2936 try:
2932 2937 value = d[key]
2933 2938 value # silence pyflakes warning
2934 2939 except KeyError:
2935 2940 pass
2936 2941
2937 2942 # Set mode tests insertion speed with cache eviction.
2938 2943 setseq = []
2939 2944 costs = []
2940 2945 for i in _xrange(sets):
2941 2946 setseq.append(random.randint(0, _maxint))
2942 2947 costs.append(random.choice(costrange))
2943 2948
2944 2949 def doinserts():
2945 2950 d = util.lrucachedict(size)
2946 2951 for v in setseq:
2947 2952 d.insert(v, v)
2948 2953
2949 2954 def doinsertscost():
2950 2955 d = util.lrucachedict(size, maxcost=costlimit)
2951 2956 for i, v in enumerate(setseq):
2952 2957 d.insert(v, v, cost=costs[i])
2953 2958
2954 2959 def dosets():
2955 2960 d = util.lrucachedict(size)
2956 2961 for v in setseq:
2957 2962 d[v] = v
2958 2963
2959 2964 # Mixed mode randomly performs gets and sets with eviction.
2960 2965 mixedops = []
2961 2966 for i in _xrange(mixed):
2962 2967 r = random.randint(0, 100)
2963 2968 if r < mixedgetfreq:
2964 2969 op = 0
2965 2970 else:
2966 2971 op = 1
2967 2972
2968 2973 mixedops.append((op,
2969 2974 random.randint(0, size * 2),
2970 2975 random.choice(costrange)))
2971 2976
2972 2977 def domixed():
2973 2978 d = util.lrucachedict(size)
2974 2979
2975 2980 for op, v, cost in mixedops:
2976 2981 if op == 0:
2977 2982 try:
2978 2983 d[v]
2979 2984 except KeyError:
2980 2985 pass
2981 2986 else:
2982 2987 d[v] = v
2983 2988
2984 2989 def domixedcost():
2985 2990 d = util.lrucachedict(size, maxcost=costlimit)
2986 2991
2987 2992 for op, v, cost in mixedops:
2988 2993 if op == 0:
2989 2994 try:
2990 2995 d[v]
2991 2996 except KeyError:
2992 2997 pass
2993 2998 else:
2994 2999 d.insert(v, v, cost=cost)
2995 3000
2996 3001 benches = [
2997 3002 (doinit, b'init'),
2998 3003 ]
2999 3004
3000 3005 if costlimit:
3001 3006 benches.extend([
3002 3007 (dogetscost, b'gets w/ cost limit'),
3003 3008 (doinsertscost, b'inserts w/ cost limit'),
3004 3009 (domixedcost, b'mixed w/ cost limit'),
3005 3010 ])
3006 3011 else:
3007 3012 benches.extend([
3008 3013 (dogets, b'gets'),
3009 3014 (doinserts, b'inserts'),
3010 3015 (dosets, b'sets'),
3011 3016 (domixed, b'mixed')
3012 3017 ])
3013 3018
3014 3019 for fn, title in benches:
3015 3020 timer, fm = gettimer(ui, opts)
3016 3021 timer(fn, title=title)
3017 3022 fm.end()
3018 3023
3019 3024 @command(b'perfwrite', formatteropts)
3020 3025 def perfwrite(ui, repo, **opts):
3021 3026 """microbenchmark ui.write
3022 3027 """
3023 3028 opts = _byteskwargs(opts)
3024 3029
3025 3030 timer, fm = gettimer(ui, opts)
3026 3031 def write():
3027 3032 for i in range(100000):
3028 3033 ui.write((b'Testing write performance\n'))
3029 3034 timer(write)
3030 3035 fm.end()
3031 3036
3032 3037 def uisetup(ui):
3033 3038 if (util.safehasattr(cmdutil, b'openrevlog') and
3034 3039 not util.safehasattr(commands, b'debugrevlogopts')):
3035 3040 # for "historical portability":
3036 3041 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
3037 3042 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
3038 3043 # openrevlog() should cause failure, because it has been
3039 3044 # available since 3.5 (or 49c583ca48c4).
3040 3045 def openrevlog(orig, repo, cmd, file_, opts):
3041 3046 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
3042 3047 raise error.Abort(b"This version doesn't support --dir option",
3043 3048 hint=b"use 3.5 or later")
3044 3049 return orig(repo, cmd, file_, opts)
3045 3050 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3046 3051
3047 3052 @command(b'perfprogress', formatteropts + [
3048 3053 (b'', b'topic', b'topic', b'topic for progress messages'),
3049 3054 (b'c', b'total', 1000000, b'total value we are progressing to'),
3050 3055 ], norepo=True)
3051 3056 def perfprogress(ui, topic=None, total=None, **opts):
3052 3057 """printing of progress bars"""
3053 3058 opts = _byteskwargs(opts)
3054 3059
3055 3060 timer, fm = gettimer(ui, opts)
3056 3061
3057 3062 def doprogress():
3058 3063 with ui.makeprogress(topic, total=total) as progress:
3059 3064 for i in pycompat.xrange(total):
3060 3065 progress.increment()
3061 3066
3062 3067 timer(doprogress)
3063 3068 fm.end()
@@ -1,850 +1,851 b''
1 1 # __init__.py - fsmonitor initialization and overrides
2 2 #
3 3 # Copyright 2013-2016 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''Faster status operations with the Watchman file monitor (EXPERIMENTAL)
9 9
10 10 Integrates the file-watching program Watchman with Mercurial to produce faster
11 11 status results.
12 12
13 13 On a particular Linux system, for a real-world repository with over 400,000
14 14 files hosted on ext4, vanilla `hg status` takes 1.3 seconds. On the same
15 15 system, with fsmonitor it takes about 0.3 seconds.
16 16
17 17 fsmonitor requires no configuration -- it will tell Watchman about your
18 18 repository as necessary. You'll need to install Watchman from
19 19 https://facebook.github.io/watchman/ and make sure it is in your PATH.
20 20
21 21 fsmonitor is incompatible with the largefiles and eol extensions, and
22 22 will disable itself if any of those are active.
23 23
24 24 The following configuration options exist:
25 25
26 26 ::
27 27
28 28 [fsmonitor]
29 29 mode = {off, on, paranoid}
30 30
31 31 When `mode = off`, fsmonitor will disable itself (similar to not loading the
32 32 extension at all). When `mode = on`, fsmonitor will be enabled (the default).
33 33 When `mode = paranoid`, fsmonitor will query both Watchman and the filesystem,
34 34 and ensure that the results are consistent.
35 35
36 36 ::
37 37
38 38 [fsmonitor]
39 39 timeout = (float)
40 40
41 41 A value, in seconds, that determines how long fsmonitor will wait for Watchman
42 42 to return results. Defaults to `2.0`.
43 43
44 44 ::
45 45
46 46 [fsmonitor]
47 47 blacklistusers = (list of userids)
48 48
49 49 A list of usernames for which fsmonitor will disable itself altogether.
50 50
51 51 ::
52 52
53 53 [fsmonitor]
54 54 walk_on_invalidate = (boolean)
55 55
56 56 Whether or not to walk the whole repo ourselves when our cached state has been
57 57 invalidated, for example when Watchman has been restarted or .hgignore rules
58 58 have been changed. Walking the repo in that case can result in competing for
59 59 I/O with Watchman. For large repos it is recommended to set this value to
60 60 false. You may wish to set this to true if you have a very fast filesystem
61 61 that can outpace the IPC overhead of getting the result data for the full repo
62 62 from Watchman. Defaults to false.
63 63
64 64 ::
65 65
66 66 [fsmonitor]
67 67 warn_when_unused = (boolean)
68 68
69 69 Whether to print a warning during certain operations when fsmonitor would be
70 70 beneficial to performance but isn't enabled.
71 71
72 72 ::
73 73
74 74 [fsmonitor]
75 75 warn_update_file_count = (integer)
76 76
77 77 If ``warn_when_unused`` is set and fsmonitor isn't enabled, a warning will
78 78 be printed during working directory updates if this many files will be
79 79 created.
80 80 '''
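
# For illustration, a combined configuration using the options documented
# above might look like this (the values shown are examples only):
#
#   [fsmonitor]
#   mode = on
#   timeout = 2.0
#   walk_on_invalidate = false
#   blacklistusers = buildbot, ciuser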
81 81
82 82 # Platforms Supported
83 83 # ===================
84 84 #
85 85 # **Linux:** *Stable*. Watchman and fsmonitor are both known to work reliably,
86 86 # even under severe loads.
87 87 #
88 88 # **Mac OS X:** *Stable*. The Mercurial test suite passes with fsmonitor
89 89 # turned on, on case-insensitive HFS+. There has been a reasonable amount of
90 90 # user testing under normal loads.
91 91 #
92 92 # **Solaris, BSD:** *Alpha*. watchman and fsmonitor are believed to work, but
93 93 # very little testing has been done.
94 94 #
95 95 # **Windows:** *Alpha*. Not in a release version of watchman or fsmonitor yet.
96 96 #
97 97 # Known Issues
98 98 # ============
99 99 #
100 100 # * fsmonitor will disable itself if any of the following extensions are
101 101 # enabled: largefiles, inotify, eol; or if the repository has subrepos.
102 102 # * fsmonitor will produce incorrect results if nested repos that are not
103 103 # subrepos exist. *Workaround*: add nested repo paths to your `.hgignore`.
104 104 #
105 105 # The issues related to nested repos and subrepos are probably not fundamental
106 106 # ones. Patches to fix them are welcome.
107 107
108 108 from __future__ import absolute_import
109 109
110 110 import codecs
111 111 import hashlib
112 112 import os
113 113 import stat
114 114 import sys
115 115 import tempfile
116 116 import weakref
117 117
118 118 from mercurial.i18n import _
119 119 from mercurial.node import (
120 120 hex,
121 121 )
122 122
123 123 from mercurial import (
124 124 context,
125 125 encoding,
126 126 error,
127 127 extensions,
128 128 localrepo,
129 129 merge,
130 130 pathutil,
131 131 pycompat,
132 132 registrar,
133 133 scmutil,
134 134 util,
135 135 )
136 136 from mercurial import match as matchmod
137 137
138 138 from . import (
139 139 pywatchman,
140 140 state,
141 141 watchmanclient,
142 142 )
143 143
144 144 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
145 145 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
146 146 # be specifying the version(s) of Mercurial they are tested with, or
147 147 # leave the attribute unspecified.
148 148 testedwith = 'ships-with-hg-core'
149 149
150 150 configtable = {}
151 151 configitem = registrar.configitem(configtable)
152 152
153 153 configitem('fsmonitor', 'mode',
154 154 default='on',
155 155 )
156 156 configitem('fsmonitor', 'walk_on_invalidate',
157 157 default=False,
158 158 )
159 159 configitem('fsmonitor', 'timeout',
160 160 default='2',
161 161 )
162 162 configitem('fsmonitor', 'blacklistusers',
163 163 default=list,
164 164 )
165 165 configitem('fsmonitor', 'watchman_exe',
166 166 default='watchman',
167 167 )
168 168 configitem('fsmonitor', 'verbose',
169 169 default=True,
170 experimental=True,
170 171 )
171 172 configitem('experimental', 'fsmonitor.transaction_notify',
172 173 default=False,
173 174 )
174 175
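The line added above passes an ``experimental`` argument to ``configitem()`` for ``fsmonitor.verbose``; the same pattern appears again later in this series for ``repack.chainorphansbysize``. A minimal sketch of the registration pattern, with a placeholder extension and option name::

    from mercurial import registrar

    configtable = {}
    configitem = registrar.configitem(configtable)

    # 'myext' and 'fancy-option' are placeholders; the experimental flag is
    # the argument being threaded through the registrar in this change.
    configitem('myext', 'fancy-option',
        default=False,
        experimental=True,
    )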
175 176 # This extension is incompatible with the following blacklisted extensions
176 177 # and will disable itself when encountering one of these:
177 178 _blacklist = ['largefiles', 'eol']
178 179
179 180 def debuginstall(ui, fm):
180 181 fm.write("fsmonitor-watchman",
181 182 _("fsmonitor checking for watchman binary... (%s)\n"),
182 183 ui.configpath("fsmonitor", "watchman_exe"))
183 184 root = tempfile.mkdtemp()
184 185 c = watchmanclient.client(ui, root)
185 186 err = None
186 187 try:
187 188 v = c.command("version")
188 189 fm.write("fsmonitor-watchman-version",
189 190 _(" watchman binary version %s\n"), v["version"])
190 191 except watchmanclient.Unavailable as e:
191 192 err = str(e)
192 193 fm.condwrite(err, "fsmonitor-watchman-error",
193 194 _(" watchman binary missing or broken: %s\n"), err)
194 195 return 1 if err else 0
195 196
196 197 def _handleunavailable(ui, state, ex):
197 198 """Exception handler for Watchman interaction exceptions"""
198 199 if isinstance(ex, watchmanclient.Unavailable):
199 200 # experimental config: fsmonitor.verbose
200 201 if ex.warn and ui.configbool('fsmonitor', 'verbose'):
201 202 if 'illegal_fstypes' not in str(ex):
202 203 ui.warn(str(ex) + '\n')
203 204 if ex.invalidate:
204 205 state.invalidate()
205 206 # experimental config: fsmonitor.verbose
206 207 if ui.configbool('fsmonitor', 'verbose'):
207 208 ui.log('fsmonitor', 'Watchman unavailable: %s\n', ex.msg)
208 209 else:
209 210 ui.log('fsmonitor', 'Watchman exception: %s\n', ex)
210 211
211 212 def _hashignore(ignore):
212 213 """Calculate hash for ignore patterns and filenames
213 214
214 215 If this information changes between Mercurial invocations, we can't
215 216 rely on Watchman information anymore and have to re-scan the working
216 217 copy.
217 218
218 219 """
219 220 sha1 = hashlib.sha1()
220 221 sha1.update(repr(ignore))
221 222 return sha1.hexdigest()
222 223
223 224 _watchmanencoding = pywatchman.encoding.get_local_encoding()
224 225 _fsencoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
225 226 _fixencoding = codecs.lookup(_watchmanencoding) != codecs.lookup(_fsencoding)
226 227
227 228 def _watchmantofsencoding(path):
228 229 """Fix path to match watchman and local filesystem encoding
229 230
230 231 Watchman's path encoding can differ from the filesystem encoding. For example,
231 232 on Windows, it's always utf-8.
232 233 """
233 234 try:
234 235 decoded = path.decode(_watchmanencoding)
235 236 except UnicodeDecodeError as e:
236 237 raise error.Abort(str(e), hint='watchman encoding error')
237 238
238 239 try:
239 240 encoded = decoded.encode(_fsencoding, 'strict')
240 241 except UnicodeEncodeError as e:
241 242 raise error.Abort(str(e))
242 243
243 244 return encoded
244 245
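``_watchmantofsencoding`` is only needed when ``_fixencoding`` is true, i.e. when Watchman's reported encoding and the local filesystem encoding differ. A self-contained illustration of the same round-trip, with made-up encodings and path::

    # Illustration only: pretend Watchman reports utf-8 while the local
    # filesystem uses latin-1.
    watchman_path = b'docs/caf\xc3\xa9.txt'
    fs_path = watchman_path.decode('utf-8').encode('latin-1', 'strict')
    # fs_path == b'docs/caf\xe9.txt'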
245 246 def overridewalk(orig, self, match, subrepos, unknown, ignored, full=True):
246 247 '''Replacement for dirstate.walk, hooking into Watchman.
247 248
248 249 Whenever full is False, ignored is False, and the Watchman client is
249 250 available, use Watchman combined with saved state to possibly return only a
250 251 subset of files.'''
251 252 def bail(reason):
252 253 self._ui.debug('fsmonitor: fallback to core status, %s\n' % reason)
253 254 return orig(match, subrepos, unknown, ignored, full=True)
254 255
255 256 if full:
256 257 return bail('full rewalk requested')
257 258 if ignored:
258 259 return bail('listing ignored files')
259 260 if not self._watchmanclient.available():
260 261 return bail('client unavailable')
261 262 state = self._fsmonitorstate
262 263 clock, ignorehash, notefiles = state.get()
263 264 if not clock:
264 265 if state.walk_on_invalidate:
265 266 return bail('no clock')
266 267 # Initial NULL clock value, see
267 268 # https://facebook.github.io/watchman/docs/clockspec.html
268 269 clock = 'c:0:0'
269 270 notefiles = []
270 271
271 272 ignore = self._ignore
272 273 dirignore = self._dirignore
273 274 if unknown:
274 275 if _hashignore(ignore) != ignorehash and clock != 'c:0:0':
275 276 # ignore list changed -- can't rely on Watchman state any more
276 277 if state.walk_on_invalidate:
277 278 return bail('ignore rules changed')
278 279 notefiles = []
279 280 clock = 'c:0:0'
280 281 else:
281 282 # always ignore
282 283 ignore = util.always
283 284 dirignore = util.always
284 285
285 286 matchfn = match.matchfn
286 287 matchalways = match.always()
287 288 dmap = self._map
288 289 if util.safehasattr(dmap, '_map'):
289 290 # for better performance, directly access the inner dirstate map if the
290 291 # standard dirstate implementation is in use.
291 292 dmap = dmap._map
292 293 nonnormalset = self._map.nonnormalset
293 294
294 295 copymap = self._map.copymap
295 296 getkind = stat.S_IFMT
296 297 dirkind = stat.S_IFDIR
297 298 regkind = stat.S_IFREG
298 299 lnkkind = stat.S_IFLNK
299 300 join = self._join
300 301 normcase = util.normcase
301 302 fresh_instance = False
302 303
303 304 exact = skipstep3 = False
304 305 if match.isexact(): # match.exact
305 306 exact = True
306 307 dirignore = util.always # skip step 2
307 308 elif match.prefix(): # match.match, no patterns
308 309 skipstep3 = True
309 310
310 311 if not exact and self._checkcase:
311 312 # note that even though we could receive directory entries, we're only
312 313 # interested in checking if a file with the same name exists. So only
313 314 # normalize files if possible.
314 315 normalize = self._normalizefile
315 316 skipstep3 = False
316 317 else:
317 318 normalize = None
318 319
319 320 # step 1: find all explicit files
320 321 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
321 322
322 323 skipstep3 = skipstep3 and not (work or dirsnotfound)
323 324 work = [d for d in work if not dirignore(d[0])]
324 325
325 326 if not work and (exact or skipstep3):
326 327 for s in subrepos:
327 328 del results[s]
328 329 del results['.hg']
329 330 return results
330 331
331 332 # step 2: query Watchman
332 333 try:
333 334 # Use the user-configured timeout for the query.
334 335 # Add a little slack over the top of the user query to allow for
335 336 # overheads while transferring the data
336 337 self._watchmanclient.settimeout(state.timeout + 0.1)
337 338 result = self._watchmanclient.command('query', {
338 339 'fields': ['mode', 'mtime', 'size', 'exists', 'name'],
339 340 'since': clock,
340 341 'expression': [
341 342 'not', [
342 343 'anyof', ['dirname', '.hg'],
343 344 ['name', '.hg', 'wholename']
344 345 ]
345 346 ],
346 347 'sync_timeout': int(state.timeout * 1000),
347 348 'empty_on_fresh_instance': state.walk_on_invalidate,
348 349 })
349 350 except Exception as ex:
350 351 _handleunavailable(self._ui, state, ex)
351 352 self._watchmanclient.clearconnection()
352 353 return bail('exception during run')
353 354 else:
354 355 # We need to propagate the last observed clock up so that we
355 356 # can use it for our next query
356 357 state.setlastclock(result['clock'])
357 358 if result['is_fresh_instance']:
358 359 if state.walk_on_invalidate:
359 360 state.invalidate()
360 361 return bail('fresh instance')
361 362 fresh_instance = True
362 363 # Ignore any prior notable files from the state info
363 364 notefiles = []
364 365
365 366 # for file paths which require normalization and we encounter a case
366 367 # collision, we store our own foldmap
367 368 if normalize:
368 369 foldmap = dict((normcase(k), k) for k in results)
369 370
370 371 switch_slashes = pycompat.ossep == '\\'
371 372 # The order of the results is, strictly speaking, undefined.
372 373 # For case changes on a case insensitive filesystem we may receive
373 374 # two entries, one with exists=True and another with exists=False.
374 375 # The exists=True entries in the same response should be interpreted
375 376 # as being happens-after the exists=False entries due to the way that
376 377 # Watchman tracks files. We use this property to reconcile deletes
377 378 # for name case changes.
378 379 for entry in result['files']:
379 380 fname = entry['name']
380 381 if _fixencoding:
381 382 fname = _watchmantofsencoding(fname)
382 383 if switch_slashes:
383 384 fname = fname.replace('\\', '/')
384 385 if normalize:
385 386 normed = normcase(fname)
386 387 fname = normalize(fname, True, True)
387 388 foldmap[normed] = fname
388 389 fmode = entry['mode']
389 390 fexists = entry['exists']
390 391 kind = getkind(fmode)
391 392
392 393 if '/.hg/' in fname or fname.endswith('/.hg'):
393 394 return bail('nested-repo-detected')
394 395
395 396 if not fexists:
396 397 # if marked as deleted and we don't already have a change
397 398 # record, mark it as deleted. If we already have an entry
398 399 # for fname then it was either part of walkexplicit or was
399 400 # an earlier result that was a case change
400 401 if fname not in results and fname in dmap and (
401 402 matchalways or matchfn(fname)):
402 403 results[fname] = None
403 404 elif kind == dirkind:
404 405 if fname in dmap and (matchalways or matchfn(fname)):
405 406 results[fname] = None
406 407 elif kind == regkind or kind == lnkkind:
407 408 if fname in dmap:
408 409 if matchalways or matchfn(fname):
409 410 results[fname] = entry
410 411 elif (matchalways or matchfn(fname)) and not ignore(fname):
411 412 results[fname] = entry
412 413 elif fname in dmap and (matchalways or matchfn(fname)):
413 414 results[fname] = None
414 415
415 416 # step 3: query notable files we don't already know about
416 417 # XXX try not to iterate over the entire dmap
417 418 if normalize:
418 419 # any notable files that have changed case will already be handled
419 420 # above, so just check membership in the foldmap
420 421 notefiles = set((normalize(f, True, True) for f in notefiles
421 422 if normcase(f) not in foldmap))
422 423 visit = set((f for f in notefiles if (f not in results and matchfn(f)
423 424 and (f in dmap or not ignore(f)))))
424 425
425 426 if not fresh_instance:
426 427 if matchalways:
427 428 visit.update(f for f in nonnormalset if f not in results)
428 429 visit.update(f for f in copymap if f not in results)
429 430 else:
430 431 visit.update(f for f in nonnormalset
431 432 if f not in results and matchfn(f))
432 433 visit.update(f for f in copymap
433 434 if f not in results and matchfn(f))
434 435 else:
435 436 if matchalways:
436 437 visit.update(f for f, st in dmap.iteritems() if f not in results)
437 438 visit.update(f for f in copymap if f not in results)
438 439 else:
439 440 visit.update(f for f, st in dmap.iteritems()
440 441 if f not in results and matchfn(f))
441 442 visit.update(f for f in copymap
442 443 if f not in results and matchfn(f))
443 444
444 445 audit = pathutil.pathauditor(self._root, cached=True).check
445 446 auditpass = [f for f in visit if audit(f)]
446 447 auditpass.sort()
447 448 auditfail = visit.difference(auditpass)
448 449 for f in auditfail:
449 450 results[f] = None
450 451
451 452 nf = iter(auditpass).next
452 453 for st in util.statfiles([join(f) for f in auditpass]):
453 454 f = nf()
454 455 if st or f in dmap:
455 456 results[f] = st
456 457
457 458 for s in subrepos:
458 459 del results[s]
459 460 del results['.hg']
460 461 return results
461 462
462 463 def overridestatus(
463 464 orig, self, node1='.', node2=None, match=None, ignored=False,
464 465 clean=False, unknown=False, listsubrepos=False):
465 466 listignored = ignored
466 467 listclean = clean
467 468 listunknown = unknown
468 469
469 470 def _cmpsets(l1, l2):
470 471 try:
471 472 if 'FSMONITOR_LOG_FILE' in encoding.environ:
472 473 fn = encoding.environ['FSMONITOR_LOG_FILE']
473 474 f = open(fn, 'wb')
474 475 else:
475 476 fn = 'fsmonitorfail.log'
476 477 f = self.vfs.open(fn, 'wb')
477 478 except (IOError, OSError):
478 479 self.ui.warn(_('warning: unable to write to %s\n') % fn)
479 480 return
480 481
481 482 try:
482 483 for i, (s1, s2) in enumerate(zip(l1, l2)):
483 484 if set(s1) != set(s2):
484 485 f.write('sets at position %d are unequal\n' % i)
485 486 f.write('watchman returned: %s\n' % s1)
486 487 f.write('stat returned: %s\n' % s2)
487 488 finally:
488 489 f.close()
489 490
490 491 if isinstance(node1, context.changectx):
491 492 ctx1 = node1
492 493 else:
493 494 ctx1 = self[node1]
494 495 if isinstance(node2, context.changectx):
495 496 ctx2 = node2
496 497 else:
497 498 ctx2 = self[node2]
498 499
499 500 working = ctx2.rev() is None
500 501 parentworking = working and ctx1 == self['.']
501 502 match = match or matchmod.always()
502 503
503 504 # Maybe we can use this opportunity to update Watchman's state.
504 505 # Mercurial uses workingcommitctx and/or memctx to represent the part of
505 506 # the workingctx that is to be committed. So don't update the state in
506 507 # that case.
507 508 # HG_PENDING is set in the environment when the dirstate is being updated
508 509 # in the middle of a transaction; we must not update our state in that
509 510 # case, or we risk forgetting about changes in the working copy.
510 511 updatestate = (parentworking and match.always() and
511 512 not isinstance(ctx2, (context.workingcommitctx,
512 513 context.memctx)) and
513 514 'HG_PENDING' not in encoding.environ)
514 515
515 516 try:
516 517 if self._fsmonitorstate.walk_on_invalidate:
517 518 # Use a short timeout to query the current clock. If that
518 519 # takes too long then we assume that the service will be slow
519 520 # to answer our query.
520 521 # walk_on_invalidate indicates that we prefer to walk the
521 522 # tree ourselves because we can ignore portions that Watchman
522 523 # cannot and we tend to be faster in the warmer buffer cache
523 524 # cases.
524 525 self._watchmanclient.settimeout(0.1)
525 526 else:
526 527 # Give Watchman more time to potentially complete its walk
527 528 # and return the initial clock. In this mode we assume that
528 529 # the filesystem will be slower than parsing a potentially
529 530 # very large Watchman result set.
530 531 self._watchmanclient.settimeout(
531 532 self._fsmonitorstate.timeout + 0.1)
532 533 startclock = self._watchmanclient.getcurrentclock()
533 534 except Exception as ex:
534 535 self._watchmanclient.clearconnection()
535 536 _handleunavailable(self.ui, self._fsmonitorstate, ex)
536 537 # boo, Watchman failed. bail
537 538 return orig(node1, node2, match, listignored, listclean,
538 539 listunknown, listsubrepos)
539 540
540 541 if updatestate:
541 542 # We need info about unknown files. This may make things slower the
542 543 # first time, but whatever.
543 544 stateunknown = True
544 545 else:
545 546 stateunknown = listunknown
546 547
547 548 if updatestate:
548 549 ps = poststatus(startclock)
549 550 self.addpostdsstatus(ps)
550 551
551 552 r = orig(node1, node2, match, listignored, listclean, stateunknown,
552 553 listsubrepos)
553 554 modified, added, removed, deleted, unknown, ignored, clean = r
554 555
555 556 if not listunknown:
556 557 unknown = []
557 558
558 559 # don't do paranoid checks if we're not going to query Watchman anyway
559 560 full = listclean or match.traversedir is not None
560 561 if self._fsmonitorstate.mode == 'paranoid' and not full:
561 562 # run status again and fall back to the old walk this time
562 563 self.dirstate._fsmonitordisable = True
563 564
564 565 # shut the UI up
565 566 quiet = self.ui.quiet
566 567 self.ui.quiet = True
567 568 fout, ferr = self.ui.fout, self.ui.ferr
568 569 self.ui.fout = self.ui.ferr = open(os.devnull, 'wb')
569 570
570 571 try:
571 572 rv2 = orig(
572 573 node1, node2, match, listignored, listclean, listunknown,
573 574 listsubrepos)
574 575 finally:
575 576 self.dirstate._fsmonitordisable = False
576 577 self.ui.quiet = quiet
577 578 self.ui.fout, self.ui.ferr = fout, ferr
578 579
579 580 # clean isn't tested since it's set to True above
580 581 with self.wlock():
581 582 _cmpsets(
582 583 [modified, added, removed, deleted, unknown, ignored, clean],
583 584 rv2)
584 585 modified, added, removed, deleted, unknown, ignored, clean = rv2
585 586
586 587 return scmutil.status(
587 588 modified, added, removed, deleted, unknown, ignored, clean)
588 589
589 590 class poststatus(object):
590 591 def __init__(self, startclock):
591 592 self._startclock = startclock
592 593
593 594 def __call__(self, wctx, status):
594 595 clock = wctx.repo()._fsmonitorstate.getlastclock() or self._startclock
595 596 hashignore = _hashignore(wctx.repo().dirstate._ignore)
596 597 notefiles = (status.modified + status.added + status.removed +
597 598 status.deleted + status.unknown)
598 599 wctx.repo()._fsmonitorstate.set(clock, hashignore, notefiles)
599 600
600 601 def makedirstate(repo, dirstate):
601 602 class fsmonitordirstate(dirstate.__class__):
602 603 def _fsmonitorinit(self, repo):
603 604 # _fsmonitordisable is used in paranoid mode
604 605 self._fsmonitordisable = False
605 606 self._fsmonitorstate = repo._fsmonitorstate
606 607 self._watchmanclient = repo._watchmanclient
607 608 self._repo = weakref.proxy(repo)
608 609
609 610 def walk(self, *args, **kwargs):
610 611 orig = super(fsmonitordirstate, self).walk
611 612 if self._fsmonitordisable:
612 613 return orig(*args, **kwargs)
613 614 return overridewalk(orig, self, *args, **kwargs)
614 615
615 616 def rebuild(self, *args, **kwargs):
616 617 self._fsmonitorstate.invalidate()
617 618 return super(fsmonitordirstate, self).rebuild(*args, **kwargs)
618 619
619 620 def invalidate(self, *args, **kwargs):
620 621 self._fsmonitorstate.invalidate()
621 622 return super(fsmonitordirstate, self).invalidate(*args, **kwargs)
622 623
623 624 dirstate.__class__ = fsmonitordirstate
624 625 dirstate._fsmonitorinit(repo)
625 626
626 627 def wrapdirstate(orig, self):
627 628 ds = orig(self)
628 629 # only override the dirstate when Watchman is available for the repo
629 630 if util.safehasattr(self, '_fsmonitorstate'):
630 631 makedirstate(self, ds)
631 632 return ds
632 633
633 634 def extsetup(ui):
634 635 extensions.wrapfilecache(
635 636 localrepo.localrepository, 'dirstate', wrapdirstate)
636 637 if pycompat.isdarwin:
637 638 # An assist for avoiding the dangling-symlink fsevents bug
638 639 extensions.wrapfunction(os, 'symlink', wrapsymlink)
639 640
640 641 extensions.wrapfunction(merge, 'update', wrapupdate)
641 642
642 643 def wrapsymlink(orig, source, link_name):
643 644 ''' if we create a dangling symlink, also touch the parent dir
644 645 to encourage fsevents notifications to work more correctly '''
645 646 try:
646 647 return orig(source, link_name)
647 648 finally:
648 649 try:
649 650 os.utime(os.path.dirname(link_name), None)
650 651 except OSError:
651 652 pass
652 653
653 654 class state_update(object):
654 655 ''' This context manager is responsible for dispatching the state-enter
655 656 and state-leave signals to the watchman service. The enter and leave
656 657 methods can be invoked manually (for scenarios where context manager
657 658 semantics are not possible). If parameters oldnode and newnode are None,
658 659 they will be populated based on current working copy in enter and
659 660 leave, respectively. Similarly, if the distance is None, it will be
660 661 calculated based on the oldnode and newnode in the leave method.'''
661 662
662 663 def __init__(self, repo, name, oldnode=None, newnode=None, distance=None,
663 664 partial=False):
664 665 self.repo = repo.unfiltered()
665 666 self.name = name
666 667 self.oldnode = oldnode
667 668 self.newnode = newnode
668 669 self.distance = distance
669 670 self.partial = partial
670 671 self._lock = None
671 672 self.need_leave = False
672 673
673 674 def __enter__(self):
674 675 self.enter()
675 676
676 677 def enter(self):
677 678 # Make sure we have a wlock prior to sending notifications to watchman.
678 679 # We don't want to race with other actors. In the update case,
679 680 # merge.update is going to take the wlock almost immediately. We are
680 681 # effectively extending the lock around several short sanity checks.
681 682 if self.oldnode is None:
682 683 self.oldnode = self.repo['.'].node()
683 684
684 685 if self.repo.currentwlock() is None:
685 686 if util.safehasattr(self.repo, 'wlocknostateupdate'):
686 687 self._lock = self.repo.wlocknostateupdate()
687 688 else:
688 689 self._lock = self.repo.wlock()
689 690 self.need_leave = self._state(
690 691 'state-enter',
691 692 hex(self.oldnode))
692 693 return self
693 694
694 695 def __exit__(self, type_, value, tb):
695 696 abort = True if type_ else False
696 697 self.exit(abort=abort)
697 698
698 699 def exit(self, abort=False):
699 700 try:
700 701 if self.need_leave:
701 702 status = 'failed' if abort else 'ok'
702 703 if self.newnode is None:
703 704 self.newnode = self.repo['.'].node()
704 705 if self.distance is None:
705 706 self.distance = calcdistance(
706 707 self.repo, self.oldnode, self.newnode)
707 708 self._state(
708 709 'state-leave',
709 710 hex(self.newnode),
710 711 status=status)
711 712 finally:
712 713 self.need_leave = False
713 714 if self._lock:
714 715 self._lock.release()
715 716
716 717 def _state(self, cmd, commithash, status='ok'):
717 718 if not util.safehasattr(self.repo, '_watchmanclient'):
718 719 return False
719 720 try:
720 721 self.repo._watchmanclient.command(cmd, {
721 722 'name': self.name,
722 723 'metadata': {
723 724 # the target revision
724 725 'rev': commithash,
725 726 # approximate number of commits between current and target
726 727 'distance': self.distance if self.distance else 0,
727 728 # success/failure (only really meaningful for state-leave)
728 729 'status': status,
729 730 # whether the working copy parent is changing
730 731 'partial': self.partial,
731 732 }})
732 733 return True
733 734 except Exception as e:
734 735 # Swallow any errors; fire and forget
735 736 self.repo.ui.log(
736 737 'watchman', 'Exception %s while running %s\n', e, cmd)
737 738 return False
738 739
739 740 # Estimate the distance between two nodes
740 741 def calcdistance(repo, oldnode, newnode):
741 742 anc = repo.changelog.ancestor(oldnode, newnode)
742 743 ancrev = repo[anc].rev()
743 744 distance = (abs(repo[oldnode].rev() - ancrev)
744 745 + abs(repo[newnode].rev() - ancrev))
745 746 return distance
746 747
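In other words, the estimate is the sum of each node's revision distance to the common ancestor. A worked example with made-up revision numbers::

    # If oldnode sits at rev 120, newnode at rev 150, and their common
    # ancestor at rev 100, calcdistance() reports:
    distance = abs(120 - 100) + abs(150 - 100)   # == 70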
747 748 # Bracket working copy updates with calls to the watchman state-enter
748 749 # and state-leave commands. This allows clients to perform more intelligent
749 750 # settling during bulk file change scenarios
750 751 # https://facebook.github.io/watchman/docs/cmd/subscribe.html#advanced-settling
751 752 def wrapupdate(orig, repo, node, branchmerge, force, ancestor=None,
752 753 mergeancestor=False, labels=None, matcher=None, **kwargs):
753 754
754 755 distance = 0
755 756 partial = True
756 757 oldnode = repo['.'].node()
757 758 newnode = repo[node].node()
758 759 if matcher is None or matcher.always():
759 760 partial = False
760 761 distance = calcdistance(repo.unfiltered(), oldnode, newnode)
761 762
762 763 with state_update(repo, name="hg.update", oldnode=oldnode, newnode=newnode,
763 764 distance=distance, partial=partial):
764 765 return orig(
765 766 repo, node, branchmerge, force, ancestor, mergeancestor,
766 767 labels, matcher, **kwargs)
767 768
768 769 def repo_has_depth_one_nested_repo(repo):
769 770 for f in repo.wvfs.listdir():
770 771 if os.path.isdir(os.path.join(repo.root, f, '.hg')):
771 772 msg = 'fsmonitor: sub-repository %r detected, fsmonitor disabled\n'
772 773 repo.ui.debug(msg % f)
773 774 return True
774 775 return False
775 776
776 777 def reposetup(ui, repo):
777 778 # We don't work with largefiles or inotify
778 779 exts = extensions.enabled()
779 780 for ext in _blacklist:
780 781 if ext in exts:
781 782 ui.warn(_('The fsmonitor extension is incompatible with the %s '
782 783 'extension and has been disabled.\n') % ext)
783 784 return
784 785
785 786 if repo.local():
786 787 # We don't work with subrepos either.
787 788 #
788 789 # Checking repo[None].substate can cause a dirstate parse, which is
789 790 # too slow. Instead, look for the .hgsubstate and .hgsub files,
790 791 if repo.wvfs.exists('.hgsubstate') or repo.wvfs.exists('.hgsub'):
791 792 return
792 793
793 794 if repo_has_depth_one_nested_repo(repo):
794 795 return
795 796
796 797 fsmonitorstate = state.state(repo)
797 798 if fsmonitorstate.mode == 'off':
798 799 return
799 800
800 801 try:
801 802 client = watchmanclient.client(repo.ui, repo._root)
802 803 except Exception as ex:
803 804 _handleunavailable(ui, fsmonitorstate, ex)
804 805 return
805 806
806 807 repo._fsmonitorstate = fsmonitorstate
807 808 repo._watchmanclient = client
808 809
809 810 dirstate, cached = localrepo.isfilecached(repo, 'dirstate')
810 811 if cached:
811 812 # at this point since fsmonitorstate wasn't present,
812 813 # repo.dirstate is not a fsmonitordirstate
813 814 makedirstate(repo, dirstate)
814 815
815 816 class fsmonitorrepo(repo.__class__):
816 817 def status(self, *args, **kwargs):
817 818 orig = super(fsmonitorrepo, self).status
818 819 return overridestatus(orig, self, *args, **kwargs)
819 820
820 821 def wlocknostateupdate(self, *args, **kwargs):
821 822 return super(fsmonitorrepo, self).wlock(*args, **kwargs)
822 823
823 824 def wlock(self, *args, **kwargs):
824 825 l = super(fsmonitorrepo, self).wlock(*args, **kwargs)
825 826 if not ui.configbool(
826 827 "experimental", "fsmonitor.transaction_notify"):
827 828 return l
828 829 if l.held != 1:
829 830 return l
830 831 origrelease = l.releasefn
831 832
832 833 def staterelease():
833 834 if origrelease:
834 835 origrelease()
835 836 if l.stateupdate:
836 837 l.stateupdate.exit()
837 838 l.stateupdate = None
838 839
839 840 try:
840 841 l.stateupdate = None
841 842 l.stateupdate = state_update(self, name="hg.transaction")
842 843 l.stateupdate.enter()
843 844 l.releasefn = staterelease
844 845 except Exception as e:
845 846 # Swallow any errors; fire and forget
846 847 self.ui.log(
847 848 'watchman', 'Exception in state update %s\n', e)
848 849 return l
849 850
850 851 repo.__class__ = fsmonitorrepo
@@ -1,1113 +1,1113 b''
1 1 # __init__.py - remotefilelog extension
2 2 #
3 3 # Copyright 2013 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """remotefilelog causes Mercurial to lazilly fetch file contents (EXPERIMENTAL)
8 8
9 9 This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
10 10 GUARANTEES. This means that repositories created with this extension may
11 11 only be usable with the exact version of this extension/Mercurial that was
12 12 used. The extension attempts to enforce this in order to prevent repository
13 13 corruption.
14 14
15 15 remotefilelog works by fetching file contents lazily and storing them
16 16 in a cache on the client rather than in revlogs. This allows enormous
17 17 histories to be transferred only partially, making them easier to
18 18 operate on.
19 19
20 20 Configs:
21 21
22 22 ``packs.maxchainlen`` specifies the maximum delta chain length in pack files
23 23
24 24 ``packs.maxpacksize`` specifies the maximum pack file size
25 25
26 26 ``packs.maxpackfilecount`` specifies the maximum number of packs in the
27 27 shared cache (trees only for now)
28 28
29 29 ``remotefilelog.backgroundprefetch`` runs prefetch in background when True
30 30
31 31 ``remotefilelog.bgprefetchrevs`` specifies revisions to fetch on commit and
32 32 update, and on other commands that use them. Different from pullprefetch.
33 33
34 34 ``remotefilelog.gcrepack`` does garbage collection during repack when True
35 35
36 36 ``remotefilelog.nodettl`` specifies maximum TTL of a node in seconds before
37 37 it is garbage collected
38 38
39 39 ``remotefilelog.repackonhggc`` runs repack on hg gc when True
40 40
41 41 ``remotefilelog.prefetchdays`` specifies the maximum age of a commit in
42 42 days after which it is no longer prefetched.
43 43
44 44 ``remotefilelog.prefetchdelay`` specifies delay between background
45 45 prefetches in seconds after operations that change the working copy parent
46 46
47 47 ``remotefilelog.data.gencountlimit`` constrains the minimum number of data
48 48 pack files required to be considered part of a generation. In particular,
49 49 minimum number of pack files > gencountlimit.
50 50
51 51 ``remotefilelog.data.generations`` list for specifying the lower bound of
52 52 each generation of the data pack files. For example, list ['100MB','1MB']
53 53 or ['1MB', '100MB'] will lead to three generations: [0, 1MB), [
54 54 1MB, 100MB) and [100MB, infinity).
55 55
56 56 ``remotefilelog.data.maxrepackpacks`` the maximum number of pack files to
57 57 include in an incremental data repack.
58 58
59 59 ``remotefilelog.data.repackmaxpacksize`` the maximum size of a pack file for
60 60 it to be considered for an incremental data repack.
61 61
62 62 ``remotefilelog.data.repacksizelimit`` the maximum total size of pack files
63 63 to include in an incremental data repack.
64 64
65 65 ``remotefilelog.history.gencountlimit`` constrains the minimum number of
66 66 history pack files required to be considered part of a generation. In
67 67 particular, minimum number of pack files > gencountlimit.
68 68
69 69 ``remotefilelog.history.generations`` list for specifying the lower bound of
70 70 each generation of the history pack files. For example, list [
71 71 '100MB', '1MB'] or ['1MB', '100MB'] will lead to three generations: [
72 72 0, 1MB), [1MB, 100MB) and [100MB, infinity).
73 73
74 74 ``remotefilelog.history.maxrepackpacks`` the maximum number of pack files to
75 75 include in an incremental history repack.
76 76
77 77 ``remotefilelog.history.repackmaxpacksize`` the maximum size of a pack file
78 78 for it to be considered for an incremental history repack.
79 79
80 80 ``remotefilelog.history.repacksizelimit`` the maximum total size of pack
81 81 files to include in an incremental history repack.
82 82
83 83 ``remotefilelog.backgroundrepack`` automatically consolidate packs in the
84 84 background
85 85
86 86 ``remotefilelog.cachepath`` path to cache
87 87
88 88 ``remotefilelog.cachegroup`` if set, make cache directory sgid to this
89 89 group
90 90
91 91 ``remotefilelog.cacheprocess`` binary to invoke for fetching file data
92 92
93 93 ``remotefilelog.debug`` turn on remotefilelog-specific debug output
94 94
95 95 ``remotefilelog.excludepattern`` pattern of files to exclude from pulls
96 96
97 97 ``remotefilelog.includepattern`` pattern of files to include in pulls
98 98
99 99 ``remotefilelog.fetchwarning``: message to print when too many
100 100 single-file fetches occur
101 101
102 102 ``remotefilelog.getfilesstep`` number of files to request in a single RPC
103 103
104 104 ``remotefilelog.getfilestype`` if set to 'threaded' use threads to fetch
105 105 files, otherwise use optimistic fetching
106 106
107 107 ``remotefilelog.pullprefetch`` revset for selecting files that should be
108 108 eagerly downloaded rather than lazily
109 109
110 110 ``remotefilelog.reponame`` name of the repo. If set, used to partition
111 111 data from other repos in a shared store.
112 112
113 113 ``remotefilelog.server`` if true, enable server-side functionality
114 114
115 115 ``remotefilelog.servercachepath`` path for caching blobs on the server
116 116
117 117 ``remotefilelog.serverexpiration`` number of days to keep cached server
118 118 blobs
119 119
120 120 ``remotefilelog.validatecache`` if set, check cache entries for corruption
121 121 before returning blobs
122 122
123 123 ``remotefilelog.validatecachelog`` if set, check cache entries for
124 124 corruption before returning metadata
125 125
126 126 """
127 127 from __future__ import absolute_import
128 128
129 129 import os
130 130 import time
131 131 import traceback
132 132
133 133 from mercurial.node import hex
134 134 from mercurial.i18n import _
135 135 from mercurial import (
136 136 changegroup,
137 137 changelog,
138 138 cmdutil,
139 139 commands,
140 140 configitems,
141 141 context,
142 142 copies,
143 143 debugcommands as hgdebugcommands,
144 144 dispatch,
145 145 error,
146 146 exchange,
147 147 extensions,
148 148 hg,
149 149 localrepo,
150 150 match,
151 151 merge,
152 152 node as nodemod,
153 153 patch,
154 154 pycompat,
155 155 registrar,
156 156 repair,
157 157 repoview,
158 158 revset,
159 159 scmutil,
160 160 smartset,
161 161 streamclone,
162 162 util,
163 163 )
164 164 from . import (
165 165 constants,
166 166 debugcommands,
167 167 fileserverclient,
168 168 remotefilectx,
169 169 remotefilelog,
170 170 remotefilelogserver,
171 171 repack as repackmod,
172 172 shallowbundle,
173 173 shallowrepo,
174 174 shallowstore,
175 175 shallowutil,
176 176 shallowverifier,
177 177 )
178 178
179 179 # ensures debug commands are registered
180 180 hgdebugcommands.command
181 181
182 182 cmdtable = {}
183 183 command = registrar.command(cmdtable)
184 184
185 185 configtable = {}
186 186 configitem = registrar.configitem(configtable)
187 187
188 188 configitem('remotefilelog', 'debug', default=False)
189 189
190 190 configitem('remotefilelog', 'reponame', default='')
191 191 configitem('remotefilelog', 'cachepath', default=None)
192 192 configitem('remotefilelog', 'cachegroup', default=None)
193 193 configitem('remotefilelog', 'cacheprocess', default=None)
194 194 configitem('remotefilelog', 'cacheprocess.includepath', default=None)
195 195 configitem("remotefilelog", "cachelimit", default="1000 GB")
196 196
197 197 configitem('remotefilelog', 'fallbackpath', default=configitems.dynamicdefault,
198 198 alias=[('remotefilelog', 'fallbackrepo')])
199 199
200 200 configitem('remotefilelog', 'validatecachelog', default=None)
201 201 configitem('remotefilelog', 'validatecache', default='on')
202 202 configitem('remotefilelog', 'server', default=None)
203 203 configitem('remotefilelog', 'servercachepath', default=None)
204 204 configitem("remotefilelog", "serverexpiration", default=30)
205 205 configitem('remotefilelog', 'backgroundrepack', default=False)
206 206 configitem('remotefilelog', 'bgprefetchrevs', default=None)
207 207 configitem('remotefilelog', 'pullprefetch', default=None)
208 208 configitem('remotefilelog', 'backgroundprefetch', default=False)
209 209 configitem('remotefilelog', 'prefetchdelay', default=120)
210 210 configitem('remotefilelog', 'prefetchdays', default=14)
211 211
212 212 configitem('remotefilelog', 'getfilesstep', default=10000)
213 213 configitem('remotefilelog', 'getfilestype', default='optimistic')
214 214 configitem('remotefilelog', 'batchsize', configitems.dynamicdefault)
215 215 configitem('remotefilelog', 'fetchwarning', default='')
216 216
217 217 configitem('remotefilelog', 'includepattern', default=None)
218 218 configitem('remotefilelog', 'excludepattern', default=None)
219 219
220 220 configitem('remotefilelog', 'gcrepack', default=False)
221 221 configitem('remotefilelog', 'repackonhggc', default=False)
222 configitem('repack', 'chainorphansbysize', default=True)
222 configitem('repack', 'chainorphansbysize', default=True, experimental=True)
223 223
224 224 configitem('packs', 'maxpacksize', default=0)
225 225 configitem('packs', 'maxchainlen', default=1000)
226 226
227 227 # default TTL limit is 30 days
228 228 _defaultlimit = 60 * 60 * 24 * 30
229 229 configitem('remotefilelog', 'nodettl', default=_defaultlimit)
230 230
231 231 configitem('remotefilelog', 'data.gencountlimit', default=2),
232 232 configitem('remotefilelog', 'data.generations',
233 233 default=['1GB', '100MB', '1MB'])
234 234 configitem('remotefilelog', 'data.maxrepackpacks', default=50)
235 235 configitem('remotefilelog', 'data.repackmaxpacksize', default='4GB')
236 236 configitem('remotefilelog', 'data.repacksizelimit', default='100MB')
237 237
238 238 configitem('remotefilelog', 'history.gencountlimit', default=2),
239 239 configitem('remotefilelog', 'history.generations', default=['100MB'])
240 240 configitem('remotefilelog', 'history.maxrepackpacks', default=50)
241 241 configitem('remotefilelog', 'history.repackmaxpacksize', default='400MB')
242 242 configitem('remotefilelog', 'history.repacksizelimit', default='100MB')
243 243
244 244 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
245 245 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
246 246 # be specifying the version(s) of Mercurial they are tested with, or
247 247 # leave the attribute unspecified.
248 248 testedwith = 'ships-with-hg-core'
249 249
250 250 repoclass = localrepo.localrepository
251 251 repoclass._basesupported.add(constants.SHALLOWREPO_REQUIREMENT)
252 252
253 253 isenabled = shallowutil.isenabled
254 254
255 255 def uisetup(ui):
256 256 """Wraps user facing Mercurial commands to swap them out with shallow
257 257 versions.
258 258 """
259 259 hg.wirepeersetupfuncs.append(fileserverclient.peersetup)
260 260
261 261 entry = extensions.wrapcommand(commands.table, 'clone', cloneshallow)
262 262 entry[1].append(('', 'shallow', None,
263 263 _("create a shallow clone which uses remote file "
264 264 "history")))
265 265
266 266 extensions.wrapcommand(commands.table, 'debugindex',
267 267 debugcommands.debugindex)
268 268 extensions.wrapcommand(commands.table, 'debugindexdot',
269 269 debugcommands.debugindexdot)
270 270 extensions.wrapcommand(commands.table, 'log', log)
271 271 extensions.wrapcommand(commands.table, 'pull', pull)
272 272
273 273 # Prevent 'hg manifest --all'
274 274 def _manifest(orig, ui, repo, *args, **opts):
275 275 if (isenabled(repo) and opts.get(r'all')):
276 276 raise error.Abort(_("--all is not supported in a shallow repo"))
277 277
278 278 return orig(ui, repo, *args, **opts)
279 279 extensions.wrapcommand(commands.table, "manifest", _manifest)
280 280
281 281 # Wrap remotefilelog with lfs code
282 282 def _lfsloaded(loaded=False):
283 283 lfsmod = None
284 284 try:
285 285 lfsmod = extensions.find('lfs')
286 286 except KeyError:
287 287 pass
288 288 if lfsmod:
289 289 lfsmod.wrapfilelog(remotefilelog.remotefilelog)
290 290 fileserverclient._lfsmod = lfsmod
291 291 extensions.afterloaded('lfs', _lfsloaded)
292 292
293 293 # debugdata needs remotefilelog.len to work
294 294 extensions.wrapcommand(commands.table, 'debugdata', debugdatashallow)
295 295
296 296 changegroup.cgpacker = shallowbundle.shallowcg1packer
297 297
298 298 extensions.wrapfunction(changegroup, '_addchangegroupfiles',
299 299 shallowbundle.addchangegroupfiles)
300 300 extensions.wrapfunction(
301 301 changegroup, 'makechangegroup', shallowbundle.makechangegroup)
302 302 extensions.wrapfunction(localrepo, 'makestore', storewrapper)
303 303 extensions.wrapfunction(exchange, 'pull', exchangepull)
304 304 extensions.wrapfunction(merge, 'applyupdates', applyupdates)
305 305 extensions.wrapfunction(merge, '_checkunknownfiles', checkunknownfiles)
306 306 extensions.wrapfunction(context.workingctx, '_checklookup', checklookup)
307 307 extensions.wrapfunction(scmutil, '_findrenames', findrenames)
308 308 extensions.wrapfunction(copies, '_computeforwardmissing',
309 309 computeforwardmissing)
310 310 extensions.wrapfunction(dispatch, 'runcommand', runcommand)
311 311 extensions.wrapfunction(repair, '_collectbrokencsets', _collectbrokencsets)
312 312 extensions.wrapfunction(context.changectx, 'filectx', filectx)
313 313 extensions.wrapfunction(context.workingctx, 'filectx', workingfilectx)
314 314 extensions.wrapfunction(patch, 'trydiff', trydiff)
315 315 extensions.wrapfunction(hg, 'verify', _verify)
316 316 scmutil.fileprefetchhooks.add('remotefilelog', _fileprefetchhook)
317 317
318 318 # disappointing hacks below
319 319 extensions.wrapfunction(scmutil, 'getrenamedfn', getrenamedfn)
320 320 extensions.wrapfunction(revset, 'filelog', filelogrevset)
321 321 revset.symbols['filelog'] = revset.filelog
322 322 extensions.wrapfunction(cmdutil, 'walkfilerevs', walkfilerevs)
323 323
324 324
325 325 def cloneshallow(orig, ui, repo, *args, **opts):
326 326 if opts.get(r'shallow'):
327 327 repos = []
328 328 def pull_shallow(orig, self, *args, **kwargs):
329 329 if not isenabled(self):
330 330 repos.append(self.unfiltered())
331 331 # set up the client hooks so the post-clone update works
332 332 setupclient(self.ui, self.unfiltered())
333 333
334 334 # setupclient fixed the class on the repo itself
335 335 # but we also need to fix it on the repoview
336 336 if isinstance(self, repoview.repoview):
337 337 self.__class__.__bases__ = (self.__class__.__bases__[0],
338 338 self.unfiltered().__class__)
339 339 self.requirements.add(constants.SHALLOWREPO_REQUIREMENT)
340 340 self._writerequirements()
341 341
342 342 # Since setupclient hadn't been called, exchange.pull was not
343 343 # wrapped. So we need to manually invoke our version of it.
344 344 return exchangepull(orig, self, *args, **kwargs)
345 345 else:
346 346 return orig(self, *args, **kwargs)
347 347 extensions.wrapfunction(exchange, 'pull', pull_shallow)
348 348
349 349 # Wrap the stream logic to add requirements and to pass include/exclude
350 350 # patterns around.
351 351 def setup_streamout(repo, remote):
352 352 # Replace remote.stream_out with a version that sends file
353 353 # patterns.
354 354 def stream_out_shallow(orig):
355 355 caps = remote.capabilities()
356 356 if constants.NETWORK_CAP_LEGACY_SSH_GETFILES in caps:
357 357 opts = {}
358 358 if repo.includepattern:
359 359 opts[r'includepattern'] = '\0'.join(repo.includepattern)
360 360 if repo.excludepattern:
361 361 opts[r'excludepattern'] = '\0'.join(repo.excludepattern)
362 362 return remote._callstream('stream_out_shallow', **opts)
363 363 else:
364 364 return orig()
365 365 extensions.wrapfunction(remote, 'stream_out', stream_out_shallow)
366 366 def stream_wrap(orig, op):
367 367 setup_streamout(op.repo, op.remote)
368 368 return orig(op)
369 369 extensions.wrapfunction(
370 370 streamclone, 'maybeperformlegacystreamclone', stream_wrap)
371 371
372 372 def canperformstreamclone(orig, pullop, bundle2=False):
373 373 # remotefilelog is currently incompatible with the
374 374 # bundle2 flavor of streamclones, so force us to use
375 375 # v1 instead.
376 376 if 'v2' in pullop.remotebundle2caps.get('stream', []):
377 377 pullop.remotebundle2caps['stream'] = [
378 378 c for c in pullop.remotebundle2caps['stream']
379 379 if c != 'v2']
380 380 if bundle2:
381 381 return False, None
382 382 supported, requirements = orig(pullop, bundle2=bundle2)
383 383 if requirements is not None:
384 384 requirements.add(constants.SHALLOWREPO_REQUIREMENT)
385 385 return supported, requirements
386 386 extensions.wrapfunction(
387 387 streamclone, 'canperformstreamclone', canperformstreamclone)
388 388
389 389 try:
390 390 orig(ui, repo, *args, **opts)
391 391 finally:
392 392 if opts.get(r'shallow'):
393 393 for r in repos:
394 394 if util.safehasattr(r, 'fileservice'):
395 395 r.fileservice.close()
396 396
397 397 def debugdatashallow(orig, *args, **kwds):
398 398 oldlen = remotefilelog.remotefilelog.__len__
399 399 try:
400 400 remotefilelog.remotefilelog.__len__ = lambda x: 1
401 401 return orig(*args, **kwds)
402 402 finally:
403 403 remotefilelog.remotefilelog.__len__ = oldlen
404 404
405 405 def reposetup(ui, repo):
406 406 if not repo.local():
407 407 return
408 408
409 409 # put here intentionally because it doesn't work in uisetup
410 410 ui.setconfig('hooks', 'update.prefetch', wcpprefetch)
411 411 ui.setconfig('hooks', 'commit.prefetch', wcpprefetch)
412 412
413 413 isserverenabled = ui.configbool('remotefilelog', 'server')
414 414 isshallowclient = isenabled(repo)
415 415
416 416 if isserverenabled and isshallowclient:
417 417 raise RuntimeError("Cannot be both a server and shallow client.")
418 418
419 419 if isshallowclient:
420 420 setupclient(ui, repo)
421 421
422 422 if isserverenabled:
423 423 remotefilelogserver.setupserver(ui, repo)
424 424
425 425 def setupclient(ui, repo):
426 426 if not isinstance(repo, localrepo.localrepository):
427 427 return
428 428
429 429 # Even clients get the server setup since they need to have the
430 430 # wireprotocol endpoints registered.
431 431 remotefilelogserver.onetimesetup(ui)
432 432 onetimeclientsetup(ui)
433 433
434 434 shallowrepo.wraprepo(repo)
435 435 repo.store = shallowstore.wrapstore(repo.store)
436 436
437 437 def storewrapper(orig, requirements, path, vfstype):
438 438 s = orig(requirements, path, vfstype)
439 439 if constants.SHALLOWREPO_REQUIREMENT in requirements:
440 440 s = shallowstore.wrapstore(s)
441 441
442 442 return s
443 443
444 444 # prefetch files before update
445 445 def applyupdates(orig, repo, actions, wctx, mctx, overwrite, wantfiledata,
446 446 labels=None):
447 447 if isenabled(repo):
448 448 manifest = mctx.manifest()
449 449 files = []
450 450 for f, args, msg in actions['g']:
451 451 files.append((f, hex(manifest[f])))
452 452 # batch fetch the needed files from the server
453 453 repo.fileservice.prefetch(files)
454 454 return orig(repo, actions, wctx, mctx, overwrite, wantfiledata,
455 455 labels=labels)
456 456
457 457 # Prefetch merge checkunknownfiles
458 458 def checkunknownfiles(orig, repo, wctx, mctx, force, actions,
459 459 *args, **kwargs):
460 460 if isenabled(repo):
461 461 files = []
462 462 sparsematch = repo.maybesparsematch(mctx.rev())
463 463 for f, (m, actionargs, msg) in actions.iteritems():
464 464 if sparsematch and not sparsematch(f):
465 465 continue
466 466 if m in ('c', 'dc', 'cm'):
467 467 files.append((f, hex(mctx.filenode(f))))
468 468 elif m == 'dg':
469 469 f2 = actionargs[0]
470 470 files.append((f2, hex(mctx.filenode(f2))))
471 471 # batch fetch the needed files from the server
472 472 repo.fileservice.prefetch(files)
473 473 return orig(repo, wctx, mctx, force, actions, *args, **kwargs)
474 474
475 475 # Prefetch files before status attempts to look at their size and contents
476 476 def checklookup(orig, self, files):
477 477 repo = self._repo
478 478 if isenabled(repo):
479 479 prefetchfiles = []
480 480 for parent in self._parents:
481 481 for f in files:
482 482 if f in parent:
483 483 prefetchfiles.append((f, hex(parent.filenode(f))))
484 484 # batch fetch the needed files from the server
485 485 repo.fileservice.prefetch(prefetchfiles)
486 486 return orig(self, files)
487 487
488 488 # Prefetch files before the logic that compares added and removed files for renames
489 489 def findrenames(orig, repo, matcher, added, removed, *args, **kwargs):
490 490 if isenabled(repo):
491 491 files = []
492 492 pmf = repo['.'].manifest()
493 493 for f in removed:
494 494 if f in pmf:
495 495 files.append((f, hex(pmf[f])))
496 496 # batch fetch the needed files from the server
497 497 repo.fileservice.prefetch(files)
498 498 return orig(repo, matcher, added, removed, *args, **kwargs)
499 499
500 500 # prefetch files before pathcopies check
501 501 def computeforwardmissing(orig, a, b, match=None):
502 502 missing = orig(a, b, match=match)
503 503 repo = a._repo
504 504 if isenabled(repo):
505 505 mb = b.manifest()
506 506
507 507 files = []
508 508 sparsematch = repo.maybesparsematch(b.rev())
509 509 if sparsematch:
510 510 sparsemissing = set()
511 511 for f in missing:
512 512 if sparsematch(f):
513 513 files.append((f, hex(mb[f])))
514 514 sparsemissing.add(f)
515 515 missing = sparsemissing
516 516
517 517 # batch fetch the needed files from the server
518 518 repo.fileservice.prefetch(files)
519 519 return missing
520 520
521 521 # close cache miss server connection after the command has finished
522 522 def runcommand(orig, lui, repo, *args, **kwargs):
523 523 fileservice = None
524 524 # repo can be None when running in chg:
525 525 # - at startup, reposetup was called because serve is not norepo
526 526 # - a norepo command like "help" is called
527 527 if repo and isenabled(repo):
528 528 fileservice = repo.fileservice
529 529 try:
530 530 return orig(lui, repo, *args, **kwargs)
531 531 finally:
532 532 if fileservice:
533 533 fileservice.close()
534 534
535 535 # prevent strip from stripping remotefilelogs
536 536 def _collectbrokencsets(orig, repo, files, striprev):
537 537 if isenabled(repo):
538 538 files = [f for f in files if not repo.shallowmatch(f)]
539 539 return orig(repo, files, striprev)
540 540
541 541 # changectx wrappers
542 542 def filectx(orig, self, path, fileid=None, filelog=None):
543 543 if fileid is None:
544 544 fileid = self.filenode(path)
545 545 if (isenabled(self._repo) and self._repo.shallowmatch(path)):
546 546 return remotefilectx.remotefilectx(self._repo, path, fileid=fileid,
547 547 changectx=self, filelog=filelog)
548 548 return orig(self, path, fileid=fileid, filelog=filelog)
549 549
550 550 def workingfilectx(orig, self, path, filelog=None):
551 551 if (isenabled(self._repo) and self._repo.shallowmatch(path)):
552 552 return remotefilectx.remoteworkingfilectx(self._repo, path,
553 553 workingctx=self,
554 554 filelog=filelog)
555 555 return orig(self, path, filelog=filelog)
556 556
557 557 # prefetch required revisions before a diff
558 558 def trydiff(orig, repo, revs, ctx1, ctx2, modified, added, removed,
559 559 copy, getfilectx, *args, **kwargs):
560 560 if isenabled(repo):
561 561 prefetch = []
562 562 mf1 = ctx1.manifest()
563 563 for fname in modified + added + removed:
564 564 if fname in mf1:
565 565 fnode = getfilectx(fname, ctx1).filenode()
566 566 # fnode can be None if it's an edited working ctx file
567 567 if fnode:
568 568 prefetch.append((fname, hex(fnode)))
569 569 if fname not in removed:
570 570 fnode = getfilectx(fname, ctx2).filenode()
571 571 if fnode:
572 572 prefetch.append((fname, hex(fnode)))
573 573
574 574 repo.fileservice.prefetch(prefetch)
575 575
576 576 return orig(repo, revs, ctx1, ctx2, modified, added, removed, copy,
577 577 getfilectx, *args, **kwargs)
578 578
579 579 # Prevent verify from processing files
580 580 # a stub for mercurial.hg.verify()
581 581 def _verify(orig, repo, level=None):
582 582 lock = repo.lock()
583 583 try:
584 584 return shallowverifier.shallowverifier(repo).verify()
585 585 finally:
586 586 lock.release()
587 587
588 588
589 589 clientonetime = False
590 590 def onetimeclientsetup(ui):
591 591 global clientonetime
592 592 if clientonetime:
593 593 return
594 594 clientonetime = True
595 595
596 596 # Don't commit filelogs until we know the commit hash, since the hash
597 597 # is present in the filelog blob.
598 598 # This violates Mercurial's filelog->manifest->changelog write order,
599 599 # but is generally fine for client repos.
600 600 pendingfilecommits = []
601 601 def addrawrevision(orig, self, rawtext, transaction, link, p1, p2, node,
602 602 flags, cachedelta=None, _metatuple=None):
603 603 if isinstance(link, int):
604 604 pendingfilecommits.append(
605 605 (self, rawtext, transaction, link, p1, p2, node, flags,
606 606 cachedelta, _metatuple))
607 607 return node
608 608 else:
609 609 return orig(self, rawtext, transaction, link, p1, p2, node, flags,
610 610 cachedelta, _metatuple=_metatuple)
611 611 extensions.wrapfunction(
612 612 remotefilelog.remotefilelog, 'addrawrevision', addrawrevision)
613 613
614 614 def changelogadd(orig, self, *args):
615 615 oldlen = len(self)
616 616 node = orig(self, *args)
617 617 newlen = len(self)
618 618 if oldlen != newlen:
619 619 for oldargs in pendingfilecommits:
620 620 log, rt, tr, link, p1, p2, n, fl, c, m = oldargs
621 621 linknode = self.node(link)
622 622 if linknode == node:
623 623 log.addrawrevision(rt, tr, linknode, p1, p2, n, fl, c, m)
624 624 else:
625 625 raise error.ProgrammingError(
626 626 'pending multiple integer revisions are not supported')
627 627 else:
628 628 # "link" is actually wrong here (it is set to len(changelog))
629 629 # if changelog remains unchanged, skip writing file revisions
630 630 # but still do a sanity check about pending multiple revisions
631 631 if len(set(x[3] for x in pendingfilecommits)) > 1:
632 632 raise error.ProgrammingError(
633 633 'pending multiple integer revisions are not supported')
634 634 del pendingfilecommits[:]
635 635 return node
636 636 extensions.wrapfunction(changelog.changelog, 'add', changelogadd)
637 637
638 638 def getrenamedfn(orig, repo, endrev=None):
639 639 if not isenabled(repo) or copies.usechangesetcentricalgo(repo):
640 640 return orig(repo, endrev)
641 641
642 642 rcache = {}
643 643
644 644 def getrenamed(fn, rev):
645 645 '''looks up all renames for a file (up to endrev) the first
646 646 time the file is given. It indexes on the changerev and only
647 647 parses the manifest if linkrev != changerev.
648 648 Returns rename info for fn at changerev rev.'''
649 649 if rev in rcache.setdefault(fn, {}):
650 650 return rcache[fn][rev]
651 651
652 652 try:
653 653 fctx = repo[rev].filectx(fn)
654 654 for ancestor in fctx.ancestors():
655 655 if ancestor.path() == fn:
656 656 renamed = ancestor.renamed()
657 657 rcache[fn][ancestor.rev()] = renamed and renamed[0]
658 658
659 659 renamed = fctx.renamed()
660 660 return renamed and renamed[0]
661 661 except error.LookupError:
662 662 return None
663 663
664 664 return getrenamed
665 665
666 666 def walkfilerevs(orig, repo, match, follow, revs, fncache):
667 667 if not isenabled(repo):
668 668 return orig(repo, match, follow, revs, fncache)
669 669
670 670 # remotefilelogs can't be walked in rev order, so throw.
671 671 # The caller will see the exception and walk the commit tree instead.
672 672 if not follow:
673 673 raise cmdutil.FileWalkError("Cannot walk via filelog")
674 674
675 675 wanted = set()
676 676 minrev, maxrev = min(revs), max(revs)
677 677
678 678 pctx = repo['.']
679 679 for filename in match.files():
680 680 if filename not in pctx:
681 681 raise error.Abort(_('cannot follow file not in parent '
682 682 'revision: "%s"') % filename)
683 683 fctx = pctx[filename]
684 684
685 685 linkrev = fctx.linkrev()
686 686 if linkrev >= minrev and linkrev <= maxrev:
687 687 fncache.setdefault(linkrev, []).append(filename)
688 688 wanted.add(linkrev)
689 689
690 690 for ancestor in fctx.ancestors():
691 691 linkrev = ancestor.linkrev()
692 692 if linkrev >= minrev and linkrev <= maxrev:
693 693 fncache.setdefault(linkrev, []).append(ancestor.path())
694 694 wanted.add(linkrev)
695 695
696 696 return wanted
697 697
698 698 def filelogrevset(orig, repo, subset, x):
699 699 """``filelog(pattern)``
700 700 Changesets connected to the specified filelog.
701 701
702 702 For performance reasons, ``filelog()`` does not show every changeset
703 703 that affects the requested file(s). See :hg:`help log` for details. For
704 704 a slower, more accurate result, use ``file()``.
705 705 """
706 706
707 707 if not isenabled(repo):
708 708 return orig(repo, subset, x)
709 709
710 710 # i18n: "filelog" is a keyword
711 711 pat = revset.getstring(x, _("filelog requires a pattern"))
712 712 m = match.match(repo.root, repo.getcwd(), [pat], default='relpath',
713 713 ctx=repo[None])
714 714 s = set()
715 715
716 716 if not match.patkind(pat):
717 717 # slow
718 718 for r in subset:
719 719 ctx = repo[r]
720 720 cfiles = ctx.files()
721 721 for f in m.files():
722 722 if f in cfiles:
723 723 s.add(ctx.rev())
724 724 break
725 725 else:
726 726 # partial
727 727 files = (f for f in repo[None] if m(f))
728 728 for f in files:
729 729 fctx = repo[None].filectx(f)
730 730 s.add(fctx.linkrev())
731 731 for actx in fctx.ancestors():
732 732 s.add(actx.linkrev())
733 733
734 734 return smartset.baseset([r for r in subset if r in s])
735 735
736 736 @command('gc', [], _('hg gc [REPO...]'), norepo=True)
737 737 def gc(ui, *args, **opts):
738 738 '''garbage collect the client and server filelog caches
739 739 '''
740 740 cachepaths = set()
741 741
742 742 # get the system client cache
743 743 systemcache = shallowutil.getcachepath(ui, allowempty=True)
744 744 if systemcache:
745 745 cachepaths.add(systemcache)
746 746
747 747 # get repo client and server cache
748 748 repopaths = []
749 749 pwd = ui.environ.get('PWD')
750 750 if pwd:
751 751 repopaths.append(pwd)
752 752
753 753 repopaths.extend(args)
754 754 repos = []
755 755 for repopath in repopaths:
756 756 try:
757 757 repo = hg.peer(ui, {}, repopath)
758 758 repos.append(repo)
759 759
760 760 repocache = shallowutil.getcachepath(repo.ui, allowempty=True)
761 761 if repocache:
762 762 cachepaths.add(repocache)
763 763 except error.RepoError:
764 764 pass
765 765
766 766 # gc client cache
767 767 for cachepath in cachepaths:
768 768 gcclient(ui, cachepath)
769 769
770 770 # gc server cache
771 771 for repo in repos:
772 772 remotefilelogserver.gcserver(ui, repo._repo)
773 773
774 774 def gcclient(ui, cachepath):
775 775 # get list of repos that use this cache
776 776 repospath = os.path.join(cachepath, 'repos')
777 777 if not os.path.exists(repospath):
778 778 ui.warn(_("no known cache at %s\n") % cachepath)
779 779 return
780 780
781 781 reposfile = open(repospath, 'rb')
782 782 repos = {r[:-1] for r in reposfile.readlines()}
783 783 reposfile.close()
784 784
785 785 # build list of useful files
786 786 validrepos = []
787 787 keepkeys = set()
788 788
789 789 sharedcache = None
790 790 filesrepacked = False
791 791
792 792 count = 0
793 793 progress = ui.makeprogress(_("analyzing repositories"), unit="repos",
794 794 total=len(repos))
795 795 for path in repos:
796 796 progress.update(count)
797 797 count += 1
798 798 try:
799 799 path = ui.expandpath(os.path.normpath(path))
800 800 except TypeError as e:
801 801 ui.warn(_("warning: malformed path: %r:%s\n") % (path, e))
802 802 traceback.print_exc()
803 803 continue
804 804 try:
805 805 peer = hg.peer(ui, {}, path)
806 806 repo = peer._repo
807 807 except error.RepoError:
808 808 continue
809 809
810 810 validrepos.append(path)
811 811
812 812 # Protect against any repo or config changes that have happened since
813 813 # this repo was added to the repos file. We'd rather this loop succeed
814 814 # and delete too much than have it fail and delete nothing.
815 815 if not isenabled(repo):
816 816 continue
817 817
818 818 if not util.safehasattr(repo, 'name'):
819 819 ui.warn(_("repo %s is a misconfigured remotefilelog repo\n") % path)
820 820 continue
821 821
822 822 # If garbage collection on repack and repack on hg gc are enabled
823 823 # then loose files are repacked and garbage collected.
824 824 # Otherwise regular garbage collection is performed.
825 825 repackonhggc = repo.ui.configbool('remotefilelog', 'repackonhggc')
826 826 gcrepack = repo.ui.configbool('remotefilelog', 'gcrepack')
827 827 if repackonhggc and gcrepack:
828 828 try:
829 829 repackmod.incrementalrepack(repo)
830 830 filesrepacked = True
831 831 continue
832 832 except (IOError, repackmod.RepackAlreadyRunning):
833 833 # If repack cannot be performed due to insufficient disk space,
834 834 # continue doing garbage collection of loose files without repacking
835 835 pass
836 836
837 837 reponame = repo.name
838 838 if not sharedcache:
839 839 sharedcache = repo.sharedstore
840 840
841 841 # Compute a keepset which is not garbage collected
842 842 def keyfn(fname, fnode):
843 843 return fileserverclient.getcachekey(reponame, fname, hex(fnode))
844 844 keepkeys = repackmod.keepset(repo, keyfn=keyfn, lastkeepkeys=keepkeys)
845 845
846 846 progress.complete()
847 847
848 848 # write list of valid repos back
849 849 oldumask = os.umask(0o002)
850 850 try:
851 851 reposfile = open(repospath, 'wb')
852 852 reposfile.writelines([("%s\n" % r) for r in validrepos])
853 853 reposfile.close()
854 854 finally:
855 855 os.umask(oldumask)
856 856
857 857 # prune cache
858 858 if sharedcache is not None:
859 859 sharedcache.gc(keepkeys)
860 860 elif not filesrepacked:
861 861 ui.warn(_("warning: no valid repos in repofile\n"))
862 862
863 863 def log(orig, ui, repo, *pats, **opts):
864 864 if not isenabled(repo):
865 865 return orig(ui, repo, *pats, **opts)
866 866
867 867 follow = opts.get(r'follow')
868 868 revs = opts.get(r'rev')
869 869 if pats:
870 870 # Force slowpath for non-follow patterns and follows that start from
871 871 # non-working-copy-parent revs.
872 872 if not follow or revs:
873 873 # This forces the slowpath
874 874 opts[r'removed'] = True
875 875
876 876 # If this is a non-follow log without any revs specified, recommend that
877 877 # the user add -f to speed it up.
878 878 if not follow and not revs:
879 879 match = scmutil.match(repo['.'], pats, pycompat.byteskwargs(opts))
880 880 isfile = not match.anypats()
881 881 if isfile:
882 882 for file in match.files():
883 883 if not os.path.isfile(repo.wjoin(file)):
884 884 isfile = False
885 885 break
886 886
887 887 if isfile:
888 888 ui.warn(_("warning: file log can be slow on large repos - " +
889 889 "use -f to speed it up\n"))
890 890
891 891 return orig(ui, repo, *pats, **opts)
892 892
893 893 def revdatelimit(ui, revset):
894 894 """Update revset so that only changesets no older than 'prefetchdays' days
895 895 are included. The default value is set to 14 days. If 'prefetchdays' is set
896 896 to zero or a negative value, the date restriction is not applied.
897 897 """
898 898 days = ui.configint('remotefilelog', 'prefetchdays')
899 899 if days > 0:
900 900 revset = '(%s) & date(-%s)' % (revset, days)
901 901 return revset
902 902
903 903 def readytofetch(repo):
904 904 """Check that enough time has passed since the last background prefetch.
905 905 This only relates to prefetches after operations that change the working
906 906 copy parent. Default delay between background prefetches is 2 minutes.
907 907 """
908 908 timeout = repo.ui.configint('remotefilelog', 'prefetchdelay')
909 909 fname = repo.vfs.join('lastprefetch')
910 910
911 911 ready = False
912 912 with open(fname, 'a'):
913 913 # the with construct above is used to avoid race conditions
914 914 modtime = os.path.getmtime(fname)
915 915 if (time.time() - modtime) > timeout:
916 916 os.utime(fname, None)
917 917 ready = True
918 918
919 919 return ready
920 920
921 921 def wcpprefetch(ui, repo, **kwargs):
922 922 """Prefetches in background revisions specified by bgprefetchrevs revset.
923 923 Does background repack if backgroundrepack flag is set in config.
924 924 """
925 925 shallow = isenabled(repo)
926 926 bgprefetchrevs = ui.config('remotefilelog', 'bgprefetchrevs')
927 927 isready = readytofetch(repo)
928 928
929 929 if not (shallow and bgprefetchrevs and isready):
930 930 return
931 931
932 932 bgrepack = repo.ui.configbool('remotefilelog', 'backgroundrepack')
933 933 # update a revset with a date limit
934 934 bgprefetchrevs = revdatelimit(ui, bgprefetchrevs)
935 935
936 936 def anon():
937 937 if util.safehasattr(repo, 'ranprefetch') and repo.ranprefetch:
938 938 return
939 939 repo.ranprefetch = True
940 940 repo.backgroundprefetch(bgprefetchrevs, repack=bgrepack)
941 941
942 942 repo._afterlock(anon)
943 943
944 944 def pull(orig, ui, repo, *pats, **opts):
945 945 result = orig(ui, repo, *pats, **opts)
946 946
947 947 if isenabled(repo):
948 948 # prefetch if it's configured
949 949 prefetchrevset = ui.config('remotefilelog', 'pullprefetch')
950 950 bgrepack = repo.ui.configbool('remotefilelog', 'backgroundrepack')
951 951 bgprefetch = repo.ui.configbool('remotefilelog', 'backgroundprefetch')
952 952
953 953 if prefetchrevset:
954 954 ui.status(_("prefetching file contents\n"))
955 955 revs = scmutil.revrange(repo, [prefetchrevset])
956 956 base = repo['.'].rev()
957 957 if bgprefetch:
958 958 repo.backgroundprefetch(prefetchrevset, repack=bgrepack)
959 959 else:
960 960 repo.prefetch(revs, base=base)
961 961 if bgrepack:
962 962 repackmod.backgroundrepack(repo, incremental=True)
963 963 elif bgrepack:
964 964 repackmod.backgroundrepack(repo, incremental=True)
965 965
966 966 return result
967 967
968 968 def exchangepull(orig, repo, remote, *args, **kwargs):
969 969 # Hook into the callstream/getbundle to insert bundle capabilities
970 970 # during a pull.
971 971 def localgetbundle(orig, source, heads=None, common=None, bundlecaps=None,
972 972 **kwargs):
973 973 if not bundlecaps:
974 974 bundlecaps = set()
975 975 bundlecaps.add(constants.BUNDLE2_CAPABLITY)
976 976 return orig(source, heads=heads, common=common, bundlecaps=bundlecaps,
977 977 **kwargs)
978 978
979 979 if util.safehasattr(remote, '_callstream'):
980 980 remote._localrepo = repo
981 981 elif util.safehasattr(remote, 'getbundle'):
982 982 extensions.wrapfunction(remote, 'getbundle', localgetbundle)
983 983
984 984 return orig(repo, remote, *args, **kwargs)
985 985
986 986 def _fileprefetchhook(repo, revs, match):
987 987 if isenabled(repo):
988 988 allfiles = []
989 989 for rev in revs:
990 990 if rev == nodemod.wdirrev or rev is None:
991 991 continue
992 992 ctx = repo[rev]
993 993 mf = ctx.manifest()
994 994 sparsematch = repo.maybesparsematch(ctx.rev())
995 995 for path in ctx.walk(match):
996 996 if (not sparsematch or sparsematch(path)) and path in mf:
997 997 allfiles.append((path, hex(mf[path])))
998 998 repo.fileservice.prefetch(allfiles)
999 999
1000 1000 @command('debugremotefilelog', [
1001 1001 ('d', 'decompress', None, _('decompress the filelog first')),
1002 1002 ], _('hg debugremotefilelog <path>'), norepo=True)
1003 1003 def debugremotefilelog(ui, path, **opts):
1004 1004 return debugcommands.debugremotefilelog(ui, path, **opts)
1005 1005
1006 1006 @command('verifyremotefilelog', [
1007 1007 ('d', 'decompress', None, _('decompress the filelogs first')),
1008 1008 ], _('hg verifyremotefilelog <directory>'), norepo=True)
1009 1009 def verifyremotefilelog(ui, path, **opts):
1010 1010 return debugcommands.verifyremotefilelog(ui, path, **opts)
1011 1011
1012 1012 @command('debugdatapack', [
1013 1013 ('', 'long', None, _('print the long hashes')),
1014 1014 ('', 'node', '', _('dump the contents of node'), 'NODE'),
1015 1015 ], _('hg debugdatapack <paths>'), norepo=True)
1016 1016 def debugdatapack(ui, *paths, **opts):
1017 1017 return debugcommands.debugdatapack(ui, *paths, **opts)
1018 1018
1019 1019 @command('debughistorypack', [
1020 1020 ], _('hg debughistorypack <path>'), norepo=True)
1021 1021 def debughistorypack(ui, path, **opts):
1022 1022 return debugcommands.debughistorypack(ui, path)
1023 1023
1024 1024 @command('debugkeepset', [
1025 1025 ], _('hg debugkeepset'))
1026 1026 def debugkeepset(ui, repo, **opts):
1027 1027 # The command is used to measure keepset computation time
1028 1028 def keyfn(fname, fnode):
1029 1029 return fileserverclient.getcachekey(repo.name, fname, hex(fnode))
1030 1030 repackmod.keepset(repo, keyfn)
1031 1031 return
1032 1032
1033 1033 @command('debugwaitonrepack', [
1034 1034 ], _('hg debugwaitonrepack'))
1035 1035 def debugwaitonrepack(ui, repo, **opts):
1036 1036 return debugcommands.debugwaitonrepack(repo)
1037 1037
1038 1038 @command('debugwaitonprefetch', [
1039 1039 ], _('hg debugwaitonprefetch'))
1040 1040 def debugwaitonprefetch(ui, repo, **opts):
1041 1041 return debugcommands.debugwaitonprefetch(repo)
1042 1042
1043 1043 def resolveprefetchopts(ui, opts):
1044 1044 if not opts.get('rev'):
1045 1045 revset = ['.', 'draft()']
1046 1046
1047 1047 prefetchrevset = ui.config('remotefilelog', 'pullprefetch', None)
1048 1048 if prefetchrevset:
1049 1049 revset.append('(%s)' % prefetchrevset)
1050 1050 bgprefetchrevs = ui.config('remotefilelog', 'bgprefetchrevs', None)
1051 1051 if bgprefetchrevs:
1052 1052 revset.append('(%s)' % bgprefetchrevs)
1053 1053 revset = '+'.join(revset)
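# e.g. '.+draft()+(<pullprefetch revset>)+(<bgprefetchrevs revset>)' when both
# config options are set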
1054 1054
1055 1055 # update a revset with a date limit
1056 1056 revset = revdatelimit(ui, revset)
1057 1057
1058 1058 opts['rev'] = [revset]
1059 1059
1060 1060 if not opts.get('base'):
1061 1061 opts['base'] = None
1062 1062
1063 1063 return opts
1064 1064
1065 1065 @command('prefetch', [
1066 1066 ('r', 'rev', [], _('prefetch the specified revisions'), _('REV')),
1067 1067 ('', 'repack', False, _('run repack after prefetch')),
1068 1068 ('b', 'base', '', _("rev that is assumed to already be local")),
1069 1069 ] + commands.walkopts, _('hg prefetch [OPTIONS] [FILE...]'))
1070 1070 def prefetch(ui, repo, *pats, **opts):
1071 1071 """prefetch file revisions from the server
1072 1072
1073 1073 Prefetches file revisions for the specified revs and stores them in the
1074 1074 local remotefilelog cache. If no rev is specified, the default rev is
1075 1075 used, which is the union of dot, draft, pullprefetch and bgprefetchrevs.
1076 1076 File names or patterns can be used to limit which files are downloaded.
1077 1077
1078 1078 Return 0 on success.
1079 1079 """
1080 1080 opts = pycompat.byteskwargs(opts)
1081 1081 if not isenabled(repo):
1082 1082 raise error.Abort(_("repo is not shallow"))
1083 1083
1084 1084 opts = resolveprefetchopts(ui, opts)
1085 1085 revs = scmutil.revrange(repo, opts.get('rev'))
1086 1086 repo.prefetch(revs, opts.get('base'), pats, opts)
1087 1087
1088 1088 # Run repack in background
1089 1089 if opts.get('repack'):
1090 1090 repackmod.backgroundrepack(repo, incremental=True)
1091 1091
1092 1092 @command('repack', [
1093 1093 ('', 'background', None, _('run in a background process'), None),
1094 1094 ('', 'incremental', None, _('do an incremental repack'), None),
1095 1095 ('', 'packsonly', None, _('only repack packs (skip loose objects)'), None),
1096 1096 ], _('hg repack [OPTIONS]'))
1097 1097 def repack_(ui, repo, *pats, **opts):
1098 1098 if opts.get(r'background'):
1099 1099 repackmod.backgroundrepack(repo, incremental=opts.get(r'incremental'),
1100 1100 packsonly=opts.get(r'packsonly', False))
1101 1101 return
1102 1102
1103 1103 options = {'packsonly': opts.get(r'packsonly')}
1104 1104
1105 1105 try:
1106 1106 if opts.get(r'incremental'):
1107 1107 repackmod.incrementalrepack(repo, options=options)
1108 1108 else:
1109 1109 repackmod.fullrepack(repo, options=options)
1110 1110 except repackmod.RepackAlreadyRunning as ex:
1111 1111 # Don't propagate the exception if the repack is already in
1112 1112 # progress, since we want the command to exit 0.
1113 1113 repo.ui.warn('%s\n' % ex)
@@ -1,1174 +1,1175 b''
1 1 # sqlitestore.py - Storage backend that uses SQLite
2 2 #
3 3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """store repository data in SQLite (EXPERIMENTAL)
9 9
10 10 The sqlitestore extension enables the storage of repository data in SQLite.
11 11
12 12 This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
13 13 GUARANTEES. This means that repositories created with this extension may
14 14 only be usable with the exact version of this extension/Mercurial that was
15 15 used. The extension attempts to enforce this in order to prevent repository
16 16 corruption.
17 17
18 18 In addition, several features are not yet supported or have known bugs:
19 19
20 20 * Only some data is stored in SQLite. Changeset, manifest, and other repository
21 21 data is not yet stored in SQLite.
22 22 * Transactions are not robust. If the process is aborted at the right time
23 23 during transaction close/rollback, the repository could be in an inconsistent
24 24 state. This problem will diminish once all repository data is tracked by
25 25 SQLite.
26 26 * Bundle repositories do not work (the ability to use e.g.
27 27 `hg -R <bundle-file> log` to automatically overlay a bundle on top of the
28 28 existing repository).
29 29 * Various other features don't work.
30 30
31 31 This extension should work for basic clone/pull, update, and commit workflows.
32 32 Some history rewriting operations may fail due to lack of support for bundle
33 33 repositories.
34 34
35 35 To use, activate the extension and set the ``storage.new-repo-backend`` config
36 36 option to ``sqlite`` to enable new repositories to use SQLite for storage.
37 37 """
38 38
39 39 # To run the test suite with repos using SQLite by default, execute the
40 40 # following:
41 41 #
42 42 # HGREPOFEATURES="sqlitestore" run-tests.py \
43 43 # --extra-config-opt extensions.sqlitestore= \
44 44 # --extra-config-opt storage.new-repo-backend=sqlite
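#
# A minimal hgrc enabling this backend for newly created repositories might
# look like this (assuming the extension loads under the name "sqlitestore"):
#
#   [extensions]
#   sqlitestore =
#
#   [storage]
#   new-repo-backend = sqlite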
45 45
46 46 from __future__ import absolute_import
47 47
48 48 import hashlib
49 49 import sqlite3
50 50 import struct
51 51 import threading
52 52 import zlib
53 53
54 54 from mercurial.i18n import _
55 55 from mercurial.node import (
56 56 nullid,
57 57 nullrev,
58 58 short,
59 59 )
60 60 from mercurial.thirdparty import (
61 61 attr,
62 62 )
63 63 from mercurial import (
64 64 ancestor,
65 65 dagop,
66 66 encoding,
67 67 error,
68 68 extensions,
69 69 localrepo,
70 70 mdiff,
71 71 pycompat,
72 72 registrar,
73 73 repository,
74 74 util,
75 75 verify,
76 76 )
77 77 from mercurial.utils import (
78 78 interfaceutil,
79 79 storageutil,
80 80 )
81 81
82 82 try:
83 83 from mercurial import zstd
84 84 zstd.__version__
85 85 except ImportError:
86 86 zstd = None
87 87
88 88 configtable = {}
89 89 configitem = registrar.configitem(configtable)
90 90
91 91 # experimental config: storage.sqlite.compression
92 92 configitem('storage', 'sqlite.compression',
93 default='zstd' if zstd else 'zlib')
93 default='zstd' if zstd else 'zlib',
94 experimental=True)
94 95
95 96 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
96 97 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
97 98 # be specifying the version(s) of Mercurial they are tested with, or
98 99 # leave the attribute unspecified.
99 100 testedwith = 'ships-with-hg-core'
100 101
101 102 REQUIREMENT = b'exp-sqlite-001'
102 103 REQUIREMENT_ZSTD = b'exp-sqlite-comp-001=zstd'
103 104 REQUIREMENT_ZLIB = b'exp-sqlite-comp-001=zlib'
104 105 REQUIREMENT_NONE = b'exp-sqlite-comp-001=none'
105 106 REQUIREMENT_SHALLOW_FILES = b'exp-sqlite-shallow-files'
106 107
107 108 CURRENT_SCHEMA_VERSION = 1
108 109
109 110 COMPRESSION_NONE = 1
110 111 COMPRESSION_ZSTD = 2
111 112 COMPRESSION_ZLIB = 3
112 113
113 114 FLAG_CENSORED = 1
114 115 FLAG_MISSING_P1 = 2
115 116 FLAG_MISSING_P2 = 4
116 117
117 118 CREATE_SCHEMA = [
118 119 # Deltas are stored as content-indexed blobs.
119 120 # compression column holds COMPRESSION_* constant for how the
120 121 # delta is encoded.
121 122
122 123 r'CREATE TABLE delta ('
123 124 r' id INTEGER PRIMARY KEY, '
124 125 r' compression INTEGER NOT NULL, '
125 126 r' hash BLOB UNIQUE ON CONFLICT ABORT, '
126 127 r' delta BLOB NOT NULL '
127 128 r')',
128 129
129 130 # Tracked paths are denormalized to integers to avoid redundant
130 131 # storage of the path name.
131 132 r'CREATE TABLE filepath ('
132 133 r' id INTEGER PRIMARY KEY, '
133 134 r' path BLOB NOT NULL '
134 135 r')',
135 136
136 137 r'CREATE UNIQUE INDEX filepath_path '
137 138 r' ON filepath (path)',
138 139
139 140 # We have a single table for all file revision data.
140 141 # Each file revision is uniquely described by a (path, rev) and
141 142 # (path, node).
142 143 #
143 144 # Revision data is stored as a pointer to the delta producing this
144 145 # revision and the file revision whose delta should be applied before
145 146 # that one. One can reconstruct the delta chain by recursively following
146 147 # the delta base revision pointers until one encounters NULL.
147 148 #
148 149 # flags column holds bitwise integer flags controlling storage options.
149 150 # These flags are defined by the FLAG_* constants.
150 151 r'CREATE TABLE fileindex ('
151 152 r' id INTEGER PRIMARY KEY, '
152 153 r' pathid INTEGER REFERENCES filepath(id), '
153 154 r' revnum INTEGER NOT NULL, '
154 155 r' p1rev INTEGER NOT NULL, '
155 156 r' p2rev INTEGER NOT NULL, '
156 157 r' linkrev INTEGER NOT NULL, '
157 158 r' flags INTEGER NOT NULL, '
158 159 r' deltaid INTEGER REFERENCES delta(id), '
159 160 r' deltabaseid INTEGER REFERENCES fileindex(id), '
160 161 r' node BLOB NOT NULL '
161 162 r')',
162 163
163 164 r'CREATE UNIQUE INDEX fileindex_pathrevnum '
164 165 r' ON fileindex (pathid, revnum)',
165 166
166 167 r'CREATE UNIQUE INDEX fileindex_pathnode '
167 168 r' ON fileindex (pathid, node)',
168 169
169 170 # Provide a view over all file data for convenience.
170 171 r'CREATE VIEW filedata AS '
171 172 r'SELECT '
172 173 r' fileindex.id AS id, '
173 174 r' filepath.id AS pathid, '
174 175 r' filepath.path AS path, '
175 176 r' fileindex.revnum AS revnum, '
176 177 r' fileindex.node AS node, '
177 178 r' fileindex.p1rev AS p1rev, '
178 179 r' fileindex.p2rev AS p2rev, '
179 180 r' fileindex.linkrev AS linkrev, '
180 181 r' fileindex.flags AS flags, '
181 182 r' fileindex.deltaid AS deltaid, '
182 183 r' fileindex.deltabaseid AS deltabaseid '
183 184 r'FROM filepath, fileindex '
184 185 r'WHERE fileindex.pathid=filepath.id',
185 186
186 187 r'PRAGMA user_version=%d' % CURRENT_SCHEMA_VERSION,
187 188 ]
188 189
189 190 def resolvedeltachain(db, pathid, node, revisioncache,
190 191 stoprids, zstddctx=None):
191 192 """Resolve a delta chain for a file node."""
192 193
193 194 # TODO the "not in ({stops})" here is possibly slowing down the query
194 195 # because it needs to perform the lookup on every recursive invocation.
195 196 # This could possibly be faster if we created a temporary query with
196 197 # baseid "poisoned" to null and limited the recursive filter to
197 198 # "is not null".
198 199 res = db.execute(
199 200 r'WITH RECURSIVE '
200 201 r' deltachain(deltaid, baseid) AS ('
201 202 r' SELECT deltaid, deltabaseid FROM fileindex '
202 203 r' WHERE pathid=? AND node=? '
203 204 r' UNION ALL '
204 205 r' SELECT fileindex.deltaid, deltabaseid '
205 206 r' FROM fileindex, deltachain '
206 207 r' WHERE '
207 208 r' fileindex.id=deltachain.baseid '
208 209 r' AND deltachain.baseid IS NOT NULL '
209 210 r' AND fileindex.id NOT IN ({stops}) '
210 211 r' ) '
211 212 r'SELECT deltachain.baseid, compression, delta '
212 213 r'FROM deltachain, delta '
213 214 r'WHERE delta.id=deltachain.deltaid'.format(
214 215 stops=r','.join([r'?'] * len(stoprids))),
215 216 tuple([pathid, node] + list(stoprids.keys())))
216 217
217 218 deltas = []
218 219 lastdeltabaseid = None
219 220
220 221 for deltabaseid, compression, delta in res:
221 222 lastdeltabaseid = deltabaseid
222 223
223 224 if compression == COMPRESSION_ZSTD:
224 225 delta = zstddctx.decompress(delta)
225 226 elif compression == COMPRESSION_NONE:
226 227 delta = delta
227 228 elif compression == COMPRESSION_ZLIB:
228 229 delta = zlib.decompress(delta)
229 230 else:
230 231 raise SQLiteStoreError('unhandled compression type: %d' %
231 232 compression)
232 233
233 234 deltas.append(delta)
234 235
235 236 if lastdeltabaseid in stoprids:
236 237 basetext = revisioncache[stoprids[lastdeltabaseid]]
237 238 else:
238 239 basetext = deltas.pop()
239 240
240 241 deltas.reverse()
241 242 fulltext = mdiff.patches(basetext, deltas)
242 243
243 244 # SQLite returns buffer instances for blob columns on Python 2. This
244 245 # type can propagate through the delta application layer. Because
245 246 # downstream callers assume revisions are bytes, cast as needed.
246 247 if not isinstance(fulltext, bytes):
247 248 fulltext = bytes(fulltext)
248 249
249 250 return fulltext
250 251
251 252 def insertdelta(db, compression, hash, delta):
252 253 try:
253 254 return db.execute(
254 255 r'INSERT INTO delta (compression, hash, delta) '
255 256 r'VALUES (?, ?, ?)',
256 257 (compression, hash, delta)).lastrowid
257 258 except sqlite3.IntegrityError:
258 259 return db.execute(
259 260 r'SELECT id FROM delta WHERE hash=?',
260 261 (hash,)).fetchone()[0]
261 262
262 263 class SQLiteStoreError(error.StorageError):
263 264 pass
264 265
265 266 @attr.s
266 267 class revisionentry(object):
267 268 rid = attr.ib()
268 269 rev = attr.ib()
269 270 node = attr.ib()
270 271 p1rev = attr.ib()
271 272 p2rev = attr.ib()
272 273 p1node = attr.ib()
273 274 p2node = attr.ib()
274 275 linkrev = attr.ib()
275 276 flags = attr.ib()
276 277
277 278 @interfaceutil.implementer(repository.irevisiondelta)
278 279 @attr.s(slots=True)
279 280 class sqliterevisiondelta(object):
280 281 node = attr.ib()
281 282 p1node = attr.ib()
282 283 p2node = attr.ib()
283 284 basenode = attr.ib()
284 285 flags = attr.ib()
285 286 baserevisionsize = attr.ib()
286 287 revision = attr.ib()
287 288 delta = attr.ib()
288 289 linknode = attr.ib(default=None)
289 290
290 291 @interfaceutil.implementer(repository.iverifyproblem)
291 292 @attr.s(frozen=True)
292 293 class sqliteproblem(object):
293 294 warning = attr.ib(default=None)
294 295 error = attr.ib(default=None)
295 296 node = attr.ib(default=None)
296 297
297 298 @interfaceutil.implementer(repository.ifilestorage)
298 299 class sqlitefilestore(object):
299 300 """Implements storage for an individual tracked path."""
300 301
301 302 def __init__(self, db, path, compression):
302 303 self._db = db
303 304 self._path = path
304 305
305 306 self._pathid = None
306 307
307 308 # revnum -> node
308 309 self._revtonode = {}
309 310 # node -> revnum
310 311 self._nodetorev = {}
311 312 # node -> data structure
312 313 self._revisions = {}
313 314
314 315 self._revisioncache = util.lrucachedict(10)
315 316
316 317 self._compengine = compression
317 318
318 319 if compression == 'zstd':
319 320 self._cctx = zstd.ZstdCompressor(level=3)
320 321 self._dctx = zstd.ZstdDecompressor()
321 322 else:
322 323 self._cctx = None
323 324 self._dctx = None
324 325
325 326 self._refreshindex()
326 327
327 328 def _refreshindex(self):
328 329 self._revtonode = {}
329 330 self._nodetorev = {}
330 331 self._revisions = {}
331 332
332 333 res = list(self._db.execute(
333 334 r'SELECT id FROM filepath WHERE path=?', (self._path,)))
334 335
335 336 if not res:
336 337 self._pathid = None
337 338 return
338 339
339 340 self._pathid = res[0][0]
340 341
341 342 res = self._db.execute(
342 343 r'SELECT id, revnum, node, p1rev, p2rev, linkrev, flags '
343 344 r'FROM fileindex '
344 345 r'WHERE pathid=? '
345 346 r'ORDER BY revnum ASC',
346 347 (self._pathid,))
347 348
348 349 for i, row in enumerate(res):
349 350 rid, rev, node, p1rev, p2rev, linkrev, flags = row
350 351
351 352 if i != rev:
352 353 raise SQLiteStoreError(_('sqlite database has inconsistent '
353 354 'revision numbers'))
354 355
355 356 if p1rev == nullrev:
356 357 p1node = nullid
357 358 else:
358 359 p1node = self._revtonode[p1rev]
359 360
360 361 if p2rev == nullrev:
361 362 p2node = nullid
362 363 else:
363 364 p2node = self._revtonode[p2rev]
364 365
365 366 entry = revisionentry(
366 367 rid=rid,
367 368 rev=rev,
368 369 node=node,
369 370 p1rev=p1rev,
370 371 p2rev=p2rev,
371 372 p1node=p1node,
372 373 p2node=p2node,
373 374 linkrev=linkrev,
374 375 flags=flags)
375 376
376 377 self._revtonode[rev] = node
377 378 self._nodetorev[node] = rev
378 379 self._revisions[node] = entry
379 380
380 381 # Start of ifileindex interface.
381 382
382 383 def __len__(self):
383 384 return len(self._revisions)
384 385
385 386 def __iter__(self):
386 387 return iter(pycompat.xrange(len(self._revisions)))
387 388
388 389 def hasnode(self, node):
389 390 if node == nullid:
390 391 return False
391 392
392 393 return node in self._nodetorev
393 394
394 395 def revs(self, start=0, stop=None):
395 396 return storageutil.iterrevs(len(self._revisions), start=start,
396 397 stop=stop)
397 398
398 399 def parents(self, node):
399 400 if node == nullid:
400 401 return nullid, nullid
401 402
402 403 if node not in self._revisions:
403 404 raise error.LookupError(node, self._path, _('no node'))
404 405
405 406 entry = self._revisions[node]
406 407 return entry.p1node, entry.p2node
407 408
408 409 def parentrevs(self, rev):
409 410 if rev == nullrev:
410 411 return nullrev, nullrev
411 412
412 413 if rev not in self._revtonode:
413 414 raise IndexError(rev)
414 415
415 416 entry = self._revisions[self._revtonode[rev]]
416 417 return entry.p1rev, entry.p2rev
417 418
418 419 def rev(self, node):
419 420 if node == nullid:
420 421 return nullrev
421 422
422 423 if node not in self._nodetorev:
423 424 raise error.LookupError(node, self._path, _('no node'))
424 425
425 426 return self._nodetorev[node]
426 427
427 428 def node(self, rev):
428 429 if rev == nullrev:
429 430 return nullid
430 431
431 432 if rev not in self._revtonode:
432 433 raise IndexError(rev)
433 434
434 435 return self._revtonode[rev]
435 436
436 437 def lookup(self, node):
437 438 return storageutil.fileidlookup(self, node, self._path)
438 439
439 440 def linkrev(self, rev):
440 441 if rev == nullrev:
441 442 return nullrev
442 443
443 444 if rev not in self._revtonode:
444 445 raise IndexError(rev)
445 446
446 447 entry = self._revisions[self._revtonode[rev]]
447 448 return entry.linkrev
448 449
449 450 def iscensored(self, rev):
450 451 if rev == nullrev:
451 452 return False
452 453
453 454 if rev not in self._revtonode:
454 455 raise IndexError(rev)
455 456
456 457 return self._revisions[self._revtonode[rev]].flags & FLAG_CENSORED
457 458
458 459 def commonancestorsheads(self, node1, node2):
459 460 rev1 = self.rev(node1)
460 461 rev2 = self.rev(node2)
461 462
462 463 ancestors = ancestor.commonancestorsheads(self.parentrevs, rev1, rev2)
463 464 return pycompat.maplist(self.node, ancestors)
464 465
465 466 def descendants(self, revs):
466 467 # TODO we could implement this using a recursive SQL query, which
467 468 # might be faster.
468 469 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
469 470
470 471 def heads(self, start=None, stop=None):
471 472 if start is None and stop is None:
472 473 if not len(self):
473 474 return [nullid]
474 475
475 476 startrev = self.rev(start) if start is not None else nullrev
476 477 stoprevs = {self.rev(n) for n in stop or []}
477 478
478 479 revs = dagop.headrevssubset(self.revs, self.parentrevs,
479 480 startrev=startrev, stoprevs=stoprevs)
480 481
481 482 return [self.node(rev) for rev in revs]
482 483
483 484 def children(self, node):
484 485 rev = self.rev(node)
485 486
486 487 res = self._db.execute(
487 488 r'SELECT'
488 489 r' node '
489 490 r' FROM filedata '
490 491 r' WHERE path=? AND (p1rev=? OR p2rev=?) '
491 492 r' ORDER BY revnum ASC',
492 493 (self._path, rev, rev))
493 494
494 495 return [row[0] for row in res]
495 496
496 497 # End of ifileindex interface.
497 498
498 499 # Start of ifiledata interface.
499 500
500 501 def size(self, rev):
501 502 if rev == nullrev:
502 503 return 0
503 504
504 505 if rev not in self._revtonode:
505 506 raise IndexError(rev)
506 507
507 508 node = self._revtonode[rev]
508 509
509 510 if self.renamed(node):
510 511 return len(self.read(node))
511 512
512 513 return len(self.revision(node))
513 514
514 515 def revision(self, node, raw=False, _verifyhash=True):
515 516 if node in (nullid, nullrev):
516 517 return b''
517 518
518 519 if isinstance(node, int):
519 520 node = self.node(node)
520 521
521 522 if node not in self._nodetorev:
522 523 raise error.LookupError(node, self._path, _('no node'))
523 524
524 525 if node in self._revisioncache:
525 526 return self._revisioncache[node]
526 527
527 528 # Because we have a fulltext revision cache, we are able to
528 529 # short-circuit delta chain traversal and decompression as soon as
529 530 # we encounter a revision in the cache.
530 531
531 532 stoprids = {self._revisions[n].rid: n
532 533 for n in self._revisioncache}
533 534
534 535 if not stoprids:
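# -1 can never match a real fileindex id, so this sentinel simply lets the
# recursive query in resolvedeltachain() walk to the start of the delta chain.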
535 536 stoprids[-1] = None
536 537
537 538 fulltext = resolvedeltachain(self._db, self._pathid, node,
538 539 self._revisioncache, stoprids,
539 540 zstddctx=self._dctx)
540 541
541 542 # Don't verify hashes if parent nodes were rewritten, as the hash
542 543 # wouldn't verify.
543 544 if self._revisions[node].flags & (FLAG_MISSING_P1 | FLAG_MISSING_P2):
544 545 _verifyhash = False
545 546
546 547 if _verifyhash:
547 548 self._checkhash(fulltext, node)
548 549 self._revisioncache[node] = fulltext
549 550
550 551 return fulltext
551 552
552 553 def rawdata(self, *args, **kwargs):
553 554 return self.revision(*args, **kwargs)
554 555
555 556 def read(self, node):
556 557 return storageutil.filtermetadata(self.revision(node))
557 558
558 559 def renamed(self, node):
559 560 return storageutil.filerevisioncopied(self, node)
560 561
561 562 def cmp(self, node, fulltext):
562 563 return not storageutil.filedataequivalent(self, node, fulltext)
563 564
564 565 def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
565 566 assumehaveparentrevisions=False,
566 567 deltamode=repository.CG_DELTAMODE_STD):
567 568 if nodesorder not in ('nodes', 'storage', 'linear', None):
568 569 raise error.ProgrammingError('unhandled value for nodesorder: %s' %
569 570 nodesorder)
570 571
571 572 nodes = [n for n in nodes if n != nullid]
572 573
573 574 if not nodes:
574 575 return
575 576
576 577 # TODO perform in a single query.
577 578 res = self._db.execute(
578 579 r'SELECT revnum, deltaid FROM fileindex '
579 580 r'WHERE pathid=? '
580 581 r' AND node in (%s)' % (r','.join([r'?'] * len(nodes))),
581 582 tuple([self._pathid] + nodes))
582 583
583 584 deltabases = {}
584 585
585 586 for rev, deltaid in res:
586 587 res = self._db.execute(
587 588 r'SELECT revnum from fileindex WHERE pathid=? AND deltaid=?',
588 589 (self._pathid, deltaid))
589 590 deltabases[rev] = res.fetchone()[0]
590 591
591 592 # TODO define revdifffn so we can use delta from storage.
592 593 for delta in storageutil.emitrevisions(
593 594 self, nodes, nodesorder, sqliterevisiondelta,
594 595 deltaparentfn=deltabases.__getitem__,
595 596 revisiondata=revisiondata,
596 597 assumehaveparentrevisions=assumehaveparentrevisions,
597 598 deltamode=deltamode):
598 599
599 600 yield delta
600 601
601 602 # End of ifiledata interface.
602 603
603 604 # Start of ifilemutation interface.
604 605
605 606 def add(self, filedata, meta, transaction, linkrev, p1, p2):
606 607 if meta or filedata.startswith(b'\x01\n'):
607 608 filedata = storageutil.packmeta(meta, filedata)
608 609
609 610 return self.addrevision(filedata, transaction, linkrev, p1, p2)
610 611
611 612 def addrevision(self, revisiondata, transaction, linkrev, p1, p2, node=None,
612 613 flags=0, cachedelta=None):
613 614 if flags:
614 615 raise SQLiteStoreError(_('flags not supported on revisions'))
615 616
616 617 validatehash = node is not None
617 618 node = node or storageutil.hashrevisionsha1(revisiondata, p1, p2)
618 619
619 620 if validatehash:
620 621 self._checkhash(revisiondata, node, p1, p2)
621 622
622 623 if node in self._nodetorev:
623 624 return node
624 625
625 626 node = self._addrawrevision(node, revisiondata, transaction, linkrev,
626 627 p1, p2)
627 628
628 629 self._revisioncache[node] = revisiondata
629 630 return node
630 631
631 632 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None,
632 633 maybemissingparents=False):
633 634 nodes = []
634 635
635 636 for node, p1, p2, linknode, deltabase, delta, wireflags in deltas:
636 637 storeflags = 0
637 638
638 639 if wireflags & repository.REVISION_FLAG_CENSORED:
639 640 storeflags |= FLAG_CENSORED
640 641
641 642 if wireflags & ~repository.REVISION_FLAG_CENSORED:
642 643 raise SQLiteStoreError('unhandled revision flag')
643 644
644 645 if maybemissingparents:
645 646 if p1 != nullid and not self.hasnode(p1):
646 647 p1 = nullid
647 648 storeflags |= FLAG_MISSING_P1
648 649
649 650 if p2 != nullid and not self.hasnode(p2):
650 651 p2 = nullid
651 652 storeflags |= FLAG_MISSING_P2
652 653
653 654 baserev = self.rev(deltabase)
654 655
655 656 # If base is censored, delta must be full replacement in a single
656 657 # patch operation.
657 658 if baserev != nullrev and self.iscensored(baserev):
658 659 hlen = struct.calcsize('>lll')
659 660 oldlen = len(self.revision(deltabase, raw=True,
660 661 _verifyhash=False))
661 662 newlen = len(delta) - hlen
662 663
663 664 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
664 665 raise error.CensoredBaseError(self._path,
665 666 deltabase)
666 667
667 668 if (not (storeflags & FLAG_CENSORED)
668 669 and storageutil.deltaiscensored(
669 670 delta, baserev, lambda x: len(self.revision(x, raw=True)))):
670 671 storeflags |= FLAG_CENSORED
671 672
672 673 linkrev = linkmapper(linknode)
673 674
674 675 nodes.append(node)
675 676
676 677 if node in self._revisions:
677 678 # Possibly reset parents to make them proper.
678 679 entry = self._revisions[node]
679 680
680 681 if entry.flags & FLAG_MISSING_P1 and p1 != nullid:
681 682 entry.p1node = p1
682 683 entry.p1rev = self._nodetorev[p1]
683 684 entry.flags &= ~FLAG_MISSING_P1
684 685
685 686 self._db.execute(
686 687 r'UPDATE fileindex SET p1rev=?, flags=? '
687 688 r'WHERE id=?',
688 689 (self._nodetorev[p1], entry.flags, entry.rid))
689 690
690 691 if entry.flags & FLAG_MISSING_P2 and p2 != nullid:
691 692 entry.p2node = p2
692 693 entry.p2rev = self._nodetorev[p2]
693 694 entry.flags &= ~FLAG_MISSING_P2
694 695
695 696 self._db.execute(
696 697 r'UPDATE fileindex SET p2rev=?, flags=? '
697 698 r'WHERE id=?',
698 699 (self._nodetorev[p2], entry.flags, entry.rid))
699 700
700 701 continue
701 702
702 703 if deltabase == nullid:
703 704 text = mdiff.patch(b'', delta)
704 705 storedelta = None
705 706 else:
706 707 text = None
707 708 storedelta = (deltabase, delta)
708 709
709 710 self._addrawrevision(node, text, transaction, linkrev, p1, p2,
710 711 storedelta=storedelta, flags=storeflags)
711 712
712 713 if addrevisioncb:
713 714 addrevisioncb(self, node)
714 715
715 716 return nodes
716 717
717 718 def censorrevision(self, tr, censornode, tombstone=b''):
718 719 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
719 720
720 721 # This restriction is cargo culted from revlogs and makes no sense for
721 722 # SQLite, since columns can be resized at will.
722 723 if len(tombstone) > len(self.revision(censornode, raw=True)):
723 724 raise error.Abort(_('censor tombstone must be no longer than '
724 725 'censored data'))
725 726
726 727 # We need to replace the censored revision's data with the tombstone.
727 728 # But replacing that data will have implications for delta chains that
728 729 # reference it.
729 730 #
730 731 # While "better," more complex strategies are possible, we do something
731 732 # simple: we find delta chain children of the censored revision and we
732 733 # replace those incremental deltas with fulltexts of their corresponding
733 734 # revision. Then we delete the now-unreferenced delta and original
734 735 # revision and insert a replacement.
735 736
736 737 # Find the delta to be censored.
737 738 censoreddeltaid = self._db.execute(
738 739 r'SELECT deltaid FROM fileindex WHERE id=?',
739 740 (self._revisions[censornode].rid,)).fetchone()[0]
740 741
741 742 # Find all its delta chain children.
742 743 # TODO once we support storing deltas for !files, we'll need to look
743 744 # for those delta chains too.
744 745 rows = list(self._db.execute(
745 746 r'SELECT id, pathid, node FROM fileindex '
746 747 r'WHERE deltabaseid=? OR deltaid=?',
747 748 (censoreddeltaid, censoreddeltaid)))
748 749
749 750 for row in rows:
750 751 rid, pathid, node = row
751 752
752 753 fulltext = resolvedeltachain(self._db, pathid, node, {}, {-1: None},
753 754 zstddctx=self._dctx)
754 755
755 756 deltahash = hashlib.sha1(fulltext).digest()
756 757
757 758 if self._compengine == 'zstd':
758 759 deltablob = self._cctx.compress(fulltext)
759 760 compression = COMPRESSION_ZSTD
760 761 elif self._compengine == 'zlib':
761 762 deltablob = zlib.compress(fulltext)
762 763 compression = COMPRESSION_ZLIB
763 764 elif self._compengine == 'none':
764 765 deltablob = fulltext
765 766 compression = COMPRESSION_NONE
766 767 else:
767 768 raise error.ProgrammingError('unhandled compression engine: %s'
768 769 % self._compengine)
769 770
770 771 if len(deltablob) >= len(fulltext):
771 772 deltablob = fulltext
772 773 compression = COMPRESSION_NONE
773 774
774 775 deltaid = insertdelta(self._db, compression, deltahash, deltablob)
775 776
776 777 self._db.execute(
777 778 r'UPDATE fileindex SET deltaid=?, deltabaseid=NULL '
778 779 r'WHERE id=?', (deltaid, rid))
779 780
780 781 # Now create the tombstone delta and replace the delta on the censored
781 782 # node.
782 783 deltahash = hashlib.sha1(tombstone).digest()
783 784 tombstonedeltaid = insertdelta(self._db, COMPRESSION_NONE,
784 785 deltahash, tombstone)
785 786
786 787 flags = self._revisions[censornode].flags
787 788 flags |= FLAG_CENSORED
788 789
789 790 self._db.execute(
790 791 r'UPDATE fileindex SET flags=?, deltaid=?, deltabaseid=NULL '
791 792 r'WHERE pathid=? AND node=?',
792 793 (flags, tombstonedeltaid, self._pathid, censornode))
793 794
794 795 self._db.execute(
795 796 r'DELETE FROM delta WHERE id=?', (censoreddeltaid,))
796 797
797 798 self._refreshindex()
798 799 self._revisioncache.clear()
799 800
800 801 def getstrippoint(self, minlink):
801 802 return storageutil.resolvestripinfo(minlink, len(self) - 1,
802 803 [self.rev(n) for n in self.heads()],
803 804 self.linkrev,
804 805 self.parentrevs)
805 806
806 807 def strip(self, minlink, transaction):
807 808 if not len(self):
808 809 return
809 810
810 811 rev, _ignored = self.getstrippoint(minlink)
811 812
812 813 if rev == len(self):
813 814 return
814 815
815 816 for rev in self.revs(rev):
816 817 self._db.execute(
817 818 r'DELETE FROM fileindex WHERE pathid=? AND node=?',
818 819 (self._pathid, self.node(rev)))
819 820
820 821 # TODO how should we garbage collect data in delta table?
821 822
822 823 self._refreshindex()
823 824
824 825 # End of ifilemutation interface.
825 826
826 827 # Start of ifilestorage interface.
827 828
828 829 def files(self):
829 830 return []
830 831
831 832 def storageinfo(self, exclusivefiles=False, sharedfiles=False,
832 833 revisionscount=False, trackedsize=False,
833 834 storedsize=False):
834 835 d = {}
835 836
836 837 if exclusivefiles:
837 838 d['exclusivefiles'] = []
838 839
839 840 if sharedfiles:
840 841 # TODO list sqlite file(s) here.
841 842 d['sharedfiles'] = []
842 843
843 844 if revisionscount:
844 845 d['revisionscount'] = len(self)
845 846
846 847 if trackedsize:
847 848 d['trackedsize'] = sum(len(self.revision(node))
848 849 for node in self._nodetorev)
849 850
850 851 if storedsize:
851 852 # TODO implement this?
852 853 d['storedsize'] = None
853 854
854 855 return d
855 856
856 857 def verifyintegrity(self, state):
857 858 state['skipread'] = set()
858 859
859 860 for rev in self:
860 861 node = self.node(rev)
861 862
862 863 try:
863 864 self.revision(node)
864 865 except Exception as e:
865 866 yield sqliteproblem(
866 867 error=_('unpacking %s: %s') % (short(node), e),
867 868 node=node)
868 869
869 870 state['skipread'].add(node)
870 871
871 872 # End of ifilestorage interface.
872 873
873 874 def _checkhash(self, fulltext, node, p1=None, p2=None):
874 875 if p1 is None and p2 is None:
875 876 p1, p2 = self.parents(node)
876 877
877 878 if node == storageutil.hashrevisionsha1(fulltext, p1, p2):
878 879 return
879 880
880 881 try:
881 882 del self._revisioncache[node]
882 883 except KeyError:
883 884 pass
884 885
885 886 if storageutil.iscensoredtext(fulltext):
886 887 raise error.CensoredNodeError(self._path, node, fulltext)
887 888
888 889 raise SQLiteStoreError(_('integrity check failed on %s') %
889 890 self._path)
890 891
891 892 def _addrawrevision(self, node, revisiondata, transaction, linkrev,
892 893 p1, p2, storedelta=None, flags=0):
893 894 if self._pathid is None:
894 895 res = self._db.execute(
895 896 r'INSERT INTO filepath (path) VALUES (?)', (self._path,))
896 897 self._pathid = res.lastrowid
897 898
898 899 # For simplicity, always store a delta against p1.
899 900 # TODO we need a lot more logic here to make behavior reasonable.
900 901
901 902 if storedelta:
902 903 deltabase, delta = storedelta
903 904
904 905 if isinstance(deltabase, int):
905 906 deltabase = self.node(deltabase)
906 907
907 908 else:
908 909 assert revisiondata is not None
909 910 deltabase = p1
910 911
911 912 if deltabase == nullid:
912 913 delta = revisiondata
913 914 else:
914 915 delta = mdiff.textdiff(self.revision(self.rev(deltabase)),
915 916 revisiondata)
916 917
917 918 # File index stores a pointer to its delta and the parent delta.
918 919 # The parent delta is stored via a pointer to the fileindex PK.
919 920 if deltabase == nullid:
920 921 baseid = None
921 922 else:
922 923 baseid = self._revisions[deltabase].rid
923 924
924 925 # Deltas are stored with a hash of their content. This allows
925 926 # us to de-duplicate. The table is configured to ignore conflicts
926 927 # and it is faster to just insert and silently noop than to look
927 928 # first.
928 929 deltahash = hashlib.sha1(delta).digest()
929 930
930 931 if self._compengine == 'zstd':
931 932 deltablob = self._cctx.compress(delta)
932 933 compression = COMPRESSION_ZSTD
933 934 elif self._compengine == 'zlib':
934 935 deltablob = zlib.compress(delta)
935 936 compression = COMPRESSION_ZLIB
936 937 elif self._compengine == 'none':
937 938 deltablob = delta
938 939 compression = COMPRESSION_NONE
939 940 else:
940 941 raise error.ProgrammingError('unhandled compression engine: %s' %
941 942 self._compengine)
942 943
943 944 # Don't store compressed data if it isn't practical.
944 945 if len(deltablob) >= len(delta):
945 946 deltablob = delta
946 947 compression = COMPRESSION_NONE
947 948
948 949 deltaid = insertdelta(self._db, compression, deltahash, deltablob)
949 950
950 951 rev = len(self)
951 952
952 953 if p1 == nullid:
953 954 p1rev = nullrev
954 955 else:
955 956 p1rev = self._nodetorev[p1]
956 957
957 958 if p2 == nullid:
958 959 p2rev = nullrev
959 960 else:
960 961 p2rev = self._nodetorev[p2]
961 962
962 963 rid = self._db.execute(
963 964 r'INSERT INTO fileindex ('
964 965 r' pathid, revnum, node, p1rev, p2rev, linkrev, flags, '
965 966 r' deltaid, deltabaseid) '
966 967 r' VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)',
967 968 (self._pathid, rev, node, p1rev, p2rev, linkrev, flags,
968 969 deltaid, baseid)
969 970 ).lastrowid
970 971
971 972 entry = revisionentry(
972 973 rid=rid,
973 974 rev=rev,
974 975 node=node,
975 976 p1rev=p1rev,
976 977 p2rev=p2rev,
977 978 p1node=p1,
978 979 p2node=p2,
979 980 linkrev=linkrev,
980 981 flags=flags)
981 982
982 983 self._nodetorev[node] = rev
983 984 self._revtonode[rev] = node
984 985 self._revisions[node] = entry
985 986
986 987 return node
987 988
988 989 class sqliterepository(localrepo.localrepository):
989 990 def cancopy(self):
990 991 return False
991 992
992 993 def transaction(self, *args, **kwargs):
993 994 current = self.currenttransaction()
994 995
995 996 tr = super(sqliterepository, self).transaction(*args, **kwargs)
996 997
997 998 if current:
998 999 return tr
999 1000
1000 1001 self._dbconn.execute(r'BEGIN TRANSACTION')
1001 1002
1002 1003 def committransaction(_):
1003 1004 self._dbconn.commit()
1004 1005
1005 1006 tr.addfinalize('sqlitestore', committransaction)
1006 1007
1007 1008 return tr
1008 1009
1009 1010 @property
1010 1011 def _dbconn(self):
1011 1012 # SQLite connections can only be used on the thread that created
1012 1013 # them. In most cases, this "just works." However, hgweb uses
1013 1014 # multiple threads.
1014 1015 tid = threading.current_thread().ident
1015 1016
1016 1017 if self._db:
1017 1018 if self._db[0] == tid:
1018 1019 return self._db[1]
1019 1020
1020 1021 db = makedb(self.svfs.join('db.sqlite'))
1021 1022 self._db = (tid, db)
1022 1023
1023 1024 return db
1024 1025
1025 1026 def makedb(path):
1026 1027 """Construct a database handle for a database at path."""
1027 1028
1028 1029 db = sqlite3.connect(encoding.strfromlocal(path))
1029 1030 db.text_factory = bytes
1030 1031
1031 1032 res = db.execute(r'PRAGMA user_version').fetchone()[0]
1032 1033
1033 1034 # New database.
1034 1035 if res == 0:
1035 1036 for statement in CREATE_SCHEMA:
1036 1037 db.execute(statement)
1037 1038
1038 1039 db.commit()
1039 1040
1040 1041 elif res == CURRENT_SCHEMA_VERSION:
1041 1042 pass
1042 1043
1043 1044 else:
1044 1045 raise error.Abort(_('sqlite database has unrecognized version'))
1045 1046
1046 1047 db.execute(r'PRAGMA journal_mode=WAL')
1047 1048
1048 1049 return db
1049 1050
1050 1051 def featuresetup(ui, supported):
1051 1052 supported.add(REQUIREMENT)
1052 1053
1053 1054 if zstd:
1054 1055 supported.add(REQUIREMENT_ZSTD)
1055 1056
1056 1057 supported.add(REQUIREMENT_ZLIB)
1057 1058 supported.add(REQUIREMENT_NONE)
1058 1059 supported.add(REQUIREMENT_SHALLOW_FILES)
1059 1060 supported.add(repository.NARROW_REQUIREMENT)
1060 1061
1061 1062 def newreporequirements(orig, ui, createopts):
1062 1063 if createopts['backend'] != 'sqlite':
1063 1064 return orig(ui, createopts)
1064 1065
1065 1066 # This restriction can be lifted once we have more confidence.
1066 1067 if 'sharedrepo' in createopts:
1067 1068 raise error.Abort(_('shared repositories not supported with SQLite '
1068 1069 'store'))
1069 1070
1070 1071 # This filtering is out of an abundance of caution: we want to ensure
1071 1072 # we honor creation options and we do that by annotating exactly the
1072 1073 # creation options we recognize.
1073 1074 known = {
1074 1075 'narrowfiles',
1075 1076 'backend',
1076 1077 'shallowfilestore',
1077 1078 }
1078 1079
1079 1080 unsupported = set(createopts) - known
1080 1081 if unsupported:
1081 1082 raise error.Abort(_('SQLite store does not support repo creation '
1082 1083 'option: %s') % ', '.join(sorted(unsupported)))
1083 1084
1084 1085 # Since we're a hybrid store that still relies on revlogs, we fall back
1085 1086 # to using the revlogv1 backend's storage requirements then adding our
1086 1087 # own requirement.
1087 1088 createopts['backend'] = 'revlogv1'
1088 1089 requirements = orig(ui, createopts)
1089 1090 requirements.add(REQUIREMENT)
1090 1091
1091 1092 compression = ui.config('storage', 'sqlite.compression')
1092 1093
1093 1094 if compression == 'zstd' and not zstd:
1094 1095 raise error.Abort(_('storage.sqlite.compression set to "zstd" but '
1095 1096 'zstandard compression not available to this '
1096 1097 'Mercurial install'))
1097 1098
1098 1099 if compression == 'zstd':
1099 1100 requirements.add(REQUIREMENT_ZSTD)
1100 1101 elif compression == 'zlib':
1101 1102 requirements.add(REQUIREMENT_ZLIB)
1102 1103 elif compression == 'none':
1103 1104 requirements.add(REQUIREMENT_NONE)
1104 1105 else:
1105 1106 raise error.Abort(_('unknown compression engine defined in '
1106 1107 'storage.sqlite.compression: %s') % compression)
1107 1108
1108 1109 if createopts.get('shallowfilestore'):
1109 1110 requirements.add(REQUIREMENT_SHALLOW_FILES)
1110 1111
1111 1112 return requirements
1112 1113
1113 1114 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1114 1115 class sqlitefilestorage(object):
1115 1116 """Repository file storage backed by SQLite."""
1116 1117 def file(self, path):
1117 1118 if path[0] == b'/':
1118 1119 path = path[1:]
1119 1120
1120 1121 if REQUIREMENT_ZSTD in self.requirements:
1121 1122 compression = 'zstd'
1122 1123 elif REQUIREMENT_ZLIB in self.requirements:
1123 1124 compression = 'zlib'
1124 1125 elif REQUIREMENT_NONE in self.requirements:
1125 1126 compression = 'none'
1126 1127 else:
1127 1128 raise error.Abort(_('unable to determine what compression engine '
1128 1129 'to use for SQLite storage'))
1129 1130
1130 1131 return sqlitefilestore(self._dbconn, path, compression)
1131 1132
1132 1133 def makefilestorage(orig, requirements, features, **kwargs):
1133 1134 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1134 1135 if REQUIREMENT in requirements:
1135 1136 if REQUIREMENT_SHALLOW_FILES in requirements:
1136 1137 features.add(repository.REPO_FEATURE_SHALLOW_FILE_STORAGE)
1137 1138
1138 1139 return sqlitefilestorage
1139 1140 else:
1140 1141 return orig(requirements=requirements, features=features, **kwargs)
1141 1142
1142 1143 def makemain(orig, ui, requirements, **kwargs):
1143 1144 if REQUIREMENT in requirements:
1144 1145 if REQUIREMENT_ZSTD in requirements and not zstd:
1145 1146 raise error.Abort(_('repository uses zstandard compression, which '
1146 1147 'is not available to this Mercurial install'))
1147 1148
1148 1149 return sqliterepository
1149 1150
1150 1151 return orig(requirements=requirements, **kwargs)
1151 1152
1152 1153 def verifierinit(orig, self, *args, **kwargs):
1153 1154 orig(self, *args, **kwargs)
1154 1155
1155 1156 # We don't care that files in the store don't align with what is
1156 1157 # advertised. So suppress these warnings.
1157 1158 self.warnorphanstorefiles = False
1158 1159
1159 1160 def extsetup(ui):
1160 1161 localrepo.featuresetupfuncs.add(featuresetup)
1161 1162 extensions.wrapfunction(localrepo, 'newreporequirements',
1162 1163 newreporequirements)
1163 1164 extensions.wrapfunction(localrepo, 'makefilestorage',
1164 1165 makefilestorage)
1165 1166 extensions.wrapfunction(localrepo, 'makemain',
1166 1167 makemain)
1167 1168 extensions.wrapfunction(verify.verifier, '__init__',
1168 1169 verifierinit)
1169 1170
1170 1171 def reposetup(ui, repo):
1171 1172 if isinstance(repo, sqliterepository):
1172 1173 repo._db = None
1173 1174
1174 1175 # TODO check for bundlerepository?
@@ -1,1505 +1,1521 b''
1 1 # configitems.py - centralized declaration of configuration options
2 2 #
3 3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11 import re
12 12
13 13 from . import (
14 14 encoding,
15 15 error,
16 16 )
17 17
18 18 def loadconfigtable(ui, extname, configtable):
19 19 """update config item known to the ui with the extension ones"""
20 20 for section, items in sorted(configtable.items()):
21 21 knownitems = ui._knownconfig.setdefault(section, itemregister())
22 22 knownkeys = set(knownitems)
23 23 newkeys = set(items)
24 24 for key in sorted(knownkeys & newkeys):
25 25 msg = "extension '%s' overwrite config item '%s.%s'"
26 26 msg %= (extname, section, key)
27 27 ui.develwarn(msg, config='warn-config')
28 28
29 29 knownitems.update(items)
30 30
31 31 class configitem(object):
32 32 """represent a known config item
33 33
34 34 :section: the official config section where to find this item,
35 35 :name: the official name within the section,
36 36 :default: default value for this item,
37 37 :alias: optional list of tuples as alternatives,
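:experimental: whether this item is flagged as experimental,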
38 38 :generic: this is a generic definition, match name using regular expression.
39 39 """
40 40
41 41 def __init__(self, section, name, default=None, alias=(),
42 generic=False, priority=0):
42 generic=False, priority=0, experimental=False):
43 43 self.section = section
44 44 self.name = name
45 45 self.default = default
46 46 self.alias = list(alias)
47 47 self.generic = generic
48 48 self.priority = priority
49 self.experimental = experimental
49 50 self._re = None
50 51 if generic:
51 52 self._re = re.compile(self.name)
52 53
53 54 class itemregister(dict):
54 55 """A specialized dictionary that can handle wild-card selection"""
55 56
56 57 def __init__(self):
57 58 super(itemregister, self).__init__()
58 59 self._generics = set()
59 60
60 61 def update(self, other):
61 62 super(itemregister, self).update(other)
62 63 self._generics.update(other._generics)
63 64
64 65 def __setitem__(self, key, item):
65 66 super(itemregister, self).__setitem__(key, item)
66 67 if item.generic:
67 68 self._generics.add(item)
68 69
69 70 def get(self, key):
70 71 baseitem = super(itemregister, self).get(key)
71 72 if baseitem is not None and not baseitem.generic:
72 73 return baseitem
73 74
74 75 # search for a matching generic item
75 76 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
76 77 for item in generics:
77 78 # we use 'match' instead of 'search' to make the matching simpler
78 79 # for people unfamiliar with regular expressions. Having the match
79 80 # rooted to the start of the string produces fewer surprising
80 81 # results for users writing simple regexes for sub-attributes.
81 82 #
82 83 # For example, using a "color\..*" match produces an unsurprising
83 84 # result, while using search could suddenly match apparently
84 85 # unrelated configuration that happens to contain "color."
85 86 # anywhere. This is a tradeoff where we favor requiring ".*" on
86 87 # some matches to avoid the need to prefix most patterns with "^".
87 88 # The "^" seems more error-prone.
88 89 if item._re.match(key):
89 90 return item
90 91
91 92 return None
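    # Sketch of the generic matching described above (the registrations here
    # are hypothetical, not part of this change): patterns are anchored with
    # match(), and generic items are tried in ascending (priority, name)
    # order, so a numerically lower priority wins when several patterns match.
    from mercurial.configitems import configitem, itemregister

    reg = itemregister()
    reg[r'mytool\..*'] = configitem('merge-tools', r'mytool\..*',
                                    generic=True, priority=0)
    reg[r'.*\.args$'] = configitem('merge-tools', r'.*\.args$',
                                    generic=True, priority=-1)
    # Both patterns match 'mytool.args'; priority -1 sorts first, so the
    # '.*\.args$' item is returned.
    assert reg.get('mytool.args').name == r'.*\.args$'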
92 93
93 94 coreitems = {}
94 95
95 96 def _register(configtable, *args, **kwargs):
96 97 item = configitem(*args, **kwargs)
97 98 section = configtable.setdefault(item.section, itemregister())
98 99 if item.name in section:
99 100 msg = "duplicated config item registration for '%s.%s'"
100 101 raise error.ProgrammingError(msg % (item.section, item.name))
101 102 section[item.name] = item
102 103
103 104 # special value for case where the default is derived from other values
104 105 dynamicdefault = object()
105 106
106 107 # Registering actual config items
107 108
108 109 def getitemregister(configtable):
109 110 f = functools.partial(_register, configtable)
110 111 # export pseudo enum as configitem.*
111 112 f.dynamicdefault = dynamicdefault
112 113 return f
113 114
114 115 coreconfigitem = getitemregister(coreitems)
115 116
116 117 def _registerdiffopts(section, configprefix=''):
117 118 coreconfigitem(section, configprefix + 'nodates',
118 119 default=False,
119 120 )
120 121 coreconfigitem(section, configprefix + 'showfunc',
121 122 default=False,
122 123 )
123 124 coreconfigitem(section, configprefix + 'unified',
124 125 default=None,
125 126 )
126 127 coreconfigitem(section, configprefix + 'git',
127 128 default=False,
128 129 )
129 130 coreconfigitem(section, configprefix + 'ignorews',
130 131 default=False,
131 132 )
132 133 coreconfigitem(section, configprefix + 'ignorewsamount',
133 134 default=False,
134 135 )
135 136 coreconfigitem(section, configprefix + 'ignoreblanklines',
136 137 default=False,
137 138 )
138 139 coreconfigitem(section, configprefix + 'ignorewseol',
139 140 default=False,
140 141 )
141 142 coreconfigitem(section, configprefix + 'nobinary',
142 143 default=False,
143 144 )
144 145 coreconfigitem(section, configprefix + 'noprefix',
145 146 default=False,
146 147 )
147 148 coreconfigitem(section, configprefix + 'word-diff',
148 149 default=False,
149 150 )
150 151
151 152 coreconfigitem('alias', '.*',
152 153 default=dynamicdefault,
153 154 generic=True,
154 155 )
155 156 coreconfigitem('auth', 'cookiefile',
156 157 default=None,
157 158 )
158 159 _registerdiffopts(section='annotate')
159 160 # bookmarks.pushing: internal hack for discovery
160 161 coreconfigitem('bookmarks', 'pushing',
161 162 default=list,
162 163 )
163 164 # bundle.mainreporoot: internal hack for bundlerepo
164 165 coreconfigitem('bundle', 'mainreporoot',
165 166 default='',
166 167 )
167 168 coreconfigitem('censor', 'policy',
168 169 default='abort',
170 experimental=True,
169 171 )
170 172 coreconfigitem('chgserver', 'idletimeout',
171 173 default=3600,
172 174 )
173 175 coreconfigitem('chgserver', 'skiphash',
174 176 default=False,
175 177 )
176 178 coreconfigitem('cmdserver', 'log',
177 179 default=None,
178 180 )
179 181 coreconfigitem('cmdserver', 'max-log-files',
180 182 default=7,
181 183 )
182 184 coreconfigitem('cmdserver', 'max-log-size',
183 185 default='1 MB',
184 186 )
185 187 coreconfigitem('cmdserver', 'max-repo-cache',
186 188 default=0,
189 experimental=True,
187 190 )
188 191 coreconfigitem('cmdserver', 'message-encodings',
189 192 default=list,
193 experimental=True,
190 194 )
191 195 coreconfigitem('cmdserver', 'track-log',
192 196 default=lambda: ['chgserver', 'cmdserver', 'repocache'],
193 197 )
194 198 coreconfigitem('color', '.*',
195 199 default=None,
196 200 generic=True,
197 201 )
198 202 coreconfigitem('color', 'mode',
199 203 default='auto',
200 204 )
201 205 coreconfigitem('color', 'pagermode',
202 206 default=dynamicdefault,
203 207 )
204 208 _registerdiffopts(section='commands', configprefix='commit.interactive.')
205 209 coreconfigitem('commands', 'commit.post-status',
206 210 default=False,
207 211 )
208 212 coreconfigitem('commands', 'grep.all-files',
209 213 default=False,
214 experimental=True,
210 215 )
211 216 coreconfigitem('commands', 'resolve.confirm',
212 217 default=False,
213 218 )
214 219 coreconfigitem('commands', 'resolve.explicit-re-merge',
215 220 default=False,
216 221 )
217 222 coreconfigitem('commands', 'resolve.mark-check',
218 223 default='none',
219 224 )
220 225 _registerdiffopts(section='commands', configprefix='revert.interactive.')
221 226 coreconfigitem('commands', 'show.aliasprefix',
222 227 default=list,
223 228 )
224 229 coreconfigitem('commands', 'status.relative',
225 230 default=False,
226 231 )
227 232 coreconfigitem('commands', 'status.skipstates',
228 233 default=[],
234 experimental=True,
229 235 )
230 236 coreconfigitem('commands', 'status.terse',
231 237 default='',
232 238 )
233 239 coreconfigitem('commands', 'status.verbose',
234 240 default=False,
235 241 )
236 242 coreconfigitem('commands', 'update.check',
237 243 default=None,
238 244 )
239 245 coreconfigitem('commands', 'update.requiredest',
240 246 default=False,
241 247 )
242 248 coreconfigitem('committemplate', '.*',
243 249 default=None,
244 250 generic=True,
245 251 )
246 252 coreconfigitem('convert', 'bzr.saverev',
247 253 default=True,
248 254 )
249 255 coreconfigitem('convert', 'cvsps.cache',
250 256 default=True,
251 257 )
252 258 coreconfigitem('convert', 'cvsps.fuzz',
253 259 default=60,
254 260 )
255 261 coreconfigitem('convert', 'cvsps.logencoding',
256 262 default=None,
257 263 )
258 264 coreconfigitem('convert', 'cvsps.mergefrom',
259 265 default=None,
260 266 )
261 267 coreconfigitem('convert', 'cvsps.mergeto',
262 268 default=None,
263 269 )
264 270 coreconfigitem('convert', 'git.committeractions',
265 271 default=lambda: ['messagedifferent'],
266 272 )
267 273 coreconfigitem('convert', 'git.extrakeys',
268 274 default=list,
269 275 )
270 276 coreconfigitem('convert', 'git.findcopiesharder',
271 277 default=False,
272 278 )
273 279 coreconfigitem('convert', 'git.remoteprefix',
274 280 default='remote',
275 281 )
276 282 coreconfigitem('convert', 'git.renamelimit',
277 283 default=400,
278 284 )
279 285 coreconfigitem('convert', 'git.saverev',
280 286 default=True,
281 287 )
282 288 coreconfigitem('convert', 'git.similarity',
283 289 default=50,
284 290 )
285 291 coreconfigitem('convert', 'git.skipsubmodules',
286 292 default=False,
287 293 )
288 294 coreconfigitem('convert', 'hg.clonebranches',
289 295 default=False,
290 296 )
291 297 coreconfigitem('convert', 'hg.ignoreerrors',
292 298 default=False,
293 299 )
294 300 coreconfigitem('convert', 'hg.preserve-hash',
295 301 default=False,
296 302 )
297 303 coreconfigitem('convert', 'hg.revs',
298 304 default=None,
299 305 )
300 306 coreconfigitem('convert', 'hg.saverev',
301 307 default=False,
302 308 )
303 309 coreconfigitem('convert', 'hg.sourcename',
304 310 default=None,
305 311 )
306 312 coreconfigitem('convert', 'hg.startrev',
307 313 default=None,
308 314 )
309 315 coreconfigitem('convert', 'hg.tagsbranch',
310 316 default='default',
311 317 )
312 318 coreconfigitem('convert', 'hg.usebranchnames',
313 319 default=True,
314 320 )
315 321 coreconfigitem('convert', 'ignoreancestorcheck',
316 322 default=False,
323 experimental=True,
317 324 )
318 325 coreconfigitem('convert', 'localtimezone',
319 326 default=False,
320 327 )
321 328 coreconfigitem('convert', 'p4.encoding',
322 329 default=dynamicdefault,
323 330 )
324 331 coreconfigitem('convert', 'p4.startrev',
325 332 default=0,
326 333 )
327 334 coreconfigitem('convert', 'skiptags',
328 335 default=False,
329 336 )
330 337 coreconfigitem('convert', 'svn.debugsvnlog',
331 338 default=True,
332 339 )
333 340 coreconfigitem('convert', 'svn.trunk',
334 341 default=None,
335 342 )
336 343 coreconfigitem('convert', 'svn.tags',
337 344 default=None,
338 345 )
339 346 coreconfigitem('convert', 'svn.branches',
340 347 default=None,
341 348 )
342 349 coreconfigitem('convert', 'svn.startrev',
343 350 default=0,
344 351 )
345 352 coreconfigitem('debug', 'dirstate.delaywrite',
346 353 default=0,
347 354 )
348 355 coreconfigitem('defaults', '.*',
349 356 default=None,
350 357 generic=True,
351 358 )
352 359 coreconfigitem('devel', 'all-warnings',
353 360 default=False,
354 361 )
355 362 coreconfigitem('devel', 'bundle2.debug',
356 363 default=False,
357 364 )
358 365 coreconfigitem('devel', 'bundle.delta',
359 366 default='',
360 367 )
361 368 coreconfigitem('devel', 'cache-vfs',
362 369 default=None,
363 370 )
364 371 coreconfigitem('devel', 'check-locks',
365 372 default=False,
366 373 )
367 374 coreconfigitem('devel', 'check-relroot',
368 375 default=False,
369 376 )
370 377 coreconfigitem('devel', 'default-date',
371 378 default=None,
372 379 )
373 380 coreconfigitem('devel', 'deprec-warn',
374 381 default=False,
375 382 )
376 383 coreconfigitem('devel', 'disableloaddefaultcerts',
377 384 default=False,
378 385 )
379 386 coreconfigitem('devel', 'warn-empty-changegroup',
380 387 default=False,
381 388 )
382 389 coreconfigitem('devel', 'legacy.exchange',
383 390 default=list,
384 391 )
385 392 coreconfigitem('devel', 'servercafile',
386 393 default='',
387 394 )
388 395 coreconfigitem('devel', 'serverexactprotocol',
389 396 default='',
390 397 )
391 398 coreconfigitem('devel', 'serverrequirecert',
392 399 default=False,
393 400 )
394 401 coreconfigitem('devel', 'strip-obsmarkers',
395 402 default=True,
396 403 )
397 404 coreconfigitem('devel', 'warn-config',
398 405 default=None,
399 406 )
400 407 coreconfigitem('devel', 'warn-config-default',
401 408 default=None,
402 409 )
403 410 coreconfigitem('devel', 'user.obsmarker',
404 411 default=None,
405 412 )
406 413 coreconfigitem('devel', 'warn-config-unknown',
407 414 default=None,
408 415 )
409 416 coreconfigitem('devel', 'debug.copies',
410 417 default=False,
411 418 )
412 419 coreconfigitem('devel', 'debug.extensions',
413 420 default=False,
414 421 )
415 422 coreconfigitem('devel', 'debug.peer-request',
416 423 default=False,
417 424 )
418 425 coreconfigitem('devel', 'discovery.randomize',
419 426 default=True,
420 427 )
421 428 _registerdiffopts(section='diff')
422 429 coreconfigitem('email', 'bcc',
423 430 default=None,
424 431 )
425 432 coreconfigitem('email', 'cc',
426 433 default=None,
427 434 )
428 435 coreconfigitem('email', 'charsets',
429 436 default=list,
430 437 )
431 438 coreconfigitem('email', 'from',
432 439 default=None,
433 440 )
434 441 coreconfigitem('email', 'method',
435 442 default='smtp',
436 443 )
437 444 coreconfigitem('email', 'reply-to',
438 445 default=None,
439 446 )
440 447 coreconfigitem('email', 'to',
441 448 default=None,
442 449 )
443 450 coreconfigitem('experimental', 'archivemetatemplate',
444 451 default=dynamicdefault,
445 452 )
446 453 coreconfigitem('experimental', 'auto-publish',
447 454 default='publish',
448 455 )
449 456 coreconfigitem('experimental', 'bundle-phases',
450 457 default=False,
451 458 )
452 459 coreconfigitem('experimental', 'bundle2-advertise',
453 460 default=True,
454 461 )
455 462 coreconfigitem('experimental', 'bundle2-output-capture',
456 463 default=False,
457 464 )
458 465 coreconfigitem('experimental', 'bundle2.pushback',
459 466 default=False,
460 467 )
461 468 coreconfigitem('experimental', 'bundle2lazylocking',
462 469 default=False,
463 470 )
464 471 coreconfigitem('experimental', 'bundlecomplevel',
465 472 default=None,
466 473 )
467 474 coreconfigitem('experimental', 'bundlecomplevel.bzip2',
468 475 default=None,
469 476 )
470 477 coreconfigitem('experimental', 'bundlecomplevel.gzip',
471 478 default=None,
472 479 )
473 480 coreconfigitem('experimental', 'bundlecomplevel.none',
474 481 default=None,
475 482 )
476 483 coreconfigitem('experimental', 'bundlecomplevel.zstd',
477 484 default=None,
478 485 )
479 486 coreconfigitem('experimental', 'changegroup3',
480 487 default=False,
481 488 )
482 489 coreconfigitem('experimental', 'cleanup-as-archived',
483 490 default=False,
484 491 )
485 492 coreconfigitem('experimental', 'clientcompressionengines',
486 493 default=list,
487 494 )
488 495 coreconfigitem('experimental', 'copytrace',
489 496 default='on',
490 497 )
491 498 coreconfigitem('experimental', 'copytrace.movecandidateslimit',
492 499 default=100,
493 500 )
494 501 coreconfigitem('experimental', 'copytrace.sourcecommitlimit',
495 502 default=100,
496 503 )
497 504 coreconfigitem('experimental', 'copies.read-from',
498 505 default="filelog-only",
499 506 )
500 507 coreconfigitem('experimental', 'copies.write-to',
501 508 default='filelog-only',
502 509 )
503 510 coreconfigitem('experimental', 'crecordtest',
504 511 default=None,
505 512 )
506 513 coreconfigitem('experimental', 'directaccess',
507 514 default=False,
508 515 )
509 516 coreconfigitem('experimental', 'directaccess.revnums',
510 517 default=False,
511 518 )
512 519 coreconfigitem('experimental', 'editortmpinhg',
513 520 default=False,
514 521 )
515 522 coreconfigitem('experimental', 'evolution',
516 523 default=list,
517 524 )
518 525 coreconfigitem('experimental', 'evolution.allowdivergence',
519 526 default=False,
520 527 alias=[('experimental', 'allowdivergence')]
521 528 )
522 529 coreconfigitem('experimental', 'evolution.allowunstable',
523 530 default=None,
524 531 )
525 532 coreconfigitem('experimental', 'evolution.createmarkers',
526 533 default=None,
527 534 )
528 535 coreconfigitem('experimental', 'evolution.effect-flags',
529 536 default=True,
530 537 alias=[('experimental', 'effect-flags')]
531 538 )
532 539 coreconfigitem('experimental', 'evolution.exchange',
533 540 default=None,
534 541 )
535 542 coreconfigitem('experimental', 'evolution.bundle-obsmarker',
536 543 default=False,
537 544 )
538 545 coreconfigitem('experimental', 'log.topo',
539 546 default=False,
540 547 )
541 548 coreconfigitem('experimental', 'evolution.report-instabilities',
542 549 default=True,
543 550 )
544 551 coreconfigitem('experimental', 'evolution.track-operation',
545 552 default=True,
546 553 )
547 554 # repo-level config to exclude a revset visibility
548 555 #
549 556 # The target use case is to use `share` to expose different subset of the same
550 557 # repository, especially server side. See also `server.view`.
551 558 coreconfigitem('experimental', 'extra-filter-revs',
552 559 default=None,
553 560 )
554 561 coreconfigitem('experimental', 'maxdeltachainspan',
555 562 default=-1,
556 563 )
557 564 coreconfigitem('experimental', 'mergetempdirprefix',
558 565 default=None,
559 566 )
560 567 coreconfigitem('experimental', 'mmapindexthreshold',
561 568 default=None,
562 569 )
563 570 coreconfigitem('experimental', 'narrow',
564 571 default=False,
565 572 )
566 573 coreconfigitem('experimental', 'nonnormalparanoidcheck',
567 574 default=False,
568 575 )
569 576 coreconfigitem('experimental', 'exportableenviron',
570 577 default=list,
571 578 )
572 579 coreconfigitem('experimental', 'extendedheader.index',
573 580 default=None,
574 581 )
575 582 coreconfigitem('experimental', 'extendedheader.similarity',
576 583 default=False,
577 584 )
578 585 coreconfigitem('experimental', 'graphshorten',
579 586 default=False,
580 587 )
581 588 coreconfigitem('experimental', 'graphstyle.parent',
582 589 default=dynamicdefault,
583 590 )
584 591 coreconfigitem('experimental', 'graphstyle.missing',
585 592 default=dynamicdefault,
586 593 )
587 594 coreconfigitem('experimental', 'graphstyle.grandparent',
588 595 default=dynamicdefault,
589 596 )
590 597 coreconfigitem('experimental', 'hook-track-tags',
591 598 default=False,
592 599 )
593 600 coreconfigitem('experimental', 'httppeer.advertise-v2',
594 601 default=False,
595 602 )
596 603 coreconfigitem('experimental', 'httppeer.v2-encoder-order',
597 604 default=None,
598 605 )
599 606 coreconfigitem('experimental', 'httppostargs',
600 607 default=False,
601 608 )
602 609 coreconfigitem('experimental', 'mergedriver',
603 610 default=None,
604 611 )
605 612 coreconfigitem('experimental', 'nointerrupt', default=False)
606 613 coreconfigitem('experimental', 'nointerrupt-interactiveonly', default=True)
607 614
608 615 coreconfigitem('experimental', 'obsmarkers-exchange-debug',
609 616 default=False,
610 617 )
611 618 coreconfigitem('experimental', 'remotenames',
612 619 default=False,
613 620 )
614 621 coreconfigitem('experimental', 'removeemptydirs',
615 622 default=True,
616 623 )
617 624 coreconfigitem('experimental', 'revert.interactive.select-to-keep',
618 625 default=False,
619 626 )
620 627 coreconfigitem('experimental', 'revisions.prefixhexnode',
621 628 default=False,
622 629 )
623 630 coreconfigitem('experimental', 'revlogv2',
624 631 default=None,
625 632 )
626 633 coreconfigitem('experimental', 'revisions.disambiguatewithin',
627 634 default=None,
628 635 )
629 636 coreconfigitem('experimental', 'server.filesdata.recommended-batch-size',
630 637 default=50000,
631 638 )
632 639 coreconfigitem('experimental', 'server.manifestdata.recommended-batch-size',
633 640 default=100000,
634 641 )
635 642 coreconfigitem('experimental', 'server.stream-narrow-clones',
636 643 default=False,
637 644 )
638 645 coreconfigitem('experimental', 'single-head-per-branch',
639 646 default=False,
640 647 )
641 648 coreconfigitem('experimental', 'sshserver.support-v2',
642 649 default=False,
643 650 )
644 651 coreconfigitem('experimental', 'sparse-read',
645 652 default=False,
646 653 )
647 654 coreconfigitem('experimental', 'sparse-read.density-threshold',
648 655 default=0.50,
649 656 )
650 657 coreconfigitem('experimental', 'sparse-read.min-gap-size',
651 658 default='65K',
652 659 )
653 660 coreconfigitem('experimental', 'treemanifest',
654 661 default=False,
655 662 )
656 663 coreconfigitem('experimental', 'update.atomic-file',
657 664 default=False,
658 665 )
659 666 coreconfigitem('experimental', 'sshpeer.advertise-v2',
660 667 default=False,
661 668 )
662 669 coreconfigitem('experimental', 'web.apiserver',
663 670 default=False,
664 671 )
665 672 coreconfigitem('experimental', 'web.api.http-v2',
666 673 default=False,
667 674 )
668 675 coreconfigitem('experimental', 'web.api.debugreflect',
669 676 default=False,
670 677 )
671 678 coreconfigitem('experimental', 'worker.wdir-get-thread-safe',
672 679 default=False,
673 680 )
674 681 coreconfigitem('experimental', 'xdiff',
675 682 default=False,
676 683 )
677 684 coreconfigitem('extensions', '.*',
678 685 default=None,
679 686 generic=True,
680 687 )
681 688 coreconfigitem('extdata', '.*',
682 689 default=None,
683 690 generic=True,
684 691 )
685 692 coreconfigitem('format', 'bookmarks-in-store',
686 693 default=False,
687 694 )
688 695 coreconfigitem('format', 'chunkcachesize',
689 696 default=None,
697 experimental=True,
690 698 )
691 699 coreconfigitem('format', 'dotencode',
692 700 default=True,
693 701 )
694 702 coreconfigitem('format', 'generaldelta',
695 703 default=False,
704 experimental=True,
696 705 )
697 706 coreconfigitem('format', 'manifestcachesize',
698 707 default=None,
708 experimental=True,
699 709 )
700 710 coreconfigitem('format', 'maxchainlen',
701 711 default=dynamicdefault,
712 experimental=True,
702 713 )
703 714 coreconfigitem('format', 'obsstore-version',
704 715 default=None,
705 716 )
706 717 coreconfigitem('format', 'sparse-revlog',
707 718 default=True,
708 719 )
709 720 coreconfigitem('format', 'revlog-compression',
710 721 default='zlib',
711 722 alias=[('experimental', 'format.compression')]
712 723 )
713 724 coreconfigitem('format', 'usefncache',
714 725 default=True,
715 726 )
716 727 coreconfigitem('format', 'usegeneraldelta',
717 728 default=True,
718 729 )
719 730 coreconfigitem('format', 'usestore',
720 731 default=True,
721 732 )
722 733 coreconfigitem('format', 'internal-phase',
723 734 default=False,
735 experimental=True,
724 736 )
725 737 coreconfigitem('fsmonitor', 'warn_when_unused',
726 738 default=True,
727 739 )
728 740 coreconfigitem('fsmonitor', 'warn_update_file_count',
729 741 default=50000,
730 742 )
731 743 coreconfigitem('help', br'hidden-command\..*',
732 744 default=False,
733 745 generic=True,
734 746 )
735 747 coreconfigitem('help', br'hidden-topic\..*',
736 748 default=False,
737 749 generic=True,
738 750 )
739 751 coreconfigitem('hooks', '.*',
740 752 default=dynamicdefault,
741 753 generic=True,
742 754 )
743 755 coreconfigitem('hgweb-paths', '.*',
744 756 default=list,
745 757 generic=True,
746 758 )
747 759 coreconfigitem('hostfingerprints', '.*',
748 760 default=list,
749 761 generic=True,
750 762 )
751 763 coreconfigitem('hostsecurity', 'ciphers',
752 764 default=None,
753 765 )
754 766 coreconfigitem('hostsecurity', 'disabletls10warning',
755 767 default=False,
756 768 )
757 769 coreconfigitem('hostsecurity', 'minimumprotocol',
758 770 default=dynamicdefault,
759 771 )
760 772 coreconfigitem('hostsecurity', '.*:minimumprotocol$',
761 773 default=dynamicdefault,
762 774 generic=True,
763 775 )
764 776 coreconfigitem('hostsecurity', '.*:ciphers$',
765 777 default=dynamicdefault,
766 778 generic=True,
767 779 )
768 780 coreconfigitem('hostsecurity', '.*:fingerprints$',
769 781 default=list,
770 782 generic=True,
771 783 )
772 784 coreconfigitem('hostsecurity', '.*:verifycertsfile$',
773 785 default=None,
774 786 generic=True,
775 787 )
776 788
777 789 coreconfigitem('http_proxy', 'always',
778 790 default=False,
779 791 )
780 792 coreconfigitem('http_proxy', 'host',
781 793 default=None,
782 794 )
783 795 coreconfigitem('http_proxy', 'no',
784 796 default=list,
785 797 )
786 798 coreconfigitem('http_proxy', 'passwd',
787 799 default=None,
788 800 )
789 801 coreconfigitem('http_proxy', 'user',
790 802 default=None,
791 803 )
792 804
793 805 coreconfigitem('http', 'timeout',
794 806 default=None,
795 807 )
796 808
797 809 coreconfigitem('logtoprocess', 'commandexception',
798 810 default=None,
799 811 )
800 812 coreconfigitem('logtoprocess', 'commandfinish',
801 813 default=None,
802 814 )
803 815 coreconfigitem('logtoprocess', 'command',
804 816 default=None,
805 817 )
806 818 coreconfigitem('logtoprocess', 'develwarn',
807 819 default=None,
808 820 )
809 821 coreconfigitem('logtoprocess', 'uiblocked',
810 822 default=None,
811 823 )
812 824 coreconfigitem('merge', 'checkunknown',
813 825 default='abort',
814 826 )
815 827 coreconfigitem('merge', 'checkignored',
816 828 default='abort',
817 829 )
818 830 coreconfigitem('experimental', 'merge.checkpathconflicts',
819 831 default=False,
820 832 )
821 833 coreconfigitem('merge', 'followcopies',
822 834 default=True,
823 835 )
824 836 coreconfigitem('merge', 'on-failure',
825 837 default='continue',
826 838 )
827 839 coreconfigitem('merge', 'preferancestor',
828 840 default=lambda: ['*'],
841 experimental=True,
829 842 )
830 843 coreconfigitem('merge', 'strict-capability-check',
831 844 default=False,
832 845 )
833 846 coreconfigitem('merge-tools', '.*',
834 847 default=None,
835 848 generic=True,
836 849 )
837 850 coreconfigitem('merge-tools', br'.*\.args$',
838 851 default="$local $base $other",
839 852 generic=True,
840 853 priority=-1,
841 854 )
842 855 coreconfigitem('merge-tools', br'.*\.binary$',
843 856 default=False,
844 857 generic=True,
845 858 priority=-1,
846 859 )
847 860 coreconfigitem('merge-tools', br'.*\.check$',
848 861 default=list,
849 862 generic=True,
850 863 priority=-1,
851 864 )
852 865 coreconfigitem('merge-tools', br'.*\.checkchanged$',
853 866 default=False,
854 867 generic=True,
855 868 priority=-1,
856 869 )
857 870 coreconfigitem('merge-tools', br'.*\.executable$',
858 871 default=dynamicdefault,
859 872 generic=True,
860 873 priority=-1,
861 874 )
862 875 coreconfigitem('merge-tools', br'.*\.fixeol$',
863 876 default=False,
864 877 generic=True,
865 878 priority=-1,
866 879 )
867 880 coreconfigitem('merge-tools', br'.*\.gui$',
868 881 default=False,
869 882 generic=True,
870 883 priority=-1,
871 884 )
872 885 coreconfigitem('merge-tools', br'.*\.mergemarkers$',
873 886 default='basic',
874 887 generic=True,
875 888 priority=-1,
876 889 )
877 890 coreconfigitem('merge-tools', br'.*\.mergemarkertemplate$',
878 891 default=dynamicdefault, # take from ui.mergemarkertemplate
879 892 generic=True,
880 893 priority=-1,
881 894 )
882 895 coreconfigitem('merge-tools', br'.*\.priority$',
883 896 default=0,
884 897 generic=True,
885 898 priority=-1,
886 899 )
887 900 coreconfigitem('merge-tools', br'.*\.premerge$',
888 901 default=dynamicdefault,
889 902 generic=True,
890 903 priority=-1,
891 904 )
892 905 coreconfigitem('merge-tools', br'.*\.symlink$',
893 906 default=False,
894 907 generic=True,
895 908 priority=-1,
896 909 )
897 910 coreconfigitem('pager', 'attend-.*',
898 911 default=dynamicdefault,
899 912 generic=True,
900 913 )
901 914 coreconfigitem('pager', 'ignore',
902 915 default=list,
903 916 )
904 917 coreconfigitem('pager', 'pager',
905 918 default=dynamicdefault,
906 919 )
907 920 coreconfigitem('patch', 'eol',
908 921 default='strict',
909 922 )
910 923 coreconfigitem('patch', 'fuzz',
911 924 default=2,
912 925 )
913 926 coreconfigitem('paths', 'default',
914 927 default=None,
915 928 )
916 929 coreconfigitem('paths', 'default-push',
917 930 default=None,
918 931 )
919 932 coreconfigitem('paths', '.*',
920 933 default=None,
921 934 generic=True,
922 935 )
923 936 coreconfigitem('phases', 'checksubrepos',
924 937 default='follow',
925 938 )
926 939 coreconfigitem('phases', 'new-commit',
927 940 default='draft',
928 941 )
929 942 coreconfigitem('phases', 'publish',
930 943 default=True,
931 944 )
932 945 coreconfigitem('profiling', 'enabled',
933 946 default=False,
934 947 )
935 948 coreconfigitem('profiling', 'format',
936 949 default='text',
937 950 )
938 951 coreconfigitem('profiling', 'freq',
939 952 default=1000,
940 953 )
941 954 coreconfigitem('profiling', 'limit',
942 955 default=30,
943 956 )
944 957 coreconfigitem('profiling', 'nested',
945 958 default=0,
946 959 )
947 960 coreconfigitem('profiling', 'output',
948 961 default=None,
949 962 )
950 963 coreconfigitem('profiling', 'showmax',
951 964 default=0.999,
952 965 )
953 966 coreconfigitem('profiling', 'showmin',
954 967 default=dynamicdefault,
955 968 )
956 969 coreconfigitem('profiling', 'showtime',
957 970 default=True,
958 971 )
959 972 coreconfigitem('profiling', 'sort',
960 973 default='inlinetime',
961 974 )
962 975 coreconfigitem('profiling', 'statformat',
963 976 default='hotpath',
964 977 )
965 978 coreconfigitem('profiling', 'time-track',
966 979 default=dynamicdefault,
967 980 )
968 981 coreconfigitem('profiling', 'type',
969 982 default='stat',
970 983 )
971 984 coreconfigitem('progress', 'assume-tty',
972 985 default=False,
973 986 )
974 987 coreconfigitem('progress', 'changedelay',
975 988 default=1,
976 989 )
977 990 coreconfigitem('progress', 'clear-complete',
978 991 default=True,
979 992 )
980 993 coreconfigitem('progress', 'debug',
981 994 default=False,
982 995 )
983 996 coreconfigitem('progress', 'delay',
984 997 default=3,
985 998 )
986 999 coreconfigitem('progress', 'disable',
987 1000 default=False,
988 1001 )
989 1002 coreconfigitem('progress', 'estimateinterval',
990 1003 default=60.0,
991 1004 )
992 1005 coreconfigitem('progress', 'format',
993 1006 default=lambda: ['topic', 'bar', 'number', 'estimate'],
994 1007 )
995 1008 coreconfigitem('progress', 'refresh',
996 1009 default=0.1,
997 1010 )
998 1011 coreconfigitem('progress', 'width',
999 1012 default=dynamicdefault,
1000 1013 )
1001 1014 coreconfigitem('push', 'pushvars.server',
1002 1015 default=False,
1003 1016 )
1004 1017 coreconfigitem('rewrite', 'backup-bundle',
1005 1018 default=True,
1006 1019 alias=[('ui', 'history-editing-backup')],
1007 1020 )
1008 1021 coreconfigitem('rewrite', 'update-timestamp',
1009 1022 default=False,
1010 1023 )
1011 1024 coreconfigitem('storage', 'new-repo-backend',
1012 1025 default='revlogv1',
1026 experimental=True,
1013 1027 )
1014 1028 coreconfigitem('storage', 'revlog.optimize-delta-parent-choice',
1015 1029 default=True,
1016 1030 alias=[('format', 'aggressivemergedeltas')],
1017 1031 )
1018 1032 coreconfigitem('storage', 'revlog.reuse-external-delta',
1019 1033 default=True,
1020 1034 )
1021 1035 coreconfigitem('storage', 'revlog.reuse-external-delta-parent',
1022 1036 default=None,
1023 1037 )
1024 1038 coreconfigitem('storage', 'revlog.zlib.level',
1025 1039 default=None,
1026 1040 )
1027 1041 coreconfigitem('storage', 'revlog.zstd.level',
1028 1042 default=None,
1029 1043 )
1030 1044 coreconfigitem('server', 'bookmarks-pushkey-compat',
1031 1045 default=True,
1032 1046 )
1033 1047 coreconfigitem('server', 'bundle1',
1034 1048 default=True,
1035 1049 )
1036 1050 coreconfigitem('server', 'bundle1gd',
1037 1051 default=None,
1038 1052 )
1039 1053 coreconfigitem('server', 'bundle1.pull',
1040 1054 default=None,
1041 1055 )
1042 1056 coreconfigitem('server', 'bundle1gd.pull',
1043 1057 default=None,
1044 1058 )
1045 1059 coreconfigitem('server', 'bundle1.push',
1046 1060 default=None,
1047 1061 )
1048 1062 coreconfigitem('server', 'bundle1gd.push',
1049 1063 default=None,
1050 1064 )
1051 1065 coreconfigitem('server', 'bundle2.stream',
1052 1066 default=True,
1053 1067 alias=[('experimental', 'bundle2.stream')]
1054 1068 )
1055 1069 coreconfigitem('server', 'compressionengines',
1056 1070 default=list,
1057 1071 )
1058 1072 coreconfigitem('server', 'concurrent-push-mode',
1059 1073 default='strict',
1060 1074 )
1061 1075 coreconfigitem('server', 'disablefullbundle',
1062 1076 default=False,
1063 1077 )
1064 1078 coreconfigitem('server', 'maxhttpheaderlen',
1065 1079 default=1024,
1066 1080 )
1067 1081 coreconfigitem('server', 'pullbundle',
1068 1082 default=False,
1069 1083 )
1070 1084 coreconfigitem('server', 'preferuncompressed',
1071 1085 default=False,
1072 1086 )
1073 1087 coreconfigitem('server', 'streamunbundle',
1074 1088 default=False,
1075 1089 )
1076 1090 coreconfigitem('server', 'uncompressed',
1077 1091 default=True,
1078 1092 )
1079 1093 coreconfigitem('server', 'uncompressedallowsecret',
1080 1094 default=False,
1081 1095 )
1082 1096 coreconfigitem('server', 'view',
1083 1097 default='served',
1084 1098 )
1085 1099 coreconfigitem('server', 'validate',
1086 1100 default=False,
1087 1101 )
1088 1102 coreconfigitem('server', 'zliblevel',
1089 1103 default=-1,
1090 1104 )
1091 1105 coreconfigitem('server', 'zstdlevel',
1092 1106 default=3,
1093 1107 )
1094 1108 coreconfigitem('share', 'pool',
1095 1109 default=None,
1096 1110 )
1097 1111 coreconfigitem('share', 'poolnaming',
1098 1112 default='identity',
1099 1113 )
1100 1114 coreconfigitem('shelve','maxbackups',
1101 1115 default=10,
1102 1116 )
1103 1117 coreconfigitem('smtp', 'host',
1104 1118 default=None,
1105 1119 )
1106 1120 coreconfigitem('smtp', 'local_hostname',
1107 1121 default=None,
1108 1122 )
1109 1123 coreconfigitem('smtp', 'password',
1110 1124 default=None,
1111 1125 )
1112 1126 coreconfigitem('smtp', 'port',
1113 1127 default=dynamicdefault,
1114 1128 )
1115 1129 coreconfigitem('smtp', 'tls',
1116 1130 default='none',
1117 1131 )
1118 1132 coreconfigitem('smtp', 'username',
1119 1133 default=None,
1120 1134 )
1121 1135 coreconfigitem('sparse', 'missingwarning',
1122 1136 default=True,
1137 experimental=True,
1123 1138 )
1124 1139 coreconfigitem('subrepos', 'allowed',
1125 1140 default=dynamicdefault, # to make backporting simpler
1126 1141 )
1127 1142 coreconfigitem('subrepos', 'hg:allowed',
1128 1143 default=dynamicdefault,
1129 1144 )
1130 1145 coreconfigitem('subrepos', 'git:allowed',
1131 1146 default=dynamicdefault,
1132 1147 )
1133 1148 coreconfigitem('subrepos', 'svn:allowed',
1134 1149 default=dynamicdefault,
1135 1150 )
1136 1151 coreconfigitem('templates', '.*',
1137 1152 default=None,
1138 1153 generic=True,
1139 1154 )
1140 1155 coreconfigitem('templateconfig', '.*',
1141 1156 default=dynamicdefault,
1142 1157 generic=True,
1143 1158 )
1144 1159 coreconfigitem('trusted', 'groups',
1145 1160 default=list,
1146 1161 )
1147 1162 coreconfigitem('trusted', 'users',
1148 1163 default=list,
1149 1164 )
1150 1165 coreconfigitem('ui', '_usedassubrepo',
1151 1166 default=False,
1152 1167 )
1153 1168 coreconfigitem('ui', 'allowemptycommit',
1154 1169 default=False,
1155 1170 )
1156 1171 coreconfigitem('ui', 'archivemeta',
1157 1172 default=True,
1158 1173 )
1159 1174 coreconfigitem('ui', 'askusername',
1160 1175 default=False,
1161 1176 )
1162 1177 coreconfigitem('ui', 'clonebundlefallback',
1163 1178 default=False,
1164 1179 )
1165 1180 coreconfigitem('ui', 'clonebundleprefers',
1166 1181 default=list,
1167 1182 )
1168 1183 coreconfigitem('ui', 'clonebundles',
1169 1184 default=True,
1170 1185 )
1171 1186 coreconfigitem('ui', 'color',
1172 1187 default='auto',
1173 1188 )
1174 1189 coreconfigitem('ui', 'commitsubrepos',
1175 1190 default=False,
1176 1191 )
1177 1192 coreconfigitem('ui', 'debug',
1178 1193 default=False,
1179 1194 )
1180 1195 coreconfigitem('ui', 'debugger',
1181 1196 default=None,
1182 1197 )
1183 1198 coreconfigitem('ui', 'editor',
1184 1199 default=dynamicdefault,
1185 1200 )
1186 1201 coreconfigitem('ui', 'fallbackencoding',
1187 1202 default=None,
1188 1203 )
1189 1204 coreconfigitem('ui', 'forcecwd',
1190 1205 default=None,
1191 1206 )
1192 1207 coreconfigitem('ui', 'forcemerge',
1193 1208 default=None,
1194 1209 )
1195 1210 coreconfigitem('ui', 'formatdebug',
1196 1211 default=False,
1197 1212 )
1198 1213 coreconfigitem('ui', 'formatjson',
1199 1214 default=False,
1200 1215 )
1201 1216 coreconfigitem('ui', 'formatted',
1202 1217 default=None,
1203 1218 )
1204 1219 coreconfigitem('ui', 'graphnodetemplate',
1205 1220 default=None,
1206 1221 )
1207 1222 coreconfigitem('ui', 'interactive',
1208 1223 default=None,
1209 1224 )
1210 1225 coreconfigitem('ui', 'interface',
1211 1226 default=None,
1212 1227 )
1213 1228 coreconfigitem('ui', 'interface.chunkselector',
1214 1229 default=None,
1215 1230 )
1216 1231 coreconfigitem('ui', 'large-file-limit',
1217 1232 default=10000000,
1218 1233 )
1219 1234 coreconfigitem('ui', 'logblockedtimes',
1220 1235 default=False,
1221 1236 )
1222 1237 coreconfigitem('ui', 'logtemplate',
1223 1238 default=None,
1224 1239 )
1225 1240 coreconfigitem('ui', 'merge',
1226 1241 default=None,
1227 1242 )
1228 1243 coreconfigitem('ui', 'mergemarkers',
1229 1244 default='basic',
1230 1245 )
1231 1246 coreconfigitem('ui', 'mergemarkertemplate',
1232 1247 default=('{node|short} '
1233 1248 '{ifeq(tags, "tip", "", '
1234 1249 'ifeq(tags, "", "", "{tags} "))}'
1235 1250 '{if(bookmarks, "{bookmarks} ")}'
1236 1251 '{ifeq(branch, "default", "", "{branch} ")}'
1237 1252 '- {author|user}: {desc|firstline}')
1238 1253 )
1239 1254 coreconfigitem('ui', 'message-output',
1240 1255 default='stdio',
1241 1256 )
1242 1257 coreconfigitem('ui', 'nontty',
1243 1258 default=False,
1244 1259 )
1245 1260 coreconfigitem('ui', 'origbackuppath',
1246 1261 default=None,
1247 1262 )
1248 1263 coreconfigitem('ui', 'paginate',
1249 1264 default=True,
1250 1265 )
1251 1266 coreconfigitem('ui', 'patch',
1252 1267 default=None,
1253 1268 )
1254 1269 coreconfigitem('ui', 'pre-merge-tool-output-template',
1255 1270 default=None,
1256 1271 )
1257 1272 coreconfigitem('ui', 'portablefilenames',
1258 1273 default='warn',
1259 1274 )
1260 1275 coreconfigitem('ui', 'promptecho',
1261 1276 default=False,
1262 1277 )
1263 1278 coreconfigitem('ui', 'quiet',
1264 1279 default=False,
1265 1280 )
1266 1281 coreconfigitem('ui', 'quietbookmarkmove',
1267 1282 default=False,
1268 1283 )
1269 1284 coreconfigitem('ui', 'relative-paths',
1270 1285 default='legacy',
1271 1286 )
1272 1287 coreconfigitem('ui', 'remotecmd',
1273 1288 default='hg',
1274 1289 )
1275 1290 coreconfigitem('ui', 'report_untrusted',
1276 1291 default=True,
1277 1292 )
1278 1293 coreconfigitem('ui', 'rollback',
1279 1294 default=True,
1280 1295 )
1281 1296 coreconfigitem('ui', 'signal-safe-lock',
1282 1297 default=True,
1283 1298 )
1284 1299 coreconfigitem('ui', 'slash',
1285 1300 default=False,
1286 1301 )
1287 1302 coreconfigitem('ui', 'ssh',
1288 1303 default='ssh',
1289 1304 )
1290 1305 coreconfigitem('ui', 'ssherrorhint',
1291 1306 default=None,
1292 1307 )
1293 1308 coreconfigitem('ui', 'statuscopies',
1294 1309 default=False,
1295 1310 )
1296 1311 coreconfigitem('ui', 'strict',
1297 1312 default=False,
1298 1313 )
1299 1314 coreconfigitem('ui', 'style',
1300 1315 default='',
1301 1316 )
1302 1317 coreconfigitem('ui', 'supportcontact',
1303 1318 default=None,
1304 1319 )
1305 1320 coreconfigitem('ui', 'textwidth',
1306 1321 default=78,
1307 1322 )
1308 1323 coreconfigitem('ui', 'timeout',
1309 1324 default='600',
1310 1325 )
1311 1326 coreconfigitem('ui', 'timeout.warn',
1312 1327 default=0,
1313 1328 )
1314 1329 coreconfigitem('ui', 'traceback',
1315 1330 default=False,
1316 1331 )
1317 1332 coreconfigitem('ui', 'tweakdefaults',
1318 1333 default=False,
1319 1334 )
1320 1335 coreconfigitem('ui', 'username',
1321 1336 alias=[('ui', 'user')]
1322 1337 )
1323 1338 coreconfigitem('ui', 'verbose',
1324 1339 default=False,
1325 1340 )
1326 1341 coreconfigitem('verify', 'skipflags',
1327 1342 default=None,
1328 1343 )
1329 1344 coreconfigitem('web', 'allowbz2',
1330 1345 default=False,
1331 1346 )
1332 1347 coreconfigitem('web', 'allowgz',
1333 1348 default=False,
1334 1349 )
1335 1350 coreconfigitem('web', 'allow-pull',
1336 1351 alias=[('web', 'allowpull')],
1337 1352 default=True,
1338 1353 )
1339 1354 coreconfigitem('web', 'allow-push',
1340 1355 alias=[('web', 'allow_push')],
1341 1356 default=list,
1342 1357 )
1343 1358 coreconfigitem('web', 'allowzip',
1344 1359 default=False,
1345 1360 )
1346 1361 coreconfigitem('web', 'archivesubrepos',
1347 1362 default=False,
1348 1363 )
1349 1364 coreconfigitem('web', 'cache',
1350 1365 default=True,
1351 1366 )
1352 1367 coreconfigitem('web', 'comparisoncontext',
1353 1368 default=5,
1354 1369 )
1355 1370 coreconfigitem('web', 'contact',
1356 1371 default=None,
1357 1372 )
1358 1373 coreconfigitem('web', 'deny_push',
1359 1374 default=list,
1360 1375 )
1361 1376 coreconfigitem('web', 'guessmime',
1362 1377 default=False,
1363 1378 )
1364 1379 coreconfigitem('web', 'hidden',
1365 1380 default=False,
1366 1381 )
1367 1382 coreconfigitem('web', 'labels',
1368 1383 default=list,
1369 1384 )
1370 1385 coreconfigitem('web', 'logoimg',
1371 1386 default='hglogo.png',
1372 1387 )
1373 1388 coreconfigitem('web', 'logourl',
1374 1389 default='https://mercurial-scm.org/',
1375 1390 )
1376 1391 coreconfigitem('web', 'accesslog',
1377 1392 default='-',
1378 1393 )
1379 1394 coreconfigitem('web', 'address',
1380 1395 default='',
1381 1396 )
1382 1397 coreconfigitem('web', 'allow-archive',
1383 1398 alias=[('web', 'allow_archive')],
1384 1399 default=list,
1385 1400 )
1386 1401 coreconfigitem('web', 'allow_read',
1387 1402 default=list,
1388 1403 )
1389 1404 coreconfigitem('web', 'baseurl',
1390 1405 default=None,
1391 1406 )
1392 1407 coreconfigitem('web', 'cacerts',
1393 1408 default=None,
1394 1409 )
1395 1410 coreconfigitem('web', 'certificate',
1396 1411 default=None,
1397 1412 )
1398 1413 coreconfigitem('web', 'collapse',
1399 1414 default=False,
1400 1415 )
1401 1416 coreconfigitem('web', 'csp',
1402 1417 default=None,
1403 1418 )
1404 1419 coreconfigitem('web', 'deny_read',
1405 1420 default=list,
1406 1421 )
1407 1422 coreconfigitem('web', 'descend',
1408 1423 default=True,
1409 1424 )
1410 1425 coreconfigitem('web', 'description',
1411 1426 default="",
1412 1427 )
1413 1428 coreconfigitem('web', 'encoding',
1414 1429 default=lambda: encoding.encoding,
1415 1430 )
1416 1431 coreconfigitem('web', 'errorlog',
1417 1432 default='-',
1418 1433 )
1419 1434 coreconfigitem('web', 'ipv6',
1420 1435 default=False,
1421 1436 )
1422 1437 coreconfigitem('web', 'maxchanges',
1423 1438 default=10,
1424 1439 )
1425 1440 coreconfigitem('web', 'maxfiles',
1426 1441 default=10,
1427 1442 )
1428 1443 coreconfigitem('web', 'maxshortchanges',
1429 1444 default=60,
1430 1445 )
1431 1446 coreconfigitem('web', 'motd',
1432 1447 default='',
1433 1448 )
1434 1449 coreconfigitem('web', 'name',
1435 1450 default=dynamicdefault,
1436 1451 )
1437 1452 coreconfigitem('web', 'port',
1438 1453 default=8000,
1439 1454 )
1440 1455 coreconfigitem('web', 'prefix',
1441 1456 default='',
1442 1457 )
1443 1458 coreconfigitem('web', 'push_ssl',
1444 1459 default=True,
1445 1460 )
1446 1461 coreconfigitem('web', 'refreshinterval',
1447 1462 default=20,
1448 1463 )
1449 1464 coreconfigitem('web', 'server-header',
1450 1465 default=None,
1451 1466 )
1452 1467 coreconfigitem('web', 'static',
1453 1468 default=None,
1454 1469 )
1455 1470 coreconfigitem('web', 'staticurl',
1456 1471 default=None,
1457 1472 )
1458 1473 coreconfigitem('web', 'stripes',
1459 1474 default=1,
1460 1475 )
1461 1476 coreconfigitem('web', 'style',
1462 1477 default='paper',
1463 1478 )
1464 1479 coreconfigitem('web', 'templates',
1465 1480 default=None,
1466 1481 )
1467 1482 coreconfigitem('web', 'view',
1468 1483 default='served',
1484 experimental=True,
1469 1485 )
1470 1486 coreconfigitem('worker', 'backgroundclose',
1471 1487 default=dynamicdefault,
1472 1488 )
1473 1489 # Windows defaults to a limit of 512 open files. A buffer of 128
1474 1490 # should give us enough headway.
1475 1491 coreconfigitem('worker', 'backgroundclosemaxqueue',
1476 1492 default=384,
1477 1493 )
1478 1494 coreconfigitem('worker', 'backgroundcloseminfilecount',
1479 1495 default=2048,
1480 1496 )
1481 1497 coreconfigitem('worker', 'backgroundclosethreadcount',
1482 1498 default=4,
1483 1499 )
1484 1500 coreconfigitem('worker', 'enabled',
1485 1501 default=True,
1486 1502 )
1487 1503 coreconfigitem('worker', 'numcpus',
1488 1504 default=None,
1489 1505 )
1490 1506
1491 1507 # Rebase related configuration moved to core because other extension are doing
1492 1508 # strange things. For example, shelve import the extensions to reuse some bit
1493 1509 # without formally loading it.
1494 1510 coreconfigitem('commands', 'rebase.requiredest',
1495 1511 default=False,
1496 1512 )
1497 1513 coreconfigitem('experimental', 'rebaseskipobsolete',
1498 1514 default=True,
1499 1515 )
1500 1516 coreconfigitem('rebase', 'singletransaction',
1501 1517 default=False,
1502 1518 )
1503 1519 coreconfigitem('rebase', 'experimental.inmemory',
1504 1520 default=False,
1505 1521 )
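The registration helpers above forward keyword arguments straight to configitem, so the new experimental flag is stored on the registered item. A minimal sketch of that flow against a standalone config table (the section and option names are hypothetical, and the final assertion only holds once this change is applied):

    from mercurial import configitems

    mytable = {}
    myregister = configitems.getitemregister(mytable)

    myregister('examplesection', 'example-knob',
               default=False,
               experimental=True,  # forwarded via _register to configitem.__init__
               )

    item = mytable['examplesection'].get('example-knob')
    assert item.experimental is True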