@@ -790,6 +790,63 b' def perfphases(ui, repo, **opts):'
790 | 790 |     timer(d)
791 | 791 |     fm.end()
792 | 792 |
793 | @command('perfphasesremote',
794 |          [], "[DEST]")
795 | def perfphasesremote(ui, repo, dest=None, **opts):
796 |     """benchmark time needed to analyse phases of the remote server"""
797 |     from mercurial.node import (
798 |         bin,
799 |     )
800 |     from mercurial import (
801 |         exchange,
802 |         hg,
803 |         phases,
804 |     )
805 |     timer, fm = gettimer(ui, opts)
806 |
807 |     path = ui.paths.getpath(dest, default=('default-push', 'default'))
808 |     if not path:
809 |         raise error.Abort(('default repository not configured!'),
810 |                           hint=("see 'hg help config.paths'"))
811 |     dest = path.pushloc or path.loc
812 |     branches = (path.branch, opts.get('branch') or [])
813 |     ui.status(('analysing phase of %s\n') % util.hidepassword(dest))
814 |     revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
815 |     other = hg.peer(repo, opts, dest)
816 |
817 |     # easier to perform discovery through the operation
818 |     op = exchange.pushoperation(repo, other)
819 |     exchange._pushdiscoverychangeset(op)
820 |
821 |     remotesubset = op.fallbackheads
822 |
823 |     with other.commandexecutor() as e:
824 |         remotephases = e.callcommand('listkeys',
825 |                                      {'namespace': 'phases'}).result()
826 |     del other
827 |     publishing = remotephases.get('publishing', False)
828 |     if publishing:
829 |         ui.status(('publishing: yes\n'))
830 |     else:
831 |         ui.status(('publishing: no\n'))
832 |
833 |     nodemap = repo.changelog.nodemap
834 |     nonpublishroots = 0
835 |     for nhex, phase in remotephases.iteritems():
836 |         if nhex == 'publishing':  # ignore data related to publish option
837 |             continue
838 |         node = bin(nhex)
839 |         if node in nodemap and int(phase):
840 |             nonpublishroots += 1
841 |     ui.status(('number of roots: %d\n') % len(remotephases))
842 |     ui.status(('number of known non public roots: %d\n') % nonpublishroots)
843 |     def d():
844 |         phases.remotephasessummary(repo,
845 |                                    remotesubset,
846 |                                    remotephases)
847 |     timer(d)
848 |     fm.end()
849 |
793 | 850 | @command('perfmanifest',[
794 | 851 |     ('m', 'manifest-rev', False, 'Look up a manifest node revision'),
795 | 852 |     ('', 'clear-disk', False, 'clear on-disk caches too'),
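The heart of the new `perfphasesremote` command is the loop over the reply of `listkeys('phases')`: apart from the special `publishing` key, every entry maps a hex node to a phase, and the command counts the roots that are both known locally and non-public before timing `phases.remotephasessummary`. A minimal standalone sketch of that counting step, using a made-up reply dict instead of a real peer (all node values and names below are illustrative, not Mercurial API calls):

    from binascii import unhexlify

    # Hypothetical reply of listkeys('phases'): hex node -> phase, plus the
    # 'publishing' flag. Values are invented for illustration only.
    remotephases = {
        'publishing': 'False',
        '1f0dee641bb7258c56bd60e93edfa2405381c41e': '1',  # draft root
        'a9e8f9d8b4f0c1d2e3f4a5b6c7d8e9f0a1b2c3d4': '1',  # draft root
    }

    # Stand-in for "node in repo.changelog.nodemap": nodes the local repo knows.
    known = {unhexlify('1f0dee641bb7258c56bd60e93edfa2405381c41e')}

    nonpublishroots = 0
    for nhex, phase in remotephases.items():
        if nhex == 'publishing':  # ignore data related to publish option
            continue
        if unhexlify(nhex) in known and int(phase):
            nonpublishroots += 1

    print('number of known non public roots: %d' % nonpublishroots)  # -> 1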
@@ -664,9 +664,39 b' def newheads(repo, heads, roots):'
664 | 664 |
665 | 665 |     * `heads`: define the first subset
666 | 666 |     * `roots`: define the second we subtract from the first"""
667 |     # prevent an import cycle
668 |     # phases > dagop > patch > copies > scmutil > obsolete > obsutil > phases
669 |     from . import dagop
670 |
667 | 671 |     repo = repo.unfiltered()
668 |     revs = repo.revs('heads(::%ln - (%ln::%ln))', heads, roots, heads)
669 |     return pycompat.maplist(repo.changelog.node, revs)
672 |     cl = repo.changelog
673 |     rev = cl.nodemap.get
674 |     if not roots:
675 |         return heads
676 |     if not heads or heads == [nullrev]:
677 |         return []
678 |     # The logic operates on revisions, convert arguments early for convenience
679 |     new_heads = set(rev(n) for n in heads if n != nullid)
680 |     roots = [rev(n) for n in roots]
681 |     if not heads or not roots:
682 |         return heads
683 |     # compute the area we need to remove
684 |     affected_zone = repo.revs("(%ld::%ld)", roots, new_heads)
685 |     # heads in the area are no longer heads
686 |     new_heads.difference_update(affected_zone)
687 |     # revisions in the area have children outside of it,
688 |     # they might be new heads
689 |     candidates = repo.revs("parents(%ld + (%ld and merge())) and not null",
690 |                            roots, affected_zone)
691 |     candidates -= affected_zone
692 |     if new_heads or candidates:
693 |         # remove candidates that are ancestors of other heads
694 |         new_heads.update(candidates)
695 |         prunestart = repo.revs("parents(%ld) and not null", new_heads)
696 |         pruned = dagop.reachableroots(repo, candidates, prunestart)
697 |         new_heads.difference_update(pruned)
698 |
699 |     return pycompat.maplist(cl.node, sorted(new_heads))
670 | 700 |
671 | 701 | def newcommitphase(ui):
672 | 702 |     """helper to get the target phase of new commit
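The semantics of `newheads` are unchanged by this rewrite: it still answers "what are the heads of `::heads` once `roots::heads` is removed", but the new code confines the expensive work to the affected zone instead of evaluating one large revset. As a reference point, a brute-force sketch of those semantics on a toy DAG; the parents map, rev numbers, and helpers are illustrative, not Mercurial APIs, and assume revision numbers are topologically ordered (parents before children):

    # Toy DAG: rev -> list of parent revs (0 is the root commit).
    parents = {0: [], 1: [0], 2: [1], 3: [1], 4: [2, 3]}

    def ancestors(revs):
        """revs plus all of their ancestors."""
        seen, stack = set(), list(revs)
        while stack:
            r = stack.pop()
            if r not in seen:
                seen.add(r)
                stack.extend(parents[r])
        return seen

    def newheads(heads, roots):
        """Heads of (::heads) - (roots::heads), computed by brute force."""
        reachable = ancestors(heads)
        # everything in roots::heads, i.e. roots and their descendants in ::heads
        banned = set()
        for r in sorted(reachable):  # relies on topological rev numbering
            if r in roots or any(p in banned for p in parents[r]):
                banned.add(r)
        keep = reachable - banned
        # a head is a member of `keep` with no child inside `keep`
        haschild = {p for r in keep for p in parents[r] if p in keep}
        return sorted(keep - haschild)

    print(newheads(heads={4}, roots={3}))  # [2]: dropping 3:: leaves 2 as the head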
@@ -263,13 +263,17 b' def _trimchunk(revlog, revs, startidx, e'
263 | 263 |     if endidx is None:
264 | 264 |         endidx = len(revs)
265 | 265 |
266 |     # Trim empty revs at the end, but never the very first revision of a chain
267 |     while endidx > 1 and endidx > startidx and length(revs[endidx - 1]) == 0:
266 |     # If we have a non-empty delta candidate, there is nothing to trim
267 |     if revs[endidx - 1] < len(revlog):
268 |         # Trim empty revs at the end, except the very first revision of a chain
269 |         while (endidx > 1
270 |                and endidx > startidx
271 |                and length(revs[endidx - 1]) == 0):
268 | 272 |             endidx -= 1
269 | 273 |
270 | 274 |     return revs[startidx:endidx]
271 | 275 |
272 | def _segmentspan(revlog, revs):
276 | def _segmentspan(revlog, revs, deltainfo=None):
273 | 277 |     """Get the byte span of a segment of revisions
274 | 278 |
275 | 279 |     revs is a sorted array of revision numbers
@@ -295,7 +299,14 b' def _segmentspan(revlog, revs):'
295 | 299 |     """
296 | 300 |     if not revs:
297 | 301 |         return 0
298 |     return revlog.end(revs[-1]) - revlog.start(revs[0])
302 |     if deltainfo is not None and len(revlog) <= revs[-1]:
303 |         if len(revs) == 1:
304 |             return deltainfo.deltalen
305 |         offset = revlog.end(len(revlog) - 1)
306 |         end = deltainfo.deltalen + offset
307 |     else:
308 |         end = revlog.end(revs[-1])
309 |     return end - revlog.start(revs[0])
299 | 310 |
300 | 311 | def _slicechunk(revlog, revs, deltainfo=None, targetsize=None):
301 | 312 |     """slice revs to reduce the amount of unrelated data to be read from disk.
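The extra branch matters when `revs[-1]` is the revision currently being considered for addition: it has no on-disk end offset yet, so its end is taken to be the end of the last stored revision plus the candidate delta's length. A small self-contained sketch of that arithmetic with invented offsets (plain lists standing in for the revlog index, not the real API):

    def segmentspan(starts, ends, revs, deltalen=None):
        """Byte span covered by `revs`, given per-rev start/end offsets.

        `deltalen`, when given, is the size of a candidate delta for a new
        revision numbered len(starts) that is not stored yet.
        """
        if not revs:
            return 0
        if deltalen is not None and revs[-1] >= len(starts):
            if len(revs) == 1:
                return deltalen           # only the candidate itself
            end = ends[-1] + deltalen     # end of stored data + candidate delta
        else:
            end = ends[revs[-1]]
        return end - starts[revs[0]]

    # Three stored revisions occupying [0, 100), [100, 250), [250, 330).
    starts, ends = [0, 100, 250], [100, 250, 330]
    print(segmentspan(starts, ends, [1, 2]))                # 230 bytes
    print(segmentspan(starts, ends, [1, 3], deltalen=120))  # 330 + 120 - 100 = 350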
@@ -527,7 +538,7 b' def _slicechunktodensity(revlog, revs, d'
527 | 538 |         yield revs
528 | 539 |         return
529 | 540 |
530 |     if deltainfo is not None:
541 |     if deltainfo is not None and deltainfo.deltalen:
531 | 542 |         revs = list(revs)
532 | 543 |         revs.append(nextrev)
533 | 544 |
@@ -2471,7 +2482,8 b' class revlog(object):'
2471 | 2482 |                 deltachain = []
2472 | 2483 |
2473 | 2484 |             chunks = _slicechunk(self, deltachain, deltainfo)
2474 |
2485 |             all_span = [_segmentspan(self, revs, deltainfo) for revs in chunks]
2486 |             distance = max(all_span)
2475 | 2487 |         else:
2476 | 2488 |             distance = deltainfo.distance
2477 | 2489 |
@@ -141,10 +141,10 b' delete a remote bookmark'
141 | 141 | bundle2-output: payload chunk size: 23
142 | 142 | bundle2-output: closing payload chunk
143 | 143 | bundle2-output: bundle part: "check:phases"
144 | bundle2-output-part: "check:phases" 4 bytes payload
144 | bundle2-output-part: "check:phases" 24 bytes payload
145 | 145 | bundle2-output: part 2: "CHECK:PHASES"
146 | 146 | bundle2-output: header chunk size: 19
147 | bundle2-output: payload chunk size: 4
147 | bundle2-output: payload chunk size: 24
148 | 148 | bundle2-output: closing payload chunk
149 | 149 | bundle2-output: bundle part: "pushkey"
150 | 150 | bundle2-output-part: "pushkey" (params: 4 mandatory) empty payload
@@ -180,9 +180,9 b' delete a remote bookmark'
180 | 180 | bundle2-input: part parameters: 0
181 | 181 | bundle2-input: found a handler for part check:phases
182 | 182 | bundle2-input-part: "check:phases" supported
183 | bundle2-input: payload chunk size: 4
183 | bundle2-input: payload chunk size: 24
184 | 184 | bundle2-input: payload chunk size: 0
185 | bundle2-input-part: total payload size 4
185 | bundle2-input-part: total payload size 24
186 | 186 | bundle2-input: part header size: 90
187 | 187 | bundle2-input: part type: "PUSHKEY"
188 | 188 | bundle2-input: part id: "3"
@@ -253,10 +253,10 b' delete a remote bookmark'
253 | 253 | bundle2-output: payload chunk size: 23
254 | 254 | bundle2-output: closing payload chunk
255 | 255 | bundle2-output: bundle part: "check:phases"
256 | bundle2-output-part: "check:phases" 4 bytes payload
256 | bundle2-output-part: "check:phases" 24 bytes payload
257 | 257 | bundle2-output: part 2: "CHECK:PHASES"
258 | 258 | bundle2-output: header chunk size: 19
259 | bundle2-output: payload chunk size: 4
259 | bundle2-output: payload chunk size: 24
260 | 260 | bundle2-output: closing payload chunk
261 | 261 | bundle2-output: bundle part: "bookmarks"
262 | 262 | bundle2-output-part: "bookmarks" 23 bytes payload
@@ -293,9 +293,9 b' delete a remote bookmark'
293 | 293 | bundle2-input: part parameters: 0
294 | 294 | bundle2-input: found a handler for part check:phases
295 | 295 | bundle2-input-part: "check:phases" supported
296 | bundle2-input: payload chunk size: 4
296 | bundle2-input: payload chunk size: 24
297 | 297 | bundle2-input: payload chunk size: 0
298 | bundle2-input-part: total payload size 4
298 | bundle2-input-part: total payload size 24
299 | 299 | bundle2-input: part header size: 16
300 | 300 | bundle2-input: part type: "BOOKMARKS"
301 | 301 | bundle2-input: part id: "3"
@@ -103,6 +103,8 b' perfstatus'
103 | 103 | perfpathcopies
104 | 104 | (no help text available)
105 | 105 | perfphases    benchmark phasesets computation
106 | perfphasesremote
107 |               benchmark time needed to analyse phases of the remote server
106 | 108 | perfrawfiles  (no help text available)
107 | 109 | perfrevlogchunks
108 | 110 |               Benchmark operations on revlog chunks.
@@ -212,4 +214,7 b' Check perf.py for historical portability'
212 | 214 | contrib/perf.py:\d+: (re)
213 | 215 | > from mercurial import (
214 | 216 | import newer module separately in try clause for early Mercurial
217 | contrib/perf.py:\d+: (re)
218 | > from mercurial import (
219 | import newer module separately in try clause for early Mercurial
215 | 220 | [1]