Show More
@@ -1,412 +1,412 b'' | |||||
1 | # perf.py - performance test routines |
|
1 | # perf.py - performance test routines | |
2 | '''helper extension to measure performance''' |
|
2 | '''helper extension to measure performance''' | |
3 |
|
3 | |||
4 | from mercurial import cmdutil, scmutil, util, commands, obsolete |
|
4 | from mercurial import cmdutil, scmutil, util, commands, obsolete | |
5 | from mercurial import repoview, branchmap, merge, copies |
|
5 | from mercurial import repoview, branchmap, merge, copies | |
6 | import time, os, sys |
|
6 | import time, os, sys | |
7 |
|
7 | |||
8 | cmdtable = {} |
|
8 | cmdtable = {} | |
9 | command = cmdutil.command(cmdtable) |
|
9 | command = cmdutil.command(cmdtable) | |
10 |
|
10 | |||
def timer(func, title=None):
    """Call func repeatedly and report the best timing to stderr.

    Runs until at least 3 seconds have elapsed with >= 100 iterations,
    or 10 seconds with >= 3 iterations, then prints the minimum
    (wall, combined, user, system) sample.
    """
    samples = []
    start = time.time()
    runs = 0
    while True:
        os_before = os.times()
        wall_before = time.time()
        ret = func()
        wall_after = time.time()
        os_after = os.times()
        runs += 1
        samples.append((wall_after - wall_before,
                        os_after[0] - os_before[0],
                        os_after[1] - os_before[1]))
        elapsed = wall_after - start
        # stop once we have enough samples for a stable minimum
        if (elapsed > 3 and runs >= 100) or (elapsed > 10 and runs >= 3):
            break
    if title:
        sys.stderr.write("! %s\n" % title)
    if ret:
        sys.stderr.write("! result: %s\n" % ret)
    best = min(samples)
    sys.stderr.write("! wall %f comb %f user %f sys %f (best of %d)\n"
                     % (best[0], best[1] + best[2], best[1], best[2], runs))
35 |
|
35 | |||
@command('perfwalk')
def perfwalk(ui, repo, *pats):
    """Benchmark walking the working directory, falling back across old APIs."""
    try:
        matcher = scmutil.match(repo[None], pats, {})
        def walknew():
            return len(list(repo.dirstate.walk(matcher, [], True, False)))
        timer(walknew)
    except Exception:
        try:
            matcher = scmutil.match(repo[None], pats, {})
            def walkstat():
                return len([f for st, f, kind in
                            repo.dirstate.statwalk([], matcher)])
            timer(walkstat)
        except Exception:
            # oldest API: cmdutil.walk
            def walkold():
                return len(list(cmdutil.walk(repo, pats, {})))
            timer(walkold)
47 |
|
47 | |||
@command('perfannotate')
def perfannotate(ui, repo, f):
    """Benchmark annotating file f at the working directory parent."""
    fctx = repo['.'][f]
    def run():
        return len(fctx.annotate(True))
    timer(run)
52 |
|
52 | |||
@command('perfstatus',
         [('u', 'unknown', False,
           'ask status to look for unknown files')])
def perfstatus(ui, repo, **opts):
    """Benchmark repo.status(), counting the files in every status class."""
    def run():
        return sum(map(len, repo.status(**opts)))
    timer(run)
61 |
|
61 | |||
@command('perfaddremove')
def perfaddremove(ui, repo):
    """Benchmark a dry-run addremove with quiet output.

    Output is silenced so ui writes do not dominate the timing.
    """
    # Read/flip the quiet flag *before* entering try: the original assigned
    # oldquiet inside the try block, so a failure on that very line would
    # raise NameError in finally and mask the real error.
    oldquiet = repo.ui.quiet
    repo.ui.quiet = True
    try:
        timer(lambda: scmutil.addremove(repo, dry_run=True))
    finally:
        repo.ui.quiet = oldquiet
70 |
|
70 | |||
def clearcaches(cl):
    """Drop a revlog's lookup caches, across internal API generations."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, 'clearcaches'):
        cl.clearcaches()
        return
    if util.safehasattr(cl, '_nodecache'):
        # older hg: reset the node->rev cache by hand
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
79 |
|
79 | |||
@command('perfheads')
def perfheads(ui, repo):
    """Benchmark computing the changelog head revisions with cold caches."""
    cl = repo.changelog
    def run():
        len(cl.headrevs())
        clearcaches(cl)  # make each iteration start cold
    timer(run)
87 |
|
87 | |||
@command('perftags')
def perftags(ui, repo):
    """Benchmark recomputing the tag cache from scratch."""
    import mercurial.changelog
    import mercurial.manifest
    def run():
        # replace changelog/manifest and drop the tag cache so tags()
        # does a full recomputation each time
        repo.changelog = mercurial.changelog.changelog(repo.sopener)
        repo.manifest = mercurial.manifest.manifest(repo.sopener)
        repo._tags = None
        return len(repo.tags())
    timer(run)
98 |
|
98 | |||
@command('perfancestors')
def perfancestors(ui, repo):
    """Benchmark iterating over the ancestors of all heads."""
    heads = repo.changelog.headrevs()
    def run():
        for _rev in repo.changelog.ancestors(heads):
            pass
    timer(run)
106 |
|
106 | |||
@command('perfancestorset')
def perfancestorset(ui, repo, revset):
    """Benchmark membership tests against a lazy ancestor set."""
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def run():
        ancestors = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in ancestors  # membership test is the operation under test
    timer(run)
116 |
|
116 | |||
@command('perfdirs')
def perfdirs(ui, repo):
    """Benchmark rebuilding the dirstate directory map."""
    dirstate = repo.dirstate
    'a' in dirstate  # force the dirstate to be loaded before timing
    def run():
        dirstate.dirs()
        del dirstate._dirs  # drop the cache so every run rebuilds it
    timer(run)
125 |
|
125 | |||
@command('perfdirstate')
def perfdirstate(ui, repo):
    """Benchmark loading the dirstate from disk."""
    "a" in repo.dirstate  # warm once outside the timed loop
    def run():
        repo.dirstate.invalidate()
        "a" in repo.dirstate  # membership forces a reload
    timer(run)
133 |
|
133 | |||
@command('perfdirstatedirs')
def perfdirstatedirs(ui, repo):
    """Benchmark computing dirstate._dirs."""
    "a" in repo.dirstate  # make sure the dirstate itself is loaded
    def run():
        "a" in repo.dirstate._dirs
        del repo.dirstate._dirs  # force recomputation next iteration
    timer(run)
141 |
|
141 | |||
@command('perfdirstatewrite')
def perfdirstatewrite(ui, repo):
    """Benchmark writing the dirstate out to disk."""
    ds = repo.dirstate
    "a" in ds  # load before timing
    def run():
        ds._dirty = True  # force a real write even with no changes
        ds.write()
    timer(run)
150 |
|
150 | |||
@command('perfmergecalculate',
         [('r', 'rev', '.', 'rev to merge against')])
def perfmergecalculate(ui, repo, rev):
    """Benchmark the merge action calculation against REV."""
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so
    # prime that cache
    wctx.dirty()
    def run():
        # acceptremote=True so no prompts interrupt the benchmark
        merge.calculateupdates(repo, wctx, rctx, ancestor, False, False,
                               False, acceptremote=True)
    timer(run)
166 |
|
166 | |||
@command('perfpathcopies', [], "REV REV")
def perfpathcopies(ui, repo, rev1, rev2):
    """Benchmark copy tracing between two revisions."""
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def run():
        copies.pathcopies(ctx1, ctx2)
    timer(run)
174 |
|
174 | |||
@command('perfmanifest', [], 'REV')
def perfmanifest(ui, repo, rev):
    """Benchmark reading a manifest with cold caches."""
    ctx = scmutil.revsingle(repo, rev, rev)
    mnode = ctx.manifestnode()
    def run():
        # empty both manifest caches so read() does real work
        repo.manifest._mancache.clear()
        repo.manifest._cache = None
        repo.manifest.read(mnode)
    timer(run)
184 |
|
184 | |||
@command('perfchangeset')
def perfchangeset(ui, repo, rev):
    """Benchmark reading a single changelog entry."""
    node = repo[rev].node()
    def run():
        repo.changelog.read(node)
    timer(run)
192 |
|
192 | |||
@command('perfindex')
def perfindex(ui, repo):
    """Benchmark opening the changelog index and resolving tip."""
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24  # disable lazy parser in old hg
    tipnode = repo["tip"].node()
    def run():
        cl = mercurial.revlog.revlog(repo.sopener, "00changelog.i")
        cl.rev(tipnode)
    timer(run)
202 |
|
202 | |||
@command('perfstartup')
def perfstartup(ui, repo):
    """Benchmark bare hg startup time via a subshell."""
    cmd = sys.argv[0]
    def run():
        # empty HGRCPATH keeps user config from skewing startup cost
        os.system("HGRCPATH= %s version -q > /dev/null" % cmd)
    timer(run)
209 |
|
209 | |||
@command('perfparents')
def perfparents(ui, repo):
    """Benchmark parent lookups for the first 1000 changesets."""
    nodes = [repo.changelog.node(i) for i in xrange(1000)]
    def run():
        for node in nodes:
            repo.changelog.parents(node)
    timer(run)
217 |
|
217 | |||
@command('perflookup')
def perflookup(ui, repo, rev):
    """Benchmark resolving a revision identifier to a node."""
    def run():
        return len(repo.lookup(rev))
    timer(run)
221 |
|
221 | |||
@command('perfrevrange')
def perfrevrange(ui, repo, *specs):
    """Benchmark parsing and evaluating revision ranges."""
    revrange = scmutil.revrange  # hoist the attribute lookup out of the loop
    def run():
        return len(revrange(repo, specs))
    timer(run)
226 |
|
226 | |||
@command('perfnodelookup')
def perfnodelookup(ui, repo, rev):
    """Benchmark node-to-rev lookup with cold caches."""
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24  # disable lazy parser in old hg
    node = repo[rev].node()
    cl = mercurial.revlog.revlog(repo.sopener, "00changelog.i")
    def run():
        cl.rev(node)
        clearcaches(cl)  # each lookup starts from a cold cache
    timer(run)
237 |
|
237 | |||
@command('perflog',
         [('', 'rename', False, 'ask log to follow renames')])
def perflog(ui, repo, **opts):
    """Benchmark a full 'hg log' run with output discarded."""
    ui.pushbuffer()  # swallow log output so I/O does not dominate
    def run():
        commands.log(ui, repo, rev=[], date='', user='',
                     copies=opts.get('rename'))
    timer(run)
    ui.popbuffer()
245 |
|
245 | |||
@command('perftemplating')
def perftemplating(ui, repo):
    """Benchmark templated log output, discarded."""
    ui.pushbuffer()  # swallow output so rendering cost is what we time
    def run():
        commands.log(ui, repo, rev=[], date='', user='',
                     template='{date|shortdate} [{rev}:{node|short}]'
                              ' {author|person}: {desc|firstline}\n')
    timer(run)
    ui.popbuffer()
253 |
|
253 | |||
@command('perfcca')
def perfcca(ui, repo):
    """Benchmark creating a case-collision auditor."""
    def run():
        return scmutil.casecollisionauditor(ui, False, repo.dirstate)
    timer(run)
257 |
|
257 | |||
@command('perffncacheload')
def perffncacheload(ui, repo):
    """Benchmark loading the fncache from disk."""
    store = repo.store
    def run():
        store.fncache._load()
    timer(run)
264 |
|
264 | |||
@command('perffncachewrite')
def perffncachewrite(ui, repo):
    """Benchmark writing the fncache out to disk."""
    store = repo.store
    store.fncache._load()  # load once, outside the timed loop
    def run():
        store.fncache._dirty = True  # force a real write
        store.fncache.write()
    timer(run)
273 |
|
273 | |||
@command('perffncacheencode')
def perffncacheencode(ui, repo):
    """Benchmark path-encoding every fncache entry."""
    store = repo.store
    store.fncache._load()  # load once, outside the timed loop
    def run():
        for path in store.fncache.entries:
            store.encode(path)
    timer(run)
282 |
|
282 | |||
@command('perfdiffwd')
def perfdiffwd(ui, repo):
    """Profile diff of working directory changes"""
    flagnames = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # time the diff under each whitespace-option combination
    for combo in ('', 'w', 'b', 'B', 'wB'):
        diffopts = dict((flagnames[flag], '1') for flag in combo)
        def run(diffopts=diffopts):  # bind per-iteration to avoid late binding
            ui.pushbuffer()
            commands.diff(ui, repo, **diffopts)
            ui.popbuffer()
        timer(run, 'diffopts: %s' % (combo and ('-' + combo) or 'none'))
300 |
|
300 | |||
@command('perfrevlog',
         [('d', 'dist', 100, 'distance between the revisions')],
         "[INDEXFILE]")
def perfrevlog(ui, repo, file_, **opts):
    """Benchmark reading every dist-th revision of a revlog."""
    from mercurial import revlog
    dist = opts['dist']
    def run():
        r = revlog.revlog(lambda fn: open(fn, 'rb'), file_)
        for rev in xrange(0, len(r), dist):
            r.revision(r.node(rev))

    timer(run)
313 |
|
313 | |||
@command('perfrevset',
         [('C', 'clear', False, 'clear volatile cache between each call.')],
         "REVSET")
def perfrevset(ui, repo, expr, clear=False):
    """benchmark the execution time of a revset

    Use the --clear option if need to evaluate the impact of build volatile
    revisions set cache on the revset execution. Volatile cache hold filtered
    and obsolete related cache."""
    # docstring fix: the declared option is --clear, not --clean
    def d():
        if clear:
            repo.invalidatevolatilesets()
        repo.revs(expr)
    timer(d)
328 |
|
328 | |||
@command('perfvolatilesets')
def perfvolatilesets(ui, repo, *names):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    repo = repo.unfiltered()

    def makebench(compute, name):
        # closure that recomputes one volatile set from a cold cache
        def run():
            repo.invalidatevolatilesets()
            compute(repo, name)
        return run

    # obsolescence-related sets
    wantedobs = sorted(obsolete.cachefuncs)
    if names:
        wantedobs = [n for n in wantedobs if n in names]

    for name in wantedobs:
        timer(makebench(obsolete.getrevs, name), title=name)

    # filtered-revision sets
    wantedfilters = sorted(repoview.filtertable)
    if names:
        wantedfilters = [n for n in wantedfilters if n in names]

    for name in wantedfilters:
        timer(makebench(repoview.filteredrevs, name), title=name)
361 |
|
361 | |||
@command('perfbranchmap',
         [('f', 'full', False,
           'Includes build time of subset'),
         ])
def perfbranchmap(ui, repo, full=False):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        def d():
            if full:
                view._branchcaches.clear()
            else:
                view._branchcaches.pop(filtername, None)
            view.branchmap()
        return d
    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = branchmap.subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, 'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    # add unfiltered
    allfilters.append(None)
    oldread = branchmap.read
    oldwrite = branchmap.branchcache.write
    try:
        branchmap.read = lambda repo: None
        # BUG FIX: the original assigned a no-op to branchmap.write, a
        # module-level name nothing calls, while saving and restoring
        # branchmap.branchcache.write -- so cache writes were never actually
        # disabled.  Override the method we saved; it is an unbound method,
        # hence the extra self argument.
        branchmap.branchcache.write = lambda self, repo: None
        for name in allfilters:
            timer(getbranchmap(name), title=str(name))
    finally:
        branchmap.read = oldread
        branchmap.branchcache.write = oldwrite
@@ -1,210 +1,221 b'' | |||||
1 | # branchmap.py - logic to computes, maintain and stores branchmap for local repo |
|
1 | # branchmap.py - logic to computes, maintain and stores branchmap for local repo | |
2 | # |
|
2 | # | |
3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | from node import bin, hex, nullid, nullrev |
|
8 | from node import bin, hex, nullid, nullrev | |
9 | import encoding |
|
9 | import encoding | |
10 |
import util |
|
10 | import util | |
11 |
|
11 | |||
12 | def _filename(repo): |
|
12 | def _filename(repo): | |
13 | """name of a branchcache file for a given repo or repoview""" |
|
13 | """name of a branchcache file for a given repo or repoview""" | |
14 | filename = "cache/branchheads" |
|
14 | filename = "cache/branchheads" | |
15 | if repo.filtername: |
|
15 | if repo.filtername: | |
16 | filename = '%s-%s' % (filename, repo.filtername) |
|
16 | filename = '%s-%s' % (filename, repo.filtername) | |
17 | return filename |
|
17 | return filename | |
18 |
|
18 | |||
19 | def read(repo): |
|
19 | def read(repo): | |
20 | try: |
|
20 | try: | |
21 | f = repo.opener(_filename(repo)) |
|
21 | f = repo.opener(_filename(repo)) | |
22 | lines = f.read().split('\n') |
|
22 | lines = f.read().split('\n') | |
23 | f.close() |
|
23 | f.close() | |
24 | except (IOError, OSError): |
|
24 | except (IOError, OSError): | |
25 | return None |
|
25 | return None | |
26 |
|
26 | |||
27 | try: |
|
27 | try: | |
28 | cachekey = lines.pop(0).split(" ", 2) |
|
28 | cachekey = lines.pop(0).split(" ", 2) | |
29 | last, lrev = cachekey[:2] |
|
29 | last, lrev = cachekey[:2] | |
30 | last, lrev = bin(last), int(lrev) |
|
30 | last, lrev = bin(last), int(lrev) | |
31 | filteredhash = None |
|
31 | filteredhash = None | |
32 | if len(cachekey) > 2: |
|
32 | if len(cachekey) > 2: | |
33 | filteredhash = bin(cachekey[2]) |
|
33 | filteredhash = bin(cachekey[2]) | |
34 | partial = branchcache(tipnode=last, tiprev=lrev, |
|
34 | partial = branchcache(tipnode=last, tiprev=lrev, | |
35 | filteredhash=filteredhash) |
|
35 | filteredhash=filteredhash) | |
36 | if not partial.validfor(repo): |
|
36 | if not partial.validfor(repo): | |
37 | # invalidate the cache |
|
37 | # invalidate the cache | |
38 | raise ValueError('tip differs') |
|
38 | raise ValueError('tip differs') | |
39 | for l in lines: |
|
39 | for l in lines: | |
40 | if not l: |
|
40 | if not l: | |
41 | continue |
|
41 | continue | |
42 | node, label = l.split(" ", 1) |
|
42 | node, label = l.split(" ", 1) | |
43 | label = encoding.tolocal(label.strip()) |
|
43 | label = encoding.tolocal(label.strip()) | |
44 | if not node in repo: |
|
44 | if not node in repo: | |
45 | raise ValueError('node %s does not exist' % node) |
|
45 | raise ValueError('node %s does not exist' % node) | |
46 | partial.setdefault(label, []).append(bin(node)) |
|
46 | partial.setdefault(label, []).append(bin(node)) | |
47 | except KeyboardInterrupt: |
|
47 | except KeyboardInterrupt: | |
48 | raise |
|
48 | raise | |
49 | except Exception, inst: |
|
49 | except Exception, inst: | |
50 | if repo.ui.debugflag: |
|
50 | if repo.ui.debugflag: | |
51 | msg = 'invalid branchheads cache' |
|
51 | msg = 'invalid branchheads cache' | |
52 | if repo.filtername is not None: |
|
52 | if repo.filtername is not None: | |
53 | msg += ' (%s)' % repo.filtername |
|
53 | msg += ' (%s)' % repo.filtername | |
54 | msg += ': %s\n' |
|
54 | msg += ': %s\n' | |
55 | repo.ui.warn(msg % inst) |
|
55 | repo.ui.warn(msg % inst) | |
56 | partial = None |
|
56 | partial = None | |
57 | return partial |
|
57 | return partial | |
58 |
|
58 | |||
59 |
|
59 | |||
60 |
|
60 | |||
|
61 | ### Nearest subset relation | |||
|
62 | # Nearest subset of filter X is a filter Y so that: | |||
|
63 | # * Y is included in X, | |||
|
64 | # * X - Y is as small as possible. | |||
|
65 | # This create and ordering used for branchmap purpose. | |||
|
66 | # the ordering may be partial | |||
|
67 | subsettable = {None: 'visible', | |||
|
68 | 'visible': 'served', | |||
|
69 | 'served': 'immutable', | |||
|
70 | 'immutable': 'base'} | |||
|
71 | ||||
61 | def updatecache(repo): |
|
72 | def updatecache(repo): | |
62 | cl = repo.changelog |
|
73 | cl = repo.changelog | |
63 | filtername = repo.filtername |
|
74 | filtername = repo.filtername | |
64 | partial = repo._branchcaches.get(filtername) |
|
75 | partial = repo._branchcaches.get(filtername) | |
65 |
|
76 | |||
66 | revs = [] |
|
77 | revs = [] | |
67 | if partial is None or not partial.validfor(repo): |
|
78 | if partial is None or not partial.validfor(repo): | |
68 | partial = read(repo) |
|
79 | partial = read(repo) | |
69 | if partial is None: |
|
80 | if partial is None: | |
70 |
subsetname = |
|
81 | subsetname = subsettable.get(filtername) | |
71 | if subsetname is None: |
|
82 | if subsetname is None: | |
72 | partial = branchcache() |
|
83 | partial = branchcache() | |
73 | else: |
|
84 | else: | |
74 | subset = repo.filtered(subsetname) |
|
85 | subset = repo.filtered(subsetname) | |
75 | partial = subset.branchmap().copy() |
|
86 | partial = subset.branchmap().copy() | |
76 | extrarevs = subset.changelog.filteredrevs - cl.filteredrevs |
|
87 | extrarevs = subset.changelog.filteredrevs - cl.filteredrevs | |
77 | revs.extend(r for r in extrarevs if r <= partial.tiprev) |
|
88 | revs.extend(r for r in extrarevs if r <= partial.tiprev) | |
78 | revs.extend(cl.revs(start=partial.tiprev + 1)) |
|
89 | revs.extend(cl.revs(start=partial.tiprev + 1)) | |
79 | if revs: |
|
90 | if revs: | |
80 | partial.update(repo, revs) |
|
91 | partial.update(repo, revs) | |
81 | partial.write(repo) |
|
92 | partial.write(repo) | |
82 | assert partial.validfor(repo), filtername |
|
93 | assert partial.validfor(repo), filtername | |
83 | repo._branchcaches[repo.filtername] = partial |
|
94 | repo._branchcaches[repo.filtername] = partial | |
84 |
|
95 | |||
85 | class branchcache(dict): |
|
96 | class branchcache(dict): | |
86 | """A dict like object that hold branches heads cache""" |
|
97 | """A dict like object that hold branches heads cache""" | |
87 |
|
98 | |||
88 | def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev, |
|
99 | def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev, | |
89 | filteredhash=None): |
|
100 | filteredhash=None): | |
90 | super(branchcache, self).__init__(entries) |
|
101 | super(branchcache, self).__init__(entries) | |
91 | self.tipnode = tipnode |
|
102 | self.tipnode = tipnode | |
92 | self.tiprev = tiprev |
|
103 | self.tiprev = tiprev | |
93 | self.filteredhash = filteredhash |
|
104 | self.filteredhash = filteredhash | |
94 |
|
105 | |||
95 | def _hashfiltered(self, repo): |
|
106 | def _hashfiltered(self, repo): | |
96 | """build hash of revision filtered in the current cache |
|
107 | """build hash of revision filtered in the current cache | |
97 |
|
108 | |||
98 | Tracking tipnode and tiprev is not enough to ensure validity of the |
|
109 | Tracking tipnode and tiprev is not enough to ensure validity of the | |
99 | cache as they do not help to distinct cache that ignored various |
|
110 | cache as they do not help to distinct cache that ignored various | |
100 | revision bellow tiprev. |
|
111 | revision bellow tiprev. | |
101 |
|
112 | |||
102 | To detect such difference, we build a cache of all ignored revisions. |
|
113 | To detect such difference, we build a cache of all ignored revisions. | |
103 | """ |
|
114 | """ | |
104 | cl = repo.changelog |
|
115 | cl = repo.changelog | |
105 | if not cl.filteredrevs: |
|
116 | if not cl.filteredrevs: | |
106 | return None |
|
117 | return None | |
107 | key = None |
|
118 | key = None | |
108 | revs = sorted(r for r in cl.filteredrevs if r <= self.tiprev) |
|
119 | revs = sorted(r for r in cl.filteredrevs if r <= self.tiprev) | |
109 | if revs: |
|
120 | if revs: | |
110 | s = util.sha1() |
|
121 | s = util.sha1() | |
111 | for rev in revs: |
|
122 | for rev in revs: | |
112 | s.update('%s;' % rev) |
|
123 | s.update('%s;' % rev) | |
113 | key = s.digest() |
|
124 | key = s.digest() | |
114 | return key |
|
125 | return key | |
115 |
|
126 | |||
116 | def validfor(self, repo): |
|
127 | def validfor(self, repo): | |
117 | """Is the cache content valid regarding a repo |
|
128 | """Is the cache content valid regarding a repo | |
118 |
|
129 | |||
119 | - False when cached tipnode is unknown or if we detect a strip. |
|
130 | - False when cached tipnode is unknown or if we detect a strip. | |
120 | - True when cache is up to date or a subset of current repo.""" |
|
131 | - True when cache is up to date or a subset of current repo.""" | |
121 | try: |
|
132 | try: | |
122 | return ((self.tipnode == repo.changelog.node(self.tiprev)) |
|
133 | return ((self.tipnode == repo.changelog.node(self.tiprev)) | |
123 | and (self.filteredhash == self._hashfiltered(repo))) |
|
134 | and (self.filteredhash == self._hashfiltered(repo))) | |
124 | except IndexError: |
|
135 | except IndexError: | |
125 | return False |
|
136 | return False | |
126 |
|
137 | |||
127 | def copy(self): |
|
138 | def copy(self): | |
128 | """return an deep copy of the branchcache object""" |
|
139 | """return an deep copy of the branchcache object""" | |
129 | return branchcache(self, self.tipnode, self.tiprev, self.filteredhash) |
|
140 | return branchcache(self, self.tipnode, self.tiprev, self.filteredhash) | |
130 |
|
141 | |||
131 | def write(self, repo): |
|
142 | def write(self, repo): | |
132 | try: |
|
143 | try: | |
133 | f = repo.opener(_filename(repo), "w", atomictemp=True) |
|
144 | f = repo.opener(_filename(repo), "w", atomictemp=True) | |
134 | cachekey = [hex(self.tipnode), str(self.tiprev)] |
|
145 | cachekey = [hex(self.tipnode), str(self.tiprev)] | |
135 | if self.filteredhash is not None: |
|
146 | if self.filteredhash is not None: | |
136 | cachekey.append(hex(self.filteredhash)) |
|
147 | cachekey.append(hex(self.filteredhash)) | |
137 | f.write(" ".join(cachekey) + '\n') |
|
148 | f.write(" ".join(cachekey) + '\n') | |
138 | for label, nodes in sorted(self.iteritems()): |
|
149 | for label, nodes in sorted(self.iteritems()): | |
139 | for node in nodes: |
|
150 | for node in nodes: | |
140 | f.write("%s %s\n" % (hex(node), encoding.fromlocal(label))) |
|
151 | f.write("%s %s\n" % (hex(node), encoding.fromlocal(label))) | |
141 | f.close() |
|
152 | f.close() | |
142 | except (IOError, OSError, util.Abort): |
|
153 | except (IOError, OSError, util.Abort): | |
143 | # Abort may be raise by read only opener |
|
154 | # Abort may be raise by read only opener | |
144 | pass |
|
155 | pass | |
145 |
|
156 | |||
146 | def update(self, repo, revgen): |
|
157 | def update(self, repo, revgen): | |
147 | """Given a branchhead cache, self, that may have extra nodes or be |
|
158 | """Given a branchhead cache, self, that may have extra nodes or be | |
148 | missing heads, and a generator of nodes that are at least a superset of |
|
159 | missing heads, and a generator of nodes that are at least a superset of | |
149 | heads missing, this function updates self to be correct. |
|
160 | heads missing, this function updates self to be correct. | |
150 | """ |
|
161 | """ | |
151 | cl = repo.changelog |
|
162 | cl = repo.changelog | |
152 | # collect new branch entries |
|
163 | # collect new branch entries | |
153 | newbranches = {} |
|
164 | newbranches = {} | |
154 | getbranch = cl.branch |
|
165 | getbranch = cl.branch | |
155 | for r in revgen: |
|
166 | for r in revgen: | |
156 | newbranches.setdefault(getbranch(r), []).append(cl.node(r)) |
|
167 | newbranches.setdefault(getbranch(r), []).append(cl.node(r)) | |
157 | # if older branchheads are reachable from new ones, they aren't |
|
168 | # if older branchheads are reachable from new ones, they aren't | |
158 | # really branchheads. Note checking parents is insufficient: |
|
169 | # really branchheads. Note checking parents is insufficient: | |
159 | # 1 (branch a) -> 2 (branch b) -> 3 (branch a) |
|
170 | # 1 (branch a) -> 2 (branch b) -> 3 (branch a) | |
160 | for branch, newnodes in newbranches.iteritems(): |
|
171 | for branch, newnodes in newbranches.iteritems(): | |
161 | bheads = self.setdefault(branch, []) |
|
172 | bheads = self.setdefault(branch, []) | |
162 | # Remove candidate heads that no longer are in the repo (e.g., as |
|
173 | # Remove candidate heads that no longer are in the repo (e.g., as | |
163 | # the result of a strip that just happened). Avoid using 'node in |
|
174 | # the result of a strip that just happened). Avoid using 'node in | |
164 | # self' here because that dives down into branchcache code somewhat |
|
175 | # self' here because that dives down into branchcache code somewhat | |
165 | # recursively. |
|
176 | # recursively. | |
166 | bheadrevs = [cl.rev(node) for node in bheads |
|
177 | bheadrevs = [cl.rev(node) for node in bheads | |
167 | if cl.hasnode(node)] |
|
178 | if cl.hasnode(node)] | |
168 | newheadrevs = [cl.rev(node) for node in newnodes |
|
179 | newheadrevs = [cl.rev(node) for node in newnodes | |
169 | if cl.hasnode(node)] |
|
180 | if cl.hasnode(node)] | |
170 | ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs) |
|
181 | ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs) | |
171 | # Remove duplicates - nodes that are in newheadrevs and are already |
|
182 | # Remove duplicates - nodes that are in newheadrevs and are already | |
172 | # in bheadrevs. This can happen if you strip a node whose parent |
|
183 | # in bheadrevs. This can happen if you strip a node whose parent | |
173 | # was already a head (because they're on different branches). |
|
184 | # was already a head (because they're on different branches). | |
174 | bheadrevs = sorted(set(bheadrevs).union(newheadrevs)) |
|
185 | bheadrevs = sorted(set(bheadrevs).union(newheadrevs)) | |
175 |
|
186 | |||
176 | # Starting from tip means fewer passes over reachable. If we know |
|
187 | # Starting from tip means fewer passes over reachable. If we know | |
177 | # the new candidates are not ancestors of existing heads, we don't |
|
188 | # the new candidates are not ancestors of existing heads, we don't | |
178 | # have to examine ancestors of existing heads |
|
189 | # have to examine ancestors of existing heads | |
179 | if ctxisnew: |
|
190 | if ctxisnew: | |
180 | iterrevs = sorted(newheadrevs) |
|
191 | iterrevs = sorted(newheadrevs) | |
181 | else: |
|
192 | else: | |
182 | iterrevs = list(bheadrevs) |
|
193 | iterrevs = list(bheadrevs) | |
183 |
|
194 | |||
184 | # This loop prunes out two kinds of heads - heads that are |
|
195 | # This loop prunes out two kinds of heads - heads that are | |
185 | # superseded by a head in newheadrevs, and newheadrevs that are not |
|
196 | # superseded by a head in newheadrevs, and newheadrevs that are not | |
186 | # heads because an existing head is their descendant. |
|
197 | # heads because an existing head is their descendant. | |
187 | while iterrevs: |
|
198 | while iterrevs: | |
188 | latest = iterrevs.pop() |
|
199 | latest = iterrevs.pop() | |
189 | if latest not in bheadrevs: |
|
200 | if latest not in bheadrevs: | |
190 | continue |
|
201 | continue | |
191 | ancestors = set(cl.ancestors([latest], |
|
202 | ancestors = set(cl.ancestors([latest], | |
192 | bheadrevs[0])) |
|
203 | bheadrevs[0])) | |
193 | if ancestors: |
|
204 | if ancestors: | |
194 | bheadrevs = [b for b in bheadrevs if b not in ancestors] |
|
205 | bheadrevs = [b for b in bheadrevs if b not in ancestors] | |
195 | self[branch] = [cl.node(rev) for rev in bheadrevs] |
|
206 | self[branch] = [cl.node(rev) for rev in bheadrevs] | |
196 | tiprev = max(bheadrevs) |
|
207 | tiprev = max(bheadrevs) | |
197 | if tiprev > self.tiprev: |
|
208 | if tiprev > self.tiprev: | |
198 | self.tipnode = cl.node(tiprev) |
|
209 | self.tipnode = cl.node(tiprev) | |
199 | self.tiprev = tiprev |
|
210 | self.tiprev = tiprev | |
200 |
|
211 | |||
201 | if not self.validfor(repo): |
|
212 | if not self.validfor(repo): | |
202 | # cache key are not valid anymore |
|
213 | # cache key are not valid anymore | |
203 | self.tipnode = nullid |
|
214 | self.tipnode = nullid | |
204 | self.tiprev = nullrev |
|
215 | self.tiprev = nullrev | |
205 | for heads in self.values(): |
|
216 | for heads in self.values(): | |
206 | tiprev = max(cl.rev(node) for node in heads) |
|
217 | tiprev = max(cl.rev(node) for node in heads) | |
207 | if tiprev > self.tiprev: |
|
218 | if tiprev > self.tiprev: | |
208 | self.tipnode = cl.node(tiprev) |
|
219 | self.tipnode = cl.node(tiprev) | |
209 | self.tiprev = tiprev |
|
220 | self.tiprev = tiprev | |
210 | self.filteredhash = self._hashfiltered(repo) |
|
221 | self.filteredhash = self._hashfiltered(repo) |
@@ -1,218 +1,207 b'' | |||||
1 | # repoview.py - Filtered view of a localrepo object |
|
1 | # repoview.py - Filtered view of a localrepo object | |
2 | # |
|
2 | # | |
3 | # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org> |
|
3 | # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org> | |
4 | # Logilab SA <contact@logilab.fr> |
|
4 | # Logilab SA <contact@logilab.fr> | |
5 | # |
|
5 | # | |
6 | # This software may be used and distributed according to the terms of the |
|
6 | # This software may be used and distributed according to the terms of the | |
7 | # GNU General Public License version 2 or any later version. |
|
7 | # GNU General Public License version 2 or any later version. | |
8 |
|
8 | |||
9 | import copy |
|
9 | import copy | |
10 | import phases |
|
10 | import phases | |
11 | import util |
|
11 | import util | |
12 | import obsolete, revset |
|
12 | import obsolete, revset | |
13 |
|
13 | |||
14 |
|
14 | |||
15 | def hideablerevs(repo): |
|
15 | def hideablerevs(repo): | |
16 | """Revisions candidates to be hidden |
|
16 | """Revisions candidates to be hidden | |
17 |
|
17 | |||
18 | This is a standalone function to help extensions to wrap it.""" |
|
18 | This is a standalone function to help extensions to wrap it.""" | |
19 | return obsolete.getrevs(repo, 'obsolete') |
|
19 | return obsolete.getrevs(repo, 'obsolete') | |
20 |
|
20 | |||
21 | def computehidden(repo): |
|
21 | def computehidden(repo): | |
22 | """compute the set of hidden revision to filter |
|
22 | """compute the set of hidden revision to filter | |
23 |
|
23 | |||
24 | During most operation hidden should be filtered.""" |
|
24 | During most operation hidden should be filtered.""" | |
25 | assert not repo.changelog.filteredrevs |
|
25 | assert not repo.changelog.filteredrevs | |
26 | hideable = hideablerevs(repo) |
|
26 | hideable = hideablerevs(repo) | |
27 | if hideable: |
|
27 | if hideable: | |
28 | cl = repo.changelog |
|
28 | cl = repo.changelog | |
29 | firsthideable = min(hideable) |
|
29 | firsthideable = min(hideable) | |
30 | revs = cl.revs(start=firsthideable) |
|
30 | revs = cl.revs(start=firsthideable) | |
31 | blockers = [r for r in revset._children(repo, revs, hideable) |
|
31 | blockers = [r for r in revset._children(repo, revs, hideable) | |
32 | if r not in hideable] |
|
32 | if r not in hideable] | |
33 | for par in repo[None].parents(): |
|
33 | for par in repo[None].parents(): | |
34 | blockers.append(par.rev()) |
|
34 | blockers.append(par.rev()) | |
35 | for bm in repo._bookmarks.values(): |
|
35 | for bm in repo._bookmarks.values(): | |
36 | blockers.append(repo[bm].rev()) |
|
36 | blockers.append(repo[bm].rev()) | |
37 | blocked = cl.ancestors(blockers, inclusive=True) |
|
37 | blocked = cl.ancestors(blockers, inclusive=True) | |
38 | return frozenset(r for r in hideable if r not in blocked) |
|
38 | return frozenset(r for r in hideable if r not in blocked) | |
39 | return frozenset() |
|
39 | return frozenset() | |
40 |
|
40 | |||
41 | def computeunserved(repo): |
|
41 | def computeunserved(repo): | |
42 | """compute the set of revision that should be filtered when used a server |
|
42 | """compute the set of revision that should be filtered when used a server | |
43 |
|
43 | |||
44 | Secret and hidden changeset should not pretend to be here.""" |
|
44 | Secret and hidden changeset should not pretend to be here.""" | |
45 | assert not repo.changelog.filteredrevs |
|
45 | assert not repo.changelog.filteredrevs | |
46 | # fast path in simple case to avoid impact of non optimised code |
|
46 | # fast path in simple case to avoid impact of non optimised code | |
47 | hiddens = filterrevs(repo, 'visible') |
|
47 | hiddens = filterrevs(repo, 'visible') | |
48 | if phases.hassecret(repo): |
|
48 | if phases.hassecret(repo): | |
49 | cl = repo.changelog |
|
49 | cl = repo.changelog | |
50 | secret = phases.secret |
|
50 | secret = phases.secret | |
51 | getphase = repo._phasecache.phase |
|
51 | getphase = repo._phasecache.phase | |
52 | first = min(cl.rev(n) for n in repo._phasecache.phaseroots[secret]) |
|
52 | first = min(cl.rev(n) for n in repo._phasecache.phaseroots[secret]) | |
53 | revs = cl.revs(start=first) |
|
53 | revs = cl.revs(start=first) | |
54 | secrets = set(r for r in revs if getphase(repo, r) >= secret) |
|
54 | secrets = set(r for r in revs if getphase(repo, r) >= secret) | |
55 | return frozenset(hiddens | secrets) |
|
55 | return frozenset(hiddens | secrets) | |
56 | else: |
|
56 | else: | |
57 | return hiddens |
|
57 | return hiddens | |
58 |
|
58 | |||
59 | def computemutable(repo): |
|
59 | def computemutable(repo): | |
60 | """compute the set of revision that should be filtered when used a server |
|
60 | """compute the set of revision that should be filtered when used a server | |
61 |
|
61 | |||
62 | Secret and hidden changeset should not pretend to be here.""" |
|
62 | Secret and hidden changeset should not pretend to be here.""" | |
63 | assert not repo.changelog.filteredrevs |
|
63 | assert not repo.changelog.filteredrevs | |
64 | # fast check to avoid revset call on huge repo |
|
64 | # fast check to avoid revset call on huge repo | |
65 | if util.any(repo._phasecache.phaseroots[1:]): |
|
65 | if util.any(repo._phasecache.phaseroots[1:]): | |
66 | getphase = repo._phasecache.phase |
|
66 | getphase = repo._phasecache.phase | |
67 | maymutable = filterrevs(repo, 'base') |
|
67 | maymutable = filterrevs(repo, 'base') | |
68 | return frozenset(r for r in maymutable if getphase(repo, r)) |
|
68 | return frozenset(r for r in maymutable if getphase(repo, r)) | |
69 | return frozenset() |
|
69 | return frozenset() | |
70 |
|
70 | |||
71 | def computeimpactable(repo): |
|
71 | def computeimpactable(repo): | |
72 | """Everything impactable by mutable revision |
|
72 | """Everything impactable by mutable revision | |
73 |
|
73 | |||
74 | The immutable filter still have some chance to get invalidated. This will |
|
74 | The immutable filter still have some chance to get invalidated. This will | |
75 | happen when: |
|
75 | happen when: | |
76 |
|
76 | |||
77 | - you garbage collect hidden changeset, |
|
77 | - you garbage collect hidden changeset, | |
78 | - public phase is moved backward, |
|
78 | - public phase is moved backward, | |
79 | - something is changed in the filtering (this could be fixed) |
|
79 | - something is changed in the filtering (this could be fixed) | |
80 |
|
80 | |||
81 | This filter out any mutable changeset and any public changeset that may be |
|
81 | This filter out any mutable changeset and any public changeset that may be | |
82 | impacted by something happening to a mutable revision. |
|
82 | impacted by something happening to a mutable revision. | |
83 |
|
83 | |||
84 | This is achieved by filtered everything with a revision number egal or |
|
84 | This is achieved by filtered everything with a revision number egal or | |
85 | higher than the first mutable changeset is filtered.""" |
|
85 | higher than the first mutable changeset is filtered.""" | |
86 | assert not repo.changelog.filteredrevs |
|
86 | assert not repo.changelog.filteredrevs | |
87 | cl = repo.changelog |
|
87 | cl = repo.changelog | |
88 | firstmutable = len(cl) |
|
88 | firstmutable = len(cl) | |
89 | for roots in repo._phasecache.phaseroots[1:]: |
|
89 | for roots in repo._phasecache.phaseroots[1:]: | |
90 | if roots: |
|
90 | if roots: | |
91 | firstmutable = min(firstmutable, min(cl.rev(r) for r in roots)) |
|
91 | firstmutable = min(firstmutable, min(cl.rev(r) for r in roots)) | |
92 | # protect from nullrev root |
|
92 | # protect from nullrev root | |
93 | firstmutable = max(0, firstmutable) |
|
93 | firstmutable = max(0, firstmutable) | |
94 | return frozenset(xrange(firstmutable, len(cl))) |
|
94 | return frozenset(xrange(firstmutable, len(cl))) | |
95 |
|
95 | |||
96 | # function to compute filtered set |
|
96 | # function to compute filtered set | |
97 | filtertable = {'visible': computehidden, |
|
97 | filtertable = {'visible': computehidden, | |
98 | 'served': computeunserved, |
|
98 | 'served': computeunserved, | |
99 | 'immutable': computemutable, |
|
99 | 'immutable': computemutable, | |
100 | 'base': computeimpactable} |
|
100 | 'base': computeimpactable} | |
101 | ### Nearest subset relation |
|
|||
102 | # Nearest subset of filter X is a filter Y so that: |
|
|||
103 | # * Y is included in X, |
|
|||
104 | # * X - Y is as small as possible. |
|
|||
105 | # This create and ordering used for branchmap purpose. |
|
|||
106 | # the ordering may be partial |
|
|||
107 | subsettable = {None: 'visible', |
|
|||
108 | 'visible': 'served', |
|
|||
109 | 'served': 'immutable', |
|
|||
110 | 'immutable': 'base'} |
|
|||
111 |
|
101 | |||
112 | def filterrevs(repo, filtername): |
|
102 | def filterrevs(repo, filtername): | |
113 | """returns set of filtered revision for this filter name""" |
|
103 | """returns set of filtered revision for this filter name""" | |
114 | if filtername not in repo.filteredrevcache: |
|
104 | if filtername not in repo.filteredrevcache: | |
115 | func = filtertable[filtername] |
|
105 | func = filtertable[filtername] | |
116 | repo.filteredrevcache[filtername] = func(repo.unfiltered()) |
|
106 | repo.filteredrevcache[filtername] = func(repo.unfiltered()) | |
117 | return repo.filteredrevcache[filtername] |
|
107 | return repo.filteredrevcache[filtername] | |
118 |
|
108 | |||
119 | class repoview(object): |
|
109 | class repoview(object): | |
120 | """Provide a read/write view of a repo through a filtered changelog |
|
110 | """Provide a read/write view of a repo through a filtered changelog | |
121 |
|
111 | |||
122 | This object is used to access a filtered version of a repository without |
|
112 | This object is used to access a filtered version of a repository without | |
123 | altering the original repository object itself. We can not alter the |
|
113 | altering the original repository object itself. We can not alter the | |
124 | original object for two main reasons: |
|
114 | original object for two main reasons: | |
125 | - It prevents the use of a repo with multiple filters at the same time. In |
|
115 | - It prevents the use of a repo with multiple filters at the same time. In | |
126 | particular when multiple threads are involved. |
|
116 | particular when multiple threads are involved. | |
127 | - It makes scope of the filtering harder to control. |
|
117 | - It makes scope of the filtering harder to control. | |
128 |
|
118 | |||
129 | This object behaves very closely to the original repository. All attribute |
|
119 | This object behaves very closely to the original repository. All attribute | |
130 | operations are done on the original repository: |
|
120 | operations are done on the original repository: | |
131 | - An access to `repoview.someattr` actually returns `repo.someattr`, |
|
121 | - An access to `repoview.someattr` actually returns `repo.someattr`, | |
132 | - A write to `repoview.someattr` actually sets value of `repo.someattr`, |
|
122 | - A write to `repoview.someattr` actually sets value of `repo.someattr`, | |
133 | - A deletion of `repoview.someattr` actually drops `someattr` |
|
123 | - A deletion of `repoview.someattr` actually drops `someattr` | |
134 | from `repo.__dict__`. |
|
124 | from `repo.__dict__`. | |
135 |
|
125 | |||
136 | The only exception is the `changelog` property. It is overridden to return |
|
126 | The only exception is the `changelog` property. It is overridden to return | |
137 | a (surface) copy of `repo.changelog` with some revisions filtered. The |
|
127 | a (surface) copy of `repo.changelog` with some revisions filtered. The | |
138 | `filtername` attribute of the view control the revisions that need to be |
|
128 | `filtername` attribute of the view control the revisions that need to be | |
139 | filtered. (the fact the changelog is copied is an implementation detail). |
|
129 | filtered. (the fact the changelog is copied is an implementation detail). | |
140 |
|
130 | |||
141 | Unlike attributes, this object intercepts all method calls. This means that |
|
131 | Unlike attributes, this object intercepts all method calls. This means that | |
142 | all methods are run on the `repoview` object with the filtered `changelog` |
|
132 | all methods are run on the `repoview` object with the filtered `changelog` | |
143 | property. For this purpose the simple `repoview` class must be mixed with |
|
133 | property. For this purpose the simple `repoview` class must be mixed with | |
144 | the actual class of the repository. This ensures that the resulting |
|
134 | the actual class of the repository. This ensures that the resulting | |
145 | `repoview` object have the very same methods than the repo object. This |
|
135 | `repoview` object have the very same methods than the repo object. This | |
146 | leads to the property below. |
|
136 | leads to the property below. | |
147 |
|
137 | |||
148 | repoview.method() --> repo.__class__.method(repoview) |
|
138 | repoview.method() --> repo.__class__.method(repoview) | |
149 |
|
139 | |||
150 | The inheritance has to be done dynamically because `repo` can be of any |
|
140 | The inheritance has to be done dynamically because `repo` can be of any | |
151 | subclasses of `localrepo`. Eg: `bundlerepo` or `statichttprepo`. |
|
141 | subclasses of `localrepo`. Eg: `bundlerepo` or `statichttprepo`. | |
152 | """ |
|
142 | """ | |
153 |
|
143 | |||
154 | def __init__(self, repo, filtername): |
|
144 | def __init__(self, repo, filtername): | |
155 | object.__setattr__(self, '_unfilteredrepo', repo) |
|
145 | object.__setattr__(self, '_unfilteredrepo', repo) | |
156 | object.__setattr__(self, 'filtername', filtername) |
|
146 | object.__setattr__(self, 'filtername', filtername) | |
157 | object.__setattr__(self, '_clcachekey', None) |
|
147 | object.__setattr__(self, '_clcachekey', None) | |
158 | object.__setattr__(self, '_clcache', None) |
|
148 | object.__setattr__(self, '_clcache', None) | |
159 |
|
149 | |||
# not a propertycache on purpose we shall implement a proper cache later
@property
def changelog(self):
    """return a filtered version of the changeset

    this changelog must not be used for writing"""
    # some cache may be implemented later
    unfi = self._unfilteredrepo
    unfichangelog = unfi.changelog
    # revs hidden by the current filter; used both as the filter to apply
    # and as part of the cache key
    revs = filterrevs(unfi, self.filtername)
    cl = self._clcache
    newkey = (len(unfichangelog), unfichangelog.tip(), hash(revs))
    if cl is not None:
        # we need to check curkey too for some obscure reason.
        # MQ test show a corruption of the underlying repo (in _clcache)
        # without change in the cachekey.
        oldfilter = cl.filteredrevs
        try:
            # BUG FIX: this previously assigned `cl.filterrevs` (a typo),
            # which set an unrelated attribute and left filtering enabled
            # while computing len()/tip() for the cache-validity key.
            cl.filteredrevs = ()  # disable filtering for tip
            curkey = (len(cl), cl.tip(), hash(oldfilter))
        finally:
            # always restore the filter, even if tip()/len() raised
            cl.filteredrevs = oldfilter
        if newkey != self._clcachekey or newkey != curkey:
            # stale or corrupted cache entry: force a rebuild below
            cl = None
    # could have been made None by the previous if
    if cl is None:
        # shallow-copy the unfiltered changelog and attach the filter set
        cl = copy.copy(unfichangelog)
        cl.filteredrevs = revs
        # object.__setattr__ bypasses this class's __setattr__, which
        # forwards writes to the unfiltered repo
        object.__setattr__(self, '_clcache', cl)
        object.__setattr__(self, '_clcachekey', newkey)
    return cl
191 |
|
181 | |||
def unfiltered(self):
    """Return an unfiltered version of a repo"""
    unfi = self._unfilteredrepo
    return unfi
195 |
|
185 | |||
def filtered(self, name):
    """Return a filtered version of a repository"""
    if name != self.filtername:
        # different filter requested: restart from the unfiltered repo
        return self.unfiltered().filtered(name)
    # already filtered with the requested name; reuse this view
    return self
201 |
|
191 | |||
# every attribute access not found here is forwarded to the proxied repo
def __getattr__(self, attr):
    """Delegate unknown attribute reads to the wrapped repository."""
    target = self._unfilteredrepo
    return getattr(target, attr)
205 |
|
195 | |||
def __setattr__(self, attr, value):
    """Delegate attribute writes to the wrapped repository."""
    target = self._unfilteredrepo
    return setattr(target, attr, value)
208 |
|
198 | |||
def __delattr__(self, attr):
    """Delegate attribute deletion to the wrapped repository."""
    target = self._unfilteredrepo
    return delattr(target, attr)
211 |
|
201 | |||
# `requirements` is initialized during __init__, but since the name also
# exists on the class, __getattr__ never fires for it — forward explicitly
# to the main repo.
@property
def requirements(self):
    """Requirements of the underlying (unfiltered) repository."""
    unfi = self._unfilteredrepo
    return unfi.requirements
218 |
|
General Comments 0
You need to be logged in to leave comments.
Login now