@@ -1,1502 +1,1502 @@
|
1 | 1 | # perf.py - performance test routines |
|
2 | 2 | '''helper extension to measure performance''' |
|
3 | 3 | |
|
4 | 4 | # "historical portability" policy of perf.py: |
|
5 | 5 | # |
|
6 | 6 | # We have to do: |
|
7 | 7 | # - make perf.py "loadable" with as wide a range of Mercurial versions as possible |
|
8 | 8 | # This doesn't mean that perf commands work correctly with that Mercurial. |
|
9 | 9 | # BTW, perf.py itself has been available since 1.1 (or eb240755386d). |
|
10 | 10 | # - make historical perf commands work correctly with as wide a |

11 | 11 | # range of Mercurial versions as possible |
|
12 | 12 | # |
|
13 | 13 | # We have to do, if possible with reasonable cost: |
|
14 | 14 | # - make recent perf commands for historical features work correctly |
|
15 | 15 | # with early Mercurial |
|
16 | 16 | # |
|
17 | 17 | # We don't have to do: |
|
18 | 18 | # - make perf commands for recent features work correctly with early |
|
19 | 19 | # Mercurial |
|
20 | 20 | |
|
21 | 21 | from __future__ import absolute_import |
|
22 | 22 | import functools |
|
23 | 23 | import gc |
|
24 | 24 | import os |
|
25 | 25 | import random |
|
26 | 26 | import struct |
|
27 | 27 | import sys |
|
28 | 28 | import time |
|
29 | 29 | from mercurial import ( |
|
30 | 30 | changegroup, |
|
31 | 31 | cmdutil, |
|
32 | 32 | commands, |
|
33 | 33 | copies, |
|
34 | 34 | error, |
|
35 | 35 | extensions, |
|
36 | 36 | mdiff, |
|
37 | 37 | merge, |
|
38 | 38 | revlog, |
|
39 | 39 | util, |
|
40 | 40 | ) |
|
41 | 41 | |
|
42 | 42 | # for "historical portability": |
|
43 | 43 | # try to import modules separately (in dict order), and ignore |
|
44 | 44 | # failure, because these aren't available with early Mercurial |
|
45 | 45 | try: |
|
46 | 46 | from mercurial import branchmap # since 2.5 (or bcee63733aad) |
|
47 | 47 | except ImportError: |
|
48 | 48 | pass |
|
49 | 49 | try: |
|
50 | 50 | from mercurial import obsolete # since 2.3 (or ad0d6c2b3279) |
|
51 | 51 | except ImportError: |
|
52 | 52 | pass |
|
53 | 53 | try: |
|
54 | 54 | from mercurial import registrar # since 3.7 (or 37d50250b696) |
|
55 | 55 | dir(registrar) # forcibly load it |
|
56 | 56 | except ImportError: |
|
57 | 57 | registrar = None |
|
58 | 58 | try: |
|
59 | 59 | from mercurial import repoview # since 2.5 (or 3a6ddacb7198) |
|
60 | 60 | except ImportError: |
|
61 | 61 | pass |
|
62 | 62 | try: |
|
63 | 63 | from mercurial import scmutil # since 1.9 (or 8b252e826c68) |
|
64 | 64 | except ImportError: |
|
65 | 65 | pass |
|
66 | 66 | |
|
67 | 67 | # for "historical portability": |
|
68 | 68 | # define util.safehasattr forcibly, because util.safehasattr has been |
|
69 | 69 | # available since 1.9.3 (or 94b200a11cf7) |
|
70 | 70 | _undefined = object() |
|
71 | 71 | def safehasattr(thing, attr): |
|
72 | 72 | return getattr(thing, attr, _undefined) is not _undefined |
|
73 | 73 | setattr(util, 'safehasattr', safehasattr) |
|
74 | 74 | |
|
75 | 75 | # for "historical portability": |
|
76 | 76 | # define util.timer forcibly, because util.timer has been available |
|
77 | 77 | # since ae5d60bb70c9 |
|
78 | 78 | if safehasattr(time, 'perf_counter'): |
|
79 | 79 | util.timer = time.perf_counter |
|
80 | 80 | elif os.name == 'nt': |
|
81 | 81 | util.timer = time.clock |
|
82 | 82 | else: |
|
83 | 83 | util.timer = time.time |
|
84 | 84 | |
|
85 | 85 | # for "historical portability": |
|
86 | 86 | # use locally defined empty option list, if formatteropts isn't |
|
87 | 87 | # available, because commands.formatteropts has been available since |
|
88 | 88 | # 3.2 (or 7a7eed5176a4), even though formatting itself has been |
|
89 | 89 | # available since 2.2 (or ae5f92e154d3) |
|
90 | 90 | formatteropts = getattr(cmdutil, "formatteropts", |
|
91 | 91 | getattr(commands, "formatteropts", [])) |
|
92 | 92 | |
|
93 | 93 | # for "historical portability": |
|
94 | 94 | # use locally defined option list, if debugrevlogopts isn't available, |
|
95 | 95 | # because commands.debugrevlogopts has been available since 3.7 (or |
|
96 | 96 | # 5606f7d0d063), even though cmdutil.openrevlog() has been available |
|
97 | 97 | # since 1.9 (or a79fea6b3e77). |
|
98 | 98 | revlogopts = getattr(cmdutil, "debugrevlogopts", |
|
99 | 99 | getattr(commands, "debugrevlogopts", [ |
|
100 | 100 | ('c', 'changelog', False, ('open changelog')), |
|
101 | 101 | ('m', 'manifest', False, ('open manifest')), |
|
102 | 102 | ('', 'dir', False, ('open directory manifest')), |
|
103 | 103 | ])) |
|
104 | 104 | |
|
105 | 105 | cmdtable = {} |
|
106 | 106 | |
|
107 | 107 | # for "historical portability": |
|
108 | 108 | # define parsealiases locally, because cmdutil.parsealiases has been |
|
109 | 109 | # available since 1.5 (or 6252852b4332) |
|
110 | 110 | def parsealiases(cmd): |
|
111 | 111 | return cmd.lstrip("^").split("|") |
|
112 | 112 | |
|
113 | 113 | if safehasattr(registrar, 'command'): |
|
114 | 114 | command = registrar.command(cmdtable) |
|
115 | 115 | elif safehasattr(cmdutil, 'command'): |
|
116 | 116 | import inspect |
|
117 | 117 | command = cmdutil.command(cmdtable) |
|
118 | 118 | if 'norepo' not in inspect.getargspec(command)[0]: |
|
119 | 119 | # for "historical portability": |
|
120 | 120 | # wrap original cmdutil.command, because "norepo" option has |
|
121 | 121 | # been available since 3.1 (or 75a96326cecb) |
|
122 | 122 | _command = command |
|
123 | 123 | def command(name, options=(), synopsis=None, norepo=False): |
|
124 | 124 | if norepo: |
|
125 | 125 | commands.norepo += ' %s' % ' '.join(parsealiases(name)) |
|
126 | 126 | return _command(name, list(options), synopsis) |
|
127 | 127 | else: |
|
128 | 128 | # for "historical portability": |
|
129 | 129 | # define "@command" annotation locally, because cmdutil.command |
|
130 | 130 | # has been available since 1.9 (or 2daa5179e73f) |
|
131 | 131 | def command(name, options=(), synopsis=None, norepo=False): |
|
132 | 132 | def decorator(func): |
|
133 | 133 | if synopsis: |
|
134 | 134 | cmdtable[name] = func, list(options), synopsis |
|
135 | 135 | else: |
|
136 | 136 | cmdtable[name] = func, list(options) |
|
137 | 137 | if norepo: |
|
138 | 138 | commands.norepo += ' %s' % ' '.join(parsealiases(name)) |
|
139 | 139 | return func |
|
140 | 140 | return decorator |
|
141 | 141 | |
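# Illustrative sketch (not part of perf.py): with the shims above, a perf
# command registers the same way on old and new Mercurial alike. The
# 'perfexample' name is hypothetical:
#
#     @command('perfexample', formatteropts)
#     def perfexample(ui, repo, **opts):
#         timer, fm = gettimer(ui, opts)
#         timer(lambda: len(repo))
#         fm.end()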
|
142 | 142 | try: |
|
143 | 143 | from mercurial import registrar |
|
144 | 144 | configtable = {} |
|
145 | 145 | configitem = registrar.configitem(configtable) |
|
146 | 146 | configitem('perf', 'stub', |
|
147 | 147 | default=False, |
|
148 | 148 | ) |
|
149 | 149 | except (ImportError, AttributeError): |
|
150 | 150 | pass |
|
151 | 151 | |
|
152 | 152 | def getlen(ui): |
|
153 | 153 | if ui.configbool("perf", "stub"): |
|
154 | 154 | return lambda x: 1 |
|
155 | 155 | return len |
|
156 | 156 | |
|
157 | 157 | def gettimer(ui, opts=None): |
|
158 | 158 | """return a timer function and formatter: (timer, formatter) |
|
159 | 159 | |
|
160 | 160 | This function exists to gather the creation of the formatter in a |

161 | 161 | single place instead of duplicating it in all performance commands.""" |
|
162 | 162 | |
|
163 | 163 | # enforce an idle period before execution to counteract power management |
|
164 | 164 | # experimental config: perf.presleep |
|
165 | 165 | time.sleep(getint(ui, "perf", "presleep", 1)) |
|
166 | 166 | |
|
167 | 167 | if opts is None: |
|
168 | 168 | opts = {} |
|
169 | 169 | # redirect all to stderr unless buffer api is in use |
|
170 | 170 | if not ui._buffers: |
|
171 | 171 | ui = ui.copy() |
|
172 | 172 | uifout = safeattrsetter(ui, 'fout', ignoremissing=True) |
|
173 | 173 | if uifout: |
|
174 | 174 | # for "historical portability": |
|
175 | 175 | # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d) |
|
176 | 176 | uifout.set(ui.ferr) |
|
177 | 177 | |
|
178 | 178 | # get a formatter |
|
179 | 179 | uiformatter = getattr(ui, 'formatter', None) |
|
180 | 180 | if uiformatter: |
|
181 | 181 | fm = uiformatter('perf', opts) |
|
182 | 182 | else: |
|
183 | 183 | # for "historical portability": |
|
184 | 184 | # define formatter locally, because ui.formatter has been |
|
185 | 185 | # available since 2.2 (or ae5f92e154d3) |
|
186 | 186 | from mercurial import node |
|
187 | 187 | class defaultformatter(object): |
|
188 | 188 | """Minimized composition of baseformatter and plainformatter |
|
189 | 189 | """ |
|
190 | 190 | def __init__(self, ui, topic, opts): |
|
191 | 191 | self._ui = ui |
|
192 | 192 | if ui.debugflag: |
|
193 | 193 | self.hexfunc = node.hex |
|
194 | 194 | else: |
|
195 | 195 | self.hexfunc = node.short |
|
196 | 196 | def __nonzero__(self): |
|
197 | 197 | return False |
|
198 | 198 | __bool__ = __nonzero__ |
|
199 | 199 | def startitem(self): |
|
200 | 200 | pass |
|
201 | 201 | def data(self, **data): |
|
202 | 202 | pass |
|
203 | 203 | def write(self, fields, deftext, *fielddata, **opts): |
|
204 | 204 | self._ui.write(deftext % fielddata, **opts) |
|
205 | 205 | def condwrite(self, cond, fields, deftext, *fielddata, **opts): |
|
206 | 206 | if cond: |
|
207 | 207 | self._ui.write(deftext % fielddata, **opts) |
|
208 | 208 | def plain(self, text, **opts): |
|
209 | 209 | self._ui.write(text, **opts) |
|
210 | 210 | def end(self): |
|
211 | 211 | pass |
|
212 | 212 | fm = defaultformatter(ui, 'perf', opts) |
|
213 | 213 | |
|
214 | 214 | # stub function, runs code only once instead of in a loop |
|
215 | 215 | # experimental config: perf.stub |
|
216 | 216 | if ui.configbool("perf", "stub"): |
|
217 | 217 | return functools.partial(stub_timer, fm), fm |
|
218 | 218 | return functools.partial(_timer, fm), fm |
|
219 | 219 | |
|
220 | 220 | def stub_timer(fm, func, title=None): |
|
221 | 221 | func() |
|
222 | 222 | |
|
223 | 223 | def _timer(fm, func, title=None): |
|
224 | 224 | gc.collect() |
|
225 | 225 | results = [] |
|
226 | 226 | begin = util.timer() |
|
227 | 227 | count = 0 |
|
228 | 228 | while True: |
|
229 | 229 | ostart = os.times() |
|
230 | 230 | cstart = util.timer() |
|
231 | 231 | r = func() |
|
232 | 232 | cstop = util.timer() |
|
233 | 233 | ostop = os.times() |
|
234 | 234 | count += 1 |
|
235 | 235 | a, b = ostart, ostop |
|
236 | 236 | results.append((cstop - cstart, b[0] - a[0], b[1]-a[1])) |
|
237 | 237 | if cstop - begin > 3 and count >= 100: |
|
238 | 238 | break |
|
239 | 239 | if cstop - begin > 10 and count >= 3: |
|
240 | 240 | break |
|
241 | 241 | |
|
242 | 242 | fm.startitem() |
|
243 | 243 | |
|
244 | 244 | if title: |
|
245 | 245 | fm.write('title', '! %s\n', title) |
|
246 | 246 | if r: |
|
247 | 247 | fm.write('result', '! result: %s\n', r) |
|
248 | 248 | m = min(results) |
|
249 | 249 | fm.plain('!') |
|
250 | 250 | fm.write('wall', ' wall %f', m[0]) |
|
251 | 251 | fm.write('comb', ' comb %f', m[1] + m[2]) |
|
252 | 252 | fm.write('user', ' user %f', m[1]) |
|
253 | 253 | fm.write('sys', ' sys %f', m[2]) |
|
254 | 254 | fm.write('count', ' (best of %d)', count) |
|
255 | 255 | fm.plain('\n') |
|
256 | 256 | |
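# Illustrative sketch (not part of perf.py): how the helpers above combine.
# gettimer() returns (timer, formatter); the timer repeats func() until the
# sampling thresholds in _timer() are reached and reports the best
# wall/comb/user/sys times through the formatter:
#
#     timer, fm = gettimer(ui, opts)
#     timer(lambda: repo.status(), title='status')
#     fm.end()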
|
257 | 257 | # utilities for historical portability |
|
258 | 258 | |
|
259 | 259 | def getint(ui, section, name, default): |
|
260 | 260 | # for "historical portability": |
|
261 | 261 | # ui.configint has been available since 1.9 (or fa2b596db182) |
|
262 | 262 | v = ui.config(section, name, None) |
|
263 | 263 | if v is None: |
|
264 | 264 | return default |
|
265 | 265 | try: |
|
266 | 266 | return int(v) |
|
267 | 267 | except ValueError: |
|
268 | 268 | raise error.ConfigError(("%s.%s is not an integer ('%s')") |
|
269 | 269 | % (section, name, v)) |
|
270 | 270 | |
|
271 | 271 | def safeattrsetter(obj, name, ignoremissing=False): |
|
272 | 272 | """Ensure that 'obj' has 'name' attribute before subsequent setattr |
|
273 | 273 | |
|
274 | 274 | This function aborts if 'obj' doesn't have the 'name' attribute |

275 | 275 | at runtime. This avoids overlooking a future removal of the attribute, |

276 | 276 | which would silently break the assumptions of performance measurement. |
|
277 | 277 | |
|
278 | 278 | This function returns an object that can (1) assign a new value to |

279 | 279 | the attribute and (2) restore the attribute's original value. |
|
280 | 280 | |
|
281 | 281 | If 'ignoremissing' is true, a missing 'name' attribute doesn't cause |

282 | 282 | an abort, and this function returns None instead. This is useful for |

283 | 283 | examining an attribute that isn't guaranteed to exist in all |

284 | 284 | Mercurial versions. |
|
285 | 285 | """ |
|
286 | 286 | if not util.safehasattr(obj, name): |
|
287 | 287 | if ignoremissing: |
|
288 | 288 | return None |
|
289 | 289 | raise error.Abort(("missing attribute %s of %s might break assumption" |
|
290 | 290 | " of performance measurement") % (name, obj)) |
|
291 | 291 | |
|
292 | 292 | origvalue = getattr(obj, name) |
|
293 | 293 | class attrutil(object): |
|
294 | 294 | def set(self, newvalue): |
|
295 | 295 | setattr(obj, name, newvalue) |
|
296 | 296 | def restore(self): |
|
297 | 297 | setattr(obj, name, origvalue) |
|
298 | 298 | |
|
299 | 299 | return attrutil() |
|
300 | 300 | |
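# Illustrative sketch (not part of perf.py): the returned object supports
# the swap-and-restore pattern that perfbranchmap uses below:
#
#     attr = safeattrsetter(branchmap, 'read')
#     attr.set(lambda repo: None)     # stub out cache reads
#     try:
#         pass                        # ... timed code runs here ...
#     finally:
#         attr.restore()              # put the original attribute back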
|
301 | 301 | # utilities to examine each internal API changes |
|
302 | 302 | |
|
303 | 303 | def getbranchmapsubsettable(): |
|
304 | 304 | # for "historical portability": |
|
305 | 305 | # subsettable is defined in: |
|
306 | 306 | # - branchmap since 2.9 (or 175c6fd8cacc) |
|
307 | 307 | # - repoview since 2.5 (or 59a9f18d4587) |
|
308 | 308 | for mod in (branchmap, repoview): |
|
309 | 309 | subsettable = getattr(mod, 'subsettable', None) |
|
310 | 310 | if subsettable: |
|
311 | 311 | return subsettable |
|
312 | 312 | |
|
313 | 313 | # bisecting in bcee63733aad::59a9f18d4587 can reach here (both |
|
314 | 314 | # branchmap and repoview modules exist, but subsettable attribute |
|
315 | 315 | # doesn't) |
|
316 | 316 | raise error.Abort(("perfbranchmap not available with this Mercurial"), |
|
317 | 317 | hint="use 2.5 or later") |
|
318 | 318 | |
|
319 | 319 | def getsvfs(repo): |
|
320 | 320 | """Return appropriate object to access files under .hg/store |
|
321 | 321 | """ |
|
322 | 322 | # for "historical portability": |
|
323 | 323 | # repo.svfs has been available since 2.3 (or 7034365089bf) |
|
324 | 324 | svfs = getattr(repo, 'svfs', None) |
|
325 | 325 | if svfs: |
|
326 | 326 | return svfs |
|
327 | 327 | else: |
|
328 | 328 | return getattr(repo, 'sopener') |
|
329 | 329 | |
|
330 | 330 | def getvfs(repo): |
|
331 | 331 | """Return appropriate object to access files under .hg |
|
332 | 332 | """ |
|
333 | 333 | # for "historical portability": |
|
334 | 334 | # repo.vfs has been available since 2.3 (or 7034365089bf) |
|
335 | 335 | vfs = getattr(repo, 'vfs', None) |
|
336 | 336 | if vfs: |
|
337 | 337 | return vfs |
|
338 | 338 | else: |
|
339 | 339 | return getattr(repo, 'opener') |
|
340 | 340 | |
|
341 | 341 | def repocleartagscachefunc(repo): |
|
342 | 342 | """Return the function to clear tags cache according to repo internal API |
|
343 | 343 | """ |
|
344 | 344 | if util.safehasattr(repo, '_tagscache'): # since 2.0 (or 9dca7653b525) |
|
345 | 345 | # in this case, setattr(repo, '_tagscache', None) or so isn't |
|
346 | 346 | # correct way to clear tags cache, because existing code paths |
|
347 | 347 | # expect _tagscache to be a structured object. |
|
348 | 348 | def clearcache(): |
|
349 | 349 | # _tagscache has been filteredpropertycache since 2.5 (or |
|
350 | 350 | # 98c867ac1330), and delattr() can't work in such case |
|
351 | 351 | if '_tagscache' in vars(repo): |
|
352 | 352 | del repo.__dict__['_tagscache'] |
|
353 | 353 | return clearcache |
|
354 | 354 | |
|
355 | 355 | repotags = safeattrsetter(repo, '_tags', ignoremissing=True) |
|
356 | 356 | if repotags: # since 1.4 (or 5614a628d173) |
|
357 | 357 | return lambda : repotags.set(None) |
|
358 | 358 | |
|
359 | 359 | repotagscache = safeattrsetter(repo, 'tagscache', ignoremissing=True) |
|
360 | 360 | if repotagscache: # since 0.6 (or d7df759d0e97) |
|
361 | 361 | return lambda : repotagscache.set(None) |
|
362 | 362 | |
|
363 | 363 | # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches |
|
364 | 364 | # this point, but it isn't so problematic, because: |
|
365 | 365 | # - repo.tags of such Mercurial isn't "callable", and repo.tags() |
|
366 | 366 | # in perftags() causes failure soon |
|
367 | 367 | # - perf.py itself has been available since 1.1 (or eb240755386d) |
|
368 | 368 | raise error.Abort(("tags API of this hg command is unknown")) |
|
369 | 369 | |
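# Illustrative sketch (not part of perf.py): callers don't branch on the
# Mercurial version themselves; they just invoke the returned callable,
# as perftags does below:
#
#     repocleartagscache = repocleartagscachefunc(repo)
#     repocleartagscache()   # clears whichever tags cache this hg uses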
|
370 | 370 | # utilities to clear cache |
|
371 | 371 | |
|
372 | 372 | def clearfilecache(repo, attrname): |
|
373 | 373 | unfi = repo.unfiltered() |
|
374 | 374 | if attrname in vars(unfi): |
|
375 | 375 | delattr(unfi, attrname) |
|
376 | 376 | unfi._filecache.pop(attrname, None) |
|
377 | 377 | |
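# Illustrative sketch (not part of perf.py): dropping a filecache'd
# property forces the next attribute access to re-read it from disk,
# which is what perfbookmarks below measures:
#
#     clearfilecache(repo, '_bookmarks')
#     repo._bookmarks        # reloaded from .hg/bookmarks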
|
378 | 378 | # perf commands |
|
379 | 379 | |
|
380 | 380 | @command('perfwalk', formatteropts) |
|
381 | 381 | def perfwalk(ui, repo, *pats, **opts): |
|
382 | 382 | timer, fm = gettimer(ui, opts) |
|
383 | 383 | m = scmutil.match(repo[None], pats, {}) |
|
384 | 384 | timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True, |
|
385 | 385 | ignored=False)))) |
|
386 | 386 | fm.end() |
|
387 | 387 | |
|
388 | 388 | @command('perfannotate', formatteropts) |
|
389 | 389 | def perfannotate(ui, repo, f, **opts): |
|
390 | 390 | timer, fm = gettimer(ui, opts) |
|
391 | 391 | fc = repo['.'][f] |
|
392 | 392 | timer(lambda: len(fc.annotate(True))) |
|
393 | 393 | fm.end() |
|
394 | 394 | |
|
395 | 395 | @command('perfstatus', |
|
396 | 396 | [('u', 'unknown', False, |
|
397 | 397 | 'ask status to look for unknown files')] + formatteropts) |
|
398 | 398 | def perfstatus(ui, repo, **opts): |
|
399 | 399 | #m = match.always(repo.root, repo.getcwd()) |
|
400 | 400 | #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False, |
|
401 | 401 | # False)))) |
|
402 | 402 | timer, fm = gettimer(ui, opts) |
|
403 | 403 | timer(lambda: sum(map(len, repo.status(unknown=opts['unknown'])))) |
|
404 | 404 | fm.end() |
|
405 | 405 | |
|
406 | 406 | @command('perfaddremove', formatteropts) |
|
407 | 407 | def perfaddremove(ui, repo, **opts): |
|
408 | 408 | timer, fm = gettimer(ui, opts) |
|
409 | 409 | try: |
|
410 | 410 | oldquiet = repo.ui.quiet |
|
411 | 411 | repo.ui.quiet = True |
|
412 | 412 | matcher = scmutil.match(repo[None]) |
|
413 | 413 | timer(lambda: scmutil.addremove(repo, matcher, "", dry_run=True)) |
|
414 | 414 | finally: |
|
415 | 415 | repo.ui.quiet = oldquiet |
|
416 | 416 | fm.end() |
|
417 | 417 | |
|
418 | 418 | def clearcaches(cl): |
|
419 | 419 | # behave somewhat consistently across internal API changes |
|
420 | 420 | if util.safehasattr(cl, 'clearcaches'): |
|
421 | 421 | cl.clearcaches() |
|
422 | 422 | elif util.safehasattr(cl, '_nodecache'): |
|
423 | 423 | from mercurial.node import nullid, nullrev |
|
424 | 424 | cl._nodecache = {nullid: nullrev} |
|
425 | 425 | cl._nodepos = None |
|
426 | 426 | |
|
427 | 427 | @command('perfheads', formatteropts) |
|
428 | 428 | def perfheads(ui, repo, **opts): |
|
429 | 429 | timer, fm = gettimer(ui, opts) |
|
430 | 430 | cl = repo.changelog |
|
431 | 431 | def d(): |
|
432 | 432 | len(cl.headrevs()) |
|
433 | 433 | clearcaches(cl) |
|
434 | 434 | timer(d) |
|
435 | 435 | fm.end() |
|
436 | 436 | |
|
437 | 437 | @command('perftags', formatteropts) |
|
438 | 438 | def perftags(ui, repo, **opts): |
|
439 | 439 | import mercurial.changelog |
|
440 | 440 | import mercurial.manifest |
|
441 | 441 | timer, fm = gettimer(ui, opts) |
|
442 | 442 | svfs = getsvfs(repo) |
|
443 | 443 | repocleartagscache = repocleartagscachefunc(repo) |
|
444 | 444 | def t(): |
|
445 | 445 | repo.changelog = mercurial.changelog.changelog(svfs) |
|
446 | 446 | repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo) |
|
447 | 447 | repocleartagscache() |
|
448 | 448 | return len(repo.tags()) |
|
449 | 449 | timer(t) |
|
450 | 450 | fm.end() |
|
451 | 451 | |
|
452 | 452 | @command('perfancestors', formatteropts) |
|
453 | 453 | def perfancestors(ui, repo, **opts): |
|
454 | 454 | timer, fm = gettimer(ui, opts) |
|
455 | 455 | heads = repo.changelog.headrevs() |
|
456 | 456 | def d(): |
|
457 | 457 | for a in repo.changelog.ancestors(heads): |
|
458 | 458 | pass |
|
459 | 459 | timer(d) |
|
460 | 460 | fm.end() |
|
461 | 461 | |
|
462 | 462 | @command('perfancestorset', formatteropts) |
|
463 | 463 | def perfancestorset(ui, repo, revset, **opts): |
|
464 | 464 | timer, fm = gettimer(ui, opts) |
|
465 | 465 | revs = repo.revs(revset) |
|
466 | 466 | heads = repo.changelog.headrevs() |
|
467 | 467 | def d(): |
|
468 | 468 | s = repo.changelog.ancestors(heads) |
|
469 | 469 | for rev in revs: |
|
470 | 470 | rev in s |
|
471 | 471 | timer(d) |
|
472 | 472 | fm.end() |
|
473 | 473 | |
|
474 | 474 | @command('perfbookmarks', formatteropts) |
|
475 | 475 | def perfbookmarks(ui, repo, **opts): |
|
476 | 476 | """benchmark parsing bookmarks from disk to memory""" |
|
477 | 477 | timer, fm = gettimer(ui, opts) |
|
478 | 478 | def d(): |
|
479 | 479 | clearfilecache(repo, '_bookmarks') |
|
480 | 480 | repo._bookmarks |
|
481 | 481 | timer(d) |
|
482 | 482 | fm.end() |
|
483 | 483 | |
|
484 | 484 | @command('perfchangegroupchangelog', formatteropts + |
|
485 | 485 | [('', 'version', '02', 'changegroup version'), |
|
486 | 486 | ('r', 'rev', '', 'revisions to add to changegroup')]) |
|
487 | 487 | def perfchangegroupchangelog(ui, repo, version='02', rev=None, **opts): |
|
488 | 488 | """Benchmark producing a changelog group for a changegroup. |
|
489 | 489 | |
|
490 | 490 | This measures the time spent processing the changelog during a |
|
491 | 491 | bundle operation. This occurs during `hg bundle` and on a server |
|
492 | 492 | processing a `getbundle` wire protocol request (handles clones |
|
493 | 493 | and pull requests). |
|
494 | 494 | |
|
495 | 495 | By default, all revisions are added to the changegroup. |
|
496 | 496 | """ |
|
497 | 497 | cl = repo.changelog |
|
498 | 498 | revs = [cl.lookup(r) for r in repo.revs(rev or 'all()')] |
|
499 | 499 | bundler = changegroup.getbundler(version, repo) |
|
500 | 500 | |
|
501 | 501 | def lookup(node): |
|
502 | 502 | # The real bundler reads the revision in order to access the |
|
503 | 503 | # manifest node and files list. Do that here. |
|
504 | 504 | cl.read(node) |
|
505 | 505 | return node |
|
506 | 506 | |
|
507 | 507 | def d(): |
|
508 | 508 | for chunk in bundler.group(revs, cl, lookup): |
|
509 | 509 | pass |
|
510 | 510 | |
|
511 | 511 | timer, fm = gettimer(ui, opts) |
|
512 | 512 | timer(d) |
|
513 | 513 | fm.end() |
|
514 | 514 | |
|
515 | 515 | @command('perfdirs', formatteropts) |
|
516 | 516 | def perfdirs(ui, repo, **opts): |
|
517 | 517 | timer, fm = gettimer(ui, opts) |
|
518 | 518 | dirstate = repo.dirstate |
|
519 | 519 | 'a' in dirstate |
|
520 | 520 | def d(): |
|
521 | 521 | dirstate.dirs() |
|
522 | 522 | del dirstate._dirs |
|
523 | 523 | timer(d) |
|
524 | 524 | fm.end() |
|
525 | 525 | |
|
526 | 526 | @command('perfdirstate', formatteropts) |
|
527 | 527 | def perfdirstate(ui, repo, **opts): |
|
528 | 528 | timer, fm = gettimer(ui, opts) |
|
529 | 529 | "a" in repo.dirstate |
|
530 | 530 | def d(): |
|
531 | 531 | repo.dirstate.invalidate() |
|
532 | 532 | "a" in repo.dirstate |
|
533 | 533 | timer(d) |
|
534 | 534 | fm.end() |
|
535 | 535 | |
|
536 | 536 | @command('perfdirstatedirs', formatteropts) |
|
537 | 537 | def perfdirstatedirs(ui, repo, **opts): |
|
538 | 538 | timer, fm = gettimer(ui, opts) |
|
539 | 539 | "a" in repo.dirstate |
|
540 | 540 | def d(): |
|
541 | 541 | "a" in repo.dirstate._dirs |
|
542 | 542 | del repo.dirstate._dirs |
|
543 | 543 | timer(d) |
|
544 | 544 | fm.end() |
|
545 | 545 | |
|
546 | 546 | @command('perfdirstatefoldmap', formatteropts) |
|
547 | 547 | def perfdirstatefoldmap(ui, repo, **opts): |
|
548 | 548 | timer, fm = gettimer(ui, opts) |
|
549 | 549 | dirstate = repo.dirstate |
|
550 | 550 | 'a' in dirstate |
|
551 | 551 | def d(): |
|
552 |     | dirstate._filefoldmap.get('a') |

553 |     | del dirstate._filefoldmap |

    | 552 | dirstate._map.filefoldmap.get('a') |

    | 553 | del dirstate._map.filefoldmap |
|
554 | 554 | timer(d) |
|
555 | 555 | fm.end() |
|
556 | 556 | |
|
557 | 557 | @command('perfdirfoldmap', formatteropts) |
|
558 | 558 | def perfdirfoldmap(ui, repo, **opts): |
|
559 | 559 | timer, fm = gettimer(ui, opts) |
|
560 | 560 | dirstate = repo.dirstate |
|
561 | 561 | 'a' in dirstate |
|
562 | 562 | def d(): |
|
563 | 563 | dirstate._dirfoldmap.get('a') |
|
564 | 564 | del dirstate._dirfoldmap |
|
565 | 565 | del dirstate._dirs |
|
566 | 566 | timer(d) |
|
567 | 567 | fm.end() |
|
568 | 568 | |
|
569 | 569 | @command('perfdirstatewrite', formatteropts) |
|
570 | 570 | def perfdirstatewrite(ui, repo, **opts): |
|
571 | 571 | timer, fm = gettimer(ui, opts) |
|
572 | 572 | ds = repo.dirstate |
|
573 | 573 | "a" in ds |
|
574 | 574 | def d(): |
|
575 | 575 | ds._dirty = True |
|
576 | 576 | ds.write(repo.currenttransaction()) |
|
577 | 577 | timer(d) |
|
578 | 578 | fm.end() |
|
579 | 579 | |
|
580 | 580 | @command('perfmergecalculate', |
|
581 | 581 | [('r', 'rev', '.', 'rev to merge against')] + formatteropts) |
|
582 | 582 | def perfmergecalculate(ui, repo, rev, **opts): |
|
583 | 583 | timer, fm = gettimer(ui, opts) |
|
584 | 584 | wctx = repo[None] |
|
585 | 585 | rctx = scmutil.revsingle(repo, rev, rev) |
|
586 | 586 | ancestor = wctx.ancestor(rctx) |
|
587 | 587 | # we don't want working dir files to be stat'd in the benchmark, so prime |
|
588 | 588 | # that cache |
|
589 | 589 | wctx.dirty() |
|
590 | 590 | def d(): |
|
591 | 591 | # acceptremote is True because we don't want prompts in the middle of |
|
592 | 592 | # our benchmark |
|
593 | 593 | merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False, |
|
594 | 594 | acceptremote=True, followcopies=True) |
|
595 | 595 | timer(d) |
|
596 | 596 | fm.end() |
|
597 | 597 | |
|
598 | 598 | @command('perfpathcopies', [], "REV REV") |
|
599 | 599 | def perfpathcopies(ui, repo, rev1, rev2, **opts): |
|
600 | 600 | timer, fm = gettimer(ui, opts) |
|
601 | 601 | ctx1 = scmutil.revsingle(repo, rev1, rev1) |
|
602 | 602 | ctx2 = scmutil.revsingle(repo, rev2, rev2) |
|
603 | 603 | def d(): |
|
604 | 604 | copies.pathcopies(ctx1, ctx2) |
|
605 | 605 | timer(d) |
|
606 | 606 | fm.end() |
|
607 | 607 | |
|
608 | 608 | @command('perfphases', |
|
609 | 609 | [('', 'full', False, 'include file reading time too'), |
|
610 | 610 | ], "") |
|
611 | 611 | def perfphases(ui, repo, **opts): |
|
612 | 612 | """benchmark phasesets computation""" |
|
613 | 613 | timer, fm = gettimer(ui, opts) |
|
614 | 614 | _phases = repo._phasecache |
|
615 | 615 | full = opts.get('full') |
|
616 | 616 | def d(): |
|
617 | 617 | phases = _phases |
|
618 | 618 | if full: |
|
619 | 619 | clearfilecache(repo, '_phasecache') |
|
620 | 620 | phases = repo._phasecache |
|
621 | 621 | phases.invalidate() |
|
622 | 622 | phases.loadphaserevs(repo) |
|
623 | 623 | timer(d) |
|
624 | 624 | fm.end() |
|
625 | 625 | |
|
626 | 626 | @command('perfmanifest', [], 'REV') |
|
627 | 627 | def perfmanifest(ui, repo, rev, **opts): |
|
628 | 628 | timer, fm = gettimer(ui, opts) |
|
629 | 629 | ctx = scmutil.revsingle(repo, rev, rev) |
|
630 | 630 | t = ctx.manifestnode() |
|
631 | 631 | def d(): |
|
632 | 632 | repo.manifestlog.clearcaches() |
|
633 | 633 | repo.manifestlog[t].read() |
|
634 | 634 | timer(d) |
|
635 | 635 | fm.end() |
|
636 | 636 | |
|
637 | 637 | @command('perfchangeset', formatteropts) |
|
638 | 638 | def perfchangeset(ui, repo, rev, **opts): |
|
639 | 639 | timer, fm = gettimer(ui, opts) |
|
640 | 640 | n = repo[rev].node() |
|
641 | 641 | def d(): |
|
642 | 642 | repo.changelog.read(n) |
|
643 | 643 | #repo.changelog._cache = None |
|
644 | 644 | timer(d) |
|
645 | 645 | fm.end() |
|
646 | 646 | |
|
647 | 647 | @command('perfindex', formatteropts) |
|
648 | 648 | def perfindex(ui, repo, **opts): |
|
649 | 649 | import mercurial.revlog |
|
650 | 650 | timer, fm = gettimer(ui, opts) |
|
651 | 651 | mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg |
|
652 | 652 | n = repo["tip"].node() |
|
653 | 653 | svfs = getsvfs(repo) |
|
654 | 654 | def d(): |
|
655 | 655 | cl = mercurial.revlog.revlog(svfs, "00changelog.i") |
|
656 | 656 | cl.rev(n) |
|
657 | 657 | timer(d) |
|
658 | 658 | fm.end() |
|
659 | 659 | |
|
660 | 660 | @command('perfstartup', formatteropts) |
|
661 | 661 | def perfstartup(ui, repo, **opts): |
|
662 | 662 | timer, fm = gettimer(ui, opts) |
|
663 | 663 | cmd = sys.argv[0] |
|
664 | 664 | def d(): |
|
665 | 665 | if os.name != 'nt': |
|
666 | 666 | os.system("HGRCPATH= %s version -q > /dev/null" % cmd) |
|
667 | 667 | else: |
|
668 | 668 | os.environ['HGRCPATH'] = ' ' |
|
669 | 669 | os.system("%s version -q > NUL" % cmd) |
|
670 | 670 | timer(d) |
|
671 | 671 | fm.end() |
|
672 | 672 | |
|
673 | 673 | @command('perfparents', formatteropts) |
|
674 | 674 | def perfparents(ui, repo, **opts): |
|
675 | 675 | timer, fm = gettimer(ui, opts) |
|
676 | 676 | # control the number of commits perfparents iterates over |
|
677 | 677 | # experimental config: perf.parentscount |
|
678 | 678 | count = getint(ui, "perf", "parentscount", 1000) |
|
679 | 679 | if len(repo.changelog) < count: |
|
680 | 680 | raise error.Abort("repo needs %d commits for this test" % count) |
|
681 | 681 | repo = repo.unfiltered() |
|
682 | 682 | nl = [repo.changelog.node(i) for i in xrange(count)] |
|
683 | 683 | def d(): |
|
684 | 684 | for n in nl: |
|
685 | 685 | repo.changelog.parents(n) |
|
686 | 686 | timer(d) |
|
687 | 687 | fm.end() |
|
688 | 688 | |
|
689 | 689 | @command('perfctxfiles', formatteropts) |
|
690 | 690 | def perfctxfiles(ui, repo, x, **opts): |
|
691 | 691 | x = int(x) |
|
692 | 692 | timer, fm = gettimer(ui, opts) |
|
693 | 693 | def d(): |
|
694 | 694 | len(repo[x].files()) |
|
695 | 695 | timer(d) |
|
696 | 696 | fm.end() |
|
697 | 697 | |
|
698 | 698 | @command('perfrawfiles', formatteropts) |
|
699 | 699 | def perfrawfiles(ui, repo, x, **opts): |
|
700 | 700 | x = int(x) |
|
701 | 701 | timer, fm = gettimer(ui, opts) |
|
702 | 702 | cl = repo.changelog |
|
703 | 703 | def d(): |
|
704 | 704 | len(cl.read(x)[3]) |
|
705 | 705 | timer(d) |
|
706 | 706 | fm.end() |
|
707 | 707 | |
|
708 | 708 | @command('perflookup', formatteropts) |
|
709 | 709 | def perflookup(ui, repo, rev, **opts): |
|
710 | 710 | timer, fm = gettimer(ui, opts) |
|
711 | 711 | timer(lambda: len(repo.lookup(rev))) |
|
712 | 712 | fm.end() |
|
713 | 713 | |
|
714 | 714 | @command('perfrevrange', formatteropts) |
|
715 | 715 | def perfrevrange(ui, repo, *specs, **opts): |
|
716 | 716 | timer, fm = gettimer(ui, opts) |
|
717 | 717 | revrange = scmutil.revrange |
|
718 | 718 | timer(lambda: len(revrange(repo, specs))) |
|
719 | 719 | fm.end() |
|
720 | 720 | |
|
721 | 721 | @command('perfnodelookup', formatteropts) |
|
722 | 722 | def perfnodelookup(ui, repo, rev, **opts): |
|
723 | 723 | timer, fm = gettimer(ui, opts) |
|
724 | 724 | import mercurial.revlog |
|
725 | 725 | mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg |
|
726 | 726 | n = repo[rev].node() |
|
727 | 727 | cl = mercurial.revlog.revlog(getsvfs(repo), "00changelog.i") |
|
728 | 728 | def d(): |
|
729 | 729 | cl.rev(n) |
|
730 | 730 | clearcaches(cl) |
|
731 | 731 | timer(d) |
|
732 | 732 | fm.end() |
|
733 | 733 | |
|
734 | 734 | @command('perflog', |
|
735 | 735 | [('', 'rename', False, 'ask log to follow renames')] + formatteropts) |
|
736 | 736 | def perflog(ui, repo, rev=None, **opts): |
|
737 | 737 | if rev is None: |
|
738 | 738 | rev=[] |
|
739 | 739 | timer, fm = gettimer(ui, opts) |
|
740 | 740 | ui.pushbuffer() |
|
741 | 741 | timer(lambda: commands.log(ui, repo, rev=rev, date='', user='', |
|
742 | 742 | copies=opts.get('rename'))) |
|
743 | 743 | ui.popbuffer() |
|
744 | 744 | fm.end() |
|
745 | 745 | |
|
746 | 746 | @command('perfmoonwalk', formatteropts) |
|
747 | 747 | def perfmoonwalk(ui, repo, **opts): |
|
748 | 748 | """benchmark walking the changelog backwards |
|
749 | 749 | |
|
750 | 750 | This also loads the changelog data for each revision in the changelog. |
|
751 | 751 | """ |
|
752 | 752 | timer, fm = gettimer(ui, opts) |
|
753 | 753 | def moonwalk(): |
|
754 | 754 | for i in xrange(len(repo), -1, -1): |
|
755 | 755 | ctx = repo[i] |
|
756 | 756 | ctx.branch() # read changelog data (in addition to the index) |
|
757 | 757 | timer(moonwalk) |
|
758 | 758 | fm.end() |
|
759 | 759 | |
|
760 | 760 | @command('perftemplating', formatteropts) |
|
761 | 761 | def perftemplating(ui, repo, rev=None, **opts): |
|
762 | 762 | if rev is None: |
|
763 | 763 | rev=[] |
|
764 | 764 | timer, fm = gettimer(ui, opts) |
|
765 | 765 | ui.pushbuffer() |
|
766 | 766 | timer(lambda: commands.log(ui, repo, rev=rev, date='', user='', |
|
767 | 767 | template='{date|shortdate} [{rev}:{node|short}]' |
|
768 | 768 | ' {author|person}: {desc|firstline}\n')) |
|
769 | 769 | ui.popbuffer() |
|
770 | 770 | fm.end() |
|
771 | 771 | |
|
772 | 772 | @command('perfcca', formatteropts) |
|
773 | 773 | def perfcca(ui, repo, **opts): |
|
774 | 774 | timer, fm = gettimer(ui, opts) |
|
775 | 775 | timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate)) |
|
776 | 776 | fm.end() |
|
777 | 777 | |
|
778 | 778 | @command('perffncacheload', formatteropts) |
|
779 | 779 | def perffncacheload(ui, repo, **opts): |
|
780 | 780 | timer, fm = gettimer(ui, opts) |
|
781 | 781 | s = repo.store |
|
782 | 782 | def d(): |
|
783 | 783 | s.fncache._load() |
|
784 | 784 | timer(d) |
|
785 | 785 | fm.end() |
|
786 | 786 | |
|
787 | 787 | @command('perffncachewrite', formatteropts) |
|
788 | 788 | def perffncachewrite(ui, repo, **opts): |
|
789 | 789 | timer, fm = gettimer(ui, opts) |
|
790 | 790 | s = repo.store |
|
791 | 791 | s.fncache._load() |
|
792 | 792 | lock = repo.lock() |
|
793 | 793 | tr = repo.transaction('perffncachewrite') |
|
794 | 794 | def d(): |
|
795 | 795 | s.fncache._dirty = True |
|
796 | 796 | s.fncache.write(tr) |
|
797 | 797 | timer(d) |
|
798 | 798 | tr.close() |
|
799 | 799 | lock.release() |
|
800 | 800 | fm.end() |
|
801 | 801 | |
|
802 | 802 | @command('perffncacheencode', formatteropts) |
|
803 | 803 | def perffncacheencode(ui, repo, **opts): |
|
804 | 804 | timer, fm = gettimer(ui, opts) |
|
805 | 805 | s = repo.store |
|
806 | 806 | s.fncache._load() |
|
807 | 807 | def d(): |
|
808 | 808 | for p in s.fncache.entries: |
|
809 | 809 | s.encode(p) |
|
810 | 810 | timer(d) |
|
811 | 811 | fm.end() |
|
812 | 812 | |
|
813 | 813 | @command('perfbdiff', revlogopts + formatteropts + [ |
|
814 | 814 | ('', 'count', 1, 'number of revisions to test (when using --startrev)'), |
|
815 | 815 | ('', 'alldata', False, 'test bdiffs for all associated revisions')], |
|
816 | 816 | '-c|-m|FILE REV') |
|
817 | 817 | def perfbdiff(ui, repo, file_, rev=None, count=None, **opts): |
|
818 | 818 | """benchmark a bdiff between revisions |
|
819 | 819 | |
|
820 | 820 | By default, benchmark a bdiff between its delta parent and itself. |
|
821 | 821 | |
|
822 | 822 | With ``--count``, benchmark bdiffs between delta parents and self for N |
|
823 | 823 | revisions starting at the specified revision. |
|
824 | 824 | |
|
825 | 825 | With ``--alldata``, assume the requested revision is a changeset and |
|
826 | 826 | measure bdiffs for all changes related to that changeset (manifest |
|
827 | 827 | and filelogs). |
|
828 | 828 | """ |
|
829 | 829 | if opts['alldata']: |
|
830 | 830 | opts['changelog'] = True |
|
831 | 831 | |
|
832 | 832 | if opts.get('changelog') or opts.get('manifest'): |
|
833 | 833 | file_, rev = None, file_ |
|
834 | 834 | elif rev is None: |
|
835 | 835 | raise error.CommandError('perfbdiff', 'invalid arguments') |
|
836 | 836 | |
|
837 | 837 | textpairs = [] |
|
838 | 838 | |
|
839 | 839 | r = cmdutil.openrevlog(repo, 'perfbdiff', file_, opts) |
|
840 | 840 | |
|
841 | 841 | startrev = r.rev(r.lookup(rev)) |
|
842 | 842 | for rev in range(startrev, min(startrev + count, len(r) - 1)): |
|
843 | 843 | if opts['alldata']: |
|
844 | 844 | # Load revisions associated with changeset. |
|
845 | 845 | ctx = repo[rev] |
|
846 | 846 | mtext = repo.manifestlog._revlog.revision(ctx.manifestnode()) |
|
847 | 847 | for pctx in ctx.parents(): |
|
848 | 848 | pman = repo.manifestlog._revlog.revision(pctx.manifestnode()) |
|
849 | 849 | textpairs.append((pman, mtext)) |
|
850 | 850 | |
|
851 | 851 | # Load filelog revisions by iterating manifest delta. |
|
852 | 852 | man = ctx.manifest() |
|
853 | 853 | pman = ctx.p1().manifest() |
|
854 | 854 | for filename, change in pman.diff(man).items(): |
|
855 | 855 | fctx = repo.file(filename) |
|
856 | 856 | f1 = fctx.revision(change[0][0] or -1) |
|
857 | 857 | f2 = fctx.revision(change[1][0] or -1) |
|
858 | 858 | textpairs.append((f1, f2)) |
|
859 | 859 | else: |
|
860 | 860 | dp = r.deltaparent(rev) |
|
861 | 861 | textpairs.append((r.revision(dp), r.revision(rev))) |
|
862 | 862 | |
|
863 | 863 | def d(): |
|
864 | 864 | for pair in textpairs: |
|
865 | 865 | mdiff.textdiff(*pair) |
|
866 | 866 | |
|
867 | 867 | timer, fm = gettimer(ui, opts) |
|
868 | 868 | timer(d) |
|
869 | 869 | fm.end() |
|
870 | 870 | |
|
871 | 871 | @command('perfdiffwd', formatteropts) |
|
872 | 872 | def perfdiffwd(ui, repo, **opts): |
|
873 | 873 | """Profile diff of working directory changes""" |
|
874 | 874 | timer, fm = gettimer(ui, opts) |
|
875 | 875 | options = { |
|
876 | 876 | 'w': 'ignore_all_space', |
|
877 | 877 | 'b': 'ignore_space_change', |
|
878 | 878 | 'B': 'ignore_blank_lines', |
|
879 | 879 | } |
|
880 | 880 | |
|
881 | 881 | for diffopt in ('', 'w', 'b', 'B', 'wB'): |
|
882 | 882 | opts = dict((options[c], '1') for c in diffopt) |
|
883 | 883 | def d(): |
|
884 | 884 | ui.pushbuffer() |
|
885 | 885 | commands.diff(ui, repo, **opts) |
|
886 | 886 | ui.popbuffer() |
|
887 | 887 | title = 'diffopts: %s' % (diffopt and ('-' + diffopt) or 'none') |
|
888 | 888 | timer(d, title) |
|
889 | 889 | fm.end() |
|
890 | 890 | |
|
891 | 891 | @command('perfrevlogindex', revlogopts + formatteropts, |
|
892 | 892 | '-c|-m|FILE') |
|
893 | 893 | def perfrevlogindex(ui, repo, file_=None, **opts): |
|
894 | 894 | """Benchmark operations against a revlog index. |
|
895 | 895 | |
|
896 | 896 | This tests constructing a revlog instance, reading index data, |
|
897 | 897 | parsing index data, and performing various operations related to |
|
898 | 898 | index data. |
|
899 | 899 | """ |
|
900 | 900 | |
|
901 | 901 | rl = cmdutil.openrevlog(repo, 'perfrevlogindex', file_, opts) |
|
902 | 902 | |
|
903 | 903 | opener = getattr(rl, 'opener') # trick linter |
|
904 | 904 | indexfile = rl.indexfile |
|
905 | 905 | data = opener.read(indexfile) |
|
906 | 906 | |
|
907 | 907 | header = struct.unpack('>I', data[0:4])[0] |
|
908 | 908 | version = header & 0xFFFF |
|
909 | 909 | if version == 1: |
|
910 | 910 | revlogio = revlog.revlogio() |
|
911 | 911 | inline = header & (1 << 16) |
|
912 | 912 | else: |
|
913 | 913 | raise error.Abort(('unsupported revlog version: %d') % version) |
|
914 | 914 | |
|
915 | 915 | rllen = len(rl) |
|
916 | 916 | |
|
917 | 917 | node0 = rl.node(0) |
|
918 | 918 | node25 = rl.node(rllen // 4) |
|
919 | 919 | node50 = rl.node(rllen // 2) |
|
920 | 920 | node75 = rl.node(rllen // 4 * 3) |
|
921 | 921 | node100 = rl.node(rllen - 1) |
|
922 | 922 | |
|
923 | 923 | allrevs = range(rllen) |
|
924 | 924 | allrevsrev = list(reversed(allrevs)) |
|
925 | 925 | allnodes = [rl.node(rev) for rev in range(rllen)] |
|
926 | 926 | allnodesrev = list(reversed(allnodes)) |
|
927 | 927 | |
|
928 | 928 | def constructor(): |
|
929 | 929 | revlog.revlog(opener, indexfile) |
|
930 | 930 | |
|
931 | 931 | def read(): |
|
932 | 932 | with opener(indexfile) as fh: |
|
933 | 933 | fh.read() |
|
934 | 934 | |
|
935 | 935 | def parseindex(): |
|
936 | 936 | revlogio.parseindex(data, inline) |
|
937 | 937 | |
|
938 | 938 | def getentry(revornode): |
|
939 | 939 | index = revlogio.parseindex(data, inline)[0] |
|
940 | 940 | index[revornode] |
|
941 | 941 | |
|
942 | 942 | def getentries(revs, count=1): |
|
943 | 943 | index = revlogio.parseindex(data, inline)[0] |
|
944 | 944 | |
|
945 | 945 | for i in range(count): |
|
946 | 946 | for rev in revs: |
|
947 | 947 | index[rev] |
|
948 | 948 | |
|
949 | 949 | def resolvenode(node): |
|
950 | 950 | nodemap = revlogio.parseindex(data, inline)[1] |
|
951 | 951 | # This only works for the C code. |
|
952 | 952 | if nodemap is None: |
|
953 | 953 | return |
|
954 | 954 | |
|
955 | 955 | try: |
|
956 | 956 | nodemap[node] |
|
957 | 957 | except error.RevlogError: |
|
958 | 958 | pass |
|
959 | 959 | |
|
960 | 960 | def resolvenodes(nodes, count=1): |
|
961 | 961 | nodemap = revlogio.parseindex(data, inline)[1] |
|
962 | 962 | if nodemap is None: |
|
963 | 963 | return |
|
964 | 964 | |
|
965 | 965 | for i in range(count): |
|
966 | 966 | for node in nodes: |
|
967 | 967 | try: |
|
968 | 968 | nodemap[node] |
|
969 | 969 | except error.RevlogError: |
|
970 | 970 | pass |
|
971 | 971 | |
|
972 | 972 | benches = [ |
|
973 | 973 | (constructor, 'revlog constructor'), |
|
974 | 974 | (read, 'read'), |
|
975 | 975 | (parseindex, 'create index object'), |
|
976 | 976 | (lambda: getentry(0), 'retrieve index entry for rev 0'), |
|
977 | 977 | (lambda: resolvenode('a' * 20), 'look up missing node'), |
|
978 | 978 | (lambda: resolvenode(node0), 'look up node at rev 0'), |
|
979 | 979 | (lambda: resolvenode(node25), 'look up node at 1/4 len'), |
|
980 | 980 | (lambda: resolvenode(node50), 'look up node at 1/2 len'), |
|
981 | 981 | (lambda: resolvenode(node75), 'look up node at 3/4 len'), |
|
982 | 982 | (lambda: resolvenode(node100), 'look up node at tip'), |
|
983 | 983 | # 2x variation is to measure caching impact. |
|
984 | 984 | (lambda: resolvenodes(allnodes), |
|
985 | 985 | 'look up all nodes (forward)'), |
|
986 | 986 | (lambda: resolvenodes(allnodes, 2), |
|
987 | 987 | 'look up all nodes 2x (forward)'), |
|
988 | 988 | (lambda: resolvenodes(allnodesrev), |
|
989 | 989 | 'look up all nodes (reverse)'), |
|
990 | 990 | (lambda: resolvenodes(allnodesrev, 2), |
|
991 | 991 | 'look up all nodes 2x (reverse)'), |
|
992 | 992 | (lambda: getentries(allrevs), |
|
993 | 993 | 'retrieve all index entries (forward)'), |
|
994 | 994 | (lambda: getentries(allrevs, 2), |
|
995 | 995 | 'retrieve all index entries 2x (forward)'), |
|
996 | 996 | (lambda: getentries(allrevsrev), |
|
997 | 997 | 'retrieve all index entries (reverse)'), |
|
998 | 998 | (lambda: getentries(allrevsrev, 2), |
|
999 | 999 | 'retrieve all index entries 2x (reverse)'), |
|
1000 | 1000 | ] |
|
1001 | 1001 | |
|
1002 | 1002 | for fn, title in benches: |
|
1003 | 1003 | timer, fm = gettimer(ui, opts) |
|
1004 | 1004 | timer(fn, title=title) |
|
1005 | 1005 | fm.end() |
|
1006 | 1006 | |
|
1007 | 1007 | @command('perfrevlogrevisions', revlogopts + formatteropts + |
|
1008 | 1008 | [('d', 'dist', 100, 'distance between the revisions'), |
|
1009 | 1009 | ('s', 'startrev', 0, 'revision to start reading at'), |
|
1010 | 1010 | ('', 'reverse', False, 'read in reverse')], |
|
1011 | 1011 | '-c|-m|FILE') |
|
1012 | 1012 | def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False, |
|
1013 | 1013 | **opts): |
|
1014 | 1014 | """Benchmark reading a series of revisions from a revlog. |
|
1015 | 1015 | |
|
1016 | 1016 | By default, we read every ``-d/--dist`` revision from 0 to tip of |
|
1017 | 1017 | the specified revlog. |
|
1018 | 1018 | |
|
1019 | 1019 | The start revision can be defined via ``-s/--startrev``. |
|
1020 | 1020 | """ |
|
1021 | 1021 | rl = cmdutil.openrevlog(repo, 'perfrevlogrevisions', file_, opts) |
|
1022 | 1022 | rllen = getlen(ui)(rl) |
|
1023 | 1023 | |
|
1024 | 1024 | def d(): |
|
1025 | 1025 | rl.clearcaches() |
|
1026 | 1026 | |
|
1027 | 1027 | beginrev = startrev |
|
1028 | 1028 | endrev = rllen |
|
1029 | 1029 | dist = opts['dist'] |
|
1030 | 1030 | |
|
1031 | 1031 | if reverse: |
|
1032 | 1032 | beginrev, endrev = endrev, beginrev |
|
1033 | 1033 | dist = -1 * dist |
|
1034 | 1034 | |
|
1035 | 1035 | for x in xrange(beginrev, endrev, dist): |
|
1036 | 1036 | # Old revisions don't support passing int. |
|
1037 | 1037 | n = rl.node(x) |
|
1038 | 1038 | rl.revision(n) |
|
1039 | 1039 | |
|
1040 | 1040 | timer, fm = gettimer(ui, opts) |
|
1041 | 1041 | timer(d) |
|
1042 | 1042 | fm.end() |
|
1043 | 1043 | |
|
1044 | 1044 | @command('perfrevlogchunks', revlogopts + formatteropts + |
|
1045 | 1045 | [('e', 'engines', '', 'compression engines to use'), |
|
1046 | 1046 | ('s', 'startrev', 0, 'revision to start at')], |
|
1047 | 1047 | '-c|-m|FILE') |
|
1048 | 1048 | def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts): |
|
1049 | 1049 | """Benchmark operations on revlog chunks. |
|
1050 | 1050 | |
|
1051 | 1051 | Logically, each revlog is a collection of fulltext revisions. However, |
|
1052 | 1052 | stored within each revlog are "chunks" of possibly compressed data. This |
|
1053 | 1053 | data needs to be read and decompressed or compressed and written. |
|
1054 | 1054 | |
|
1055 | 1055 | This command measures the time it takes to read+decompress and recompress |
|
1056 | 1056 | chunks in a revlog. It effectively isolates I/O and compression performance. |
|
1057 | 1057 | For measurements of higher-level operations like resolving revisions, |
|
1058 | 1058 | see ``perfrevlogrevisions`` and ``perfrevlogrevision``. |
|
1059 | 1059 | """ |
|
1060 | 1060 | rl = cmdutil.openrevlog(repo, 'perfrevlogchunks', file_, opts) |
|
1061 | 1061 | |
|
1062 | 1062 | # _chunkraw was renamed to _getsegmentforrevs. |
|
1063 | 1063 | try: |
|
1064 | 1064 | segmentforrevs = rl._getsegmentforrevs |
|
1065 | 1065 | except AttributeError: |
|
1066 | 1066 | segmentforrevs = rl._chunkraw |
|
1067 | 1067 | |
|
1068 | 1068 | # Verify engines argument. |
|
1069 | 1069 | if engines: |
|
1070 | 1070 | engines = set(e.strip() for e in engines.split(',')) |
|
1071 | 1071 | for engine in engines: |
|
1072 | 1072 | try: |
|
1073 | 1073 | util.compressionengines[engine] |
|
1074 | 1074 | except KeyError: |
|
1075 | 1075 | raise error.Abort('unknown compression engine: %s' % engine) |
|
1076 | 1076 | else: |
|
1077 | 1077 | engines = [] |
|
1078 | 1078 | for e in util.compengines: |
|
1079 | 1079 | engine = util.compengines[e] |
|
1080 | 1080 | try: |
|
1081 | 1081 | if engine.available(): |
|
1082 | 1082 | engine.revlogcompressor().compress('dummy') |
|
1083 | 1083 | engines.append(e) |
|
1084 | 1084 | except NotImplementedError: |
|
1085 | 1085 | pass |
|
1086 | 1086 | |
|
1087 | 1087 | revs = list(rl.revs(startrev, len(rl) - 1)) |
|
1088 | 1088 | |
|
1089 | 1089 | def rlfh(rl): |
|
1090 | 1090 | if rl._inline: |
|
1091 | 1091 | return getsvfs(repo)(rl.indexfile) |
|
1092 | 1092 | else: |
|
1093 | 1093 | return getsvfs(repo)(rl.datafile) |
|
1094 | 1094 | |
|
1095 | 1095 | def doread(): |
|
1096 | 1096 | rl.clearcaches() |
|
1097 | 1097 | for rev in revs: |
|
1098 | 1098 | segmentforrevs(rev, rev) |
|
1099 | 1099 | |
|
1100 | 1100 | def doreadcachedfh(): |
|
1101 | 1101 | rl.clearcaches() |
|
1102 | 1102 | fh = rlfh(rl) |
|
1103 | 1103 | for rev in revs: |
|
1104 | 1104 | segmentforrevs(rev, rev, df=fh) |
|
1105 | 1105 | |
|
1106 | 1106 | def doreadbatch(): |
|
1107 | 1107 | rl.clearcaches() |
|
1108 | 1108 | segmentforrevs(revs[0], revs[-1]) |
|
1109 | 1109 | |
|
1110 | 1110 | def doreadbatchcachedfh(): |
|
1111 | 1111 | rl.clearcaches() |
|
1112 | 1112 | fh = rlfh(rl) |
|
1113 | 1113 | segmentforrevs(revs[0], revs[-1], df=fh) |
|
1114 | 1114 | |
|
1115 | 1115 | def dochunk(): |
|
1116 | 1116 | rl.clearcaches() |
|
1117 | 1117 | fh = rlfh(rl) |
|
1118 | 1118 | for rev in revs: |
|
1119 | 1119 | rl._chunk(rev, df=fh) |
|
1120 | 1120 | |
|
1121 | 1121 | chunks = [None] |
|
1122 | 1122 | |
|
1123 | 1123 | def dochunkbatch(): |
|
1124 | 1124 | rl.clearcaches() |
|
1125 | 1125 | fh = rlfh(rl) |
|
1126 | 1126 | # Save chunks as a side-effect. |
|
1127 | 1127 | chunks[0] = rl._chunks(revs, df=fh) |
|
1128 | 1128 | |
|
1129 | 1129 | def docompress(compressor): |
|
1130 | 1130 | rl.clearcaches() |
|
1131 | 1131 | |
|
1132 | 1132 | try: |
|
1133 | 1133 | # Swap in the requested compression engine. |
|
1134 | 1134 | oldcompressor = rl._compressor |
|
1135 | 1135 | rl._compressor = compressor |
|
1136 | 1136 | for chunk in chunks[0]: |
|
1137 | 1137 | rl.compress(chunk) |
|
1138 | 1138 | finally: |
|
1139 | 1139 | rl._compressor = oldcompressor |
|
1140 | 1140 | |
|
1141 | 1141 | benches = [ |
|
1142 | 1142 | (lambda: doread(), 'read'), |
|
1143 | 1143 | (lambda: doreadcachedfh(), 'read w/ reused fd'), |
|
1144 | 1144 | (lambda: doreadbatch(), 'read batch'), |
|
1145 | 1145 | (lambda: doreadbatchcachedfh(), 'read batch w/ reused fd'), |
|
1146 | 1146 | (lambda: dochunk(), 'chunk'), |
|
1147 | 1147 | (lambda: dochunkbatch(), 'chunk batch'), |
|
1148 | 1148 | ] |
|
1149 | 1149 | |
|
1150 | 1150 | for engine in sorted(engines): |
|
1151 | 1151 | compressor = util.compengines[engine].revlogcompressor() |
|
1152 | 1152 | benches.append((functools.partial(docompress, compressor), |
|
1153 | 1153 | 'compress w/ %s' % engine)) |
|
1154 | 1154 | |
|
1155 | 1155 | for fn, title in benches: |
|
1156 | 1156 | timer, fm = gettimer(ui, opts) |
|
1157 | 1157 | timer(fn, title=title) |
|
1158 | 1158 | fm.end() |
|
1159 | 1159 | |
|
1160 | 1160 | @command('perfrevlogrevision', revlogopts + formatteropts + |
|
1161 | 1161 | [('', 'cache', False, 'use caches instead of clearing')], |
|
1162 | 1162 | '-c|-m|FILE REV') |
|
1163 | 1163 | def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts): |
|
1164 | 1164 | """Benchmark obtaining a revlog revision. |
|
1165 | 1165 | |
|
1166 | 1166 | Obtaining a revlog revision consists of roughly the following steps: |
|
1167 | 1167 | |
|
1168 | 1168 | 1. Compute the delta chain |
|
1169 | 1169 | 2. Obtain the raw chunks for that delta chain |
|
1170 | 1170 | 3. Decompress each raw chunk |
|
1171 | 1171 | 4. Apply binary patches to obtain fulltext |
|
1172 | 1172 | 5. Verify hash of fulltext |
|
1173 | 1173 | |
|
1174 | 1174 | This command measures the time spent in each of these phases. |
|
1175 | 1175 | """ |
|
1176 | 1176 | if opts.get('changelog') or opts.get('manifest'): |
|
1177 | 1177 | file_, rev = None, file_ |
|
1178 | 1178 | elif rev is None: |
|
1179 | 1179 | raise error.CommandError('perfrevlogrevision', 'invalid arguments') |
|
1180 | 1180 | |
|
1181 | 1181 | r = cmdutil.openrevlog(repo, 'perfrevlogrevision', file_, opts) |
|
1182 | 1182 | |
|
1183 | 1183 | # _chunkraw was renamed to _getsegmentforrevs. |
|
1184 | 1184 | try: |
|
1185 | 1185 | segmentforrevs = r._getsegmentforrevs |
|
1186 | 1186 | except AttributeError: |
|
1187 | 1187 | segmentforrevs = r._chunkraw |
|
1188 | 1188 | |
|
1189 | 1189 | node = r.lookup(rev) |
|
1190 | 1190 | rev = r.rev(node) |
|
1191 | 1191 | |
|
1192 | 1192 | def getrawchunks(data, chain): |
|
1193 | 1193 | start = r.start |
|
1194 | 1194 | length = r.length |
|
1195 | 1195 | inline = r._inline |
|
1196 | 1196 | iosize = r._io.size |
|
1197 | 1197 | buffer = util.buffer |
|
1198 | 1198 | offset = start(chain[0]) |
|
1199 | 1199 | |
|
1200 | 1200 | chunks = [] |
|
1201 | 1201 | ladd = chunks.append |
|
1202 | 1202 | |
|
1203 | 1203 | for rev in chain: |
|
1204 | 1204 | chunkstart = start(rev) |
|
1205 | 1205 | if inline: |
|
1206 | 1206 | chunkstart += (rev + 1) * iosize |
|
1207 | 1207 | chunklength = length(rev) |
|
1208 | 1208 | ladd(buffer(data, chunkstart - offset, chunklength)) |
|
1209 | 1209 | |
|
1210 | 1210 | return chunks |
|
1211 | 1211 | |
|
1212 | 1212 | def dodeltachain(rev): |
|
1213 | 1213 | if not cache: |
|
1214 | 1214 | r.clearcaches() |
|
1215 | 1215 | r._deltachain(rev) |
|
1216 | 1216 | |
|
1217 | 1217 | def doread(chain): |
|
1218 | 1218 | if not cache: |
|
1219 | 1219 | r.clearcaches() |
|
1220 | 1220 | segmentforrevs(chain[0], chain[-1]) |
|
1221 | 1221 | |
|
1222 | 1222 | def dorawchunks(data, chain): |
|
1223 | 1223 | if not cache: |
|
1224 | 1224 | r.clearcaches() |
|
1225 | 1225 | getrawchunks(data, chain) |
|
1226 | 1226 | |
|
1227 | 1227 | def dodecompress(chunks): |
|
1228 | 1228 | decomp = r.decompress |
|
1229 | 1229 | for chunk in chunks: |
|
1230 | 1230 | decomp(chunk) |
|
1231 | 1231 | |
|
1232 | 1232 | def dopatch(text, bins): |
|
1233 | 1233 | if not cache: |
|
1234 | 1234 | r.clearcaches() |
|
1235 | 1235 | mdiff.patches(text, bins) |
|
1236 | 1236 | |
|
1237 | 1237 | def dohash(text): |
|
1238 | 1238 | if not cache: |
|
1239 | 1239 | r.clearcaches() |
|
1240 | 1240 | r.checkhash(text, node, rev=rev) |
|
1241 | 1241 | |
|
1242 | 1242 | def dorevision(): |
|
1243 | 1243 | if not cache: |
|
1244 | 1244 | r.clearcaches() |
|
1245 | 1245 | r.revision(node) |
|
1246 | 1246 | |
|
1247 | 1247 | chain = r._deltachain(rev)[0] |
|
1248 | 1248 | data = segmentforrevs(chain[0], chain[-1])[1] |
|
1249 | 1249 | rawchunks = getrawchunks(data, chain) |
|
1250 | 1250 | bins = r._chunks(chain) |
|
1251 | 1251 | text = str(bins[0]) |
|
1252 | 1252 | bins = bins[1:] |
|
1253 | 1253 | text = mdiff.patches(text, bins) |
|
1254 | 1254 | |
|
1255 | 1255 | benches = [ |
|
1256 | 1256 | (lambda: dorevision(), 'full'), |
|
1257 | 1257 | (lambda: dodeltachain(rev), 'deltachain'), |
|
1258 | 1258 | (lambda: doread(chain), 'read'), |
|
1259 | 1259 | (lambda: dorawchunks(data, chain), 'rawchunks'), |
|
1260 | 1260 | (lambda: dodecompress(rawchunks), 'decompress'), |
|
1261 | 1261 | (lambda: dopatch(text, bins), 'patch'), |
|
1262 | 1262 | (lambda: dohash(text), 'hash'), |
|
1263 | 1263 | ] |
|
1264 | 1264 | |
|
1265 | 1265 | for fn, title in benches: |
|
1266 | 1266 | timer, fm = gettimer(ui, opts) |
|
1267 | 1267 | timer(fn, title=title) |
|
1268 | 1268 | fm.end() |
|
1269 | 1269 | |
|
1270 | 1270 | @command('perfrevset', |
|
1271 | 1271 | [('C', 'clear', False, 'clear volatile cache between each call.'), |
|
1272 | 1272 | ('', 'contexts', False, 'obtain changectx for each revision')] |
|
1273 | 1273 | + formatteropts, "REVSET") |
|
1274 | 1274 | def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts): |
|
1275 | 1275 | """benchmark the execution time of a revset |
|
1276 | 1276 | |
|
1277 | 1277 | Use the --clear option to evaluate the impact of building the volatile |

1278 | 1278 | revision-set caches on revset execution. The volatile caches hold |

1279 | 1279 | filtering- and obsolescence-related data.""" |
|
1280 | 1280 | timer, fm = gettimer(ui, opts) |
|
1281 | 1281 | def d(): |
|
1282 | 1282 | if clear: |
|
1283 | 1283 | repo.invalidatevolatilesets() |
|
1284 | 1284 | if contexts: |
|
1285 | 1285 | for ctx in repo.set(expr): pass |
|
1286 | 1286 | else: |
|
1287 | 1287 | for r in repo.revs(expr): pass |
|
1288 | 1288 | timer(d) |
|
1289 | 1289 | fm.end() |
|
1290 | 1290 | |
|
1291 | 1291 | @command('perfvolatilesets', |
|
1292 | 1292 | [('', 'clear-obsstore', False, 'drop obsstore between each call.'), |
|
1293 | 1293 | ] + formatteropts) |
|
1294 | 1294 | def perfvolatilesets(ui, repo, *names, **opts): |
|
1295 | 1295 | """benchmark the computation of the various volatile sets |
|
1296 | 1296 | |
|
1297 | 1297 | Volatile sets compute elements related to filtering and obsolescence.""" |
|
1298 | 1298 | timer, fm = gettimer(ui, opts) |
|
1299 | 1299 | repo = repo.unfiltered() |
|
1300 | 1300 | |
|
1301 | 1301 | def getobs(name): |
|
1302 | 1302 | def d(): |
|
1303 | 1303 | repo.invalidatevolatilesets() |
|
1304 | 1304 | if opts['clear_obsstore']: |
|
1305 | 1305 | clearfilecache(repo, 'obsstore') |
|
1306 | 1306 | obsolete.getrevs(repo, name) |
|
1307 | 1307 | return d |
|
1308 | 1308 | |
|
1309 | 1309 | allobs = sorted(obsolete.cachefuncs) |
|
1310 | 1310 | if names: |
|
1311 | 1311 | allobs = [n for n in allobs if n in names] |
|
1312 | 1312 | |
|
1313 | 1313 | for name in allobs: |
|
1314 | 1314 | timer(getobs(name), title=name) |
|
1315 | 1315 | |
|
1316 | 1316 | def getfiltered(name): |
|
1317 | 1317 | def d(): |
|
1318 | 1318 | repo.invalidatevolatilesets() |
|
1319 | 1319 | if opts['clear_obsstore']: |
|
1320 | 1320 | clearfilecache(repo, 'obsstore') |
|
1321 | 1321 | repoview.filterrevs(repo, name) |
|
1322 | 1322 | return d |
|
1323 | 1323 | |
|
1324 | 1324 | allfilter = sorted(repoview.filtertable) |
|
1325 | 1325 | if names: |
|
1326 | 1326 | allfilter = [n for n in allfilter if n in names] |
|
1327 | 1327 | |
|
1328 | 1328 | for name in allfilter: |
|
1329 | 1329 | timer(getfiltered(name), title=name) |
|
1330 | 1330 | fm.end() |
|
1331 | 1331 | |
|
1332 | 1332 | @command('perfbranchmap', |
|
1333 | 1333 | [('f', 'full', False, |
|
1334 | 1334 | 'Includes build time of subset'), |
|
1335 | 1335 | ('', 'clear-revbranch', False, |
|
1336 | 1336 | 'purge the revbranch cache between computation'), |
|
1337 | 1337 | ] + formatteropts) |
|
1338 | 1338 | def perfbranchmap(ui, repo, full=False, clear_revbranch=False, **opts): |
|
1339 | 1339 | """benchmark the update of a branchmap |
|
1340 | 1340 | |
|
1341 | 1341 | This benchmarks the full repo.branchmap() call with read and write disabled.
|
1342 | 1342 | """ |
|
1343 | 1343 | timer, fm = gettimer(ui, opts) |
|
1344 | 1344 | def getbranchmap(filtername): |
|
1345 | 1345 | """generate a benchmark function for the filtername""" |
|
1346 | 1346 | if filtername is None: |
|
1347 | 1347 | view = repo |
|
1348 | 1348 | else: |
|
1349 | 1349 | view = repo.filtered(filtername) |
|
1350 | 1350 | def d(): |
|
1351 | 1351 | if clear_revbranch: |
|
1352 | 1352 | repo.revbranchcache()._clear() |
|
1353 | 1353 | if full: |
|
1354 | 1354 | view._branchcaches.clear() |
|
1355 | 1355 | else: |
|
1356 | 1356 | view._branchcaches.pop(filtername, None) |
|
1357 | 1357 | view.branchmap() |
|
1358 | 1358 | return d |
|
1359 | 1359 | # add filter in smaller subset to bigger subset |
|
1360 | 1360 | possiblefilters = set(repoview.filtertable) |
|
1361 | 1361 | subsettable = getbranchmapsubsettable() |
|
1362 | 1362 | allfilters = [] |
|
1363 | 1363 | while possiblefilters: |
|
1364 | 1364 | for name in possiblefilters: |
|
1365 | 1365 | subset = subsettable.get(name) |
|
1366 | 1366 | if subset not in possiblefilters: |
|
1367 | 1367 | break |
|
1368 | 1368 | else: |
|
1369 | 1369 | assert False, 'subset cycle %s!' % possiblefilters |
|
1370 | 1370 | allfilters.append(name) |
|
1371 | 1371 | possiblefilters.remove(name) |
|
1372 | 1372 | |
|
1373 | 1373 | # warm the cache |
|
1374 | 1374 | if not full: |
|
1375 | 1375 | for name in allfilters: |
|
1376 | 1376 | repo.filtered(name).branchmap() |
|
1377 | 1377 | # add unfiltered |
|
1378 | 1378 | allfilters.append(None) |
|
1379 | 1379 | |
|
1380 | 1380 | branchcacheread = safeattrsetter(branchmap, 'read') |
|
1381 | 1381 | branchcachewrite = safeattrsetter(branchmap.branchcache, 'write') |
|
1382 | 1382 | branchcacheread.set(lambda repo: None) |
|
1383 | 1383 | branchcachewrite.set(lambda bc, repo: None) |
|
1384 | 1384 | try: |
|
1385 | 1385 | for name in allfilters: |
|
1386 | 1386 | timer(getbranchmap(name), title=str(name)) |
|
1387 | 1387 | finally: |
|
1388 | 1388 | branchcacheread.restore() |
|
1389 | 1389 | branchcachewrite.restore() |
|
1390 | 1390 | fm.end() |
|
1391 | 1391 | |
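The while/for/else loop above is a small topological sort: a filter is appended only once the filter it builds on (its "subset") has already been appended, so caches are warmed smallest-first. A standalone sketch of the same ordering, with a hypothetical subset table made up for the example:

    def order_by_subset(filters, subsettable):
        pending = set(filters)
        ordered = []
        while pending:
            for name in pending:
                if subsettable.get(name) not in pending:
                    break           # its subset is already ordered (or None)
            else:
                raise ValueError('subset cycle: %r' % pending)
            ordered.append(name)
            pending.remove(name)
        return ordered

    # Hypothetical table mapping each filter to the subset it extends:
    table = {'visible': 'served', 'served': 'immutable', 'immutable': 'base'}
    print(order_by_subset(['visible', 'served', 'immutable', 'base'], table))
    # ['base', 'immutable', 'served', 'visible']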
|
1392 | 1392 | @command('perfloadmarkers') |
|
1393 | 1393 | def perfloadmarkers(ui, repo): |
|
1394 | 1394 | """benchmark the time to parse the on-disk markers for a repo |
|
1395 | 1395 | |
|
1396 | 1396 | Result is the number of markers in the repo.""" |
|
1397 | 1397 | timer, fm = gettimer(ui) |
|
1398 | 1398 | svfs = getsvfs(repo) |
|
1399 | 1399 | timer(lambda: len(obsolete.obsstore(svfs))) |
|
1400 | 1400 | fm.end() |
|
1401 | 1401 | |
|
1402 | 1402 | @command('perflrucachedict', formatteropts + |
|
1403 | 1403 | [('', 'size', 4, 'size of cache'), |
|
1404 | 1404 | ('', 'gets', 10000, 'number of key lookups'), |
|
1405 | 1405 | ('', 'sets', 10000, 'number of key sets'), |
|
1406 | 1406 | ('', 'mixed', 10000, 'number of mixed mode operations'), |
|
1407 | 1407 | ('', 'mixedgetfreq', 50, 'frequency of get vs set ops in mixed mode')], |
|
1408 | 1408 | norepo=True) |
|
1409 | 1409 | def perflrucache(ui, size=4, gets=10000, sets=10000, mixed=10000, |
|
1410 | 1410 | mixedgetfreq=50, **opts): |
|
1411 | 1411 | def doinit(): |
|
1412 | 1412 | for i in xrange(10000): |
|
1413 | 1413 | util.lrucachedict(size) |
|
1414 | 1414 | |
|
1415 | 1415 | values = [] |
|
1416 | 1416 | for i in xrange(size): |
|
1417 | 1417 | values.append(random.randint(0, sys.maxint)) |
|
1418 | 1418 | |
|
1419 | 1419 | # Get mode fills the cache and tests raw lookup performance with no |
|
1420 | 1420 | # eviction. |
|
1421 | 1421 | getseq = [] |
|
1422 | 1422 | for i in xrange(gets): |
|
1423 | 1423 | getseq.append(random.choice(values)) |
|
1424 | 1424 | |
|
1425 | 1425 | def dogets(): |
|
1426 | 1426 | d = util.lrucachedict(size) |
|
1427 | 1427 | for v in values: |
|
1428 | 1428 | d[v] = v |
|
1429 | 1429 | for key in getseq: |
|
1430 | 1430 | value = d[key] |
|
1431 | 1431 | value # silence pyflakes warning |
|
1432 | 1432 | |
|
1433 | 1433 | # Set mode tests insertion speed with cache eviction. |
|
1434 | 1434 | setseq = [] |
|
1435 | 1435 | for i in xrange(sets): |
|
1436 | 1436 | setseq.append(random.randint(0, sys.maxint)) |
|
1437 | 1437 | |
|
1438 | 1438 | def dosets(): |
|
1439 | 1439 | d = util.lrucachedict(size) |
|
1440 | 1440 | for v in setseq: |
|
1441 | 1441 | d[v] = v |
|
1442 | 1442 | |
|
1443 | 1443 | # Mixed mode randomly performs gets and sets with eviction. |
|
1444 | 1444 | mixedops = [] |
|
1445 | 1445 | for i in xrange(mixed): |
|
1446 | 1446 | r = random.randint(0, 100) |
|
1447 | 1447 | if r < mixedgetfreq: |
|
1448 | 1448 | op = 0 |
|
1449 | 1449 | else: |
|
1450 | 1450 | op = 1 |
|
1451 | 1451 | |
|
1452 | 1452 | mixedops.append((op, random.randint(0, size * 2))) |
|
1453 | 1453 | |
|
1454 | 1454 | def domixed(): |
|
1455 | 1455 | d = util.lrucachedict(size) |
|
1456 | 1456 | |
|
1457 | 1457 | for op, v in mixedops: |
|
1458 | 1458 | if op == 0: |
|
1459 | 1459 | try: |
|
1460 | 1460 | d[v] |
|
1461 | 1461 | except KeyError: |
|
1462 | 1462 | pass |
|
1463 | 1463 | else: |
|
1464 | 1464 | d[v] = v |
|
1465 | 1465 | |
|
1466 | 1466 | benches = [ |
|
1467 | 1467 | (doinit, 'init'), |
|
1468 | 1468 | (dogets, 'gets'), |
|
1469 | 1469 | (dosets, 'sets'), |
|
1470 | 1470 | (domixed, 'mixed') |
|
1471 | 1471 | ] |
|
1472 | 1472 | |
|
1473 | 1473 | for fn, title in benches: |
|
1474 | 1474 | timer, fm = gettimer(ui, opts) |
|
1475 | 1475 | timer(fn, title=title) |
|
1476 | 1476 | fm.end() |
|
1477 | 1477 | |
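A hedged usage sketch of util.lrucachedict, matching the three operations benchmarked above. The exact eviction order shown is my assumption about the LRU policy; the KeyError-on-miss behavior is what domixed() above relies on:

    from mercurial import util

    d = util.lrucachedict(2)        # at most two entries
    d['a'] = 1
    d['b'] = 2
    d['a']                          # touch 'a'; 'b' is now least recent
    d['c'] = 3                      # assumed to evict 'b'
    try:
        d['b']
    except KeyError:
        print('b was evicted')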
|
1478 | 1478 | @command('perfwrite', formatteropts) |
|
1479 | 1479 | def perfwrite(ui, repo, **opts): |
|
1480 | 1480 | """microbenchmark ui.write |
|
1481 | 1481 | """ |
|
1482 | 1482 | timer, fm = gettimer(ui, opts) |
|
1483 | 1483 | def write(): |
|
1484 | 1484 | for i in range(100000): |
|
1485 | 1485 | ui.write(('Testing write performance\n')) |
|
1486 | 1486 | timer(write) |
|
1487 | 1487 | fm.end() |
|
1488 | 1488 | |
|
1489 | 1489 | def uisetup(ui): |
|
1490 | 1490 | if (util.safehasattr(cmdutil, 'openrevlog') and |
|
1491 | 1491 | not util.safehasattr(commands, 'debugrevlogopts')): |
|
1492 | 1492 | # for "historical portability": |
|
1493 | 1493 | # In this case, Mercurial should be 1.9 (or a79fea6b3e77) - |
|
1494 | 1494 | # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for |
|
1495 | 1495 | # openrevlog() should cause failure, because it has been |
|
1496 | 1496 | # available since 3.5 (or 49c583ca48c4). |
|
1497 | 1497 | def openrevlog(orig, repo, cmd, file_, opts): |
|
1498 | 1498 | if opts.get('dir') and not util.safehasattr(repo, 'dirlog'): |
|
1499 | 1499 | raise error.Abort("This version doesn't support --dir option", |
|
1500 | 1500 | hint="use 3.5 or later") |
|
1501 | 1501 | return orig(repo, cmd, file_, opts) |
|
1502 | 1502 | extensions.wrapfunction(cmdutil, 'openrevlog', openrevlog) |
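The uisetup hook above relies on extensions.wrapfunction, whose convention is that the wrapper receives the original callable as its first argument and decides whether to delegate. A self-contained illustration of the same shape, using a plain object instead of a Mercurial module (all names here are made up for the example):

    class _api(object):
        def double(self, x):
            return 2 * x
    api = _api()

    def wrapfunction(container, funcname, wrapper):
        # Same calling convention as extensions.wrapfunction.
        origfn = getattr(container, funcname)
        def wrapped(*args, **kwargs):
            return wrapper(origfn, *args, **kwargs)
        setattr(container, funcname, wrapped)

    def loudly(orig, x):
        print('doubling %r' % x)
        return orig(x)

    wrapfunction(api, 'double', loudly)
    print(api.double(21))           # prints the message, then 42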
@@ -1,1406 +1,1403 | |||
|
1 | 1 | # dirstate.py - working directory tracking for mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import collections |
|
11 | 11 | import contextlib |
|
12 | 12 | import errno |
|
13 | 13 | import os |
|
14 | 14 | import stat |
|
15 | 15 | |
|
16 | 16 | from .i18n import _ |
|
17 | 17 | from .node import nullid |
|
18 | 18 | from . import ( |
|
19 | 19 | encoding, |
|
20 | 20 | error, |
|
21 | 21 | match as matchmod, |
|
22 | 22 | pathutil, |
|
23 | 23 | policy, |
|
24 | 24 | pycompat, |
|
25 | 25 | scmutil, |
|
26 | 26 | txnutil, |
|
27 | 27 | util, |
|
28 | 28 | ) |
|
29 | 29 | |
|
30 | 30 | parsers = policy.importmod(r'parsers') |
|
31 | 31 | |
|
32 | 32 | propertycache = util.propertycache |
|
33 | 33 | filecache = scmutil.filecache |
|
34 | 34 | _rangemask = 0x7fffffff |
|
35 | 35 | |
|
36 | 36 | dirstatetuple = parsers.dirstatetuple |
|
37 | 37 | |
|
38 | 38 | class repocache(filecache): |
|
39 | 39 | """filecache for files in .hg/""" |
|
40 | 40 | def join(self, obj, fname): |
|
41 | 41 | return obj._opener.join(fname) |
|
42 | 42 | |
|
43 | 43 | class rootcache(filecache): |
|
44 | 44 | """filecache for files in the repository root""" |
|
45 | 45 | def join(self, obj, fname): |
|
46 | 46 | return obj._join(fname) |
|
47 | 47 | |
|
48 | 48 | def _getfsnow(vfs): |
|
49 | 49 | '''Get "now" timestamp on filesystem''' |
|
50 | 50 | tmpfd, tmpname = vfs.mkstemp() |
|
51 | 51 | try: |
|
52 | 52 | return os.fstat(tmpfd).st_mtime |
|
53 | 53 | finally: |
|
54 | 54 | os.close(tmpfd) |
|
55 | 55 | vfs.unlink(tmpname) |
|
56 | 56 | |
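The same trick with only the standard library, assuming an ordinary directory path in place of the Mercurial vfs: create a scratch file and read its mtime back as the filesystem's notion of "now". This matters because the filesystem's timestamp resolution can differ from time.time():

    import os
    import tempfile

    def getfsnow(directory):
        fd, name = tempfile.mkstemp(dir=directory)
        try:
            return os.fstat(fd).st_mtime    # mtime as the filesystem saw it
        finally:
            os.close(fd)
            os.unlink(name)

    print(getfsnow('.'))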
|
57 | 57 | class dirstate(object): |
|
58 | 58 | |
|
59 | 59 | def __init__(self, opener, ui, root, validate, sparsematchfn): |
|
60 | 60 | '''Create a new dirstate object. |
|
61 | 61 | |
|
62 | 62 | opener is an open()-like callable that can be used to open the |
|
63 | 63 | dirstate file; root is the root of the directory tracked by |
|
64 | 64 | the dirstate. |
|
65 | 65 | ''' |
|
66 | 66 | self._opener = opener |
|
67 | 67 | self._validate = validate |
|
68 | 68 | self._root = root |
|
69 | 69 | self._sparsematchfn = sparsematchfn |
|
70 | 70 | # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is |
|
71 | 71 | # UNC path pointing to root share (issue4557) |
|
72 | 72 | self._rootdir = pathutil.normasprefix(root) |
|
73 | 73 | self._dirty = False |
|
74 | 74 | self._lastnormaltime = 0 |
|
75 | 75 | self._ui = ui |
|
76 | 76 | self._filecache = {} |
|
77 | 77 | self._parentwriters = 0 |
|
78 | 78 | self._filename = 'dirstate' |
|
79 | 79 | self._pendingfilename = '%s.pending' % self._filename |
|
80 | 80 | self._plchangecallbacks = {} |
|
81 | 81 | self._origpl = None |
|
82 | 82 | self._updatedfiles = set() |
|
83 | 83 | |
|
84 | 84 | @contextlib.contextmanager |
|
85 | 85 | def parentchange(self): |
|
86 | 86 | '''Context manager for handling dirstate parents. |
|
87 | 87 | |
|
88 | 88 | If an exception occurs in the scope of the context manager, |
|
89 | 89 | the incoherent dirstate won't be written when wlock is |
|
90 | 90 | released. |
|
91 | 91 | ''' |
|
92 | 92 | self._parentwriters += 1 |
|
93 | 93 | yield |
|
94 | 94 | # Typically we want the "undo" step of a context manager in a |
|
95 | 95 | # finally block so it happens even when an exception |
|
96 | 96 | # occurs. In this case, however, we only want to decrement |
|
97 | 97 | # parentwriters if the code in the with statement exits |
|
98 | 98 | # normally, so we don't have a try/finally here on purpose. |
|
99 | 99 | self._parentwriters -= 1 |
|
100 | 100 | |
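A standalone toy showing why the decrement is deliberately not in a finally block: after an exception the counter stays raised, so later code can tell the dirstate may be incoherent and avoid writing it out.

    import contextlib

    class toy(object):
        def __init__(self):
            self._parentwriters = 0

        @contextlib.contextmanager
        def parentchange(self):
            self._parentwriters += 1
            yield
            self._parentwriters -= 1    # skipped when the body raises

    t = toy()
    try:
        with t.parentchange():
            raise RuntimeError('boom')
    except RuntimeError:
        pass
    print(t._parentwriters)             # 1: still marked as mid-change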
|
101 | 101 | def beginparentchange(self): |
|
102 | 102 | '''Marks the beginning of a set of changes that involve changing |
|
103 | 103 | the dirstate parents. If there is an exception during this time, |
|
104 | 104 | the dirstate will not be written when the wlock is released. This |
|
105 | 105 | prevents writing an incoherent dirstate where the parent doesn't |
|
106 | 106 | match the contents. |
|
107 | 107 | ''' |
|
108 | 108 | self._ui.deprecwarn('beginparentchange is obsoleted by the ' |
|
109 | 109 | 'parentchange context manager.', '4.3') |
|
110 | 110 | self._parentwriters += 1 |
|
111 | 111 | |
|
112 | 112 | def endparentchange(self): |
|
113 | 113 | '''Marks the end of a set of changes that involve changing the |
|
114 | 114 | dirstate parents. Once all parent changes have been marked done, |
|
115 | 115 | the wlock will be free to write the dirstate on release. |
|
116 | 116 | ''' |
|
117 | 117 | self._ui.deprecwarn('endparentchange is obsoleted by the ' |
|
118 | 118 | 'parentchange context manager.', '4.3') |
|
119 | 119 | if self._parentwriters > 0: |
|
120 | 120 | self._parentwriters -= 1 |
|
121 | 121 | |
|
122 | 122 | def pendingparentchange(self): |
|
123 | 123 | '''Returns true if the dirstate is in the middle of a set of changes |
|
124 | 124 | that modify the dirstate parent. |
|
125 | 125 | ''' |
|
126 | 126 | return self._parentwriters > 0 |
|
127 | 127 | |
|
128 | 128 | @propertycache |
|
129 | 129 | def _map(self): |
|
130 | 130 | '''Return the dirstate contents as a map from filename to |
|
131 | 131 | (state, mode, size, time).''' |
|
132 | 132 | self._read() |
|
133 | 133 | return self._map |
|
134 | 134 | |
|
135 | 135 | @propertycache |
|
136 | def _filefoldmap(self): | |
|
137 | return self._map.filefoldmap() | |
|
138 | ||
|
139 | @propertycache | |
|
140 | 136 | def _dirfoldmap(self): |
|
141 | 137 | f = {} |
|
142 | 138 | normcase = util.normcase |
|
143 | 139 | for name in self._dirs: |
|
144 | 140 | f[normcase(name)] = name |
|
145 | 141 | return f |
|
146 | 142 | |
|
147 | 143 | @property |
|
148 | 144 | def _sparsematcher(self): |
|
149 | 145 | """The matcher for the sparse checkout. |
|
150 | 146 | |
|
151 | 147 | The working directory may not include every file from a manifest. The |
|
152 | 148 | matcher obtained by this property will match a path if it is to be |
|
153 | 149 | included in the working directory. |
|
154 | 150 | """ |
|
155 | 151 | # TODO there is potential to cache this property. For now, the matcher |
|
156 | 152 | # is resolved on every access. (But the called function does use a |
|
157 | 153 | # cache to keep the lookup fast.) |
|
158 | 154 | return self._sparsematchfn() |
|
159 | 155 | |
|
160 | 156 | @repocache('branch') |
|
161 | 157 | def _branch(self): |
|
162 | 158 | try: |
|
163 | 159 | return self._opener.read("branch").strip() or "default" |
|
164 | 160 | except IOError as inst: |
|
165 | 161 | if inst.errno != errno.ENOENT: |
|
166 | 162 | raise |
|
167 | 163 | return "default" |
|
168 | 164 | |
|
169 | 165 | @property |
|
170 | 166 | def _pl(self): |
|
171 | 167 | return self._map.parents() |
|
172 | 168 | |
|
173 | 169 | @propertycache |
|
174 | 170 | def _dirs(self): |
|
175 | 171 | return self._map.dirs() |
|
176 | 172 | |
|
177 | 173 | def dirs(self): |
|
178 | 174 | return self._dirs |
|
179 | 175 | |
|
180 | 176 | @rootcache('.hgignore') |
|
181 | 177 | def _ignore(self): |
|
182 | 178 | files = self._ignorefiles() |
|
183 | 179 | if not files: |
|
184 | 180 | return matchmod.never(self._root, '') |
|
185 | 181 | |
|
186 | 182 | pats = ['include:%s' % f for f in files] |
|
187 | 183 | return matchmod.match(self._root, '', [], pats, warn=self._ui.warn) |
|
188 | 184 | |
|
189 | 185 | @propertycache |
|
190 | 186 | def _slash(self): |
|
191 | 187 | return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/' |
|
192 | 188 | |
|
193 | 189 | @propertycache |
|
194 | 190 | def _checklink(self): |
|
195 | 191 | return util.checklink(self._root) |
|
196 | 192 | |
|
197 | 193 | @propertycache |
|
198 | 194 | def _checkexec(self): |
|
199 | 195 | return util.checkexec(self._root) |
|
200 | 196 | |
|
201 | 197 | @propertycache |
|
202 | 198 | def _checkcase(self): |
|
203 | 199 | return not util.fscasesensitive(self._join('.hg')) |
|
204 | 200 | |
|
205 | 201 | def _join(self, f): |
|
206 | 202 | # much faster than os.path.join() |
|
207 | 203 | # it's safe because f is always a relative path |
|
208 | 204 | return self._rootdir + f |
|
209 | 205 | |
|
210 | 206 | def flagfunc(self, buildfallback): |
|
211 | 207 | if self._checklink and self._checkexec: |
|
212 | 208 | def f(x): |
|
213 | 209 | try: |
|
214 | 210 | st = os.lstat(self._join(x)) |
|
215 | 211 | if util.statislink(st): |
|
216 | 212 | return 'l' |
|
217 | 213 | if util.statisexec(st): |
|
218 | 214 | return 'x' |
|
219 | 215 | except OSError: |
|
220 | 216 | pass |
|
221 | 217 | return '' |
|
222 | 218 | return f |
|
223 | 219 | |
|
224 | 220 | fallback = buildfallback() |
|
225 | 221 | if self._checklink: |
|
226 | 222 | def f(x): |
|
227 | 223 | if os.path.islink(self._join(x)): |
|
228 | 224 | return 'l' |
|
229 | 225 | if 'x' in fallback(x): |
|
230 | 226 | return 'x' |
|
231 | 227 | return '' |
|
232 | 228 | return f |
|
233 | 229 | if self._checkexec: |
|
234 | 230 | def f(x): |
|
235 | 231 | if 'l' in fallback(x): |
|
236 | 232 | return 'l' |
|
237 | 233 | if util.isexec(self._join(x)): |
|
238 | 234 | return 'x' |
|
239 | 235 | return '' |
|
240 | 236 | return f |
|
241 | 237 | else: |
|
242 | 238 | return fallback |
|
243 | 239 | |
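A hedged standalone version of the first (full-support) branch of flagfunc(): read the symlink and exec flags from a single lstat call. The exec test here accepts any exec bit, a simplification of what util.statisexec checks:

    import os
    import stat

    def flags(path):
        st = os.lstat(path)
        if stat.S_ISLNK(st.st_mode):
            return 'l'
        if st.st_mode & 0o111:          # simplification: any exec bit
            return 'x'
        return ''

    if os.path.exists('/bin/sh'):
        print(flags('/bin/sh'))         # 'x' on most Unix systems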
|
244 | 240 | @propertycache |
|
245 | 241 | def _cwd(self): |
|
246 | 242 | # internal config: ui.forcecwd |
|
247 | 243 | forcecwd = self._ui.config('ui', 'forcecwd') |
|
248 | 244 | if forcecwd: |
|
249 | 245 | return forcecwd |
|
250 | 246 | return pycompat.getcwd() |
|
251 | 247 | |
|
252 | 248 | def getcwd(self): |
|
253 | 249 | '''Return the path from which a canonical path is calculated. |
|
254 | 250 | |
|
255 | 251 | This path should be used to resolve file patterns or to convert |
|
256 | 252 | canonical paths back to file paths for display. It shouldn't be |
|
257 | 253 | used to get real file paths. Use vfs functions instead. |
|
258 | 254 | ''' |
|
259 | 255 | cwd = self._cwd |
|
260 | 256 | if cwd == self._root: |
|
261 | 257 | return '' |
|
262 | 258 | # self._root ends with a path separator if self._root is '/' or 'C:\' |
|
263 | 259 | rootsep = self._root |
|
264 | 260 | if not util.endswithsep(rootsep): |
|
265 | 261 | rootsep += pycompat.ossep |
|
266 | 262 | if cwd.startswith(rootsep): |
|
267 | 263 | return cwd[len(rootsep):] |
|
268 | 264 | else: |
|
269 | 265 | # we're outside the repo. return an absolute path. |
|
270 | 266 | return cwd |
|
271 | 267 | |
|
272 | 268 | def pathto(self, f, cwd=None): |
|
273 | 269 | if cwd is None: |
|
274 | 270 | cwd = self.getcwd() |
|
275 | 271 | path = util.pathto(self._root, cwd, f) |
|
276 | 272 | if self._slash: |
|
277 | 273 | return util.pconvert(path) |
|
278 | 274 | return path |
|
279 | 275 | |
|
280 | 276 | def __getitem__(self, key): |
|
281 | 277 | '''Return the current state of key (a filename) in the dirstate. |
|
282 | 278 | |
|
283 | 279 | States are: |
|
284 | 280 | n normal |
|
285 | 281 | m needs merging |
|
286 | 282 | r marked for removal |
|
287 | 283 | a marked for addition |
|
288 | 284 | ? not tracked |
|
289 | 285 | ''' |
|
290 | 286 | return self._map.get(key, ("?",))[0] |
|
291 | 287 | |
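The one-tuple default in __getitem__ is worth seeing standalone: dict.get with ("?",) as the default lets a single [0] index cover both hits (full dirstate tuples) and misses (untracked files) without a conditional:

    dmap = {'tracked.txt': ('n', 0o644, 12, 1500000000)}
    for f in ('tracked.txt', 'unknown.txt'):
        print('%s -> %s' % (f, dmap.get(f, ('?',))[0]))   # 'n', then '?'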
|
292 | 288 | def __contains__(self, key): |
|
293 | 289 | return key in self._map |
|
294 | 290 | |
|
295 | 291 | def __iter__(self): |
|
296 | 292 | return iter(sorted(self._map)) |
|
297 | 293 | |
|
298 | 294 | def items(self): |
|
299 | 295 | return self._map.iteritems() |
|
300 | 296 | |
|
301 | 297 | iteritems = items |
|
302 | 298 | |
|
303 | 299 | def parents(self): |
|
304 | 300 | return [self._validate(p) for p in self._pl] |
|
305 | 301 | |
|
306 | 302 | def p1(self): |
|
307 | 303 | return self._validate(self._pl[0]) |
|
308 | 304 | |
|
309 | 305 | def p2(self): |
|
310 | 306 | return self._validate(self._pl[1]) |
|
311 | 307 | |
|
312 | 308 | def branch(self): |
|
313 | 309 | return encoding.tolocal(self._branch) |
|
314 | 310 | |
|
315 | 311 | def setparents(self, p1, p2=nullid): |
|
316 | 312 | """Set dirstate parents to p1 and p2. |
|
317 | 313 | |
|
318 | 314 | When moving from two parents to one, 'm' merged entries a |
|
319 | 315 | adjusted to normal and previous copy records discarded and |
|
320 | 316 | returned by the call. |
|
321 | 317 | |
|
322 | 318 | See localrepo.setparents() |
|
323 | 319 | """ |
|
324 | 320 | if self._parentwriters == 0: |
|
325 | 321 | raise ValueError("cannot set dirstate parent without " |
|
326 | 322 | "calling dirstate.beginparentchange") |
|
327 | 323 | |
|
328 | 324 | self._dirty = True |
|
329 | 325 | oldp2 = self._pl[1] |
|
330 | 326 | if self._origpl is None: |
|
331 | 327 | self._origpl = self._pl |
|
332 | 328 | self._map.setparents(p1, p2) |
|
333 | 329 | copies = {} |
|
334 | 330 | if oldp2 != nullid and p2 == nullid: |
|
335 | 331 | candidatefiles = self._map.nonnormalset.union( |
|
336 | 332 | self._map.otherparentset) |
|
337 | 333 | for f in candidatefiles: |
|
338 | 334 | s = self._map.get(f) |
|
339 | 335 | if s is None: |
|
340 | 336 | continue |
|
341 | 337 | |
|
342 | 338 | # Discard 'm' markers when moving away from a merge state |
|
343 | 339 | if s[0] == 'm': |
|
344 | 340 | source = self._map.copymap.get(f) |
|
345 | 341 | if source: |
|
346 | 342 | copies[f] = source |
|
347 | 343 | self.normallookup(f) |
|
348 | 344 | # Also fix up otherparent markers |
|
349 | 345 | elif s[0] == 'n' and s[2] == -2: |
|
350 | 346 | source = self._map.copymap.get(f) |
|
351 | 347 | if source: |
|
352 | 348 | copies[f] = source |
|
353 | 349 | self.add(f) |
|
354 | 350 | return copies |
|
355 | 351 | |
|
356 | 352 | def setbranch(self, branch): |
|
357 | 353 | self._branch = encoding.fromlocal(branch) |
|
358 | 354 | f = self._opener('branch', 'w', atomictemp=True, checkambig=True) |
|
359 | 355 | try: |
|
360 | 356 | f.write(self._branch + '\n') |
|
361 | 357 | f.close() |
|
362 | 358 | |
|
363 | 359 | # make sure filecache has the correct stat info for _branch after |
|
364 | 360 | # replacing the underlying file |
|
365 | 361 | ce = self._filecache['_branch'] |
|
366 | 362 | if ce: |
|
367 | 363 | ce.refresh() |
|
368 | 364 | except: # re-raises |
|
369 | 365 | f.discard() |
|
370 | 366 | raise |
|
371 | 367 | |
|
372 | 368 | def _read(self): |
|
373 | 369 | self._map = dirstatemap(self._ui, self._opener, self._root) |
|
374 | 370 | self._map.read() |
|
375 | 371 | |
|
376 | 372 | def invalidate(self): |
|
377 | 373 | '''Causes the next access to reread the dirstate. |
|
378 | 374 | |
|
379 | 375 | This is different from localrepo.invalidatedirstate() because it always |
|
380 | 376 | rereads the dirstate. Use localrepo.invalidatedirstate() if you want to |
|
381 | 377 | check whether the dirstate has changed before rereading it.''' |
|
382 | 378 | |
|
383 | for a in ("_map", "_filefoldmap", "_dirfoldmap", "_branch", | 

379 | for a in ("_map", "_dirfoldmap", "_branch", | 
|
384 | 380 | "_dirs", "_ignore"): |
|
385 | 381 | if a in self.__dict__: |
|
386 | 382 | delattr(self, a) |
|
387 | 383 | self._lastnormaltime = 0 |
|
388 | 384 | self._dirty = False |
|
389 | 385 | self._updatedfiles.clear() |
|
390 | 386 | self._parentwriters = 0 |
|
391 | 387 | self._origpl = None |
|
392 | 388 | |
|
393 | 389 | def copy(self, source, dest): |
|
394 | 390 | """Mark dest as a copy of source. Unmark dest if source is None.""" |
|
395 | 391 | if source == dest: |
|
396 | 392 | return |
|
397 | 393 | self._dirty = True |
|
398 | 394 | if source is not None: |
|
399 | 395 | self._map.copymap[dest] = source |
|
400 | 396 | self._updatedfiles.add(source) |
|
401 | 397 | self._updatedfiles.add(dest) |
|
402 | 398 | elif self._map.copymap.pop(dest, None): |
|
403 | 399 | self._updatedfiles.add(dest) |
|
404 | 400 | |
|
405 | 401 | def copied(self, file): |
|
406 | 402 | return self._map.copymap.get(file, None) |
|
407 | 403 | |
|
408 | 404 | def copies(self): |
|
409 | 405 | return self._map.copymap |
|
410 | 406 | |
|
411 | 407 | def _droppath(self, f): |
|
412 | 408 | if self[f] not in "?r" and "_dirs" in self.__dict__: |
|
413 | 409 | self._dirs.delpath(f) |
|
414 | 410 | |
|
415 | if "_filefoldmap" in self.__dict__: | 

411 | if "filefoldmap" in self._map.__dict__: | 
|
416 | 412 | normed = util.normcase(f) |
|
417 | if normed in self._filefoldmap: | |
|
418 | del self._filefoldmap[normed] | |
|
413 | if normed in self._map.filefoldmap: | |
|
414 | del self._map.filefoldmap[normed] | |
|
419 | 415 | |
|
420 | 416 | self._updatedfiles.add(f) |
|
421 | 417 | |
|
422 | 418 | def _addpath(self, f, state, mode, size, mtime): |
|
423 | 419 | oldstate = self[f] |
|
424 | 420 | if state == 'a' or oldstate == 'r': |
|
425 | 421 | scmutil.checkfilename(f) |
|
426 | 422 | if f in self._dirs: |
|
427 | 423 | raise error.Abort(_('directory %r already in dirstate') % f) |
|
428 | 424 | # shadows |
|
429 | 425 | for d in util.finddirs(f): |
|
430 | 426 | if d in self._dirs: |
|
431 | 427 | break |
|
432 | 428 | entry = self._map.get(d) |
|
433 | 429 | if entry is not None and entry[0] != 'r': |
|
434 | 430 | raise error.Abort( |
|
435 | 431 | _('file %r in dirstate clashes with %r') % (d, f)) |
|
436 | 432 | if oldstate in "?r" and "_dirs" in self.__dict__: |
|
437 | 433 | self._dirs.addpath(f) |
|
438 | 434 | self._dirty = True |
|
439 | 435 | self._updatedfiles.add(f) |
|
440 | 436 | self._map[f] = dirstatetuple(state, mode, size, mtime) |
|
441 | 437 | if state != 'n' or mtime == -1: |
|
442 | 438 | self._map.nonnormalset.add(f) |
|
443 | 439 | if size == -2: |
|
444 | 440 | self._map.otherparentset.add(f) |
|
445 | 441 | |
|
446 | 442 | def normal(self, f): |
|
447 | 443 | '''Mark a file normal and clean.''' |
|
448 | 444 | s = os.lstat(self._join(f)) |
|
449 | 445 | mtime = s.st_mtime |
|
450 | 446 | self._addpath(f, 'n', s.st_mode, |
|
451 | 447 | s.st_size & _rangemask, mtime & _rangemask) |
|
452 | 448 | self._map.copymap.pop(f, None) |
|
453 | 449 | if f in self._map.nonnormalset: |
|
454 | 450 | self._map.nonnormalset.remove(f) |
|
455 | 451 | if mtime > self._lastnormaltime: |
|
456 | 452 | # Remember the most recent modification timeslot for status(), |
|
457 | 453 | # to make sure we won't miss future size-preserving file content |
|
458 | 454 | # modifications that happen within the same timeslot. |
|
459 | 455 | self._lastnormaltime = mtime |
|
460 | 456 | |
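Why normal() masks size and mtime with _rangemask: the on-disk format keeps both in 31 bits, so larger values wrap when stored, and status() (further down) compares with the same mask applied. A worked example:

    _rangemask = 0x7fffffff

    bigsize = 5 * 1024 ** 3                   # a 5 GiB file
    stored = bigsize & _rangemask             # what the dirstate records
    print(stored == bigsize)                  # False: truncated on store
    print(stored == bigsize & _rangemask)     # True: masked comparison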
|
461 | 457 | def normallookup(self, f): |
|
462 | 458 | '''Mark a file normal, but possibly dirty.''' |
|
463 | 459 | if self._pl[1] != nullid: |
|
464 | 460 | # if there is a merge going on and the file was either |
|
465 | 461 | # in state 'm' (-1) or coming from other parent (-2) before |
|
466 | 462 | # being removed, restore that state. |
|
467 | 463 | entry = self._map.get(f) |
|
468 | 464 | if entry is not None: |
|
469 | 465 | if entry[0] == 'r' and entry[2] in (-1, -2): |
|
470 | 466 | source = self._map.copymap.get(f) |
|
471 | 467 | if entry[2] == -1: |
|
472 | 468 | self.merge(f) |
|
473 | 469 | elif entry[2] == -2: |
|
474 | 470 | self.otherparent(f) |
|
475 | 471 | if source: |
|
476 | 472 | self.copy(source, f) |
|
477 | 473 | return |
|
478 | 474 | if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2: |
|
479 | 475 | return |
|
480 | 476 | self._addpath(f, 'n', 0, -1, -1) |
|
481 | 477 | self._map.copymap.pop(f, None) |
|
482 | 478 | if f in self._map.nonnormalset: |
|
483 | 479 | self._map.nonnormalset.remove(f) |
|
484 | 480 | |
|
485 | 481 | def otherparent(self, f): |
|
486 | 482 | '''Mark as coming from the other parent, always dirty.''' |
|
487 | 483 | if self._pl[1] == nullid: |
|
488 | 484 | raise error.Abort(_("setting %r to other parent " |
|
489 | 485 | "only allowed in merges") % f) |
|
490 | 486 | if f in self and self[f] == 'n': |
|
491 | 487 | # merge-like |
|
492 | 488 | self._addpath(f, 'm', 0, -2, -1) |
|
493 | 489 | else: |
|
494 | 490 | # add-like |
|
495 | 491 | self._addpath(f, 'n', 0, -2, -1) |
|
496 | 492 | self._map.copymap.pop(f, None) |
|
497 | 493 | |
|
498 | 494 | def add(self, f): |
|
499 | 495 | '''Mark a file added.''' |
|
500 | 496 | self._addpath(f, 'a', 0, -1, -1) |
|
501 | 497 | self._map.copymap.pop(f, None) |
|
502 | 498 | |
|
503 | 499 | def remove(self, f): |
|
504 | 500 | '''Mark a file removed.''' |
|
505 | 501 | self._dirty = True |
|
506 | 502 | self._droppath(f) |
|
507 | 503 | size = 0 |
|
508 | 504 | if self._pl[1] != nullid: |
|
509 | 505 | entry = self._map.get(f) |
|
510 | 506 | if entry is not None: |
|
511 | 507 | # backup the previous state |
|
512 | 508 | if entry[0] == 'm': # merge |
|
513 | 509 | size = -1 |
|
514 | 510 | elif entry[0] == 'n' and entry[2] == -2: # other parent |
|
515 | 511 | size = -2 |
|
516 | 512 | self._map.otherparentset.add(f) |
|
517 | 513 | self._map[f] = dirstatetuple('r', 0, size, 0) |
|
518 | 514 | self._map.nonnormalset.add(f) |
|
519 | 515 | if size == 0: |
|
520 | 516 | self._map.copymap.pop(f, None) |
|
521 | 517 | |
|
522 | 518 | def merge(self, f): |
|
523 | 519 | '''Mark a file merged.''' |
|
524 | 520 | if self._pl[1] == nullid: |
|
525 | 521 | return self.normallookup(f) |
|
526 | 522 | return self.otherparent(f) |
|
527 | 523 | |
|
528 | 524 | def drop(self, f): |
|
529 | 525 | '''Drop a file from the dirstate''' |
|
530 | 526 | if f in self._map: |
|
531 | 527 | self._dirty = True |
|
532 | 528 | self._droppath(f) |
|
533 | 529 | del self._map[f] |
|
534 | 530 | if f in self._map.nonnormalset: |
|
535 | 531 | self._map.nonnormalset.remove(f) |
|
536 | 532 | self._map.copymap.pop(f, None) |
|
537 | 533 | |
|
538 | 534 | def _discoverpath(self, path, normed, ignoremissing, exists, storemap): |
|
539 | 535 | if exists is None: |
|
540 | 536 | exists = os.path.lexists(os.path.join(self._root, path)) |
|
541 | 537 | if not exists: |
|
542 | 538 | # Maybe a path component exists |
|
543 | 539 | if not ignoremissing and '/' in path: |
|
544 | 540 | d, f = path.rsplit('/', 1) |
|
545 | 541 | d = self._normalize(d, False, ignoremissing, None) |
|
546 | 542 | folded = d + "/" + f |
|
547 | 543 | else: |
|
548 | 544 | # No path components, preserve original case |
|
549 | 545 | folded = path |
|
550 | 546 | else: |
|
551 | 547 | # recursively normalize leading directory components |
|
552 | 548 | # against dirstate |
|
553 | 549 | if '/' in normed: |
|
554 | 550 | d, f = normed.rsplit('/', 1) |
|
555 | 551 | d = self._normalize(d, False, ignoremissing, True) |
|
556 | 552 | r = self._root + "/" + d |
|
557 | 553 | folded = d + "/" + util.fspath(f, r) |
|
558 | 554 | else: |
|
559 | 555 | folded = util.fspath(normed, self._root) |
|
560 | 556 | storemap[normed] = folded |
|
561 | 557 | |
|
562 | 558 | return folded |
|
563 | 559 | |
|
564 | 560 | def _normalizefile(self, path, isknown, ignoremissing=False, exists=None): |
|
565 | 561 | normed = util.normcase(path) |
|
566 | folded = self._filefoldmap.get(normed, None) | |
|
562 | folded = self._map.filefoldmap.get(normed, None) | |
|
567 | 563 | if folded is None: |
|
568 | 564 | if isknown: |
|
569 | 565 | folded = path |
|
570 | 566 | else: |
|
571 | 567 | folded = self._discoverpath(path, normed, ignoremissing, exists, |
|
572 | self._filefoldmap) | |
|
568 | self._map.filefoldmap) | |
|
573 | 569 | return folded |
|
574 | 570 | |
|
575 | 571 | def _normalize(self, path, isknown, ignoremissing=False, exists=None): |
|
576 | 572 | normed = util.normcase(path) |
|
577 | folded = self._filefoldmap.get(normed, None) | |
|
573 | folded = self._map.filefoldmap.get(normed, None) | |
|
578 | 574 | if folded is None: |
|
579 | 575 | folded = self._dirfoldmap.get(normed, None) |
|
580 | 576 | if folded is None: |
|
581 | 577 | if isknown: |
|
582 | 578 | folded = path |
|
583 | 579 | else: |
|
584 | 580 | # store discovered result in dirfoldmap so that future |
|
585 | 581 | # normalizefile calls don't start matching directories |
|
586 | 582 | folded = self._discoverpath(path, normed, ignoremissing, exists, |
|
587 | 583 | self._dirfoldmap) |
|
588 | 584 | return folded |
|
589 | 585 | |
|
590 | 586 | def normalize(self, path, isknown=False, ignoremissing=False): |
|
591 | 587 | ''' |
|
592 | 588 | normalize the case of a pathname when on a casefolding filesystem |
|
593 | 589 | |
|
594 | 590 | isknown specifies whether the filename came from walking the |
|
595 | 591 | disk, to avoid extra filesystem access. |
|
596 | 592 | |
|
597 | 593 | If ignoremissing is True, missing paths are returned
|
598 | 594 | unchanged. Otherwise, we try harder to normalize possibly |
|
599 | 595 | existing path components. |
|
600 | 596 | |
|
601 | 597 | The normalized case is determined based on the following precedence: |
|
602 | 598 | |
|
603 | 599 | - version of name already stored in the dirstate |
|
604 | 600 | - version of name stored on disk |
|
605 | 601 | - version provided via command arguments |
|
606 | 602 | ''' |
|
607 | 603 | |
|
608 | 604 | if self._checkcase: |
|
609 | 605 | return self._normalize(path, isknown, ignoremissing) |
|
610 | 606 | return path |
|
611 | 607 | |
|
612 | 608 | def clear(self): |
|
613 | 609 | self._map = dirstatemap(self._ui, self._opener, self._root) |
|
614 | 610 | if "_dirs" in self.__dict__: |
|
615 | 611 | delattr(self, "_dirs") |
|
616 | 612 | self._map.setparents(nullid, nullid) |
|
617 | 613 | self._lastnormaltime = 0 |
|
618 | 614 | self._updatedfiles.clear() |
|
619 | 615 | self._dirty = True |
|
620 | 616 | |
|
621 | 617 | def rebuild(self, parent, allfiles, changedfiles=None): |
|
622 | 618 | if changedfiles is None: |
|
623 | 619 | # Rebuild entire dirstate |
|
624 | 620 | changedfiles = allfiles |
|
625 | 621 | lastnormaltime = self._lastnormaltime |
|
626 | 622 | self.clear() |
|
627 | 623 | self._lastnormaltime = lastnormaltime |
|
628 | 624 | |
|
629 | 625 | if self._origpl is None: |
|
630 | 626 | self._origpl = self._pl |
|
631 | 627 | self._map.setparents(parent, nullid) |
|
632 | 628 | for f in changedfiles: |
|
633 | 629 | if f in allfiles: |
|
634 | 630 | self.normallookup(f) |
|
635 | 631 | else: |
|
636 | 632 | self.drop(f) |
|
637 | 633 | |
|
638 | 634 | self._dirty = True |
|
639 | 635 | |
|
640 | 636 | def identity(self): |
|
641 | 637 | '''Return identity of dirstate itself to detect changing in storage |
|
642 | 638 | |
|
643 | 639 | If identity of previous dirstate is equal to this, writing |
|
644 | 640 | changes based on the former dirstate out can keep consistency. |
|
645 | 641 | ''' |
|
646 | 642 | return self._map.identity |
|
647 | 643 | |
|
648 | 644 | def write(self, tr): |
|
649 | 645 | if not self._dirty: |
|
650 | 646 | return |
|
651 | 647 | |
|
652 | 648 | filename = self._filename |
|
653 | 649 | if tr: |
|
654 | 650 | # 'dirstate.write()' is not only for writing in-memory |
|
655 | 651 | # changes out, but also for dropping ambiguous timestamp. |
|
656 | 652 | # delayed writing re-raises the "ambiguous timestamp issue".
|
657 | 653 | # See also the wiki page below for detail: |
|
658 | 654 | # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan |
|
659 | 655 | |
|
660 | 656 | # emulate dropping timestamp in 'parsers.pack_dirstate' |
|
661 | 657 | now = _getfsnow(self._opener) |
|
662 | 658 | dmap = self._map |
|
663 | 659 | for f in self._updatedfiles: |
|
664 | 660 | e = dmap.get(f) |
|
665 | 661 | if e is not None and e[0] == 'n' and e[3] == now: |
|
666 | 662 | dmap[f] = dirstatetuple(e[0], e[1], e[2], -1) |
|
667 | 663 | self._map.nonnormalset.add(f) |
|
668 | 664 | |
|
669 | 665 | # emulate that all 'dirstate.normal' results are written out |
|
670 | 666 | self._lastnormaltime = 0 |
|
671 | 667 | self._updatedfiles.clear() |
|
672 | 668 | |
|
673 | 669 | # delay writing in-memory changes out |
|
674 | 670 | tr.addfilegenerator('dirstate', (self._filename,), |
|
675 | 671 | self._writedirstate, location='plain') |
|
676 | 672 | return |
|
677 | 673 | |
|
678 | 674 | st = self._opener(filename, "w", atomictemp=True, checkambig=True) |
|
679 | 675 | self._writedirstate(st) |
|
680 | 676 | |
|
681 | 677 | def addparentchangecallback(self, category, callback): |
|
682 | 678 | """add a callback to be called when the wd parents are changed |
|
683 | 679 | |
|
684 | 680 | Callback will be called with the following arguments: |
|
685 | 681 | dirstate, (oldp1, oldp2), (newp1, newp2) |
|
686 | 682 | |
|
687 | 683 | Category is a unique identifier to allow overwriting an old callback |
|
688 | 684 | with a newer callback. |
|
689 | 685 | """ |
|
690 | 686 | self._plchangecallbacks[category] = callback |
|
691 | 687 | |
|
692 | 688 | def _writedirstate(self, st): |
|
693 | 689 | # notify callbacks about parents change |
|
694 | 690 | if self._origpl is not None and self._origpl != self._pl: |
|
695 | 691 | for c, callback in sorted(self._plchangecallbacks.iteritems()): |
|
696 | 692 | callback(self, self._origpl, self._pl) |
|
697 | 693 | self._origpl = None |
|
698 | 694 | # use the modification time of the newly created temporary file as the |
|
699 | 695 | # filesystem's notion of 'now' |
|
700 | 696 | now = util.fstat(st).st_mtime & _rangemask |
|
701 | 697 | |
|
702 | 698 | # enough 'delaywrite' prevents 'pack_dirstate' from dropping |
|
703 | 699 | # timestamps of each entry in dirstate, because of 'now > mtime'
|
704 | 700 | delaywrite = self._ui.configint('debug', 'dirstate.delaywrite') |
|
705 | 701 | if delaywrite > 0: |
|
706 | 702 | # do we have any files to delay for? |
|
707 | 703 | for f, e in self._map.iteritems(): |
|
708 | 704 | if e[0] == 'n' and e[3] == now: |
|
709 | 705 | import time # to avoid useless import |
|
710 | 706 | # rather than sleep n seconds, sleep until the next |
|
711 | 707 | # multiple of n seconds |
|
712 | 708 | clock = time.time() |
|
713 | 709 | start = int(clock) - (int(clock) % delaywrite) |
|
714 | 710 | end = start + delaywrite |
|
715 | 711 | time.sleep(end - clock) |
|
716 | 712 | now = end # trust our estimate that the end is near now |
|
717 | 713 | break |
|
718 | 714 | |
|
719 | 715 | self._map.write(st, now) |
|
720 | 716 | self._lastnormaltime = 0 |
|
721 | 717 | self._dirty = False |
|
722 | 718 | |
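The delaywrite arithmetic above rounds up to the next multiple of delaywrite seconds rather than sleeping a fixed amount, so "now" lands on a predictable boundary. Worked through with concrete numbers:

    delaywrite = 2
    clock = 1500000000.75           # a pretend time.time() reading
    start = int(clock) - (int(clock) % delaywrite)
    end = start + delaywrite
    print('%d %d %.2f' % (start, end, end - clock))
    # 1500000000 1500000002 1.25 -> sleep 1.25s, then trust now == end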
|
723 | 719 | def _dirignore(self, f): |
|
724 | 720 | if f == '.': |
|
725 | 721 | return False |
|
726 | 722 | if self._ignore(f): |
|
727 | 723 | return True |
|
728 | 724 | for p in util.finddirs(f): |
|
729 | 725 | if self._ignore(p): |
|
730 | 726 | return True |
|
731 | 727 | return False |
|
732 | 728 | |
|
733 | 729 | def _ignorefiles(self): |
|
734 | 730 | files = [] |
|
735 | 731 | if os.path.exists(self._join('.hgignore')): |
|
736 | 732 | files.append(self._join('.hgignore')) |
|
737 | 733 | for name, path in self._ui.configitems("ui"): |
|
738 | 734 | if name == 'ignore' or name.startswith('ignore.'): |
|
739 | 735 | # we need to use os.path.join here rather than self._join |
|
740 | 736 | # because path is arbitrary and user-specified |
|
741 | 737 | files.append(os.path.join(self._rootdir, util.expandpath(path))) |
|
742 | 738 | return files |
|
743 | 739 | |
|
744 | 740 | def _ignorefileandline(self, f): |
|
745 | 741 | files = collections.deque(self._ignorefiles()) |
|
746 | 742 | visited = set() |
|
747 | 743 | while files: |
|
748 | 744 | i = files.popleft() |
|
749 | 745 | patterns = matchmod.readpatternfile(i, self._ui.warn, |
|
750 | 746 | sourceinfo=True) |
|
751 | 747 | for pattern, lineno, line in patterns: |
|
752 | 748 | kind, p = matchmod._patsplit(pattern, 'glob') |
|
753 | 749 | if kind == "subinclude": |
|
754 | 750 | if p not in visited: |
|
755 | 751 | files.append(p) |
|
756 | 752 | continue |
|
757 | 753 | m = matchmod.match(self._root, '', [], [pattern], |
|
758 | 754 | warn=self._ui.warn) |
|
759 | 755 | if m(f): |
|
760 | 756 | return (i, lineno, line) |
|
761 | 757 | visited.add(i) |
|
762 | 758 | return (None, -1, "") |
|
763 | 759 | |
|
764 | 760 | def _walkexplicit(self, match, subrepos): |
|
765 | 761 | '''Get stat data about the files explicitly specified by match. |
|
766 | 762 | |
|
767 | 763 | Return a triple (results, dirsfound, dirsnotfound). |
|
768 | 764 | - results is a mapping from filename to stat result. It also contains |
|
769 | 765 | listings mapping subrepos and .hg to None. |
|
770 | 766 | - dirsfound is a list of files found to be directories. |
|
771 | 767 | - dirsnotfound is a list of files that the dirstate thinks are |
|
772 | 768 | directories and that were not found.''' |
|
773 | 769 | |
|
774 | 770 | def badtype(mode): |
|
775 | 771 | kind = _('unknown') |
|
776 | 772 | if stat.S_ISCHR(mode): |
|
777 | 773 | kind = _('character device') |
|
778 | 774 | elif stat.S_ISBLK(mode): |
|
779 | 775 | kind = _('block device') |
|
780 | 776 | elif stat.S_ISFIFO(mode): |
|
781 | 777 | kind = _('fifo') |
|
782 | 778 | elif stat.S_ISSOCK(mode): |
|
783 | 779 | kind = _('socket') |
|
784 | 780 | elif stat.S_ISDIR(mode): |
|
785 | 781 | kind = _('directory') |
|
786 | 782 | return _('unsupported file type (type is %s)') % kind |
|
787 | 783 | |
|
788 | 784 | matchedir = match.explicitdir |
|
789 | 785 | badfn = match.bad |
|
790 | 786 | dmap = self._map |
|
791 | 787 | lstat = os.lstat |
|
792 | 788 | getkind = stat.S_IFMT |
|
793 | 789 | dirkind = stat.S_IFDIR |
|
794 | 790 | regkind = stat.S_IFREG |
|
795 | 791 | lnkkind = stat.S_IFLNK |
|
796 | 792 | join = self._join |
|
797 | 793 | dirsfound = [] |
|
798 | 794 | foundadd = dirsfound.append |
|
799 | 795 | dirsnotfound = [] |
|
800 | 796 | notfoundadd = dirsnotfound.append |
|
801 | 797 | |
|
802 | 798 | if not match.isexact() and self._checkcase: |
|
803 | 799 | normalize = self._normalize |
|
804 | 800 | else: |
|
805 | 801 | normalize = None |
|
806 | 802 | |
|
807 | 803 | files = sorted(match.files()) |
|
808 | 804 | subrepos.sort() |
|
809 | 805 | i, j = 0, 0 |
|
810 | 806 | while i < len(files) and j < len(subrepos): |
|
811 | 807 | subpath = subrepos[j] + "/" |
|
812 | 808 | if files[i] < subpath: |
|
813 | 809 | i += 1 |
|
814 | 810 | continue |
|
815 | 811 | while i < len(files) and files[i].startswith(subpath): |
|
816 | 812 | del files[i] |
|
817 | 813 | j += 1 |
|
818 | 814 | |
|
819 | 815 | if not files or '.' in files: |
|
820 | 816 | files = ['.'] |
|
821 | 817 | results = dict.fromkeys(subrepos) |
|
822 | 818 | results['.hg'] = None |
|
823 | 819 | |
|
824 | 820 | alldirs = None |
|
825 | 821 | for ff in files: |
|
826 | 822 | # constructing the foldmap is expensive, so don't do it for the |
|
827 | 823 | # common case where files is ['.'] |
|
828 | 824 | if normalize and ff != '.': |
|
829 | 825 | nf = normalize(ff, False, True) |
|
830 | 826 | else: |
|
831 | 827 | nf = ff |
|
832 | 828 | if nf in results: |
|
833 | 829 | continue |
|
834 | 830 | |
|
835 | 831 | try: |
|
836 | 832 | st = lstat(join(nf)) |
|
837 | 833 | kind = getkind(st.st_mode) |
|
838 | 834 | if kind == dirkind: |
|
839 | 835 | if nf in dmap: |
|
840 | 836 | # file replaced by dir on disk but still in dirstate |
|
841 | 837 | results[nf] = None |
|
842 | 838 | if matchedir: |
|
843 | 839 | matchedir(nf) |
|
844 | 840 | foundadd((nf, ff)) |
|
845 | 841 | elif kind == regkind or kind == lnkkind: |
|
846 | 842 | results[nf] = st |
|
847 | 843 | else: |
|
848 | 844 | badfn(ff, badtype(kind)) |
|
849 | 845 | if nf in dmap: |
|
850 | 846 | results[nf] = None |
|
851 | 847 | except OSError as inst: # nf not found on disk - it is dirstate only |
|
852 | 848 | if nf in dmap: # does it exactly match a missing file? |
|
853 | 849 | results[nf] = None |
|
854 | 850 | else: # does it match a missing directory? |
|
855 | 851 | if alldirs is None: |
|
856 | 852 | alldirs = util.dirs(dmap._map) |
|
857 | 853 | if nf in alldirs: |
|
858 | 854 | if matchedir: |
|
859 | 855 | matchedir(nf) |
|
860 | 856 | notfoundadd(nf) |
|
861 | 857 | else: |
|
862 | 858 | badfn(ff, encoding.strtolocal(inst.strerror)) |
|
863 | 859 | |
|
864 | 860 | # Case insensitive filesystems cannot rely on lstat() failing to detect |
|
865 | 861 | # a case-only rename. Prune the stat object for any file that does not |
|
866 | 862 | # match the case in the filesystem, if there are multiple files that |
|
867 | 863 | # normalize to the same path. |
|
868 | 864 | if match.isexact() and self._checkcase: |
|
869 | 865 | normed = {} |
|
870 | 866 | |
|
871 | 867 | for f, st in results.iteritems(): |
|
872 | 868 | if st is None: |
|
873 | 869 | continue |
|
874 | 870 | |
|
875 | 871 | nc = util.normcase(f) |
|
876 | 872 | paths = normed.get(nc) |
|
877 | 873 | |
|
878 | 874 | if paths is None: |
|
879 | 875 | paths = set() |
|
880 | 876 | normed[nc] = paths |
|
881 | 877 | |
|
882 | 878 | paths.add(f) |
|
883 | 879 | |
|
884 | 880 | for norm, paths in normed.iteritems(): |
|
885 | 881 | if len(paths) > 1: |
|
886 | 882 | for path in paths: |
|
887 | 883 | folded = self._discoverpath(path, norm, True, None, |
|
888 | 884 | self._dirfoldmap) |
|
889 | 885 | if path != folded: |
|
890 | 886 | results[path] = None |
|
891 | 887 | |
|
892 | 888 | return results, dirsfound, dirsnotfound |
|
893 | 889 | |
|
894 | 890 | def walk(self, match, subrepos, unknown, ignored, full=True): |
|
895 | 891 | ''' |
|
896 | 892 | Walk recursively through the directory tree, finding all files |
|
897 | 893 | matched by match. |
|
898 | 894 | |
|
899 | 895 | If full is False, maybe skip some known-clean files. |
|
900 | 896 | |
|
901 | 897 | Return a dict mapping filename to stat-like object (either |
|
902 | 898 | mercurial.osutil.stat instance or return value of os.stat()). |
|
903 | 899 | |
|
904 | 900 | ''' |
|
905 | 901 | # full is a flag that extensions that hook into walk can use -- this |
|
906 | 902 | # implementation doesn't use it at all. This satisfies the contract |
|
907 | 903 | # because we only guarantee a "maybe". |
|
908 | 904 | |
|
909 | 905 | if ignored: |
|
910 | 906 | ignore = util.never |
|
911 | 907 | dirignore = util.never |
|
912 | 908 | elif unknown: |
|
913 | 909 | ignore = self._ignore |
|
914 | 910 | dirignore = self._dirignore |
|
915 | 911 | else: |
|
916 | 912 | # if not unknown and not ignored, drop dir recursion and step 2 |
|
917 | 913 | ignore = util.always |
|
918 | 914 | dirignore = util.always |
|
919 | 915 | |
|
920 | 916 | matchfn = match.matchfn |
|
921 | 917 | matchalways = match.always() |
|
922 | 918 | matchtdir = match.traversedir |
|
923 | 919 | dmap = self._map |
|
924 | 920 | listdir = util.listdir |
|
925 | 921 | lstat = os.lstat |
|
926 | 922 | dirkind = stat.S_IFDIR |
|
927 | 923 | regkind = stat.S_IFREG |
|
928 | 924 | lnkkind = stat.S_IFLNK |
|
929 | 925 | join = self._join |
|
930 | 926 | |
|
931 | 927 | exact = skipstep3 = False |
|
932 | 928 | if match.isexact(): # match.exact |
|
933 | 929 | exact = True |
|
934 | 930 | dirignore = util.always # skip step 2 |
|
935 | 931 | elif match.prefix(): # match.match, no patterns |
|
936 | 932 | skipstep3 = True |
|
937 | 933 | |
|
938 | 934 | if not exact and self._checkcase: |
|
939 | 935 | normalize = self._normalize |
|
940 | 936 | normalizefile = self._normalizefile |
|
941 | 937 | skipstep3 = False |
|
942 | 938 | else: |
|
943 | 939 | normalize = self._normalize |
|
944 | 940 | normalizefile = None |
|
945 | 941 | |
|
946 | 942 | # step 1: find all explicit files |
|
947 | 943 | results, work, dirsnotfound = self._walkexplicit(match, subrepos) |
|
948 | 944 | |
|
949 | 945 | skipstep3 = skipstep3 and not (work or dirsnotfound) |
|
950 | 946 | work = [d for d in work if not dirignore(d[0])] |
|
951 | 947 | |
|
952 | 948 | # step 2: visit subdirectories |
|
953 | 949 | def traverse(work, alreadynormed): |
|
954 | 950 | wadd = work.append |
|
955 | 951 | while work: |
|
956 | 952 | nd = work.pop() |
|
957 | 953 | if not match.visitdir(nd): |
|
958 | 954 | continue |
|
959 | 955 | skip = None |
|
960 | 956 | if nd == '.': |
|
961 | 957 | nd = '' |
|
962 | 958 | else: |
|
963 | 959 | skip = '.hg' |
|
964 | 960 | try: |
|
965 | 961 | entries = listdir(join(nd), stat=True, skip=skip) |
|
966 | 962 | except OSError as inst: |
|
967 | 963 | if inst.errno in (errno.EACCES, errno.ENOENT): |
|
968 | 964 | match.bad(self.pathto(nd), |
|
969 | 965 | encoding.strtolocal(inst.strerror)) |
|
970 | 966 | continue |
|
971 | 967 | raise |
|
972 | 968 | for f, kind, st in entries: |
|
973 | 969 | if normalizefile: |
|
974 | 970 | # even though f might be a directory, we're only |
|
975 | 971 | # interested in comparing it to files currently in the |
|
976 | 972 | # dmap -- therefore normalizefile is enough |
|
977 | 973 | nf = normalizefile(nd and (nd + "/" + f) or f, True, |
|
978 | 974 | True) |
|
979 | 975 | else: |
|
980 | 976 | nf = nd and (nd + "/" + f) or f |
|
981 | 977 | if nf not in results: |
|
982 | 978 | if kind == dirkind: |
|
983 | 979 | if not ignore(nf): |
|
984 | 980 | if matchtdir: |
|
985 | 981 | matchtdir(nf) |
|
986 | 982 | wadd(nf) |
|
987 | 983 | if nf in dmap and (matchalways or matchfn(nf)): |
|
988 | 984 | results[nf] = None |
|
989 | 985 | elif kind == regkind or kind == lnkkind: |
|
990 | 986 | if nf in dmap: |
|
991 | 987 | if matchalways or matchfn(nf): |
|
992 | 988 | results[nf] = st |
|
993 | 989 | elif ((matchalways or matchfn(nf)) |
|
994 | 990 | and not ignore(nf)): |
|
995 | 991 | # unknown file -- normalize if necessary |
|
996 | 992 | if not alreadynormed: |
|
997 | 993 | nf = normalize(nf, False, True) |
|
998 | 994 | results[nf] = st |
|
999 | 995 | elif nf in dmap and (matchalways or matchfn(nf)): |
|
1000 | 996 | results[nf] = None |
|
1001 | 997 | |
|
1002 | 998 | for nd, d in work: |
|
1003 | 999 | # alreadynormed means that processwork doesn't have to do any |
|
1004 | 1000 | # expensive directory normalization |
|
1005 | 1001 | alreadynormed = not normalize or nd == d |
|
1006 | 1002 | traverse([d], alreadynormed) |
|
1007 | 1003 | |
|
1008 | 1004 | for s in subrepos: |
|
1009 | 1005 | del results[s] |
|
1010 | 1006 | del results['.hg'] |
|
1011 | 1007 | |
|
1012 | 1008 | # step 3: visit remaining files from dmap |
|
1013 | 1009 | if not skipstep3 and not exact: |
|
1014 | 1010 | # If a dmap file is not in results yet, it was either |
|
1015 | 1011 | # a) not matching matchfn, b) ignored, c) missing, or d) under a
|
1016 | 1012 | # symlink directory. |
|
1017 | 1013 | if not results and matchalways: |
|
1018 | 1014 | visit = [f for f in dmap] |
|
1019 | 1015 | else: |
|
1020 | 1016 | visit = [f for f in dmap if f not in results and matchfn(f)] |
|
1021 | 1017 | visit.sort() |
|
1022 | 1018 | |
|
1023 | 1019 | if unknown: |
|
1024 | 1020 | # unknown == True means we walked all dirs under the roots |
|
1025 | 1021 | # that weren't ignored, and everything that matched was stat'ed
|
1026 | 1022 | # and is already in results. |
|
1027 | 1023 | # The rest must thus be ignored or under a symlink. |
|
1028 | 1024 | audit_path = pathutil.pathauditor(self._root, cached=True) |
|
1029 | 1025 | |
|
1030 | 1026 | for nf in iter(visit): |
|
1031 | 1027 | # If a stat for the same file was already added with a |
|
1032 | 1028 | # different case, don't add one for this, since that would |
|
1033 | 1029 | # make it appear as if the file exists under both names |
|
1034 | 1030 | # on disk. |
|
1035 | 1031 | if (normalizefile and |
|
1036 | 1032 | normalizefile(nf, True, True) in results): |
|
1037 | 1033 | results[nf] = None |
|
1038 | 1034 | # Report ignored items in the dmap as long as they are not |
|
1039 | 1035 | # under a symlink directory. |
|
1040 | 1036 | elif audit_path.check(nf): |
|
1041 | 1037 | try: |
|
1042 | 1038 | results[nf] = lstat(join(nf)) |
|
1043 | 1039 | # file was just ignored, no links, and exists |
|
1044 | 1040 | except OSError: |
|
1045 | 1041 | # file doesn't exist |
|
1046 | 1042 | results[nf] = None |
|
1047 | 1043 | else: |
|
1048 | 1044 | # It's either missing or under a symlink directory |
|
1049 | 1045 | # which we in this case report as missing |
|
1050 | 1046 | results[nf] = None |
|
1051 | 1047 | else: |
|
1052 | 1048 | # We may not have walked the full directory tree above, |
|
1053 | 1049 | # so stat and check everything we missed. |
|
1054 | 1050 | iv = iter(visit) |
|
1055 | 1051 | for st in util.statfiles([join(i) for i in visit]): |
|
1056 | 1052 | results[next(iv)] = st |
|
1057 | 1053 | return results |
|
1058 | 1054 | |
|
1059 | 1055 | def status(self, match, subrepos, ignored, clean, unknown): |
|
1060 | 1056 | '''Determine the status of the working copy relative to the |
|
1061 | 1057 | dirstate and return a pair of (unsure, status), where status is of type |
|
1062 | 1058 | scmutil.status and: |
|
1063 | 1059 | |
|
1064 | 1060 | unsure: |
|
1065 | 1061 | files that might have been modified since the dirstate was |
|
1066 | 1062 | written, but need to be read to be sure (size is the same |
|
1067 | 1063 | but mtime differs) |
|
1068 | 1064 | status.modified: |
|
1069 | 1065 | files that have definitely been modified since the dirstate |
|
1070 | 1066 | was written (different size or mode) |
|
1071 | 1067 | status.clean: |
|
1072 | 1068 | files that have definitely not been modified since the |
|
1073 | 1069 | dirstate was written |
|
1074 | 1070 | ''' |
|
1075 | 1071 | listignored, listclean, listunknown = ignored, clean, unknown |
|
1076 | 1072 | lookup, modified, added, unknown, ignored = [], [], [], [], [] |
|
1077 | 1073 | removed, deleted, clean = [], [], [] |
|
1078 | 1074 | |
|
1079 | 1075 | dmap = self._map |
|
1080 | 1076 | ladd = lookup.append # aka "unsure" |
|
1081 | 1077 | madd = modified.append |
|
1082 | 1078 | aadd = added.append |
|
1083 | 1079 | uadd = unknown.append |
|
1084 | 1080 | iadd = ignored.append |
|
1085 | 1081 | radd = removed.append |
|
1086 | 1082 | dadd = deleted.append |
|
1087 | 1083 | cadd = clean.append |
|
1088 | 1084 | mexact = match.exact |
|
1089 | 1085 | dirignore = self._dirignore |
|
1090 | 1086 | checkexec = self._checkexec |
|
1091 | 1087 | copymap = self._map.copymap |
|
1092 | 1088 | lastnormaltime = self._lastnormaltime |
|
1093 | 1089 | |
|
1094 | 1090 | # We need to do full walks when either |
|
1095 | 1091 | # - we're listing all clean files, or |
|
1096 | 1092 | # - match.traversedir does something, because match.traversedir should |
|
1097 | 1093 | # be called for every dir in the working dir |
|
1098 | 1094 | full = listclean or match.traversedir is not None |
|
1099 | 1095 | for fn, st in self.walk(match, subrepos, listunknown, listignored, |
|
1100 | 1096 | full=full).iteritems(): |
|
1101 | 1097 | if fn not in dmap: |
|
1102 | 1098 | if (listignored or mexact(fn)) and dirignore(fn): |
|
1103 | 1099 | if listignored: |
|
1104 | 1100 | iadd(fn) |
|
1105 | 1101 | else: |
|
1106 | 1102 | uadd(fn) |
|
1107 | 1103 | continue |
|
1108 | 1104 | |
|
1109 | 1105 | # This is equivalent to 'state, mode, size, time = dmap[fn]' but not |
|
1110 | 1106 | # written like that for performance reasons. dmap[fn] is not a |
|
1111 | 1107 | # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE |
|
1112 | 1108 | # opcode has fast paths when the value to be unpacked is a tuple or |
|
1113 | 1109 | # a list, but falls back to creating a full-fledged iterator in |
|
1114 | 1110 | # general. That is much slower than simply accessing and storing the |
|
1115 | 1111 | # tuple members one by one. |
|
1116 | 1112 | t = dmap[fn] |
|
1117 | 1113 | state = t[0] |
|
1118 | 1114 | mode = t[1] |
|
1119 | 1115 | size = t[2] |
|
1120 | 1116 | time = t[3] |
|
1121 | 1117 | |
|
1122 | 1118 | if not st and state in "nma": |
|
1123 | 1119 | dadd(fn) |
|
1124 | 1120 | elif state == 'n': |
|
1125 | 1121 | if (size >= 0 and |
|
1126 | 1122 | ((size != st.st_size and size != st.st_size & _rangemask) |
|
1127 | 1123 | or ((mode ^ st.st_mode) & 0o100 and checkexec)) |
|
1128 | 1124 | or size == -2 # other parent |
|
1129 | 1125 | or fn in copymap): |
|
1130 | 1126 | madd(fn) |
|
1131 | 1127 | elif time != st.st_mtime and time != st.st_mtime & _rangemask: |
|
1132 | 1128 | ladd(fn) |
|
1133 | 1129 | elif st.st_mtime == lastnormaltime: |
|
1134 | 1130 | # fn may have just been marked as normal and it may have |
|
1135 | 1131 | # changed in the same second without changing its size. |
|
1136 | 1132 | # This can happen if we quickly do multiple commits. |
|
1137 | 1133 | # Force lookup, so we don't miss such a racy file change. |
|
1138 | 1134 | ladd(fn) |
|
1139 | 1135 | elif listclean: |
|
1140 | 1136 | cadd(fn) |
|
1141 | 1137 | elif state == 'm': |
|
1142 | 1138 | madd(fn) |
|
1143 | 1139 | elif state == 'a': |
|
1144 | 1140 | aadd(fn) |
|
1145 | 1141 | elif state == 'r': |
|
1146 | 1142 | radd(fn) |
|
1147 | 1143 | |
|
1148 | 1144 | return (lookup, scmutil.status(modified, added, removed, deleted, |
|
1149 | 1145 | unknown, ignored, clean)) |
|
1150 | 1146 | |
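A distilled, hedged sketch of the 'n' (normal) branch of the classification above, omitting the size == -2 and copymap cases: a size or exec-bit change means definitely modified, while an mtime-only change (or a racy same-second write) only forces a "lookup", i.e. the contents must be read to decide.

    import collections

    _rangemask = 0x7fffffff
    fakestat = collections.namedtuple('fakestat', 'st_size st_mode st_mtime')

    def classify(entry, st, checkexec=True, lastnormaltime=0):
        _state, mode, size, time = entry
        if size >= 0 and (
                (size != st.st_size and size != st.st_size & _rangemask)
                or ((mode ^ st.st_mode) & 0o100 and checkexec)):
            return 'modified'
        if time != st.st_mtime and time != st.st_mtime & _rangemask:
            return 'lookup'
        if st.st_mtime == lastnormaltime:
            return 'lookup'            # racy same-second change
        return 'clean'

    print(classify(('n', 0o100644, 10, 100), fakestat(10, 0o100644, 200)))
    # 'lookup': same size and mode, but the mtime differs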
|
1151 | 1147 | def matches(self, match): |
|
1152 | 1148 | ''' |
|
1153 | 1149 | return files in the dirstate (in whatever state) filtered by match |
|
1154 | 1150 | ''' |
|
1155 | 1151 | dmap = self._map |
|
1156 | 1152 | if match.always(): |
|
1157 | 1153 | return dmap.keys() |
|
1158 | 1154 | files = match.files() |
|
1159 | 1155 | if match.isexact(): |
|
1160 | 1156 | # fast path -- filter the other way around, since typically files is |
|
1161 | 1157 | # much smaller than dmap |
|
1162 | 1158 | return [f for f in files if f in dmap] |
|
1163 | 1159 | if match.prefix() and all(fn in dmap for fn in files): |
|
1164 | 1160 | # fast path -- all the values are known to be files, so just return |
|
1165 | 1161 | # that |
|
1166 | 1162 | return list(files) |
|
1167 | 1163 | return [f for f in dmap if match(f)] |
|
1168 | 1164 | |
|
1169 | 1165 | def _actualfilename(self, tr): |
|
1170 | 1166 | if tr: |
|
1171 | 1167 | return self._pendingfilename |
|
1172 | 1168 | else: |
|
1173 | 1169 | return self._filename |
|
1174 | 1170 | |
|
1175 | 1171 | def savebackup(self, tr, backupname): |
|
1176 | 1172 | '''Save current dirstate into backup file''' |
|
1177 | 1173 | filename = self._actualfilename(tr) |
|
1178 | 1174 | assert backupname != filename |
|
1179 | 1175 | |
|
1180 | 1176 | # use '_writedirstate' instead of 'write' to write changes certainly, |
|
1181 | 1177 | # because the latter omits writing out if transaction is running. |
|
1182 | 1178 | # output file will be used to create backup of dirstate at this point. |
|
1183 | 1179 | if self._dirty or not self._opener.exists(filename): |
|
1184 | 1180 | self._writedirstate(self._opener(filename, "w", atomictemp=True, |
|
1185 | 1181 | checkambig=True)) |
|
1186 | 1182 | |
|
1187 | 1183 | if tr: |
|
1188 | 1184 | # ensure that subsequent tr.writepending returns True for |
|
1189 | 1185 | # changes written out above, even if dirstate is never |
|
1190 | 1186 | # changed after this |
|
1191 | 1187 | tr.addfilegenerator('dirstate', (self._filename,), |
|
1192 | 1188 | self._writedirstate, location='plain') |
|
1193 | 1189 | |
|
1194 | 1190 | # ensure that pending file written above is unlinked at |
|
1195 | 1191 | # failure, even if tr.writepending isn't invoked until the |
|
1196 | 1192 | # end of this transaction |
|
1197 | 1193 | tr.registertmp(filename, location='plain') |
|
1198 | 1194 | |
|
1199 | 1195 | self._opener.tryunlink(backupname) |
|
1200 | 1196 | # hardlink backup is okay because _writedirstate is always called |
|
1201 | 1197 | # with an "atomictemp=True" file. |
|
1202 | 1198 | util.copyfile(self._opener.join(filename), |
|
1203 | 1199 | self._opener.join(backupname), hardlink=True) |
|
1204 | 1200 | |
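savebackup() above leans on two properties: the dirstate file is always replaced atomically (atomictemp), so a hardlinked backup never observes a half-written file, and the rename points the live name at a new inode while the backup keeps the old content. A hedged, standard-library-only sketch of that pattern (file names are illustrative, rename atomicity assumes POSIX):

    import os, tempfile

    def atomic_write(path, data):
        # write to a temp file in the same directory, then rename over the target
        d = os.path.dirname(os.path.abspath(path)) or '.'
        fd, tmp = tempfile.mkstemp(dir=d)
        with os.fdopen(fd, 'wb') as fp:
            fp.write(data)
        os.rename(tmp, path)

    atomic_write('state', b'version 1')
    if os.path.exists('state.backup'):
        os.unlink('state.backup')
    os.link('state', 'state.backup')      # cheap backup: shares the current inode
    atomic_write('state', b'version 2')   # rename gives 'state' a fresh inode
    with open('state.backup', 'rb') as fp:
        print(fp.read())                  # still b'version 1'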
|
1205 | 1201 | def restorebackup(self, tr, backupname): |
|
1206 | 1202 | '''Restore dirstate by backup file''' |
|
1207 | 1203 | # this "invalidate()" prevents "wlock.release()" from writing |
|
1208 | 1204 | # changes of dirstate out after restoring from backup file |
|
1209 | 1205 | self.invalidate() |
|
1210 | 1206 | filename = self._actualfilename(tr) |
|
1211 | 1207 | self._opener.rename(backupname, filename, checkambig=True) |
|
1212 | 1208 | |
|
1213 | 1209 | def clearbackup(self, tr, backupname): |
|
1214 | 1210 | '''Clear backup file''' |
|
1215 | 1211 | self._opener.unlink(backupname) |
|
1216 | 1212 | |
|
1217 | 1213 | class dirstatemap(object): |
|
1218 | 1214 | def __init__(self, ui, opener, root): |
|
1219 | 1215 | self._ui = ui |
|
1220 | 1216 | self._opener = opener |
|
1221 | 1217 | self._root = root |
|
1222 | 1218 | self._filename = 'dirstate' |
|
1223 | 1219 | |
|
1224 | 1220 | self._map = {} |
|
1225 | 1221 | self.copymap = {} |
|
1226 | 1222 | self._parents = None |
|
1227 | 1223 | self._dirtyparents = False |
|
1228 | 1224 | |
|
1229 | 1225 | # for consistent view between _pl() and _read() invocations |
|
1230 | 1226 | self._pendingmode = None |
|
1231 | 1227 | |
|
1232 | 1228 | def iteritems(self): |
|
1233 | 1229 | return self._map.iteritems() |
|
1234 | 1230 | |
|
1235 | 1231 | def __len__(self): |
|
1236 | 1232 | return len(self._map) |
|
1237 | 1233 | |
|
1238 | 1234 | def __iter__(self): |
|
1239 | 1235 | return iter(self._map) |
|
1240 | 1236 | |
|
1241 | 1237 | def get(self, key, default=None): |
|
1242 | 1238 | return self._map.get(key, default) |
|
1243 | 1239 | |
|
1244 | 1240 | def __contains__(self, key): |
|
1245 | 1241 | return key in self._map |
|
1246 | 1242 | |
|
1247 | 1243 | def __setitem__(self, key, value): |
|
1248 | 1244 | self._map[key] = value |
|
1249 | 1245 | |
|
1250 | 1246 | def __getitem__(self, key): |
|
1251 | 1247 | return self._map[key] |
|
1252 | 1248 | |
|
1253 | 1249 | def __delitem__(self, key): |
|
1254 | 1250 | del self._map[key] |
|
1255 | 1251 | |
|
1256 | 1252 | def keys(self): |
|
1257 | 1253 | return self._map.keys() |
|
1258 | 1254 | |
|
1259 | 1255 | def nonnormalentries(self): |
|
1260 | 1256 | '''Compute the nonnormal dirstate entries from the dmap''' |
|
1261 | 1257 | try: |
|
1262 | 1258 | return parsers.nonnormalotherparententries(self._map) |
|
1263 | 1259 | except AttributeError: |
|
1264 | 1260 | nonnorm = set() |
|
1265 | 1261 | otherparent = set() |
|
1266 | 1262 | for fname, e in self._map.iteritems(): |
|
1267 | 1263 | if e[0] != 'n' or e[3] == -1: |
|
1268 | 1264 | nonnorm.add(fname) |
|
1269 | 1265 | if e[0] == 'n' and e[2] == -2: |
|
1270 | 1266 | otherparent.add(fname) |
|
1271 | 1267 | return nonnorm, otherparent |
|
1272 | 1268 | |
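The pure-Python fallback in nonnormalentries() scans the map once and classifies entries by state and timestamp. A toy run against a hand-built map (the entries are invented) makes the two resulting sets concrete:

    # Hypothetical dirstate-style map: fname -> (state, mode, size, mtime)
    dmap = {
        'clean.py':  ('n', 0o644, 10, 1500000000),
        'lookup.py': ('n', 0o644, 10, -1),   # mtime -1: needs a fresh stat
        'added.py':  ('a', 0o644, -1, -1),
        'otherp.py': ('n', 0o644, -2, -1),   # size -2: from the other parent
    }

    nonnorm, otherparent = set(), set()
    for fname, e in dmap.items():
        if e[0] != 'n' or e[3] == -1:
            nonnorm.add(fname)
        if e[0] == 'n' and e[2] == -2:
            otherparent.add(fname)

    print(sorted(nonnorm))       # ['added.py', 'lookup.py', 'otherp.py']
    print(sorted(otherparent))   # ['otherp.py']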
|
1269 | @propertycache | |
|
1273 | 1270 | def filefoldmap(self): |
|
1274 | 1271 | """Returns a dictionary mapping normalized case paths to their |
|
1275 | 1272 | non-normalized versions. |
|
1276 | 1273 | """ |
|
1277 | 1274 | try: |
|
1278 | 1275 | makefilefoldmap = parsers.make_file_foldmap |
|
1279 | 1276 | except AttributeError: |
|
1280 | 1277 | pass |
|
1281 | 1278 | else: |
|
1282 | 1279 | return makefilefoldmap(self._map, util.normcasespec, |
|
1283 | 1280 | util.normcasefallback) |
|
1284 | 1281 | |
|
1285 | 1282 | f = {} |
|
1286 | 1283 | normcase = util.normcase |
|
1287 | 1284 | for name, s in self._map.iteritems(): |
|
1288 | 1285 | if s[0] != 'r': |
|
1289 | 1286 | f[normcase(name)] = name |
|
1290 | 1287 | f['.'] = '.' # prevents useless util.fspath() invocation |
|
1291 | 1288 | return f |
|
1292 | 1289 | |
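The fallback loop in filefoldmap() amounts to "normalize every tracked name and remember its original spelling". A minimal sketch of how such a fold map supports case-insensitive lookups; util.normcase is approximated with str.lower here, which is an assumption for the example only:

    normcase = str.lower   # stand-in for util.normcase

    dmap = {'README.txt': ('n',), 'src/Main.py': ('n',), 'old.py': ('r',)}

    foldmap = {}
    for name, entry in dmap.items():
        if entry[0] != 'r':               # removed files don't participate
            foldmap[normcase(name)] = name
    foldmap['.'] = '.'

    # a path typed with the wrong case resolves to the tracked spelling
    print(foldmap.get(normcase('readme.TXT')))   # 'README.txt'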
|
1293 | 1290 | def dirs(self): |
|
1294 | 1291 | """Returns a set-like object containing all the directories in the |
|
1295 | 1292 | current dirstate. |
|
1296 | 1293 | """ |
|
1297 | 1294 | return util.dirs(self._map, 'r') |
|
1298 | 1295 | |
|
1299 | 1296 | def _opendirstatefile(self): |
|
1300 | 1297 | fp, mode = txnutil.trypending(self._root, self._opener, self._filename) |
|
1301 | 1298 | if self._pendingmode is not None and self._pendingmode != mode: |
|
1302 | 1299 | fp.close() |
|
1303 | 1300 | raise error.Abort(_('working directory state may be ' |
|
1304 | 1301 | 'changed parallelly')) |
|
1305 | 1302 | self._pendingmode = mode |
|
1306 | 1303 | return fp |
|
1307 | 1304 | |
|
1308 | 1305 | def parents(self): |
|
1309 | 1306 | if not self._parents: |
|
1310 | 1307 | try: |
|
1311 | 1308 | fp = self._opendirstatefile() |
|
1312 | 1309 | st = fp.read(40) |
|
1313 | 1310 | fp.close() |
|
1314 | 1311 | except IOError as err: |
|
1315 | 1312 | if err.errno != errno.ENOENT: |
|
1316 | 1313 | raise |
|
1317 | 1314 | # File doesn't exist, so the current state is empty |
|
1318 | 1315 | st = '' |
|
1319 | 1316 | |
|
1320 | 1317 | l = len(st) |
|
1321 | 1318 | if l == 40: |
|
1322 | 1319 | self._parents = st[:20], st[20:40] |
|
1323 | 1320 | elif l == 0: |
|
1324 | 1321 | self._parents = [nullid, nullid] |
|
1325 | 1322 | else: |
|
1326 | 1323 | raise error.Abort(_('working directory state appears ' |
|
1327 | 1324 | 'damaged!')) |
|
1328 | 1325 | |
|
1329 | 1326 | return self._parents |
|
1330 | 1327 | |
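parents() only ever reads the first 40 bytes of the dirstate: two 20-byte binary node ids back to back, with an empty file meaning "both parents are null". A standalone sketch of that framing (nullid is spelled out explicitly rather than imported):

    import binascii

    nullid = b'\0' * 20    # same convention as mercurial.node.nullid

    def parse_parents(header):
        if len(header) == 40:
            return header[:20], header[20:40]
        elif len(header) == 0:             # no dirstate file yet
            return nullid, nullid
        raise ValueError('working directory state appears damaged!')

    p1 = binascii.unhexlify('11' * 20)
    print(parse_parents(p1 + nullid) == (p1, nullid))   # True
    print(parse_parents(b'') == (nullid, nullid))        # True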
|
1331 | 1328 | def setparents(self, p1, p2): |
|
1332 | 1329 | self._parents = (p1, p2) |
|
1333 | 1330 | self._dirtyparents = True |
|
1334 | 1331 | |
|
1335 | 1332 | def read(self): |
|
1336 | 1333 | # ignore HG_PENDING because identity is used only for writing |
|
1337 | 1334 | self.identity = util.filestat.frompath( |
|
1338 | 1335 | self._opener.join(self._filename)) |
|
1339 | 1336 | |
|
1340 | 1337 | try: |
|
1341 | 1338 | fp = self._opendirstatefile() |
|
1342 | 1339 | try: |
|
1343 | 1340 | st = fp.read() |
|
1344 | 1341 | finally: |
|
1345 | 1342 | fp.close() |
|
1346 | 1343 | except IOError as err: |
|
1347 | 1344 | if err.errno != errno.ENOENT: |
|
1348 | 1345 | raise |
|
1349 | 1346 | return |
|
1350 | 1347 | if not st: |
|
1351 | 1348 | return |
|
1352 | 1349 | |
|
1353 | 1350 | if util.safehasattr(parsers, 'dict_new_presized'): |
|
1354 | 1351 | # Make an estimate of the number of files in the dirstate based on |
|
1355 | 1352 | # its size. From a linear regression on a set of real-world repos, |
|
1356 | 1353 | # all over 10,000 files, the size of a dirstate entry is 85 |
|
1357 | 1354 | # bytes. The cost of resizing is significantly higher than the cost |
|
1358 | 1355 | # of filling in a larger presized dict, so subtract 20% from the |
|
1359 | 1356 | # size. |
|
1360 | 1357 | # |
|
1361 | 1358 | # This heuristic is imperfect in many ways, so in a future dirstate |
|
1362 | 1359 | # format update it makes sense to just record the number of entries |
|
1363 | 1360 | # on write. |
|
1364 | 1361 | self._map = parsers.dict_new_presized(len(st) / 71) |
|
1365 | 1362 | |
|
1366 | 1363 | # Python's garbage collector triggers a GC each time a certain number |
|
1367 | 1364 | # of container objects (the number being defined by |
|
1368 | 1365 | # gc.get_threshold()) are allocated. parse_dirstate creates a tuple |
|
1369 | 1366 | # for each file in the dirstate. The C version then immediately marks |
|
1370 | 1367 | # them as not to be tracked by the collector. However, this has no |
|
1371 | 1368 | # effect on when GCs are triggered, only on what objects the GC looks |
|
1372 | 1369 | # into. This means that O(number of files) GCs are unavoidable. |
|
1373 | 1370 | # Depending on when in the process's lifetime the dirstate is parsed, |
|
1374 | 1371 | # this can get very expensive. As a workaround, disable GC while |
|
1375 | 1372 | # parsing the dirstate. |
|
1376 | 1373 | # |
|
1377 | 1374 | # (we cannot decorate the function directly since it is in a C module) |
|
1378 | 1375 | parse_dirstate = util.nogc(parsers.parse_dirstate) |
|
1379 | 1376 | p = parse_dirstate(self._map, self.copymap, st) |
|
1380 | 1377 | if not self._dirtyparents: |
|
1381 | 1378 | self.setparents(*p) |
|
1382 | 1379 | |
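Two performance tricks are spelled out in the comments inside read(): presizing the destination dict from a size heuristic (dividing by 71 rather than 85 presizes it roughly 20% larger than the raw estimate), and disabling the cyclic garbage collector while millions of small containers are allocated. The GC part can be reproduced with a tiny decorator; this is a sketch of the idea, not Mercurial's actual util.nogc helper:

    import functools
    import gc

    def nogc(func):
        """Run func with the cyclic garbage collector disabled."""
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            enabled = gc.isenabled()
            gc.disable()
            try:
                return func(*args, **kwargs)
            finally:
                if enabled:
                    gc.enable()
        return wrapper

    @nogc
    def parse_many_entries(n):
        # allocating many tuples normally triggers O(n) collections;
        # with GC off, parsing is allocation-bound only
        return [(str(i), 'n', 0o644, i) for i in range(n)]

    print(len(parse_many_entries(100000)))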
|
1383 | 1380 | def write(self, st, now): |
|
1384 | 1381 | st.write(parsers.pack_dirstate(self._map, self.copymap, |
|
1385 | 1382 | self.parents(), now)) |
|
1386 | 1383 | st.close() |
|
1387 | 1384 | self._dirtyparents = False |
|
1388 | 1385 | self.nonnormalset, self.otherparentset = self.nonnormalentries() |
|
1389 | 1386 | |
|
1390 | 1387 | @propertycache |
|
1391 | 1388 | def nonnormalset(self): |
|
1392 | 1389 | nonnorm, otherparents = self.nonnormalentries() |
|
1393 | 1390 | self.otherparentset = otherparents |
|
1394 | 1391 | return nonnorm |
|
1395 | 1392 | |
|
1396 | 1393 | @propertycache |
|
1397 | 1394 | def otherparentset(self): |
|
1398 | 1395 | nonnorm, otherparents = self.nonnormalentries() |
|
1399 | 1396 | self.nonnormalset = nonnorm |
|
1400 | 1397 | return otherparents |
|
1401 | 1398 | |
|
1402 | 1399 | @propertycache |
|
1403 | 1400 | def identity(self): |
|
1404 | 1401 | self.read() |
|
1405 | 1402 | return self.identity |
|
1406 | 1403 |
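nonnormalset and otherparentset are both derived from a single pass over the map, so whichever property is computed first fills in the sibling to avoid a second scan. A self-contained sketch of that cross-populating pattern; the propertycache class below is a simplified stand-in for Mercurial's util.propertycache:

    class propertycache(object):
        """Non-data descriptor: compute once, then cache on the instance."""
        def __init__(self, func):
            self.func = func
            self.name = func.__name__
        def __get__(self, obj, objtype=None):
            value = self.func(obj)
            obj.__dict__[self.name] = value   # shadows the descriptor from now on
            return value

    class toymap(object):
        def _scan(self):
            print('scanning once')
            return {'lookup.py'}, {'otherp.py'}

        @propertycache
        def nonnormalset(self):
            nonnorm, otherparents = self._scan()
            self.otherparentset = otherparents   # fill the sibling cache too
            return nonnorm

        @propertycache
        def otherparentset(self):
            nonnorm, otherparents = self._scan()
            self.nonnormalset = nonnorm
            return otherparents

    d = toymap()
    print(d.nonnormalset)      # prints 'scanning once', then the set
    print(d.otherparentset)    # served from the cache, no second scan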