##// END OF EJS Templates
dirstate-v2: add devel config option to control write behavior...
Raphaël Gomès -
r51117:ecd28d89 stable
parent child Browse files
Show More
@@ -1,2893 +1,2902 b''
1 # configitems.py - centralized declaration of configuration option
1 # configitems.py - centralized declaration of configuration option
2 #
2 #
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import functools
9 import functools
10 import re
10 import re
11
11
12 from . import (
12 from . import (
13 encoding,
13 encoding,
14 error,
14 error,
15 )
15 )
16
16
17
17
def loadconfigtable(ui, extname, configtable):
    """update config item known to the ui with the extension ones"""
    for section, items in sorted(configtable.items()):
        # fetch (or create) the per-section register on the ui
        registry = ui._knownconfig.setdefault(section, itemregister())
        # any key present on both sides is being overwritten by the extension
        overlapping = set(registry) & set(items)
        for key in sorted(overlapping):
            warning = b"extension '%s' overwrite config item '%s.%s'" % (
                extname,
                section,
                key,
            )
            ui.develwarn(warning, config=b'warn-config')

        registry.update(items)
31
31
class configitem:
    """represent a known config item

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    :alias: optional list of tuples as alternatives,
    :generic: this is a generic definition, match name using regular expression.
    """

    def __init__(
        self,
        section,
        name,
        default=None,
        alias=(),
        generic=False,
        priority=0,
        experimental=False,
    ):
        self.section = section
        self.name = name
        self.default = default
        # keep a private mutable copy of the alias sequence
        self.alias = list(alias)
        self.generic = generic
        self.priority = priority
        self.experimental = experimental
        # generic items use their name as a regular expression; pre-compile
        # it once here, non-generic items carry no pattern at all
        self._re = re.compile(name) if generic else None
63
63
class itemregister(dict):
    """A specialized dictionary that can handle wild-card selection"""

    def __init__(self):
        super(itemregister, self).__init__()
        # generic (regex-named) items, kept aside for pattern lookup
        self._generics = set()

    def update(self, other):
        super(itemregister, self).update(other)
        self._generics.update(other._generics)

    def __setitem__(self, key, item):
        super(itemregister, self).__setitem__(key, item)
        if item.generic:
            self._generics.add(item)

    def get(self, key):
        exact = super(itemregister, self).get(key)
        if exact is not None and not exact.generic:
            # a direct, non-generic entry always wins
            return exact

        # Otherwise look for a generic item whose pattern covers the key.
        # We use 'match' instead of 'search' to keep things simple for
        # people unfamiliar with regular expressions: anchoring at the
        # start of the string gives less surprising results for simple
        # sub-attribute patterns. For example "color\..*" behaves as
        # expected with match, while search could suddenly hit unrelated
        # configuration that merely contains "color." somewhere. The
        # tradeoff is requiring ".*" on some patterns instead of
        # prefixing most of them with the more error-prone "^".
        candidates = sorted(
            self._generics, key=lambda item: (item.priority, item.name)
        )
        for candidate in candidates:
            if candidate._re.match(key):
                return candidate

        return None
104
104
coreitems = {}


def _register(configtable, *args, **kwargs):
    """instantiate a configitem and file it under its section in
    ``configtable``, refusing duplicate registrations"""
    item = configitem(*args, **kwargs)
    section = configtable.setdefault(item.section, itemregister())
    if item.name in section:
        msg = b"duplicated config item registration for '%s.%s'"
        raise error.ProgrammingError(msg % (item.section, item.name))
    section[item.name] = item


# special value for case where the default is derived from other values
dynamicdefault = object()

# Registering actual config items


def getitemregister(configtable):
    """return a registration helper bound to ``configtable``"""
    register = functools.partial(_register, configtable)
    # export pseudo enum as configitem.*
    register.dynamicdefault = dynamicdefault
    return register


coreconfigitem = getitemregister(coreitems)
131
131
132
132
def _registerdiffopts(section, configprefix=b''):
    """register the standard family of diff options under ``section``

    Every option name is prefixed with ``configprefix`` (e.g. the
    ``commit.interactive.`` variants). Registration order and defaults
    match the historical one-call-per-option form.
    """
    for suffix, value in [
        (b'nodates', False),
        (b'showfunc', False),
        (b'unified', None),
        (b'git', False),
        (b'ignorews', False),
        (b'ignorewsamount', False),
        (b'ignoreblanklines', False),
        (b'ignorewseol', False),
        (b'nobinary', False),
        (b'noprefix', False),
        (b'word-diff', False),
    ]:
        coreconfigitem(
            section,
            configprefix + suffix,
            default=value,
        )
189
189
190
190
191 coreconfigitem(
191 coreconfigitem(
192 b'alias',
192 b'alias',
193 b'.*',
193 b'.*',
194 default=dynamicdefault,
194 default=dynamicdefault,
195 generic=True,
195 generic=True,
196 )
196 )
197 coreconfigitem(
197 coreconfigitem(
198 b'auth',
198 b'auth',
199 b'cookiefile',
199 b'cookiefile',
200 default=None,
200 default=None,
201 )
201 )
202 _registerdiffopts(section=b'annotate')
202 _registerdiffopts(section=b'annotate')
203 # bookmarks.pushing: internal hack for discovery
203 # bookmarks.pushing: internal hack for discovery
204 coreconfigitem(
204 coreconfigitem(
205 b'bookmarks',
205 b'bookmarks',
206 b'pushing',
206 b'pushing',
207 default=list,
207 default=list,
208 )
208 )
209 # bundle.mainreporoot: internal hack for bundlerepo
209 # bundle.mainreporoot: internal hack for bundlerepo
210 coreconfigitem(
210 coreconfigitem(
211 b'bundle',
211 b'bundle',
212 b'mainreporoot',
212 b'mainreporoot',
213 default=b'',
213 default=b'',
214 )
214 )
215 coreconfigitem(
215 coreconfigitem(
216 b'censor',
216 b'censor',
217 b'policy',
217 b'policy',
218 default=b'abort',
218 default=b'abort',
219 experimental=True,
219 experimental=True,
220 )
220 )
221 coreconfigitem(
221 coreconfigitem(
222 b'chgserver',
222 b'chgserver',
223 b'idletimeout',
223 b'idletimeout',
224 default=3600,
224 default=3600,
225 )
225 )
226 coreconfigitem(
226 coreconfigitem(
227 b'chgserver',
227 b'chgserver',
228 b'skiphash',
228 b'skiphash',
229 default=False,
229 default=False,
230 )
230 )
231 coreconfigitem(
231 coreconfigitem(
232 b'cmdserver',
232 b'cmdserver',
233 b'log',
233 b'log',
234 default=None,
234 default=None,
235 )
235 )
236 coreconfigitem(
236 coreconfigitem(
237 b'cmdserver',
237 b'cmdserver',
238 b'max-log-files',
238 b'max-log-files',
239 default=7,
239 default=7,
240 )
240 )
241 coreconfigitem(
241 coreconfigitem(
242 b'cmdserver',
242 b'cmdserver',
243 b'max-log-size',
243 b'max-log-size',
244 default=b'1 MB',
244 default=b'1 MB',
245 )
245 )
246 coreconfigitem(
246 coreconfigitem(
247 b'cmdserver',
247 b'cmdserver',
248 b'max-repo-cache',
248 b'max-repo-cache',
249 default=0,
249 default=0,
250 experimental=True,
250 experimental=True,
251 )
251 )
252 coreconfigitem(
252 coreconfigitem(
253 b'cmdserver',
253 b'cmdserver',
254 b'message-encodings',
254 b'message-encodings',
255 default=list,
255 default=list,
256 )
256 )
257 coreconfigitem(
257 coreconfigitem(
258 b'cmdserver',
258 b'cmdserver',
259 b'track-log',
259 b'track-log',
260 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
260 default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
261 )
261 )
262 coreconfigitem(
262 coreconfigitem(
263 b'cmdserver',
263 b'cmdserver',
264 b'shutdown-on-interrupt',
264 b'shutdown-on-interrupt',
265 default=True,
265 default=True,
266 )
266 )
267 coreconfigitem(
267 coreconfigitem(
268 b'color',
268 b'color',
269 b'.*',
269 b'.*',
270 default=None,
270 default=None,
271 generic=True,
271 generic=True,
272 )
272 )
273 coreconfigitem(
273 coreconfigitem(
274 b'color',
274 b'color',
275 b'mode',
275 b'mode',
276 default=b'auto',
276 default=b'auto',
277 )
277 )
278 coreconfigitem(
278 coreconfigitem(
279 b'color',
279 b'color',
280 b'pagermode',
280 b'pagermode',
281 default=dynamicdefault,
281 default=dynamicdefault,
282 )
282 )
283 coreconfigitem(
283 coreconfigitem(
284 b'command-templates',
284 b'command-templates',
285 b'graphnode',
285 b'graphnode',
286 default=None,
286 default=None,
287 alias=[(b'ui', b'graphnodetemplate')],
287 alias=[(b'ui', b'graphnodetemplate')],
288 )
288 )
289 coreconfigitem(
289 coreconfigitem(
290 b'command-templates',
290 b'command-templates',
291 b'log',
291 b'log',
292 default=None,
292 default=None,
293 alias=[(b'ui', b'logtemplate')],
293 alias=[(b'ui', b'logtemplate')],
294 )
294 )
295 coreconfigitem(
295 coreconfigitem(
296 b'command-templates',
296 b'command-templates',
297 b'mergemarker',
297 b'mergemarker',
298 default=(
298 default=(
299 b'{node|short} '
299 b'{node|short} '
300 b'{ifeq(tags, "tip", "", '
300 b'{ifeq(tags, "tip", "", '
301 b'ifeq(tags, "", "", "{tags} "))}'
301 b'ifeq(tags, "", "", "{tags} "))}'
302 b'{if(bookmarks, "{bookmarks} ")}'
302 b'{if(bookmarks, "{bookmarks} ")}'
303 b'{ifeq(branch, "default", "", "{branch} ")}'
303 b'{ifeq(branch, "default", "", "{branch} ")}'
304 b'- {author|user}: {desc|firstline}'
304 b'- {author|user}: {desc|firstline}'
305 ),
305 ),
306 alias=[(b'ui', b'mergemarkertemplate')],
306 alias=[(b'ui', b'mergemarkertemplate')],
307 )
307 )
308 coreconfigitem(
308 coreconfigitem(
309 b'command-templates',
309 b'command-templates',
310 b'pre-merge-tool-output',
310 b'pre-merge-tool-output',
311 default=None,
311 default=None,
312 alias=[(b'ui', b'pre-merge-tool-output-template')],
312 alias=[(b'ui', b'pre-merge-tool-output-template')],
313 )
313 )
314 coreconfigitem(
314 coreconfigitem(
315 b'command-templates',
315 b'command-templates',
316 b'oneline-summary',
316 b'oneline-summary',
317 default=None,
317 default=None,
318 )
318 )
319 coreconfigitem(
319 coreconfigitem(
320 b'command-templates',
320 b'command-templates',
321 b'oneline-summary.*',
321 b'oneline-summary.*',
322 default=dynamicdefault,
322 default=dynamicdefault,
323 generic=True,
323 generic=True,
324 )
324 )
325 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
325 _registerdiffopts(section=b'commands', configprefix=b'commit.interactive.')
326 coreconfigitem(
326 coreconfigitem(
327 b'commands',
327 b'commands',
328 b'commit.post-status',
328 b'commit.post-status',
329 default=False,
329 default=False,
330 )
330 )
331 coreconfigitem(
331 coreconfigitem(
332 b'commands',
332 b'commands',
333 b'grep.all-files',
333 b'grep.all-files',
334 default=False,
334 default=False,
335 experimental=True,
335 experimental=True,
336 )
336 )
337 coreconfigitem(
337 coreconfigitem(
338 b'commands',
338 b'commands',
339 b'merge.require-rev',
339 b'merge.require-rev',
340 default=False,
340 default=False,
341 )
341 )
342 coreconfigitem(
342 coreconfigitem(
343 b'commands',
343 b'commands',
344 b'push.require-revs',
344 b'push.require-revs',
345 default=False,
345 default=False,
346 )
346 )
347 coreconfigitem(
347 coreconfigitem(
348 b'commands',
348 b'commands',
349 b'resolve.confirm',
349 b'resolve.confirm',
350 default=False,
350 default=False,
351 )
351 )
352 coreconfigitem(
352 coreconfigitem(
353 b'commands',
353 b'commands',
354 b'resolve.explicit-re-merge',
354 b'resolve.explicit-re-merge',
355 default=False,
355 default=False,
356 )
356 )
357 coreconfigitem(
357 coreconfigitem(
358 b'commands',
358 b'commands',
359 b'resolve.mark-check',
359 b'resolve.mark-check',
360 default=b'none',
360 default=b'none',
361 )
361 )
362 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
362 _registerdiffopts(section=b'commands', configprefix=b'revert.interactive.')
363 coreconfigitem(
363 coreconfigitem(
364 b'commands',
364 b'commands',
365 b'show.aliasprefix',
365 b'show.aliasprefix',
366 default=list,
366 default=list,
367 )
367 )
368 coreconfigitem(
368 coreconfigitem(
369 b'commands',
369 b'commands',
370 b'status.relative',
370 b'status.relative',
371 default=False,
371 default=False,
372 )
372 )
373 coreconfigitem(
373 coreconfigitem(
374 b'commands',
374 b'commands',
375 b'status.skipstates',
375 b'status.skipstates',
376 default=[],
376 default=[],
377 experimental=True,
377 experimental=True,
378 )
378 )
379 coreconfigitem(
379 coreconfigitem(
380 b'commands',
380 b'commands',
381 b'status.terse',
381 b'status.terse',
382 default=b'',
382 default=b'',
383 )
383 )
384 coreconfigitem(
384 coreconfigitem(
385 b'commands',
385 b'commands',
386 b'status.verbose',
386 b'status.verbose',
387 default=False,
387 default=False,
388 )
388 )
389 coreconfigitem(
389 coreconfigitem(
390 b'commands',
390 b'commands',
391 b'update.check',
391 b'update.check',
392 default=None,
392 default=None,
393 )
393 )
394 coreconfigitem(
394 coreconfigitem(
395 b'commands',
395 b'commands',
396 b'update.requiredest',
396 b'update.requiredest',
397 default=False,
397 default=False,
398 )
398 )
399 coreconfigitem(
399 coreconfigitem(
400 b'committemplate',
400 b'committemplate',
401 b'.*',
401 b'.*',
402 default=None,
402 default=None,
403 generic=True,
403 generic=True,
404 )
404 )
405 coreconfigitem(
405 coreconfigitem(
406 b'convert',
406 b'convert',
407 b'bzr.saverev',
407 b'bzr.saverev',
408 default=True,
408 default=True,
409 )
409 )
410 coreconfigitem(
410 coreconfigitem(
411 b'convert',
411 b'convert',
412 b'cvsps.cache',
412 b'cvsps.cache',
413 default=True,
413 default=True,
414 )
414 )
415 coreconfigitem(
415 coreconfigitem(
416 b'convert',
416 b'convert',
417 b'cvsps.fuzz',
417 b'cvsps.fuzz',
418 default=60,
418 default=60,
419 )
419 )
420 coreconfigitem(
420 coreconfigitem(
421 b'convert',
421 b'convert',
422 b'cvsps.logencoding',
422 b'cvsps.logencoding',
423 default=None,
423 default=None,
424 )
424 )
425 coreconfigitem(
425 coreconfigitem(
426 b'convert',
426 b'convert',
427 b'cvsps.mergefrom',
427 b'cvsps.mergefrom',
428 default=None,
428 default=None,
429 )
429 )
430 coreconfigitem(
430 coreconfigitem(
431 b'convert',
431 b'convert',
432 b'cvsps.mergeto',
432 b'cvsps.mergeto',
433 default=None,
433 default=None,
434 )
434 )
435 coreconfigitem(
435 coreconfigitem(
436 b'convert',
436 b'convert',
437 b'git.committeractions',
437 b'git.committeractions',
438 default=lambda: [b'messagedifferent'],
438 default=lambda: [b'messagedifferent'],
439 )
439 )
440 coreconfigitem(
440 coreconfigitem(
441 b'convert',
441 b'convert',
442 b'git.extrakeys',
442 b'git.extrakeys',
443 default=list,
443 default=list,
444 )
444 )
445 coreconfigitem(
445 coreconfigitem(
446 b'convert',
446 b'convert',
447 b'git.findcopiesharder',
447 b'git.findcopiesharder',
448 default=False,
448 default=False,
449 )
449 )
450 coreconfigitem(
450 coreconfigitem(
451 b'convert',
451 b'convert',
452 b'git.remoteprefix',
452 b'git.remoteprefix',
453 default=b'remote',
453 default=b'remote',
454 )
454 )
455 coreconfigitem(
455 coreconfigitem(
456 b'convert',
456 b'convert',
457 b'git.renamelimit',
457 b'git.renamelimit',
458 default=400,
458 default=400,
459 )
459 )
460 coreconfigitem(
460 coreconfigitem(
461 b'convert',
461 b'convert',
462 b'git.saverev',
462 b'git.saverev',
463 default=True,
463 default=True,
464 )
464 )
465 coreconfigitem(
465 coreconfigitem(
466 b'convert',
466 b'convert',
467 b'git.similarity',
467 b'git.similarity',
468 default=50,
468 default=50,
469 )
469 )
470 coreconfigitem(
470 coreconfigitem(
471 b'convert',
471 b'convert',
472 b'git.skipsubmodules',
472 b'git.skipsubmodules',
473 default=False,
473 default=False,
474 )
474 )
475 coreconfigitem(
475 coreconfigitem(
476 b'convert',
476 b'convert',
477 b'hg.clonebranches',
477 b'hg.clonebranches',
478 default=False,
478 default=False,
479 )
479 )
480 coreconfigitem(
480 coreconfigitem(
481 b'convert',
481 b'convert',
482 b'hg.ignoreerrors',
482 b'hg.ignoreerrors',
483 default=False,
483 default=False,
484 )
484 )
485 coreconfigitem(
485 coreconfigitem(
486 b'convert',
486 b'convert',
487 b'hg.preserve-hash',
487 b'hg.preserve-hash',
488 default=False,
488 default=False,
489 )
489 )
490 coreconfigitem(
490 coreconfigitem(
491 b'convert',
491 b'convert',
492 b'hg.revs',
492 b'hg.revs',
493 default=None,
493 default=None,
494 )
494 )
495 coreconfigitem(
495 coreconfigitem(
496 b'convert',
496 b'convert',
497 b'hg.saverev',
497 b'hg.saverev',
498 default=False,
498 default=False,
499 )
499 )
500 coreconfigitem(
500 coreconfigitem(
501 b'convert',
501 b'convert',
502 b'hg.sourcename',
502 b'hg.sourcename',
503 default=None,
503 default=None,
504 )
504 )
505 coreconfigitem(
505 coreconfigitem(
506 b'convert',
506 b'convert',
507 b'hg.startrev',
507 b'hg.startrev',
508 default=None,
508 default=None,
509 )
509 )
510 coreconfigitem(
510 coreconfigitem(
511 b'convert',
511 b'convert',
512 b'hg.tagsbranch',
512 b'hg.tagsbranch',
513 default=b'default',
513 default=b'default',
514 )
514 )
515 coreconfigitem(
515 coreconfigitem(
516 b'convert',
516 b'convert',
517 b'hg.usebranchnames',
517 b'hg.usebranchnames',
518 default=True,
518 default=True,
519 )
519 )
520 coreconfigitem(
520 coreconfigitem(
521 b'convert',
521 b'convert',
522 b'ignoreancestorcheck',
522 b'ignoreancestorcheck',
523 default=False,
523 default=False,
524 experimental=True,
524 experimental=True,
525 )
525 )
526 coreconfigitem(
526 coreconfigitem(
527 b'convert',
527 b'convert',
528 b'localtimezone',
528 b'localtimezone',
529 default=False,
529 default=False,
530 )
530 )
531 coreconfigitem(
531 coreconfigitem(
532 b'convert',
532 b'convert',
533 b'p4.encoding',
533 b'p4.encoding',
534 default=dynamicdefault,
534 default=dynamicdefault,
535 )
535 )
536 coreconfigitem(
536 coreconfigitem(
537 b'convert',
537 b'convert',
538 b'p4.startrev',
538 b'p4.startrev',
539 default=0,
539 default=0,
540 )
540 )
541 coreconfigitem(
541 coreconfigitem(
542 b'convert',
542 b'convert',
543 b'skiptags',
543 b'skiptags',
544 default=False,
544 default=False,
545 )
545 )
546 coreconfigitem(
546 coreconfigitem(
547 b'convert',
547 b'convert',
548 b'svn.debugsvnlog',
548 b'svn.debugsvnlog',
549 default=True,
549 default=True,
550 )
550 )
551 coreconfigitem(
551 coreconfigitem(
552 b'convert',
552 b'convert',
553 b'svn.trunk',
553 b'svn.trunk',
554 default=None,
554 default=None,
555 )
555 )
556 coreconfigitem(
556 coreconfigitem(
557 b'convert',
557 b'convert',
558 b'svn.tags',
558 b'svn.tags',
559 default=None,
559 default=None,
560 )
560 )
561 coreconfigitem(
561 coreconfigitem(
562 b'convert',
562 b'convert',
563 b'svn.branches',
563 b'svn.branches',
564 default=None,
564 default=None,
565 )
565 )
566 coreconfigitem(
566 coreconfigitem(
567 b'convert',
567 b'convert',
568 b'svn.startrev',
568 b'svn.startrev',
569 default=0,
569 default=0,
570 )
570 )
571 coreconfigitem(
571 coreconfigitem(
572 b'convert',
572 b'convert',
573 b'svn.dangerous-set-commit-dates',
573 b'svn.dangerous-set-commit-dates',
574 default=False,
574 default=False,
575 )
575 )
576 coreconfigitem(
576 coreconfigitem(
577 b'debug',
577 b'debug',
578 b'dirstate.delaywrite',
578 b'dirstate.delaywrite',
579 default=0,
579 default=0,
580 )
580 )
581 coreconfigitem(
581 coreconfigitem(
582 b'debug',
582 b'debug',
583 b'revlog.verifyposition.changelog',
583 b'revlog.verifyposition.changelog',
584 default=b'',
584 default=b'',
585 )
585 )
586 coreconfigitem(
586 coreconfigitem(
587 b'debug',
587 b'debug',
588 b'revlog.debug-delta',
588 b'revlog.debug-delta',
589 default=False,
589 default=False,
590 )
590 )
591 coreconfigitem(
591 coreconfigitem(
592 b'defaults',
592 b'defaults',
593 b'.*',
593 b'.*',
594 default=None,
594 default=None,
595 generic=True,
595 generic=True,
596 )
596 )
597 coreconfigitem(
597 coreconfigitem(
598 b'devel',
598 b'devel',
599 b'all-warnings',
599 b'all-warnings',
600 default=False,
600 default=False,
601 )
601 )
602 coreconfigitem(
602 coreconfigitem(
603 b'devel',
603 b'devel',
604 b'bundle2.debug',
604 b'bundle2.debug',
605 default=False,
605 default=False,
606 )
606 )
607 coreconfigitem(
607 coreconfigitem(
608 b'devel',
608 b'devel',
609 b'bundle.delta',
609 b'bundle.delta',
610 default=b'',
610 default=b'',
611 )
611 )
612 coreconfigitem(
612 coreconfigitem(
613 b'devel',
613 b'devel',
614 b'cache-vfs',
614 b'cache-vfs',
615 default=None,
615 default=None,
616 )
616 )
617 coreconfigitem(
617 coreconfigitem(
618 b'devel',
618 b'devel',
619 b'check-locks',
619 b'check-locks',
620 default=False,
620 default=False,
621 )
621 )
622 coreconfigitem(
622 coreconfigitem(
623 b'devel',
623 b'devel',
624 b'check-relroot',
624 b'check-relroot',
625 default=False,
625 default=False,
626 )
626 )
627 # Track copy information for all file, not just "added" one (very slow)
627 # Track copy information for all file, not just "added" one (very slow)
628 coreconfigitem(
628 coreconfigitem(
629 b'devel',
629 b'devel',
630 b'copy-tracing.trace-all-files',
630 b'copy-tracing.trace-all-files',
631 default=False,
631 default=False,
632 )
632 )
633 coreconfigitem(
633 coreconfigitem(
634 b'devel',
634 b'devel',
635 b'default-date',
635 b'default-date',
636 default=None,
636 default=None,
637 )
637 )
638 coreconfigitem(
638 coreconfigitem(
639 b'devel',
639 b'devel',
640 b'deprec-warn',
640 b'deprec-warn',
641 default=False,
641 default=False,
642 )
642 )
# possible values:
# - auto (the default)
# - force-append
# - force-new
coreconfigitem(
    b'devel',
    b'dirstate.v2.data_update_mode',
    # bytes, not str: every other config name/value declared in this file
    # is a bytes literal, and consumers compare the configured value
    # against b"auto"/b"force-append"/b"force-new"
    default=b"auto",
)
643 coreconfigitem(
652 coreconfigitem(
644 b'devel',
653 b'devel',
645 b'disableloaddefaultcerts',
654 b'disableloaddefaultcerts',
646 default=False,
655 default=False,
647 )
656 )
648 coreconfigitem(
657 coreconfigitem(
649 b'devel',
658 b'devel',
650 b'warn-empty-changegroup',
659 b'warn-empty-changegroup',
651 default=False,
660 default=False,
652 )
661 )
653 coreconfigitem(
662 coreconfigitem(
654 b'devel',
663 b'devel',
655 b'legacy.exchange',
664 b'legacy.exchange',
656 default=list,
665 default=list,
657 )
666 )
658 # When True, revlogs use a special reference version of the nodemap, that is not
667 # When True, revlogs use a special reference version of the nodemap, that is not
659 # performant but is "known" to behave properly.
668 # performant but is "known" to behave properly.
660 coreconfigitem(
669 coreconfigitem(
661 b'devel',
670 b'devel',
662 b'persistent-nodemap',
671 b'persistent-nodemap',
663 default=False,
672 default=False,
664 )
673 )
665 coreconfigitem(
674 coreconfigitem(
666 b'devel',
675 b'devel',
667 b'servercafile',
676 b'servercafile',
668 default=b'',
677 default=b'',
669 )
678 )
670 coreconfigitem(
679 coreconfigitem(
671 b'devel',
680 b'devel',
672 b'serverexactprotocol',
681 b'serverexactprotocol',
673 default=b'',
682 default=b'',
674 )
683 )
675 coreconfigitem(
684 coreconfigitem(
676 b'devel',
685 b'devel',
677 b'serverrequirecert',
686 b'serverrequirecert',
678 default=False,
687 default=False,
679 )
688 )
680 # Makes the status algorithm wait for the existence of this file
689 # Makes the status algorithm wait for the existence of this file
681 # (or until a timeout of `devel.sync.status.pre-dirstate-write-file-timeout`
690 # (or until a timeout of `devel.sync.status.pre-dirstate-write-file-timeout`
682 # seconds) before taking the lock and writing the dirstate.
691 # seconds) before taking the lock and writing the dirstate.
683 # Status signals that it's ready to wait by creating a file
692 # Status signals that it's ready to wait by creating a file
684 # with the same name + `.waiting`.
693 # with the same name + `.waiting`.
685 # Useful when testing race conditions.
694 # Useful when testing race conditions.
686 coreconfigitem(
695 coreconfigitem(
687 b'devel',
696 b'devel',
688 b'sync.status.pre-dirstate-write-file',
697 b'sync.status.pre-dirstate-write-file',
689 default=None,
698 default=None,
690 )
699 )
691 coreconfigitem(
700 coreconfigitem(
692 b'devel',
701 b'devel',
693 b'sync.status.pre-dirstate-write-file-timeout',
702 b'sync.status.pre-dirstate-write-file-timeout',
694 default=2,
703 default=2,
695 )
704 )
696 coreconfigitem(
705 coreconfigitem(
697 b'devel',
706 b'devel',
698 b'strip-obsmarkers',
707 b'strip-obsmarkers',
699 default=True,
708 default=True,
700 )
709 )
701 coreconfigitem(
710 coreconfigitem(
702 b'devel',
711 b'devel',
703 b'warn-config',
712 b'warn-config',
704 default=None,
713 default=None,
705 )
714 )
706 coreconfigitem(
715 coreconfigitem(
707 b'devel',
716 b'devel',
708 b'warn-config-default',
717 b'warn-config-default',
709 default=None,
718 default=None,
710 )
719 )
711 coreconfigitem(
720 coreconfigitem(
712 b'devel',
721 b'devel',
713 b'user.obsmarker',
722 b'user.obsmarker',
714 default=None,
723 default=None,
715 )
724 )
716 coreconfigitem(
725 coreconfigitem(
717 b'devel',
726 b'devel',
718 b'warn-config-unknown',
727 b'warn-config-unknown',
719 default=None,
728 default=None,
720 )
729 )
721 coreconfigitem(
730 coreconfigitem(
722 b'devel',
731 b'devel',
723 b'debug.copies',
732 b'debug.copies',
724 default=False,
733 default=False,
725 )
734 )
726 coreconfigitem(
735 coreconfigitem(
727 b'devel',
736 b'devel',
728 b'copy-tracing.multi-thread',
737 b'copy-tracing.multi-thread',
729 default=True,
738 default=True,
730 )
739 )
731 coreconfigitem(
740 coreconfigitem(
732 b'devel',
741 b'devel',
733 b'debug.extensions',
742 b'debug.extensions',
734 default=False,
743 default=False,
735 )
744 )
736 coreconfigitem(
745 coreconfigitem(
737 b'devel',
746 b'devel',
738 b'debug.repo-filters',
747 b'debug.repo-filters',
739 default=False,
748 default=False,
740 )
749 )
741 coreconfigitem(
750 coreconfigitem(
742 b'devel',
751 b'devel',
743 b'debug.peer-request',
752 b'debug.peer-request',
744 default=False,
753 default=False,
745 )
754 )
746 # If discovery.exchange-heads is False, the discovery will not start with
755 # If discovery.exchange-heads is False, the discovery will not start with
747 # remote head fetching and local head querying.
756 # remote head fetching and local head querying.
748 coreconfigitem(
757 coreconfigitem(
749 b'devel',
758 b'devel',
750 b'discovery.exchange-heads',
759 b'discovery.exchange-heads',
751 default=True,
760 default=True,
752 )
761 )
753 # If discovery.grow-sample is False, the sample size used in set discovery will
762 # If discovery.grow-sample is False, the sample size used in set discovery will
754 # not be increased through the process
763 # not be increased through the process
755 coreconfigitem(
764 coreconfigitem(
756 b'devel',
765 b'devel',
757 b'discovery.grow-sample',
766 b'discovery.grow-sample',
758 default=True,
767 default=True,
759 )
768 )
760 # When discovery.grow-sample.dynamic is True, the default, the sample size is
769 # When discovery.grow-sample.dynamic is True, the default, the sample size is
761 # adapted to the shape of the undecided set (it is set to the max of:
770 # adapted to the shape of the undecided set (it is set to the max of:
762 # <target-size>, len(roots(undecided)), len(heads(undecided)
771 # <target-size>, len(roots(undecided)), len(heads(undecided)
763 coreconfigitem(
772 coreconfigitem(
764 b'devel',
773 b'devel',
765 b'discovery.grow-sample.dynamic',
774 b'discovery.grow-sample.dynamic',
766 default=True,
775 default=True,
767 )
776 )
768 # discovery.grow-sample.rate control the rate at which the sample grow
777 # discovery.grow-sample.rate control the rate at which the sample grow
769 coreconfigitem(
778 coreconfigitem(
770 b'devel',
779 b'devel',
771 b'discovery.grow-sample.rate',
780 b'discovery.grow-sample.rate',
772 default=1.05,
781 default=1.05,
773 )
782 )
774 # If discovery.randomize is False, random sampling during discovery are
783 # If discovery.randomize is False, random sampling during discovery are
775 # deterministic. It is meant for integration tests.
784 # deterministic. It is meant for integration tests.
776 coreconfigitem(
785 coreconfigitem(
777 b'devel',
786 b'devel',
778 b'discovery.randomize',
787 b'discovery.randomize',
779 default=True,
788 default=True,
780 )
789 )
781 # Control the initial size of the discovery sample
790 # Control the initial size of the discovery sample
782 coreconfigitem(
791 coreconfigitem(
783 b'devel',
792 b'devel',
784 b'discovery.sample-size',
793 b'discovery.sample-size',
785 default=200,
794 default=200,
786 )
795 )
787 # Control the initial size of the discovery for initial change
796 # Control the initial size of the discovery for initial change
788 coreconfigitem(
797 coreconfigitem(
789 b'devel',
798 b'devel',
790 b'discovery.sample-size.initial',
799 b'discovery.sample-size.initial',
791 default=100,
800 default=100,
792 )
801 )
793 _registerdiffopts(section=b'diff')
802 _registerdiffopts(section=b'diff')
794 coreconfigitem(
803 coreconfigitem(
795 b'diff',
804 b'diff',
796 b'merge',
805 b'merge',
797 default=False,
806 default=False,
798 experimental=True,
807 experimental=True,
799 )
808 )
800 coreconfigitem(
809 coreconfigitem(
801 b'email',
810 b'email',
802 b'bcc',
811 b'bcc',
803 default=None,
812 default=None,
804 )
813 )
805 coreconfigitem(
814 coreconfigitem(
806 b'email',
815 b'email',
807 b'cc',
816 b'cc',
808 default=None,
817 default=None,
809 )
818 )
810 coreconfigitem(
819 coreconfigitem(
811 b'email',
820 b'email',
812 b'charsets',
821 b'charsets',
813 default=list,
822 default=list,
814 )
823 )
815 coreconfigitem(
824 coreconfigitem(
816 b'email',
825 b'email',
817 b'from',
826 b'from',
818 default=None,
827 default=None,
819 )
828 )
820 coreconfigitem(
829 coreconfigitem(
821 b'email',
830 b'email',
822 b'method',
831 b'method',
823 default=b'smtp',
832 default=b'smtp',
824 )
833 )
825 coreconfigitem(
834 coreconfigitem(
826 b'email',
835 b'email',
827 b'reply-to',
836 b'reply-to',
828 default=None,
837 default=None,
829 )
838 )
830 coreconfigitem(
839 coreconfigitem(
831 b'email',
840 b'email',
832 b'to',
841 b'to',
833 default=None,
842 default=None,
834 )
843 )
835 coreconfigitem(
844 coreconfigitem(
836 b'experimental',
845 b'experimental',
837 b'archivemetatemplate',
846 b'archivemetatemplate',
838 default=dynamicdefault,
847 default=dynamicdefault,
839 )
848 )
840 coreconfigitem(
849 coreconfigitem(
841 b'experimental',
850 b'experimental',
842 b'auto-publish',
851 b'auto-publish',
843 default=b'publish',
852 default=b'publish',
844 )
853 )
845 coreconfigitem(
854 coreconfigitem(
846 b'experimental',
855 b'experimental',
847 b'bundle-phases',
856 b'bundle-phases',
848 default=False,
857 default=False,
849 )
858 )
850 coreconfigitem(
859 coreconfigitem(
851 b'experimental',
860 b'experimental',
852 b'bundle2-advertise',
861 b'bundle2-advertise',
853 default=True,
862 default=True,
854 )
863 )
855 coreconfigitem(
864 coreconfigitem(
856 b'experimental',
865 b'experimental',
857 b'bundle2-output-capture',
866 b'bundle2-output-capture',
858 default=False,
867 default=False,
859 )
868 )
860 coreconfigitem(
869 coreconfigitem(
861 b'experimental',
870 b'experimental',
862 b'bundle2.pushback',
871 b'bundle2.pushback',
863 default=False,
872 default=False,
864 )
873 )
865 coreconfigitem(
874 coreconfigitem(
866 b'experimental',
875 b'experimental',
867 b'bundle2lazylocking',
876 b'bundle2lazylocking',
868 default=False,
877 default=False,
869 )
878 )
870 coreconfigitem(
879 coreconfigitem(
871 b'experimental',
880 b'experimental',
872 b'bundlecomplevel',
881 b'bundlecomplevel',
873 default=None,
882 default=None,
874 )
883 )
875 coreconfigitem(
884 coreconfigitem(
876 b'experimental',
885 b'experimental',
877 b'bundlecomplevel.bzip2',
886 b'bundlecomplevel.bzip2',
878 default=None,
887 default=None,
879 )
888 )
880 coreconfigitem(
889 coreconfigitem(
881 b'experimental',
890 b'experimental',
882 b'bundlecomplevel.gzip',
891 b'bundlecomplevel.gzip',
883 default=None,
892 default=None,
884 )
893 )
885 coreconfigitem(
894 coreconfigitem(
886 b'experimental',
895 b'experimental',
887 b'bundlecomplevel.none',
896 b'bundlecomplevel.none',
888 default=None,
897 default=None,
889 )
898 )
890 coreconfigitem(
899 coreconfigitem(
891 b'experimental',
900 b'experimental',
892 b'bundlecomplevel.zstd',
901 b'bundlecomplevel.zstd',
893 default=None,
902 default=None,
894 )
903 )
895 coreconfigitem(
904 coreconfigitem(
896 b'experimental',
905 b'experimental',
897 b'bundlecompthreads',
906 b'bundlecompthreads',
898 default=None,
907 default=None,
899 )
908 )
900 coreconfigitem(
909 coreconfigitem(
901 b'experimental',
910 b'experimental',
902 b'bundlecompthreads.bzip2',
911 b'bundlecompthreads.bzip2',
903 default=None,
912 default=None,
904 )
913 )
905 coreconfigitem(
914 coreconfigitem(
906 b'experimental',
915 b'experimental',
907 b'bundlecompthreads.gzip',
916 b'bundlecompthreads.gzip',
908 default=None,
917 default=None,
909 )
918 )
910 coreconfigitem(
919 coreconfigitem(
911 b'experimental',
920 b'experimental',
912 b'bundlecompthreads.none',
921 b'bundlecompthreads.none',
913 default=None,
922 default=None,
914 )
923 )
915 coreconfigitem(
924 coreconfigitem(
916 b'experimental',
925 b'experimental',
917 b'bundlecompthreads.zstd',
926 b'bundlecompthreads.zstd',
918 default=None,
927 default=None,
919 )
928 )
920 coreconfigitem(
929 coreconfigitem(
921 b'experimental',
930 b'experimental',
922 b'changegroup3',
931 b'changegroup3',
923 default=False,
932 default=False,
924 )
933 )
925 coreconfigitem(
934 coreconfigitem(
926 b'experimental',
935 b'experimental',
927 b'changegroup4',
936 b'changegroup4',
928 default=False,
937 default=False,
929 )
938 )
930 coreconfigitem(
939 coreconfigitem(
931 b'experimental',
940 b'experimental',
932 b'cleanup-as-archived',
941 b'cleanup-as-archived',
933 default=False,
942 default=False,
934 )
943 )
935 coreconfigitem(
944 coreconfigitem(
936 b'experimental',
945 b'experimental',
937 b'clientcompressionengines',
946 b'clientcompressionengines',
938 default=list,
947 default=list,
939 )
948 )
940 coreconfigitem(
949 coreconfigitem(
941 b'experimental',
950 b'experimental',
942 b'copytrace',
951 b'copytrace',
943 default=b'on',
952 default=b'on',
944 )
953 )
945 coreconfigitem(
954 coreconfigitem(
946 b'experimental',
955 b'experimental',
947 b'copytrace.movecandidateslimit',
956 b'copytrace.movecandidateslimit',
948 default=100,
957 default=100,
949 )
958 )
950 coreconfigitem(
959 coreconfigitem(
951 b'experimental',
960 b'experimental',
952 b'copytrace.sourcecommitlimit',
961 b'copytrace.sourcecommitlimit',
953 default=100,
962 default=100,
954 )
963 )
955 coreconfigitem(
964 coreconfigitem(
956 b'experimental',
965 b'experimental',
957 b'copies.read-from',
966 b'copies.read-from',
958 default=b"filelog-only",
967 default=b"filelog-only",
959 )
968 )
960 coreconfigitem(
969 coreconfigitem(
961 b'experimental',
970 b'experimental',
962 b'copies.write-to',
971 b'copies.write-to',
963 default=b'filelog-only',
972 default=b'filelog-only',
964 )
973 )
965 coreconfigitem(
974 coreconfigitem(
966 b'experimental',
975 b'experimental',
967 b'crecordtest',
976 b'crecordtest',
968 default=None,
977 default=None,
969 )
978 )
970 coreconfigitem(
979 coreconfigitem(
971 b'experimental',
980 b'experimental',
972 b'directaccess',
981 b'directaccess',
973 default=False,
982 default=False,
974 )
983 )
975 coreconfigitem(
984 coreconfigitem(
976 b'experimental',
985 b'experimental',
977 b'directaccess.revnums',
986 b'directaccess.revnums',
978 default=False,
987 default=False,
979 )
988 )
980 coreconfigitem(
989 coreconfigitem(
981 b'experimental',
990 b'experimental',
982 b'editortmpinhg',
991 b'editortmpinhg',
983 default=False,
992 default=False,
984 )
993 )
985 coreconfigitem(
994 coreconfigitem(
986 b'experimental',
995 b'experimental',
987 b'evolution',
996 b'evolution',
988 default=list,
997 default=list,
989 )
998 )
990 coreconfigitem(
999 coreconfigitem(
991 b'experimental',
1000 b'experimental',
992 b'evolution.allowdivergence',
1001 b'evolution.allowdivergence',
993 default=False,
1002 default=False,
994 alias=[(b'experimental', b'allowdivergence')],
1003 alias=[(b'experimental', b'allowdivergence')],
995 )
1004 )
996 coreconfigitem(
1005 coreconfigitem(
997 b'experimental',
1006 b'experimental',
998 b'evolution.allowunstable',
1007 b'evolution.allowunstable',
999 default=None,
1008 default=None,
1000 )
1009 )
1001 coreconfigitem(
1010 coreconfigitem(
1002 b'experimental',
1011 b'experimental',
1003 b'evolution.createmarkers',
1012 b'evolution.createmarkers',
1004 default=None,
1013 default=None,
1005 )
1014 )
1006 coreconfigitem(
1015 coreconfigitem(
1007 b'experimental',
1016 b'experimental',
1008 b'evolution.effect-flags',
1017 b'evolution.effect-flags',
1009 default=True,
1018 default=True,
1010 alias=[(b'experimental', b'effect-flags')],
1019 alias=[(b'experimental', b'effect-flags')],
1011 )
1020 )
1012 coreconfigitem(
1021 coreconfigitem(
1013 b'experimental',
1022 b'experimental',
1014 b'evolution.exchange',
1023 b'evolution.exchange',
1015 default=None,
1024 default=None,
1016 )
1025 )
1017 coreconfigitem(
1026 coreconfigitem(
1018 b'experimental',
1027 b'experimental',
1019 b'evolution.bundle-obsmarker',
1028 b'evolution.bundle-obsmarker',
1020 default=False,
1029 default=False,
1021 )
1030 )
1022 coreconfigitem(
1031 coreconfigitem(
1023 b'experimental',
1032 b'experimental',
1024 b'evolution.bundle-obsmarker:mandatory',
1033 b'evolution.bundle-obsmarker:mandatory',
1025 default=True,
1034 default=True,
1026 )
1035 )
1027 coreconfigitem(
1036 coreconfigitem(
1028 b'experimental',
1037 b'experimental',
1029 b'log.topo',
1038 b'log.topo',
1030 default=False,
1039 default=False,
1031 )
1040 )
1032 coreconfigitem(
1041 coreconfigitem(
1033 b'experimental',
1042 b'experimental',
1034 b'evolution.report-instabilities',
1043 b'evolution.report-instabilities',
1035 default=True,
1044 default=True,
1036 )
1045 )
1037 coreconfigitem(
1046 coreconfigitem(
1038 b'experimental',
1047 b'experimental',
1039 b'evolution.track-operation',
1048 b'evolution.track-operation',
1040 default=True,
1049 default=True,
1041 )
1050 )
1042 # repo-level config to exclude a revset visibility
1051 # repo-level config to exclude a revset visibility
1043 #
1052 #
1044 # The target use case is to use `share` to expose different subset of the same
1053 # The target use case is to use `share` to expose different subset of the same
1045 # repository, especially server side. See also `server.view`.
1054 # repository, especially server side. See also `server.view`.
1046 coreconfigitem(
1055 coreconfigitem(
1047 b'experimental',
1056 b'experimental',
1048 b'extra-filter-revs',
1057 b'extra-filter-revs',
1049 default=None,
1058 default=None,
1050 )
1059 )
1051 coreconfigitem(
1060 coreconfigitem(
1052 b'experimental',
1061 b'experimental',
1053 b'maxdeltachainspan',
1062 b'maxdeltachainspan',
1054 default=-1,
1063 default=-1,
1055 )
1064 )
1056 # tracks files which were undeleted (merge might delete them but we explicitly
1065 # tracks files which were undeleted (merge might delete them but we explicitly
1057 # kept/undeleted them) and creates new filenodes for them
1066 # kept/undeleted them) and creates new filenodes for them
1058 coreconfigitem(
1067 coreconfigitem(
1059 b'experimental',
1068 b'experimental',
1060 b'merge-track-salvaged',
1069 b'merge-track-salvaged',
1061 default=False,
1070 default=False,
1062 )
1071 )
1063 coreconfigitem(
1072 coreconfigitem(
1064 b'experimental',
1073 b'experimental',
1065 b'mmapindexthreshold',
1074 b'mmapindexthreshold',
1066 default=None,
1075 default=None,
1067 )
1076 )
1068 coreconfigitem(
1077 coreconfigitem(
1069 b'experimental',
1078 b'experimental',
1070 b'narrow',
1079 b'narrow',
1071 default=False,
1080 default=False,
1072 )
1081 )
1073 coreconfigitem(
1082 coreconfigitem(
1074 b'experimental',
1083 b'experimental',
1075 b'nonnormalparanoidcheck',
1084 b'nonnormalparanoidcheck',
1076 default=False,
1085 default=False,
1077 )
1086 )
1078 coreconfigitem(
1087 coreconfigitem(
1079 b'experimental',
1088 b'experimental',
1080 b'exportableenviron',
1089 b'exportableenviron',
1081 default=list,
1090 default=list,
1082 )
1091 )
1083 coreconfigitem(
1092 coreconfigitem(
1084 b'experimental',
1093 b'experimental',
1085 b'extendedheader.index',
1094 b'extendedheader.index',
1086 default=None,
1095 default=None,
1087 )
1096 )
1088 coreconfigitem(
1097 coreconfigitem(
1089 b'experimental',
1098 b'experimental',
1090 b'extendedheader.similarity',
1099 b'extendedheader.similarity',
1091 default=False,
1100 default=False,
1092 )
1101 )
1093 coreconfigitem(
1102 coreconfigitem(
1094 b'experimental',
1103 b'experimental',
1095 b'graphshorten',
1104 b'graphshorten',
1096 default=False,
1105 default=False,
1097 )
1106 )
1098 coreconfigitem(
1107 coreconfigitem(
1099 b'experimental',
1108 b'experimental',
1100 b'graphstyle.parent',
1109 b'graphstyle.parent',
1101 default=dynamicdefault,
1110 default=dynamicdefault,
1102 )
1111 )
1103 coreconfigitem(
1112 coreconfigitem(
1104 b'experimental',
1113 b'experimental',
1105 b'graphstyle.missing',
1114 b'graphstyle.missing',
1106 default=dynamicdefault,
1115 default=dynamicdefault,
1107 )
1116 )
1108 coreconfigitem(
1117 coreconfigitem(
1109 b'experimental',
1118 b'experimental',
1110 b'graphstyle.grandparent',
1119 b'graphstyle.grandparent',
1111 default=dynamicdefault,
1120 default=dynamicdefault,
1112 )
1121 )
1113 coreconfigitem(
1122 coreconfigitem(
1114 b'experimental',
1123 b'experimental',
1115 b'hook-track-tags',
1124 b'hook-track-tags',
1116 default=False,
1125 default=False,
1117 )
1126 )
1118 coreconfigitem(
1127 coreconfigitem(
1119 b'experimental',
1128 b'experimental',
1120 b'httppostargs',
1129 b'httppostargs',
1121 default=False,
1130 default=False,
1122 )
1131 )
1123 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1132 coreconfigitem(b'experimental', b'nointerrupt', default=False)
1124 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1133 coreconfigitem(b'experimental', b'nointerrupt-interactiveonly', default=True)
1125
1134
1126 coreconfigitem(
1135 coreconfigitem(
1127 b'experimental',
1136 b'experimental',
1128 b'obsmarkers-exchange-debug',
1137 b'obsmarkers-exchange-debug',
1129 default=False,
1138 default=False,
1130 )
1139 )
1131 coreconfigitem(
1140 coreconfigitem(
1132 b'experimental',
1141 b'experimental',
1133 b'remotenames',
1142 b'remotenames',
1134 default=False,
1143 default=False,
1135 )
1144 )
1136 coreconfigitem(
1145 coreconfigitem(
1137 b'experimental',
1146 b'experimental',
1138 b'removeemptydirs',
1147 b'removeemptydirs',
1139 default=True,
1148 default=True,
1140 )
1149 )
1141 coreconfigitem(
1150 coreconfigitem(
1142 b'experimental',
1151 b'experimental',
1143 b'revert.interactive.select-to-keep',
1152 b'revert.interactive.select-to-keep',
1144 default=False,
1153 default=False,
1145 )
1154 )
1146 coreconfigitem(
1155 coreconfigitem(
1147 b'experimental',
1156 b'experimental',
1148 b'revisions.prefixhexnode',
1157 b'revisions.prefixhexnode',
1149 default=False,
1158 default=False,
1150 )
1159 )
1151 # "out of experimental" todo list.
1160 # "out of experimental" todo list.
1152 #
1161 #
1153 # * include management of a persistent nodemap in the main docket
1162 # * include management of a persistent nodemap in the main docket
1154 # * enforce a "no-truncate" policy for mmap safety
1163 # * enforce a "no-truncate" policy for mmap safety
1155 # - for censoring operation
1164 # - for censoring operation
1156 # - for stripping operation
1165 # - for stripping operation
1157 # - for rollback operation
1166 # - for rollback operation
1158 # * proper streaming (race free) of the docket file
1167 # * proper streaming (race free) of the docket file
1159 # * track garbage data to evemtually allow rewriting -existing- sidedata.
1168 # * track garbage data to evemtually allow rewriting -existing- sidedata.
1160 # * Exchange-wise, we will also need to do something more efficient than
1169 # * Exchange-wise, we will also need to do something more efficient than
1161 # keeping references to the affected revlogs, especially memory-wise when
1170 # keeping references to the affected revlogs, especially memory-wise when
1162 # rewriting sidedata.
1171 # rewriting sidedata.
1163 # * introduce a proper solution to reduce the number of filelog related files.
1172 # * introduce a proper solution to reduce the number of filelog related files.
1164 # * use caching for reading sidedata (similar to what we do for data).
1173 # * use caching for reading sidedata (similar to what we do for data).
1165 # * no longer set offset=0 if sidedata_size=0 (simplify cutoff computation).
1174 # * no longer set offset=0 if sidedata_size=0 (simplify cutoff computation).
1166 # * Improvement to consider
1175 # * Improvement to consider
1167 # - avoid compression header in chunk using the default compression?
1176 # - avoid compression header in chunk using the default compression?
1168 # - forbid "inline" compression mode entirely?
1177 # - forbid "inline" compression mode entirely?
1169 # - split the data offset and flag field (the 2 bytes save are mostly trouble)
1178 # - split the data offset and flag field (the 2 bytes save are mostly trouble)
1170 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1179 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1171 # - keep track of chain base or size (probably not that useful anymore)
1180 # - keep track of chain base or size (probably not that useful anymore)
1172 coreconfigitem(
1181 coreconfigitem(
1173 b'experimental',
1182 b'experimental',
1174 b'revlogv2',
1183 b'revlogv2',
1175 default=None,
1184 default=None,
1176 )
1185 )
1177 coreconfigitem(
1186 coreconfigitem(
1178 b'experimental',
1187 b'experimental',
1179 b'revisions.disambiguatewithin',
1188 b'revisions.disambiguatewithin',
1180 default=None,
1189 default=None,
1181 )
1190 )
1182 coreconfigitem(
1191 coreconfigitem(
1183 b'experimental',
1192 b'experimental',
1184 b'rust.index',
1193 b'rust.index',
1185 default=False,
1194 default=False,
1186 )
1195 )
1187 coreconfigitem(
1196 coreconfigitem(
1188 b'experimental',
1197 b'experimental',
1189 b'server.filesdata.recommended-batch-size',
1198 b'server.filesdata.recommended-batch-size',
1190 default=50000,
1199 default=50000,
1191 )
1200 )
1192 coreconfigitem(
1201 coreconfigitem(
1193 b'experimental',
1202 b'experimental',
1194 b'server.manifestdata.recommended-batch-size',
1203 b'server.manifestdata.recommended-batch-size',
1195 default=100000,
1204 default=100000,
1196 )
1205 )
1197 coreconfigitem(
1206 coreconfigitem(
1198 b'experimental',
1207 b'experimental',
1199 b'server.stream-narrow-clones',
1208 b'server.stream-narrow-clones',
1200 default=False,
1209 default=False,
1201 )
1210 )
1202 coreconfigitem(
1211 coreconfigitem(
1203 b'experimental',
1212 b'experimental',
1204 b'single-head-per-branch',
1213 b'single-head-per-branch',
1205 default=False,
1214 default=False,
1206 )
1215 )
1207 coreconfigitem(
1216 coreconfigitem(
1208 b'experimental',
1217 b'experimental',
1209 b'single-head-per-branch:account-closed-heads',
1218 b'single-head-per-branch:account-closed-heads',
1210 default=False,
1219 default=False,
1211 )
1220 )
1212 coreconfigitem(
1221 coreconfigitem(
1213 b'experimental',
1222 b'experimental',
1214 b'single-head-per-branch:public-changes-only',
1223 b'single-head-per-branch:public-changes-only',
1215 default=False,
1224 default=False,
1216 )
1225 )
1217 coreconfigitem(
1226 coreconfigitem(
1218 b'experimental',
1227 b'experimental',
1219 b'sparse-read',
1228 b'sparse-read',
1220 default=False,
1229 default=False,
1221 )
1230 )
1222 coreconfigitem(
1231 coreconfigitem(
1223 b'experimental',
1232 b'experimental',
1224 b'sparse-read.density-threshold',
1233 b'sparse-read.density-threshold',
1225 default=0.50,
1234 default=0.50,
1226 )
1235 )
1227 coreconfigitem(
1236 coreconfigitem(
1228 b'experimental',
1237 b'experimental',
1229 b'sparse-read.min-gap-size',
1238 b'sparse-read.min-gap-size',
1230 default=b'65K',
1239 default=b'65K',
1231 )
1240 )
1232 coreconfigitem(
1241 coreconfigitem(
1233 b'experimental',
1242 b'experimental',
1234 b'treemanifest',
1243 b'treemanifest',
1235 default=False,
1244 default=False,
1236 )
1245 )
1237 coreconfigitem(
1246 coreconfigitem(
1238 b'experimental',
1247 b'experimental',
1239 b'update.atomic-file',
1248 b'update.atomic-file',
1240 default=False,
1249 default=False,
1241 )
1250 )
1242 coreconfigitem(
1251 coreconfigitem(
1243 b'experimental',
1252 b'experimental',
1244 b'web.full-garbage-collection-rate',
1253 b'web.full-garbage-collection-rate',
1245 default=1, # still forcing a full collection on each request
1254 default=1, # still forcing a full collection on each request
1246 )
1255 )
1247 coreconfigitem(
1256 coreconfigitem(
1248 b'experimental',
1257 b'experimental',
1249 b'worker.wdir-get-thread-safe',
1258 b'worker.wdir-get-thread-safe',
1250 default=False,
1259 default=False,
1251 )
1260 )
1252 coreconfigitem(
1261 coreconfigitem(
1253 b'experimental',
1262 b'experimental',
1254 b'worker.repository-upgrade',
1263 b'worker.repository-upgrade',
1255 default=False,
1264 default=False,
1256 )
1265 )
1257 coreconfigitem(
1266 coreconfigitem(
1258 b'experimental',
1267 b'experimental',
1259 b'xdiff',
1268 b'xdiff',
1260 default=False,
1269 default=False,
1261 )
1270 )
1262 coreconfigitem(
1271 coreconfigitem(
1263 b'extensions',
1272 b'extensions',
1264 b'[^:]*',
1273 b'[^:]*',
1265 default=None,
1274 default=None,
1266 generic=True,
1275 generic=True,
1267 )
1276 )
1268 coreconfigitem(
1277 coreconfigitem(
1269 b'extensions',
1278 b'extensions',
1270 b'[^:]*:required',
1279 b'[^:]*:required',
1271 default=False,
1280 default=False,
1272 generic=True,
1281 generic=True,
1273 )
1282 )
1274 coreconfigitem(
1283 coreconfigitem(
1275 b'extdata',
1284 b'extdata',
1276 b'.*',
1285 b'.*',
1277 default=None,
1286 default=None,
1278 generic=True,
1287 generic=True,
1279 )
1288 )
1280 coreconfigitem(
1289 coreconfigitem(
1281 b'format',
1290 b'format',
1282 b'bookmarks-in-store',
1291 b'bookmarks-in-store',
1283 default=False,
1292 default=False,
1284 )
1293 )
1285 coreconfigitem(
1294 coreconfigitem(
1286 b'format',
1295 b'format',
1287 b'chunkcachesize',
1296 b'chunkcachesize',
1288 default=None,
1297 default=None,
1289 experimental=True,
1298 experimental=True,
1290 )
1299 )
1291 coreconfigitem(
1300 coreconfigitem(
1292 # Enable this dirstate format *when creating a new repository*.
1301 # Enable this dirstate format *when creating a new repository*.
1293 # Which format to use for existing repos is controlled by .hg/requires
1302 # Which format to use for existing repos is controlled by .hg/requires
1294 b'format',
1303 b'format',
1295 b'use-dirstate-v2',
1304 b'use-dirstate-v2',
1296 default=False,
1305 default=False,
1297 experimental=True,
1306 experimental=True,
1298 alias=[(b'format', b'exp-rc-dirstate-v2')],
1307 alias=[(b'format', b'exp-rc-dirstate-v2')],
1299 )
1308 )
1300 coreconfigitem(
1309 coreconfigitem(
1301 b'format',
1310 b'format',
1302 b'use-dirstate-v2.automatic-upgrade-of-mismatching-repositories',
1311 b'use-dirstate-v2.automatic-upgrade-of-mismatching-repositories',
1303 default=False,
1312 default=False,
1304 experimental=True,
1313 experimental=True,
1305 )
1314 )
1306 coreconfigitem(
1315 coreconfigitem(
1307 b'format',
1316 b'format',
1308 b'use-dirstate-v2.automatic-upgrade-of-mismatching-repositories:quiet',
1317 b'use-dirstate-v2.automatic-upgrade-of-mismatching-repositories:quiet',
1309 default=False,
1318 default=False,
1310 experimental=True,
1319 experimental=True,
1311 )
1320 )
1312 coreconfigitem(
1321 coreconfigitem(
1313 b'format',
1322 b'format',
1314 b'use-dirstate-tracked-hint',
1323 b'use-dirstate-tracked-hint',
1315 default=False,
1324 default=False,
1316 experimental=True,
1325 experimental=True,
1317 )
1326 )
1318 coreconfigitem(
1327 coreconfigitem(
1319 b'format',
1328 b'format',
1320 b'use-dirstate-tracked-hint.version',
1329 b'use-dirstate-tracked-hint.version',
1321 default=1,
1330 default=1,
1322 experimental=True,
1331 experimental=True,
1323 )
1332 )
1324 coreconfigitem(
1333 coreconfigitem(
1325 b'format',
1334 b'format',
1326 b'use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories',
1335 b'use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories',
1327 default=False,
1336 default=False,
1328 experimental=True,
1337 experimental=True,
1329 )
1338 )
1330 coreconfigitem(
1339 coreconfigitem(
1331 b'format',
1340 b'format',
1332 b'use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories:quiet',
1341 b'use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories:quiet',
1333 default=False,
1342 default=False,
1334 experimental=True,
1343 experimental=True,
1335 )
1344 )
1336 coreconfigitem(
1345 coreconfigitem(
1337 b'format',
1346 b'format',
1338 b'dotencode',
1347 b'dotencode',
1339 default=True,
1348 default=True,
1340 )
1349 )
1341 coreconfigitem(
1350 coreconfigitem(
1342 b'format',
1351 b'format',
1343 b'generaldelta',
1352 b'generaldelta',
1344 default=False,
1353 default=False,
1345 experimental=True,
1354 experimental=True,
1346 )
1355 )
1347 coreconfigitem(
1356 coreconfigitem(
1348 b'format',
1357 b'format',
1349 b'manifestcachesize',
1358 b'manifestcachesize',
1350 default=None,
1359 default=None,
1351 experimental=True,
1360 experimental=True,
1352 )
1361 )
1353 coreconfigitem(
1362 coreconfigitem(
1354 b'format',
1363 b'format',
1355 b'maxchainlen',
1364 b'maxchainlen',
1356 default=dynamicdefault,
1365 default=dynamicdefault,
1357 experimental=True,
1366 experimental=True,
1358 )
1367 )
1359 coreconfigitem(
1368 coreconfigitem(
1360 b'format',
1369 b'format',
1361 b'obsstore-version',
1370 b'obsstore-version',
1362 default=None,
1371 default=None,
1363 )
1372 )
1364 coreconfigitem(
1373 coreconfigitem(
1365 b'format',
1374 b'format',
1366 b'sparse-revlog',
1375 b'sparse-revlog',
1367 default=True,
1376 default=True,
1368 )
1377 )
1369 coreconfigitem(
1378 coreconfigitem(
1370 b'format',
1379 b'format',
1371 b'revlog-compression',
1380 b'revlog-compression',
1372 default=lambda: [b'zstd', b'zlib'],
1381 default=lambda: [b'zstd', b'zlib'],
1373 alias=[(b'experimental', b'format.compression')],
1382 alias=[(b'experimental', b'format.compression')],
1374 )
1383 )
1375 # Experimental TODOs:
1384 # Experimental TODOs:
1376 #
1385 #
1377 # * Same as for revlogv2 (but for the reduction of the number of files)
1386 # * Same as for revlogv2 (but for the reduction of the number of files)
1378 # * Actually computing the rank of changesets
1387 # * Actually computing the rank of changesets
1379 # * Improvement to investigate
1388 # * Improvement to investigate
1380 # - storing .hgtags fnode
1389 # - storing .hgtags fnode
1381 # - storing branch related identifier
1390 # - storing branch related identifier
1382
1391
1383 coreconfigitem(
1392 coreconfigitem(
1384 b'format',
1393 b'format',
1385 b'exp-use-changelog-v2',
1394 b'exp-use-changelog-v2',
1386 default=None,
1395 default=None,
1387 experimental=True,
1396 experimental=True,
1388 )
1397 )
1389 coreconfigitem(
1398 coreconfigitem(
1390 b'format',
1399 b'format',
1391 b'usefncache',
1400 b'usefncache',
1392 default=True,
1401 default=True,
1393 )
1402 )
1394 coreconfigitem(
1403 coreconfigitem(
1395 b'format',
1404 b'format',
1396 b'usegeneraldelta',
1405 b'usegeneraldelta',
1397 default=True,
1406 default=True,
1398 )
1407 )
1399 coreconfigitem(
1408 coreconfigitem(
1400 b'format',
1409 b'format',
1401 b'usestore',
1410 b'usestore',
1402 default=True,
1411 default=True,
1403 )
1412 )
1404
1413
1405
1414
1406 def _persistent_nodemap_default():
1415 def _persistent_nodemap_default():
1407 """compute `use-persistent-nodemap` default value
1416 """compute `use-persistent-nodemap` default value
1408
1417
1409 The feature is disabled unless a fast implementation is available.
1418 The feature is disabled unless a fast implementation is available.
1410 """
1419 """
1411 from . import policy
1420 from . import policy
1412
1421
1413 return policy.importrust('revlog') is not None
1422 return policy.importrust('revlog') is not None
1414
1423
1415
1424
1416 coreconfigitem(
1425 coreconfigitem(
1417 b'format',
1426 b'format',
1418 b'use-persistent-nodemap',
1427 b'use-persistent-nodemap',
1419 default=_persistent_nodemap_default,
1428 default=_persistent_nodemap_default,
1420 )
1429 )
1421 coreconfigitem(
1430 coreconfigitem(
1422 b'format',
1431 b'format',
1423 b'exp-use-copies-side-data-changeset',
1432 b'exp-use-copies-side-data-changeset',
1424 default=False,
1433 default=False,
1425 experimental=True,
1434 experimental=True,
1426 )
1435 )
1427 coreconfigitem(
1436 coreconfigitem(
1428 b'format',
1437 b'format',
1429 b'use-share-safe',
1438 b'use-share-safe',
1430 default=True,
1439 default=True,
1431 )
1440 )
1432 coreconfigitem(
1441 coreconfigitem(
1433 b'format',
1442 b'format',
1434 b'use-share-safe.automatic-upgrade-of-mismatching-repositories',
1443 b'use-share-safe.automatic-upgrade-of-mismatching-repositories',
1435 default=False,
1444 default=False,
1436 experimental=True,
1445 experimental=True,
1437 )
1446 )
1438 coreconfigitem(
1447 coreconfigitem(
1439 b'format',
1448 b'format',
1440 b'use-share-safe.automatic-upgrade-of-mismatching-repositories:quiet',
1449 b'use-share-safe.automatic-upgrade-of-mismatching-repositories:quiet',
1441 default=False,
1450 default=False,
1442 experimental=True,
1451 experimental=True,
1443 )
1452 )
1444
1453
1445 # Moving this on by default means we are confident about the scaling of phases.
1454 # Moving this on by default means we are confident about the scaling of phases.
1446 # This is not garanteed to be the case at the time this message is written.
1455 # This is not garanteed to be the case at the time this message is written.
1447 coreconfigitem(
1456 coreconfigitem(
1448 b'format',
1457 b'format',
1449 b'use-internal-phase',
1458 b'use-internal-phase',
1450 default=False,
1459 default=False,
1451 experimental=True,
1460 experimental=True,
1452 )
1461 )
1453 # The interaction between the archived phase and obsolescence markers needs to
1462 # The interaction between the archived phase and obsolescence markers needs to
1454 # be sorted out before wider usage of this are to be considered.
1463 # be sorted out before wider usage of this are to be considered.
1455 #
1464 #
1456 # At the time this message is written, behavior when archiving obsolete
1465 # At the time this message is written, behavior when archiving obsolete
1457 # changeset differ significantly from stripping. As part of stripping, we also
1466 # changeset differ significantly from stripping. As part of stripping, we also
1458 # remove the obsolescence marker associated to the stripped changesets,
1467 # remove the obsolescence marker associated to the stripped changesets,
1459 # revealing the precedecessors changesets when applicable. When archiving, we
1468 # revealing the precedecessors changesets when applicable. When archiving, we
1460 # don't touch the obsolescence markers, keeping everything hidden. This can
1469 # don't touch the obsolescence markers, keeping everything hidden. This can
1461 # result in quite confusing situation for people combining exchanging draft
1470 # result in quite confusing situation for people combining exchanging draft
1462 # with the archived phases. As some markers needed by others may be skipped
1471 # with the archived phases. As some markers needed by others may be skipped
1463 # during exchange.
1472 # during exchange.
1464 coreconfigitem(
1473 coreconfigitem(
1465 b'format',
1474 b'format',
1466 b'exp-archived-phase',
1475 b'exp-archived-phase',
1467 default=False,
1476 default=False,
1468 experimental=True,
1477 experimental=True,
1469 )
1478 )
1470 coreconfigitem(
1479 coreconfigitem(
1471 b'shelve',
1480 b'shelve',
1472 b'store',
1481 b'store',
1473 default=b'internal',
1482 default=b'internal',
1474 experimental=True,
1483 experimental=True,
1475 )
1484 )
1476 coreconfigitem(
1485 coreconfigitem(
1477 b'fsmonitor',
1486 b'fsmonitor',
1478 b'warn_when_unused',
1487 b'warn_when_unused',
1479 default=True,
1488 default=True,
1480 )
1489 )
1481 coreconfigitem(
1490 coreconfigitem(
1482 b'fsmonitor',
1491 b'fsmonitor',
1483 b'warn_update_file_count',
1492 b'warn_update_file_count',
1484 default=50000,
1493 default=50000,
1485 )
1494 )
1486 coreconfigitem(
1495 coreconfigitem(
1487 b'fsmonitor',
1496 b'fsmonitor',
1488 b'warn_update_file_count_rust',
1497 b'warn_update_file_count_rust',
1489 default=400000,
1498 default=400000,
1490 )
1499 )
1491 coreconfigitem(
1500 coreconfigitem(
1492 b'help',
1501 b'help',
1493 br'hidden-command\..*',
1502 br'hidden-command\..*',
1494 default=False,
1503 default=False,
1495 generic=True,
1504 generic=True,
1496 )
1505 )
1497 coreconfigitem(
1506 coreconfigitem(
1498 b'help',
1507 b'help',
1499 br'hidden-topic\..*',
1508 br'hidden-topic\..*',
1500 default=False,
1509 default=False,
1501 generic=True,
1510 generic=True,
1502 )
1511 )
1503 coreconfigitem(
1512 coreconfigitem(
1504 b'hooks',
1513 b'hooks',
1505 b'[^:]*',
1514 b'[^:]*',
1506 default=dynamicdefault,
1515 default=dynamicdefault,
1507 generic=True,
1516 generic=True,
1508 )
1517 )
1509 coreconfigitem(
1518 coreconfigitem(
1510 b'hooks',
1519 b'hooks',
1511 b'.*:run-with-plain',
1520 b'.*:run-with-plain',
1512 default=True,
1521 default=True,
1513 generic=True,
1522 generic=True,
1514 )
1523 )
1515 coreconfigitem(
1524 coreconfigitem(
1516 b'hgweb-paths',
1525 b'hgweb-paths',
1517 b'.*',
1526 b'.*',
1518 default=list,
1527 default=list,
1519 generic=True,
1528 generic=True,
1520 )
1529 )
1521 coreconfigitem(
1530 coreconfigitem(
1522 b'hostfingerprints',
1531 b'hostfingerprints',
1523 b'.*',
1532 b'.*',
1524 default=list,
1533 default=list,
1525 generic=True,
1534 generic=True,
1526 )
1535 )
1527 coreconfigitem(
1536 coreconfigitem(
1528 b'hostsecurity',
1537 b'hostsecurity',
1529 b'ciphers',
1538 b'ciphers',
1530 default=None,
1539 default=None,
1531 )
1540 )
1532 coreconfigitem(
1541 coreconfigitem(
1533 b'hostsecurity',
1542 b'hostsecurity',
1534 b'minimumprotocol',
1543 b'minimumprotocol',
1535 default=dynamicdefault,
1544 default=dynamicdefault,
1536 )
1545 )
1537 coreconfigitem(
1546 coreconfigitem(
1538 b'hostsecurity',
1547 b'hostsecurity',
1539 b'.*:minimumprotocol$',
1548 b'.*:minimumprotocol$',
1540 default=dynamicdefault,
1549 default=dynamicdefault,
1541 generic=True,
1550 generic=True,
1542 )
1551 )
1543 coreconfigitem(
1552 coreconfigitem(
1544 b'hostsecurity',
1553 b'hostsecurity',
1545 b'.*:ciphers$',
1554 b'.*:ciphers$',
1546 default=dynamicdefault,
1555 default=dynamicdefault,
1547 generic=True,
1556 generic=True,
1548 )
1557 )
1549 coreconfigitem(
1558 coreconfigitem(
1550 b'hostsecurity',
1559 b'hostsecurity',
1551 b'.*:fingerprints$',
1560 b'.*:fingerprints$',
1552 default=list,
1561 default=list,
1553 generic=True,
1562 generic=True,
1554 )
1563 )
1555 coreconfigitem(
1564 coreconfigitem(
1556 b'hostsecurity',
1565 b'hostsecurity',
1557 b'.*:verifycertsfile$',
1566 b'.*:verifycertsfile$',
1558 default=None,
1567 default=None,
1559 generic=True,
1568 generic=True,
1560 )
1569 )
1561
1570
1562 coreconfigitem(
1571 coreconfigitem(
1563 b'http_proxy',
1572 b'http_proxy',
1564 b'always',
1573 b'always',
1565 default=False,
1574 default=False,
1566 )
1575 )
1567 coreconfigitem(
1576 coreconfigitem(
1568 b'http_proxy',
1577 b'http_proxy',
1569 b'host',
1578 b'host',
1570 default=None,
1579 default=None,
1571 )
1580 )
1572 coreconfigitem(
1581 coreconfigitem(
1573 b'http_proxy',
1582 b'http_proxy',
1574 b'no',
1583 b'no',
1575 default=list,
1584 default=list,
1576 )
1585 )
1577 coreconfigitem(
1586 coreconfigitem(
1578 b'http_proxy',
1587 b'http_proxy',
1579 b'passwd',
1588 b'passwd',
1580 default=None,
1589 default=None,
1581 )
1590 )
1582 coreconfigitem(
1591 coreconfigitem(
1583 b'http_proxy',
1592 b'http_proxy',
1584 b'user',
1593 b'user',
1585 default=None,
1594 default=None,
1586 )
1595 )
1587
1596
1588 coreconfigitem(
1597 coreconfigitem(
1589 b'http',
1598 b'http',
1590 b'timeout',
1599 b'timeout',
1591 default=None,
1600 default=None,
1592 )
1601 )
1593
1602
1594 coreconfigitem(
1603 coreconfigitem(
1595 b'logtoprocess',
1604 b'logtoprocess',
1596 b'commandexception',
1605 b'commandexception',
1597 default=None,
1606 default=None,
1598 )
1607 )
1599 coreconfigitem(
1608 coreconfigitem(
1600 b'logtoprocess',
1609 b'logtoprocess',
1601 b'commandfinish',
1610 b'commandfinish',
1602 default=None,
1611 default=None,
1603 )
1612 )
1604 coreconfigitem(
1613 coreconfigitem(
1605 b'logtoprocess',
1614 b'logtoprocess',
1606 b'command',
1615 b'command',
1607 default=None,
1616 default=None,
1608 )
1617 )
1609 coreconfigitem(
1618 coreconfigitem(
1610 b'logtoprocess',
1619 b'logtoprocess',
1611 b'develwarn',
1620 b'develwarn',
1612 default=None,
1621 default=None,
1613 )
1622 )
1614 coreconfigitem(
1623 coreconfigitem(
1615 b'logtoprocess',
1624 b'logtoprocess',
1616 b'uiblocked',
1625 b'uiblocked',
1617 default=None,
1626 default=None,
1618 )
1627 )
1619 coreconfigitem(
1628 coreconfigitem(
1620 b'merge',
1629 b'merge',
1621 b'checkunknown',
1630 b'checkunknown',
1622 default=b'abort',
1631 default=b'abort',
1623 )
1632 )
1624 coreconfigitem(
1633 coreconfigitem(
1625 b'merge',
1634 b'merge',
1626 b'checkignored',
1635 b'checkignored',
1627 default=b'abort',
1636 default=b'abort',
1628 )
1637 )
1629 coreconfigitem(
1638 coreconfigitem(
1630 b'experimental',
1639 b'experimental',
1631 b'merge.checkpathconflicts',
1640 b'merge.checkpathconflicts',
1632 default=False,
1641 default=False,
1633 )
1642 )
1634 coreconfigitem(
1643 coreconfigitem(
1635 b'merge',
1644 b'merge',
1636 b'followcopies',
1645 b'followcopies',
1637 default=True,
1646 default=True,
1638 )
1647 )
1639 coreconfigitem(
1648 coreconfigitem(
1640 b'merge',
1649 b'merge',
1641 b'on-failure',
1650 b'on-failure',
1642 default=b'continue',
1651 default=b'continue',
1643 )
1652 )
1644 coreconfigitem(
1653 coreconfigitem(
1645 b'merge',
1654 b'merge',
1646 b'preferancestor',
1655 b'preferancestor',
1647 default=lambda: [b'*'],
1656 default=lambda: [b'*'],
1648 experimental=True,
1657 experimental=True,
1649 )
1658 )
1650 coreconfigitem(
1659 coreconfigitem(
1651 b'merge',
1660 b'merge',
1652 b'strict-capability-check',
1661 b'strict-capability-check',
1653 default=False,
1662 default=False,
1654 )
1663 )
1655 coreconfigitem(
1664 coreconfigitem(
1656 b'merge',
1665 b'merge',
1657 b'disable-partial-tools',
1666 b'disable-partial-tools',
1658 default=False,
1667 default=False,
1659 experimental=True,
1668 experimental=True,
1660 )
1669 )
1661 coreconfigitem(
1670 coreconfigitem(
1662 b'partial-merge-tools',
1671 b'partial-merge-tools',
1663 b'.*',
1672 b'.*',
1664 default=None,
1673 default=None,
1665 generic=True,
1674 generic=True,
1666 experimental=True,
1675 experimental=True,
1667 )
1676 )
1668 coreconfigitem(
1677 coreconfigitem(
1669 b'partial-merge-tools',
1678 b'partial-merge-tools',
1670 br'.*\.patterns',
1679 br'.*\.patterns',
1671 default=dynamicdefault,
1680 default=dynamicdefault,
1672 generic=True,
1681 generic=True,
1673 priority=-1,
1682 priority=-1,
1674 experimental=True,
1683 experimental=True,
1675 )
1684 )
1676 coreconfigitem(
1685 coreconfigitem(
1677 b'partial-merge-tools',
1686 b'partial-merge-tools',
1678 br'.*\.executable$',
1687 br'.*\.executable$',
1679 default=dynamicdefault,
1688 default=dynamicdefault,
1680 generic=True,
1689 generic=True,
1681 priority=-1,
1690 priority=-1,
1682 experimental=True,
1691 experimental=True,
1683 )
1692 )
1684 coreconfigitem(
1693 coreconfigitem(
1685 b'partial-merge-tools',
1694 b'partial-merge-tools',
1686 br'.*\.order',
1695 br'.*\.order',
1687 default=0,
1696 default=0,
1688 generic=True,
1697 generic=True,
1689 priority=-1,
1698 priority=-1,
1690 experimental=True,
1699 experimental=True,
1691 )
1700 )
1692 coreconfigitem(
1701 coreconfigitem(
1693 b'partial-merge-tools',
1702 b'partial-merge-tools',
1694 br'.*\.args',
1703 br'.*\.args',
1695 default=b"$local $base $other",
1704 default=b"$local $base $other",
1696 generic=True,
1705 generic=True,
1697 priority=-1,
1706 priority=-1,
1698 experimental=True,
1707 experimental=True,
1699 )
1708 )
1700 coreconfigitem(
1709 coreconfigitem(
1701 b'partial-merge-tools',
1710 b'partial-merge-tools',
1702 br'.*\.disable',
1711 br'.*\.disable',
1703 default=False,
1712 default=False,
1704 generic=True,
1713 generic=True,
1705 priority=-1,
1714 priority=-1,
1706 experimental=True,
1715 experimental=True,
1707 )
1716 )
1708 coreconfigitem(
1717 coreconfigitem(
1709 b'merge-tools',
1718 b'merge-tools',
1710 b'.*',
1719 b'.*',
1711 default=None,
1720 default=None,
1712 generic=True,
1721 generic=True,
1713 )
1722 )
1714 coreconfigitem(
1723 coreconfigitem(
1715 b'merge-tools',
1724 b'merge-tools',
1716 br'.*\.args$',
1725 br'.*\.args$',
1717 default=b"$local $base $other",
1726 default=b"$local $base $other",
1718 generic=True,
1727 generic=True,
1719 priority=-1,
1728 priority=-1,
1720 )
1729 )
1721 coreconfigitem(
1730 coreconfigitem(
1722 b'merge-tools',
1731 b'merge-tools',
1723 br'.*\.binary$',
1732 br'.*\.binary$',
1724 default=False,
1733 default=False,
1725 generic=True,
1734 generic=True,
1726 priority=-1,
1735 priority=-1,
1727 )
1736 )
1728 coreconfigitem(
1737 coreconfigitem(
1729 b'merge-tools',
1738 b'merge-tools',
1730 br'.*\.check$',
1739 br'.*\.check$',
1731 default=list,
1740 default=list,
1732 generic=True,
1741 generic=True,
1733 priority=-1,
1742 priority=-1,
1734 )
1743 )
1735 coreconfigitem(
1744 coreconfigitem(
1736 b'merge-tools',
1745 b'merge-tools',
1737 br'.*\.checkchanged$',
1746 br'.*\.checkchanged$',
1738 default=False,
1747 default=False,
1739 generic=True,
1748 generic=True,
1740 priority=-1,
1749 priority=-1,
1741 )
1750 )
1742 coreconfigitem(
1751 coreconfigitem(
1743 b'merge-tools',
1752 b'merge-tools',
1744 br'.*\.executable$',
1753 br'.*\.executable$',
1745 default=dynamicdefault,
1754 default=dynamicdefault,
1746 generic=True,
1755 generic=True,
1747 priority=-1,
1756 priority=-1,
1748 )
1757 )
1749 coreconfigitem(
1758 coreconfigitem(
1750 b'merge-tools',
1759 b'merge-tools',
1751 br'.*\.fixeol$',
1760 br'.*\.fixeol$',
1752 default=False,
1761 default=False,
1753 generic=True,
1762 generic=True,
1754 priority=-1,
1763 priority=-1,
1755 )
1764 )
1756 coreconfigitem(
1765 coreconfigitem(
1757 b'merge-tools',
1766 b'merge-tools',
1758 br'.*\.gui$',
1767 br'.*\.gui$',
1759 default=False,
1768 default=False,
1760 generic=True,
1769 generic=True,
1761 priority=-1,
1770 priority=-1,
1762 )
1771 )
1763 coreconfigitem(
1772 coreconfigitem(
1764 b'merge-tools',
1773 b'merge-tools',
1765 br'.*\.mergemarkers$',
1774 br'.*\.mergemarkers$',
1766 default=b'basic',
1775 default=b'basic',
1767 generic=True,
1776 generic=True,
1768 priority=-1,
1777 priority=-1,
1769 )
1778 )
1770 coreconfigitem(
1779 coreconfigitem(
1771 b'merge-tools',
1780 b'merge-tools',
1772 br'.*\.mergemarkertemplate$',
1781 br'.*\.mergemarkertemplate$',
1773 default=dynamicdefault, # take from command-templates.mergemarker
1782 default=dynamicdefault, # take from command-templates.mergemarker
1774 generic=True,
1783 generic=True,
1775 priority=-1,
1784 priority=-1,
1776 )
1785 )
1777 coreconfigitem(
1786 coreconfigitem(
1778 b'merge-tools',
1787 b'merge-tools',
1779 br'.*\.priority$',
1788 br'.*\.priority$',
1780 default=0,
1789 default=0,
1781 generic=True,
1790 generic=True,
1782 priority=-1,
1791 priority=-1,
1783 )
1792 )
1784 coreconfigitem(
1793 coreconfigitem(
1785 b'merge-tools',
1794 b'merge-tools',
1786 br'.*\.premerge$',
1795 br'.*\.premerge$',
1787 default=dynamicdefault,
1796 default=dynamicdefault,
1788 generic=True,
1797 generic=True,
1789 priority=-1,
1798 priority=-1,
1790 )
1799 )
1791 coreconfigitem(
1800 coreconfigitem(
1792 b'merge-tools',
1801 b'merge-tools',
1793 br'.*\.symlink$',
1802 br'.*\.symlink$',
1794 default=False,
1803 default=False,
1795 generic=True,
1804 generic=True,
1796 priority=-1,
1805 priority=-1,
1797 )
1806 )
1798 coreconfigitem(
1807 coreconfigitem(
1799 b'pager',
1808 b'pager',
1800 b'attend-.*',
1809 b'attend-.*',
1801 default=dynamicdefault,
1810 default=dynamicdefault,
1802 generic=True,
1811 generic=True,
1803 )
1812 )
1804 coreconfigitem(
1813 coreconfigitem(
1805 b'pager',
1814 b'pager',
1806 b'ignore',
1815 b'ignore',
1807 default=list,
1816 default=list,
1808 )
1817 )
1809 coreconfigitem(
1818 coreconfigitem(
1810 b'pager',
1819 b'pager',
1811 b'pager',
1820 b'pager',
1812 default=dynamicdefault,
1821 default=dynamicdefault,
1813 )
1822 )
1814 coreconfigitem(
1823 coreconfigitem(
1815 b'patch',
1824 b'patch',
1816 b'eol',
1825 b'eol',
1817 default=b'strict',
1826 default=b'strict',
1818 )
1827 )
1819 coreconfigitem(
1828 coreconfigitem(
1820 b'patch',
1829 b'patch',
1821 b'fuzz',
1830 b'fuzz',
1822 default=2,
1831 default=2,
1823 )
1832 )
1824 coreconfigitem(
1833 coreconfigitem(
1825 b'paths',
1834 b'paths',
1826 b'default',
1835 b'default',
1827 default=None,
1836 default=None,
1828 )
1837 )
1829 coreconfigitem(
1838 coreconfigitem(
1830 b'paths',
1839 b'paths',
1831 b'default-push',
1840 b'default-push',
1832 default=None,
1841 default=None,
1833 )
1842 )
1834 coreconfigitem(
1843 coreconfigitem(
1835 b'paths',
1844 b'paths',
1836 b'.*',
1845 b'.*',
1837 default=None,
1846 default=None,
1838 generic=True,
1847 generic=True,
1839 )
1848 )
1840 coreconfigitem(
1849 coreconfigitem(
1841 b'paths',
1850 b'paths',
1842 b'.*:bookmarks.mode',
1851 b'.*:bookmarks.mode',
1843 default='default',
1852 default='default',
1844 generic=True,
1853 generic=True,
1845 )
1854 )
1846 coreconfigitem(
1855 coreconfigitem(
1847 b'paths',
1856 b'paths',
1848 b'.*:multi-urls',
1857 b'.*:multi-urls',
1849 default=False,
1858 default=False,
1850 generic=True,
1859 generic=True,
1851 )
1860 )
1852 coreconfigitem(
1861 coreconfigitem(
1853 b'paths',
1862 b'paths',
1854 b'.*:pushrev',
1863 b'.*:pushrev',
1855 default=None,
1864 default=None,
1856 generic=True,
1865 generic=True,
1857 )
1866 )
1858 coreconfigitem(
1867 coreconfigitem(
1859 b'paths',
1868 b'paths',
1860 b'.*:pushurl',
1869 b'.*:pushurl',
1861 default=None,
1870 default=None,
1862 generic=True,
1871 generic=True,
1863 )
1872 )
1864 coreconfigitem(
1873 coreconfigitem(
1865 b'phases',
1874 b'phases',
1866 b'checksubrepos',
1875 b'checksubrepos',
1867 default=b'follow',
1876 default=b'follow',
1868 )
1877 )
1869 coreconfigitem(
1878 coreconfigitem(
1870 b'phases',
1879 b'phases',
1871 b'new-commit',
1880 b'new-commit',
1872 default=b'draft',
1881 default=b'draft',
1873 )
1882 )
1874 coreconfigitem(
1883 coreconfigitem(
1875 b'phases',
1884 b'phases',
1876 b'publish',
1885 b'publish',
1877 default=True,
1886 default=True,
1878 )
1887 )
1879 coreconfigitem(
1888 coreconfigitem(
1880 b'profiling',
1889 b'profiling',
1881 b'enabled',
1890 b'enabled',
1882 default=False,
1891 default=False,
1883 )
1892 )
1884 coreconfigitem(
1893 coreconfigitem(
1885 b'profiling',
1894 b'profiling',
1886 b'format',
1895 b'format',
1887 default=b'text',
1896 default=b'text',
1888 )
1897 )
1889 coreconfigitem(
1898 coreconfigitem(
1890 b'profiling',
1899 b'profiling',
1891 b'freq',
1900 b'freq',
1892 default=1000,
1901 default=1000,
1893 )
1902 )
1894 coreconfigitem(
1903 coreconfigitem(
1895 b'profiling',
1904 b'profiling',
1896 b'limit',
1905 b'limit',
1897 default=30,
1906 default=30,
1898 )
1907 )
1899 coreconfigitem(
1908 coreconfigitem(
1900 b'profiling',
1909 b'profiling',
1901 b'nested',
1910 b'nested',
1902 default=0,
1911 default=0,
1903 )
1912 )
1904 coreconfigitem(
1913 coreconfigitem(
1905 b'profiling',
1914 b'profiling',
1906 b'output',
1915 b'output',
1907 default=None,
1916 default=None,
1908 )
1917 )
1909 coreconfigitem(
1918 coreconfigitem(
1910 b'profiling',
1919 b'profiling',
1911 b'showmax',
1920 b'showmax',
1912 default=0.999,
1921 default=0.999,
1913 )
1922 )
1914 coreconfigitem(
1923 coreconfigitem(
1915 b'profiling',
1924 b'profiling',
1916 b'showmin',
1925 b'showmin',
1917 default=dynamicdefault,
1926 default=dynamicdefault,
1918 )
1927 )
1919 coreconfigitem(
1928 coreconfigitem(
1920 b'profiling',
1929 b'profiling',
1921 b'showtime',
1930 b'showtime',
1922 default=True,
1931 default=True,
1923 )
1932 )
1924 coreconfigitem(
1933 coreconfigitem(
1925 b'profiling',
1934 b'profiling',
1926 b'sort',
1935 b'sort',
1927 default=b'inlinetime',
1936 default=b'inlinetime',
1928 )
1937 )
1929 coreconfigitem(
1938 coreconfigitem(
1930 b'profiling',
1939 b'profiling',
1931 b'statformat',
1940 b'statformat',
1932 default=b'hotpath',
1941 default=b'hotpath',
1933 )
1942 )
1934 coreconfigitem(
1943 coreconfigitem(
1935 b'profiling',
1944 b'profiling',
1936 b'time-track',
1945 b'time-track',
1937 default=dynamicdefault,
1946 default=dynamicdefault,
1938 )
1947 )
1939 coreconfigitem(
1948 coreconfigitem(
1940 b'profiling',
1949 b'profiling',
1941 b'type',
1950 b'type',
1942 default=b'stat',
1951 default=b'stat',
1943 )
1952 )
1944 coreconfigitem(
1953 coreconfigitem(
1945 b'progress',
1954 b'progress',
1946 b'assume-tty',
1955 b'assume-tty',
1947 default=False,
1956 default=False,
1948 )
1957 )
1949 coreconfigitem(
1958 coreconfigitem(
1950 b'progress',
1959 b'progress',
1951 b'changedelay',
1960 b'changedelay',
1952 default=1,
1961 default=1,
1953 )
1962 )
1954 coreconfigitem(
1963 coreconfigitem(
1955 b'progress',
1964 b'progress',
1956 b'clear-complete',
1965 b'clear-complete',
1957 default=True,
1966 default=True,
1958 )
1967 )
1959 coreconfigitem(
1968 coreconfigitem(
1960 b'progress',
1969 b'progress',
1961 b'debug',
1970 b'debug',
1962 default=False,
1971 default=False,
1963 )
1972 )
1964 coreconfigitem(
1973 coreconfigitem(
1965 b'progress',
1974 b'progress',
1966 b'delay',
1975 b'delay',
1967 default=3,
1976 default=3,
1968 )
1977 )
1969 coreconfigitem(
1978 coreconfigitem(
1970 b'progress',
1979 b'progress',
1971 b'disable',
1980 b'disable',
1972 default=False,
1981 default=False,
1973 )
1982 )
1974 coreconfigitem(
1983 coreconfigitem(
1975 b'progress',
1984 b'progress',
1976 b'estimateinterval',
1985 b'estimateinterval',
1977 default=60.0,
1986 default=60.0,
1978 )
1987 )
1979 coreconfigitem(
1988 coreconfigitem(
1980 b'progress',
1989 b'progress',
1981 b'format',
1990 b'format',
1982 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1991 default=lambda: [b'topic', b'bar', b'number', b'estimate'],
1983 )
1992 )
1984 coreconfigitem(
1993 coreconfigitem(
1985 b'progress',
1994 b'progress',
1986 b'refresh',
1995 b'refresh',
1987 default=0.1,
1996 default=0.1,
1988 )
1997 )
1989 coreconfigitem(
1998 coreconfigitem(
1990 b'progress',
1999 b'progress',
1991 b'width',
2000 b'width',
1992 default=dynamicdefault,
2001 default=dynamicdefault,
1993 )
2002 )
1994 coreconfigitem(
2003 coreconfigitem(
1995 b'pull',
2004 b'pull',
1996 b'confirm',
2005 b'confirm',
1997 default=False,
2006 default=False,
1998 )
2007 )
1999 coreconfigitem(
2008 coreconfigitem(
2000 b'push',
2009 b'push',
2001 b'pushvars.server',
2010 b'pushvars.server',
2002 default=False,
2011 default=False,
2003 )
2012 )
2004 coreconfigitem(
2013 coreconfigitem(
2005 b'rewrite',
2014 b'rewrite',
2006 b'backup-bundle',
2015 b'backup-bundle',
2007 default=True,
2016 default=True,
2008 alias=[(b'ui', b'history-editing-backup')],
2017 alias=[(b'ui', b'history-editing-backup')],
2009 )
2018 )
2010 coreconfigitem(
2019 coreconfigitem(
2011 b'rewrite',
2020 b'rewrite',
2012 b'update-timestamp',
2021 b'update-timestamp',
2013 default=False,
2022 default=False,
2014 )
2023 )
2015 coreconfigitem(
2024 coreconfigitem(
2016 b'rewrite',
2025 b'rewrite',
2017 b'empty-successor',
2026 b'empty-successor',
2018 default=b'skip',
2027 default=b'skip',
2019 experimental=True,
2028 experimental=True,
2020 )
2029 )
2021 # experimental as long as format.use-dirstate-v2 is.
2030 # experimental as long as format.use-dirstate-v2 is.
2022 coreconfigitem(
2031 coreconfigitem(
2023 b'storage',
2032 b'storage',
2024 b'dirstate-v2.slow-path',
2033 b'dirstate-v2.slow-path',
2025 default=b"abort",
2034 default=b"abort",
2026 experimental=True,
2035 experimental=True,
2027 )
2036 )
2028 coreconfigitem(
2037 coreconfigitem(
2029 b'storage',
2038 b'storage',
2030 b'new-repo-backend',
2039 b'new-repo-backend',
2031 default=b'revlogv1',
2040 default=b'revlogv1',
2032 experimental=True,
2041 experimental=True,
2033 )
2042 )
2034 coreconfigitem(
2043 coreconfigitem(
2035 b'storage',
2044 b'storage',
2036 b'revlog.optimize-delta-parent-choice',
2045 b'revlog.optimize-delta-parent-choice',
2037 default=True,
2046 default=True,
2038 alias=[(b'format', b'aggressivemergedeltas')],
2047 alias=[(b'format', b'aggressivemergedeltas')],
2039 )
2048 )
2040 coreconfigitem(
2049 coreconfigitem(
2041 b'storage',
2050 b'storage',
2042 b'revlog.issue6528.fix-incoming',
2051 b'revlog.issue6528.fix-incoming',
2043 default=True,
2052 default=True,
2044 )
2053 )
2045 # experimental as long as rust is experimental (or a C version is implemented)
2054 # experimental as long as rust is experimental (or a C version is implemented)
2046 coreconfigitem(
2055 coreconfigitem(
2047 b'storage',
2056 b'storage',
2048 b'revlog.persistent-nodemap.mmap',
2057 b'revlog.persistent-nodemap.mmap',
2049 default=True,
2058 default=True,
2050 )
2059 )
2051 # experimental as long as format.use-persistent-nodemap is.
2060 # experimental as long as format.use-persistent-nodemap is.
2052 coreconfigitem(
2061 coreconfigitem(
2053 b'storage',
2062 b'storage',
2054 b'revlog.persistent-nodemap.slow-path',
2063 b'revlog.persistent-nodemap.slow-path',
2055 default=b"abort",
2064 default=b"abort",
2056 )
2065 )
2057
2066
2058 coreconfigitem(
2067 coreconfigitem(
2059 b'storage',
2068 b'storage',
2060 b'revlog.reuse-external-delta',
2069 b'revlog.reuse-external-delta',
2061 default=True,
2070 default=True,
2062 )
2071 )
2063 coreconfigitem(
2072 coreconfigitem(
2064 b'storage',
2073 b'storage',
2065 b'revlog.reuse-external-delta-parent',
2074 b'revlog.reuse-external-delta-parent',
2066 default=None,
2075 default=None,
2067 )
2076 )
2068 coreconfigitem(
2077 coreconfigitem(
2069 b'storage',
2078 b'storage',
2070 b'revlog.zlib.level',
2079 b'revlog.zlib.level',
2071 default=None,
2080 default=None,
2072 )
2081 )
2073 coreconfigitem(
2082 coreconfigitem(
2074 b'storage',
2083 b'storage',
2075 b'revlog.zstd.level',
2084 b'revlog.zstd.level',
2076 default=None,
2085 default=None,
2077 )
2086 )
2078 coreconfigitem(
2087 coreconfigitem(
2079 b'server',
2088 b'server',
2080 b'bookmarks-pushkey-compat',
2089 b'bookmarks-pushkey-compat',
2081 default=True,
2090 default=True,
2082 )
2091 )
2083 coreconfigitem(
2092 coreconfigitem(
2084 b'server',
2093 b'server',
2085 b'bundle1',
2094 b'bundle1',
2086 default=True,
2095 default=True,
2087 )
2096 )
2088 coreconfigitem(
2097 coreconfigitem(
2089 b'server',
2098 b'server',
2090 b'bundle1gd',
2099 b'bundle1gd',
2091 default=None,
2100 default=None,
2092 )
2101 )
2093 coreconfigitem(
2102 coreconfigitem(
2094 b'server',
2103 b'server',
2095 b'bundle1.pull',
2104 b'bundle1.pull',
2096 default=None,
2105 default=None,
2097 )
2106 )
2098 coreconfigitem(
2107 coreconfigitem(
2099 b'server',
2108 b'server',
2100 b'bundle1gd.pull',
2109 b'bundle1gd.pull',
2101 default=None,
2110 default=None,
2102 )
2111 )
2103 coreconfigitem(
2112 coreconfigitem(
2104 b'server',
2113 b'server',
2105 b'bundle1.push',
2114 b'bundle1.push',
2106 default=None,
2115 default=None,
2107 )
2116 )
2108 coreconfigitem(
2117 coreconfigitem(
2109 b'server',
2118 b'server',
2110 b'bundle1gd.push',
2119 b'bundle1gd.push',
2111 default=None,
2120 default=None,
2112 )
2121 )
2113 coreconfigitem(
2122 coreconfigitem(
2114 b'server',
2123 b'server',
2115 b'bundle2.stream',
2124 b'bundle2.stream',
2116 default=True,
2125 default=True,
2117 alias=[(b'experimental', b'bundle2.stream')],
2126 alias=[(b'experimental', b'bundle2.stream')],
2118 )
2127 )
2119 coreconfigitem(
2128 coreconfigitem(
2120 b'server',
2129 b'server',
2121 b'compressionengines',
2130 b'compressionengines',
2122 default=list,
2131 default=list,
2123 )
2132 )
2124 coreconfigitem(
2133 coreconfigitem(
2125 b'server',
2134 b'server',
2126 b'concurrent-push-mode',
2135 b'concurrent-push-mode',
2127 default=b'check-related',
2136 default=b'check-related',
2128 )
2137 )
2129 coreconfigitem(
2138 coreconfigitem(
2130 b'server',
2139 b'server',
2131 b'disablefullbundle',
2140 b'disablefullbundle',
2132 default=False,
2141 default=False,
2133 )
2142 )
2134 coreconfigitem(
2143 coreconfigitem(
2135 b'server',
2144 b'server',
2136 b'maxhttpheaderlen',
2145 b'maxhttpheaderlen',
2137 default=1024,
2146 default=1024,
2138 )
2147 )
2139 coreconfigitem(
2148 coreconfigitem(
2140 b'server',
2149 b'server',
2141 b'pullbundle',
2150 b'pullbundle',
2142 default=False,
2151 default=False,
2143 )
2152 )
2144 coreconfigitem(
2153 coreconfigitem(
2145 b'server',
2154 b'server',
2146 b'preferuncompressed',
2155 b'preferuncompressed',
2147 default=False,
2156 default=False,
2148 )
2157 )
2149 coreconfigitem(
2158 coreconfigitem(
2150 b'server',
2159 b'server',
2151 b'streamunbundle',
2160 b'streamunbundle',
2152 default=False,
2161 default=False,
2153 )
2162 )
2154 coreconfigitem(
2163 coreconfigitem(
2155 b'server',
2164 b'server',
2156 b'uncompressed',
2165 b'uncompressed',
2157 default=True,
2166 default=True,
2158 )
2167 )
2159 coreconfigitem(
2168 coreconfigitem(
2160 b'server',
2169 b'server',
2161 b'uncompressedallowsecret',
2170 b'uncompressedallowsecret',
2162 default=False,
2171 default=False,
2163 )
2172 )
2164 coreconfigitem(
2173 coreconfigitem(
2165 b'server',
2174 b'server',
2166 b'view',
2175 b'view',
2167 default=b'served',
2176 default=b'served',
2168 )
2177 )
2169 coreconfigitem(
2178 coreconfigitem(
2170 b'server',
2179 b'server',
2171 b'validate',
2180 b'validate',
2172 default=False,
2181 default=False,
2173 )
2182 )
2174 coreconfigitem(
2183 coreconfigitem(
2175 b'server',
2184 b'server',
2176 b'zliblevel',
2185 b'zliblevel',
2177 default=-1,
2186 default=-1,
2178 )
2187 )
2179 coreconfigitem(
2188 coreconfigitem(
2180 b'server',
2189 b'server',
2181 b'zstdlevel',
2190 b'zstdlevel',
2182 default=3,
2191 default=3,
2183 )
2192 )
2184 coreconfigitem(
2193 coreconfigitem(
2185 b'share',
2194 b'share',
2186 b'pool',
2195 b'pool',
2187 default=None,
2196 default=None,
2188 )
2197 )
2189 coreconfigitem(
2198 coreconfigitem(
2190 b'share',
2199 b'share',
2191 b'poolnaming',
2200 b'poolnaming',
2192 default=b'identity',
2201 default=b'identity',
2193 )
2202 )
2194 coreconfigitem(
2203 coreconfigitem(
2195 b'share',
2204 b'share',
2196 b'safe-mismatch.source-not-safe',
2205 b'safe-mismatch.source-not-safe',
2197 default=b'abort',
2206 default=b'abort',
2198 )
2207 )
2199 coreconfigitem(
2208 coreconfigitem(
2200 b'share',
2209 b'share',
2201 b'safe-mismatch.source-safe',
2210 b'safe-mismatch.source-safe',
2202 default=b'abort',
2211 default=b'abort',
2203 )
2212 )
2204 coreconfigitem(
2213 coreconfigitem(
2205 b'share',
2214 b'share',
2206 b'safe-mismatch.source-not-safe.warn',
2215 b'safe-mismatch.source-not-safe.warn',
2207 default=True,
2216 default=True,
2208 )
2217 )
2209 coreconfigitem(
2218 coreconfigitem(
2210 b'share',
2219 b'share',
2211 b'safe-mismatch.source-safe.warn',
2220 b'safe-mismatch.source-safe.warn',
2212 default=True,
2221 default=True,
2213 )
2222 )
2214 coreconfigitem(
2223 coreconfigitem(
2215 b'share',
2224 b'share',
2216 b'safe-mismatch.source-not-safe:verbose-upgrade',
2225 b'safe-mismatch.source-not-safe:verbose-upgrade',
2217 default=True,
2226 default=True,
2218 )
2227 )
2219 coreconfigitem(
2228 coreconfigitem(
2220 b'share',
2229 b'share',
2221 b'safe-mismatch.source-safe:verbose-upgrade',
2230 b'safe-mismatch.source-safe:verbose-upgrade',
2222 default=True,
2231 default=True,
2223 )
2232 )
2224 coreconfigitem(
2233 coreconfigitem(
2225 b'shelve',
2234 b'shelve',
2226 b'maxbackups',
2235 b'maxbackups',
2227 default=10,
2236 default=10,
2228 )
2237 )
2229 coreconfigitem(
2238 coreconfigitem(
2230 b'smtp',
2239 b'smtp',
2231 b'host',
2240 b'host',
2232 default=None,
2241 default=None,
2233 )
2242 )
2234 coreconfigitem(
2243 coreconfigitem(
2235 b'smtp',
2244 b'smtp',
2236 b'local_hostname',
2245 b'local_hostname',
2237 default=None,
2246 default=None,
2238 )
2247 )
2239 coreconfigitem(
2248 coreconfigitem(
2240 b'smtp',
2249 b'smtp',
2241 b'password',
2250 b'password',
2242 default=None,
2251 default=None,
2243 )
2252 )
2244 coreconfigitem(
2253 coreconfigitem(
2245 b'smtp',
2254 b'smtp',
2246 b'port',
2255 b'port',
2247 default=dynamicdefault,
2256 default=dynamicdefault,
2248 )
2257 )
2249 coreconfigitem(
2258 coreconfigitem(
2250 b'smtp',
2259 b'smtp',
2251 b'tls',
2260 b'tls',
2252 default=b'none',
2261 default=b'none',
2253 )
2262 )
2254 coreconfigitem(
2263 coreconfigitem(
2255 b'smtp',
2264 b'smtp',
2256 b'username',
2265 b'username',
2257 default=None,
2266 default=None,
2258 )
2267 )
2259 coreconfigitem(
2268 coreconfigitem(
2260 b'sparse',
2269 b'sparse',
2261 b'missingwarning',
2270 b'missingwarning',
2262 default=True,
2271 default=True,
2263 experimental=True,
2272 experimental=True,
2264 )
2273 )
2265 coreconfigitem(
2274 coreconfigitem(
2266 b'subrepos',
2275 b'subrepos',
2267 b'allowed',
2276 b'allowed',
2268 default=dynamicdefault, # to make backporting simpler
2277 default=dynamicdefault, # to make backporting simpler
2269 )
2278 )
2270 coreconfigitem(
2279 coreconfigitem(
2271 b'subrepos',
2280 b'subrepos',
2272 b'hg:allowed',
2281 b'hg:allowed',
2273 default=dynamicdefault,
2282 default=dynamicdefault,
2274 )
2283 )
2275 coreconfigitem(
2284 coreconfigitem(
2276 b'subrepos',
2285 b'subrepos',
2277 b'git:allowed',
2286 b'git:allowed',
2278 default=dynamicdefault,
2287 default=dynamicdefault,
2279 )
2288 )
2280 coreconfigitem(
2289 coreconfigitem(
2281 b'subrepos',
2290 b'subrepos',
2282 b'svn:allowed',
2291 b'svn:allowed',
2283 default=dynamicdefault,
2292 default=dynamicdefault,
2284 )
2293 )
2285 coreconfigitem(
2294 coreconfigitem(
2286 b'templates',
2295 b'templates',
2287 b'.*',
2296 b'.*',
2288 default=None,
2297 default=None,
2289 generic=True,
2298 generic=True,
2290 )
2299 )
2291 coreconfigitem(
2300 coreconfigitem(
2292 b'templateconfig',
2301 b'templateconfig',
2293 b'.*',
2302 b'.*',
2294 default=dynamicdefault,
2303 default=dynamicdefault,
2295 generic=True,
2304 generic=True,
2296 )
2305 )
2297 coreconfigitem(
2306 coreconfigitem(
2298 b'trusted',
2307 b'trusted',
2299 b'groups',
2308 b'groups',
2300 default=list,
2309 default=list,
2301 )
2310 )
2302 coreconfigitem(
2311 coreconfigitem(
2303 b'trusted',
2312 b'trusted',
2304 b'users',
2313 b'users',
2305 default=list,
2314 default=list,
2306 )
2315 )
2307 coreconfigitem(
2316 coreconfigitem(
2308 b'ui',
2317 b'ui',
2309 b'_usedassubrepo',
2318 b'_usedassubrepo',
2310 default=False,
2319 default=False,
2311 )
2320 )
2312 coreconfigitem(
2321 coreconfigitem(
2313 b'ui',
2322 b'ui',
2314 b'allowemptycommit',
2323 b'allowemptycommit',
2315 default=False,
2324 default=False,
2316 )
2325 )
2317 coreconfigitem(
2326 coreconfigitem(
2318 b'ui',
2327 b'ui',
2319 b'archivemeta',
2328 b'archivemeta',
2320 default=True,
2329 default=True,
2321 )
2330 )
2322 coreconfigitem(
2331 coreconfigitem(
2323 b'ui',
2332 b'ui',
2324 b'askusername',
2333 b'askusername',
2325 default=False,
2334 default=False,
2326 )
2335 )
2327 coreconfigitem(
2336 coreconfigitem(
2328 b'ui',
2337 b'ui',
2329 b'available-memory',
2338 b'available-memory',
2330 default=None,
2339 default=None,
2331 )
2340 )
2332
2341
2333 coreconfigitem(
2342 coreconfigitem(
2334 b'ui',
2343 b'ui',
2335 b'clonebundlefallback',
2344 b'clonebundlefallback',
2336 default=False,
2345 default=False,
2337 )
2346 )
2338 coreconfigitem(
2347 coreconfigitem(
2339 b'ui',
2348 b'ui',
2340 b'clonebundleprefers',
2349 b'clonebundleprefers',
2341 default=list,
2350 default=list,
2342 )
2351 )
2343 coreconfigitem(
2352 coreconfigitem(
2344 b'ui',
2353 b'ui',
2345 b'clonebundles',
2354 b'clonebundles',
2346 default=True,
2355 default=True,
2347 )
2356 )
2348 coreconfigitem(
2357 coreconfigitem(
2349 b'ui',
2358 b'ui',
2350 b'color',
2359 b'color',
2351 default=b'auto',
2360 default=b'auto',
2352 )
2361 )
2353 coreconfigitem(
2362 coreconfigitem(
2354 b'ui',
2363 b'ui',
2355 b'commitsubrepos',
2364 b'commitsubrepos',
2356 default=False,
2365 default=False,
2357 )
2366 )
2358 coreconfigitem(
2367 coreconfigitem(
2359 b'ui',
2368 b'ui',
2360 b'debug',
2369 b'debug',
2361 default=False,
2370 default=False,
2362 )
2371 )
2363 coreconfigitem(
2372 coreconfigitem(
2364 b'ui',
2373 b'ui',
2365 b'debugger',
2374 b'debugger',
2366 default=None,
2375 default=None,
2367 )
2376 )
2368 coreconfigitem(
2377 coreconfigitem(
2369 b'ui',
2378 b'ui',
2370 b'editor',
2379 b'editor',
2371 default=dynamicdefault,
2380 default=dynamicdefault,
2372 )
2381 )
2373 coreconfigitem(
2382 coreconfigitem(
2374 b'ui',
2383 b'ui',
2375 b'detailed-exit-code',
2384 b'detailed-exit-code',
2376 default=False,
2385 default=False,
2377 experimental=True,
2386 experimental=True,
2378 )
2387 )
2379 coreconfigitem(
2388 coreconfigitem(
2380 b'ui',
2389 b'ui',
2381 b'fallbackencoding',
2390 b'fallbackencoding',
2382 default=None,
2391 default=None,
2383 )
2392 )
2384 coreconfigitem(
2393 coreconfigitem(
2385 b'ui',
2394 b'ui',
2386 b'forcecwd',
2395 b'forcecwd',
2387 default=None,
2396 default=None,
2388 )
2397 )
2389 coreconfigitem(
2398 coreconfigitem(
2390 b'ui',
2399 b'ui',
2391 b'forcemerge',
2400 b'forcemerge',
2392 default=None,
2401 default=None,
2393 )
2402 )
2394 coreconfigitem(
2403 coreconfigitem(
2395 b'ui',
2404 b'ui',
2396 b'formatdebug',
2405 b'formatdebug',
2397 default=False,
2406 default=False,
2398 )
2407 )
2399 coreconfigitem(
2408 coreconfigitem(
2400 b'ui',
2409 b'ui',
2401 b'formatjson',
2410 b'formatjson',
2402 default=False,
2411 default=False,
2403 )
2412 )
2404 coreconfigitem(
2413 coreconfigitem(
2405 b'ui',
2414 b'ui',
2406 b'formatted',
2415 b'formatted',
2407 default=None,
2416 default=None,
2408 )
2417 )
2409 coreconfigitem(
2418 coreconfigitem(
2410 b'ui',
2419 b'ui',
2411 b'interactive',
2420 b'interactive',
2412 default=None,
2421 default=None,
2413 )
2422 )
2414 coreconfigitem(
2423 coreconfigitem(
2415 b'ui',
2424 b'ui',
2416 b'interface',
2425 b'interface',
2417 default=None,
2426 default=None,
2418 )
2427 )
2419 coreconfigitem(
2428 coreconfigitem(
2420 b'ui',
2429 b'ui',
2421 b'interface.chunkselector',
2430 b'interface.chunkselector',
2422 default=None,
2431 default=None,
2423 )
2432 )
2424 coreconfigitem(
2433 coreconfigitem(
2425 b'ui',
2434 b'ui',
2426 b'large-file-limit',
2435 b'large-file-limit',
2427 default=10 * (2 ** 20),
2436 default=10 * (2 ** 20),
2428 )
2437 )
2429 coreconfigitem(
2438 coreconfigitem(
2430 b'ui',
2439 b'ui',
2431 b'logblockedtimes',
2440 b'logblockedtimes',
2432 default=False,
2441 default=False,
2433 )
2442 )
2434 coreconfigitem(
2443 coreconfigitem(
2435 b'ui',
2444 b'ui',
2436 b'merge',
2445 b'merge',
2437 default=None,
2446 default=None,
2438 )
2447 )
2439 coreconfigitem(
2448 coreconfigitem(
2440 b'ui',
2449 b'ui',
2441 b'mergemarkers',
2450 b'mergemarkers',
2442 default=b'basic',
2451 default=b'basic',
2443 )
2452 )
2444 coreconfigitem(
2453 coreconfigitem(
2445 b'ui',
2454 b'ui',
2446 b'message-output',
2455 b'message-output',
2447 default=b'stdio',
2456 default=b'stdio',
2448 )
2457 )
2449 coreconfigitem(
2458 coreconfigitem(
2450 b'ui',
2459 b'ui',
2451 b'nontty',
2460 b'nontty',
2452 default=False,
2461 default=False,
2453 )
2462 )
2454 coreconfigitem(
2463 coreconfigitem(
2455 b'ui',
2464 b'ui',
2456 b'origbackuppath',
2465 b'origbackuppath',
2457 default=None,
2466 default=None,
2458 )
2467 )
2459 coreconfigitem(
2468 coreconfigitem(
2460 b'ui',
2469 b'ui',
2461 b'paginate',
2470 b'paginate',
2462 default=True,
2471 default=True,
2463 )
2472 )
2464 coreconfigitem(
2473 coreconfigitem(
2465 b'ui',
2474 b'ui',
2466 b'patch',
2475 b'patch',
2467 default=None,
2476 default=None,
2468 )
2477 )
2469 coreconfigitem(
2478 coreconfigitem(
2470 b'ui',
2479 b'ui',
2471 b'portablefilenames',
2480 b'portablefilenames',
2472 default=b'warn',
2481 default=b'warn',
2473 )
2482 )
2474 coreconfigitem(
2483 coreconfigitem(
2475 b'ui',
2484 b'ui',
2476 b'promptecho',
2485 b'promptecho',
2477 default=False,
2486 default=False,
2478 )
2487 )
2479 coreconfigitem(
2488 coreconfigitem(
2480 b'ui',
2489 b'ui',
2481 b'quiet',
2490 b'quiet',
2482 default=False,
2491 default=False,
2483 )
2492 )
2484 coreconfigitem(
2493 coreconfigitem(
2485 b'ui',
2494 b'ui',
2486 b'quietbookmarkmove',
2495 b'quietbookmarkmove',
2487 default=False,
2496 default=False,
2488 )
2497 )
2489 coreconfigitem(
2498 coreconfigitem(
2490 b'ui',
2499 b'ui',
2491 b'relative-paths',
2500 b'relative-paths',
2492 default=b'legacy',
2501 default=b'legacy',
2493 )
2502 )
2494 coreconfigitem(
2503 coreconfigitem(
2495 b'ui',
2504 b'ui',
2496 b'remotecmd',
2505 b'remotecmd',
2497 default=b'hg',
2506 default=b'hg',
2498 )
2507 )
2499 coreconfigitem(
2508 coreconfigitem(
2500 b'ui',
2509 b'ui',
2501 b'report_untrusted',
2510 b'report_untrusted',
2502 default=True,
2511 default=True,
2503 )
2512 )
2504 coreconfigitem(
2513 coreconfigitem(
2505 b'ui',
2514 b'ui',
2506 b'rollback',
2515 b'rollback',
2507 default=True,
2516 default=True,
2508 )
2517 )
2509 coreconfigitem(
2518 coreconfigitem(
2510 b'ui',
2519 b'ui',
2511 b'signal-safe-lock',
2520 b'signal-safe-lock',
2512 default=True,
2521 default=True,
2513 )
2522 )
2514 coreconfigitem(
2523 coreconfigitem(
2515 b'ui',
2524 b'ui',
2516 b'slash',
2525 b'slash',
2517 default=False,
2526 default=False,
2518 )
2527 )
2519 coreconfigitem(
2528 coreconfigitem(
2520 b'ui',
2529 b'ui',
2521 b'ssh',
2530 b'ssh',
2522 default=b'ssh',
2531 default=b'ssh',
2523 )
2532 )
2524 coreconfigitem(
2533 coreconfigitem(
2525 b'ui',
2534 b'ui',
2526 b'ssherrorhint',
2535 b'ssherrorhint',
2527 default=None,
2536 default=None,
2528 )
2537 )
2529 coreconfigitem(
2538 coreconfigitem(
2530 b'ui',
2539 b'ui',
2531 b'statuscopies',
2540 b'statuscopies',
2532 default=False,
2541 default=False,
2533 )
2542 )
2534 coreconfigitem(
2543 coreconfigitem(
2535 b'ui',
2544 b'ui',
2536 b'strict',
2545 b'strict',
2537 default=False,
2546 default=False,
2538 )
2547 )
2539 coreconfigitem(
2548 coreconfigitem(
2540 b'ui',
2549 b'ui',
2541 b'style',
2550 b'style',
2542 default=b'',
2551 default=b'',
2543 )
2552 )
2544 coreconfigitem(
2553 coreconfigitem(
2545 b'ui',
2554 b'ui',
2546 b'supportcontact',
2555 b'supportcontact',
2547 default=None,
2556 default=None,
2548 )
2557 )
2549 coreconfigitem(
2558 coreconfigitem(
2550 b'ui',
2559 b'ui',
2551 b'textwidth',
2560 b'textwidth',
2552 default=78,
2561 default=78,
2553 )
2562 )
2554 coreconfigitem(
2563 coreconfigitem(
2555 b'ui',
2564 b'ui',
2556 b'timeout',
2565 b'timeout',
2557 default=b'600',
2566 default=b'600',
2558 )
2567 )
2559 coreconfigitem(
2568 coreconfigitem(
2560 b'ui',
2569 b'ui',
2561 b'timeout.warn',
2570 b'timeout.warn',
2562 default=0,
2571 default=0,
2563 )
2572 )
2564 coreconfigitem(
2573 coreconfigitem(
2565 b'ui',
2574 b'ui',
2566 b'timestamp-output',
2575 b'timestamp-output',
2567 default=False,
2576 default=False,
2568 )
2577 )
2569 coreconfigitem(
2578 coreconfigitem(
2570 b'ui',
2579 b'ui',
2571 b'traceback',
2580 b'traceback',
2572 default=False,
2581 default=False,
2573 )
2582 )
2574 coreconfigitem(
2583 coreconfigitem(
2575 b'ui',
2584 b'ui',
2576 b'tweakdefaults',
2585 b'tweakdefaults',
2577 default=False,
2586 default=False,
2578 )
2587 )
2579 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2588 coreconfigitem(b'ui', b'username', alias=[(b'ui', b'user')])
2580 coreconfigitem(
2589 coreconfigitem(
2581 b'ui',
2590 b'ui',
2582 b'verbose',
2591 b'verbose',
2583 default=False,
2592 default=False,
2584 )
2593 )
2585 coreconfigitem(
2594 coreconfigitem(
2586 b'verify',
2595 b'verify',
2587 b'skipflags',
2596 b'skipflags',
2588 default=0,
2597 default=0,
2589 )
2598 )
2590 coreconfigitem(
2599 coreconfigitem(
2591 b'web',
2600 b'web',
2592 b'allowbz2',
2601 b'allowbz2',
2593 default=False,
2602 default=False,
2594 )
2603 )
2595 coreconfigitem(
2604 coreconfigitem(
2596 b'web',
2605 b'web',
2597 b'allowgz',
2606 b'allowgz',
2598 default=False,
2607 default=False,
2599 )
2608 )
2600 coreconfigitem(
2609 coreconfigitem(
2601 b'web',
2610 b'web',
2602 b'allow-pull',
2611 b'allow-pull',
2603 alias=[(b'web', b'allowpull')],
2612 alias=[(b'web', b'allowpull')],
2604 default=True,
2613 default=True,
2605 )
2614 )
2606 coreconfigitem(
2615 coreconfigitem(
2607 b'web',
2616 b'web',
2608 b'allow-push',
2617 b'allow-push',
2609 alias=[(b'web', b'allow_push')],
2618 alias=[(b'web', b'allow_push')],
2610 default=list,
2619 default=list,
2611 )
2620 )
2612 coreconfigitem(
2621 coreconfigitem(
2613 b'web',
2622 b'web',
2614 b'allowzip',
2623 b'allowzip',
2615 default=False,
2624 default=False,
2616 )
2625 )
2617 coreconfigitem(
2626 coreconfigitem(
2618 b'web',
2627 b'web',
2619 b'archivesubrepos',
2628 b'archivesubrepos',
2620 default=False,
2629 default=False,
2621 )
2630 )
2622 coreconfigitem(
2631 coreconfigitem(
2623 b'web',
2632 b'web',
2624 b'cache',
2633 b'cache',
2625 default=True,
2634 default=True,
2626 )
2635 )
2627 coreconfigitem(
2636 coreconfigitem(
2628 b'web',
2637 b'web',
2629 b'comparisoncontext',
2638 b'comparisoncontext',
2630 default=5,
2639 default=5,
2631 )
2640 )
2632 coreconfigitem(
2641 coreconfigitem(
2633 b'web',
2642 b'web',
2634 b'contact',
2643 b'contact',
2635 default=None,
2644 default=None,
2636 )
2645 )
2637 coreconfigitem(
2646 coreconfigitem(
2638 b'web',
2647 b'web',
2639 b'deny_push',
2648 b'deny_push',
2640 default=list,
2649 default=list,
2641 )
2650 )
2642 coreconfigitem(
2651 coreconfigitem(
2643 b'web',
2652 b'web',
2644 b'guessmime',
2653 b'guessmime',
2645 default=False,
2654 default=False,
2646 )
2655 )
2647 coreconfigitem(
2656 coreconfigitem(
2648 b'web',
2657 b'web',
2649 b'hidden',
2658 b'hidden',
2650 default=False,
2659 default=False,
2651 )
2660 )
2652 coreconfigitem(
2661 coreconfigitem(
2653 b'web',
2662 b'web',
2654 b'labels',
2663 b'labels',
2655 default=list,
2664 default=list,
2656 )
2665 )
2657 coreconfigitem(
2666 coreconfigitem(
2658 b'web',
2667 b'web',
2659 b'logoimg',
2668 b'logoimg',
2660 default=b'hglogo.png',
2669 default=b'hglogo.png',
2661 )
2670 )
2662 coreconfigitem(
2671 coreconfigitem(
2663 b'web',
2672 b'web',
2664 b'logourl',
2673 b'logourl',
2665 default=b'https://mercurial-scm.org/',
2674 default=b'https://mercurial-scm.org/',
2666 )
2675 )
2667 coreconfigitem(
2676 coreconfigitem(
2668 b'web',
2677 b'web',
2669 b'accesslog',
2678 b'accesslog',
2670 default=b'-',
2679 default=b'-',
2671 )
2680 )
2672 coreconfigitem(
2681 coreconfigitem(
2673 b'web',
2682 b'web',
2674 b'address',
2683 b'address',
2675 default=b'',
2684 default=b'',
2676 )
2685 )
2677 coreconfigitem(
2686 coreconfigitem(
2678 b'web',
2687 b'web',
2679 b'allow-archive',
2688 b'allow-archive',
2680 alias=[(b'web', b'allow_archive')],
2689 alias=[(b'web', b'allow_archive')],
2681 default=list,
2690 default=list,
2682 )
2691 )
2683 coreconfigitem(
2692 coreconfigitem(
2684 b'web',
2693 b'web',
2685 b'allow_read',
2694 b'allow_read',
2686 default=list,
2695 default=list,
2687 )
2696 )
2688 coreconfigitem(
2697 coreconfigitem(
2689 b'web',
2698 b'web',
2690 b'baseurl',
2699 b'baseurl',
2691 default=None,
2700 default=None,
2692 )
2701 )
2693 coreconfigitem(
2702 coreconfigitem(
2694 b'web',
2703 b'web',
2695 b'cacerts',
2704 b'cacerts',
2696 default=None,
2705 default=None,
2697 )
2706 )
2698 coreconfigitem(
2707 coreconfigitem(
2699 b'web',
2708 b'web',
2700 b'certificate',
2709 b'certificate',
2701 default=None,
2710 default=None,
2702 )
2711 )
2703 coreconfigitem(
2712 coreconfigitem(
2704 b'web',
2713 b'web',
2705 b'collapse',
2714 b'collapse',
2706 default=False,
2715 default=False,
2707 )
2716 )
2708 coreconfigitem(
2717 coreconfigitem(
2709 b'web',
2718 b'web',
2710 b'csp',
2719 b'csp',
2711 default=None,
2720 default=None,
2712 )
2721 )
2713 coreconfigitem(
2722 coreconfigitem(
2714 b'web',
2723 b'web',
2715 b'deny_read',
2724 b'deny_read',
2716 default=list,
2725 default=list,
2717 )
2726 )
2718 coreconfigitem(
2727 coreconfigitem(
2719 b'web',
2728 b'web',
2720 b'descend',
2729 b'descend',
2721 default=True,
2730 default=True,
2722 )
2731 )
2723 coreconfigitem(
2732 coreconfigitem(
2724 b'web',
2733 b'web',
2725 b'description',
2734 b'description',
2726 default=b"",
2735 default=b"",
2727 )
2736 )
2728 coreconfigitem(
2737 coreconfigitem(
2729 b'web',
2738 b'web',
2730 b'encoding',
2739 b'encoding',
2731 default=lambda: encoding.encoding,
2740 default=lambda: encoding.encoding,
2732 )
2741 )
2733 coreconfigitem(
2742 coreconfigitem(
2734 b'web',
2743 b'web',
2735 b'errorlog',
2744 b'errorlog',
2736 default=b'-',
2745 default=b'-',
2737 )
2746 )
2738 coreconfigitem(
2747 coreconfigitem(
2739 b'web',
2748 b'web',
2740 b'ipv6',
2749 b'ipv6',
2741 default=False,
2750 default=False,
2742 )
2751 )
2743 coreconfigitem(
2752 coreconfigitem(
2744 b'web',
2753 b'web',
2745 b'maxchanges',
2754 b'maxchanges',
2746 default=10,
2755 default=10,
2747 )
2756 )
2748 coreconfigitem(
2757 coreconfigitem(
2749 b'web',
2758 b'web',
2750 b'maxfiles',
2759 b'maxfiles',
2751 default=10,
2760 default=10,
2752 )
2761 )
2753 coreconfigitem(
2762 coreconfigitem(
2754 b'web',
2763 b'web',
2755 b'maxshortchanges',
2764 b'maxshortchanges',
2756 default=60,
2765 default=60,
2757 )
2766 )
2758 coreconfigitem(
2767 coreconfigitem(
2759 b'web',
2768 b'web',
2760 b'motd',
2769 b'motd',
2761 default=b'',
2770 default=b'',
2762 )
2771 )
2763 coreconfigitem(
2772 coreconfigitem(
2764 b'web',
2773 b'web',
2765 b'name',
2774 b'name',
2766 default=dynamicdefault,
2775 default=dynamicdefault,
2767 )
2776 )
2768 coreconfigitem(
2777 coreconfigitem(
2769 b'web',
2778 b'web',
2770 b'port',
2779 b'port',
2771 default=8000,
2780 default=8000,
2772 )
2781 )
2773 coreconfigitem(
2782 coreconfigitem(
2774 b'web',
2783 b'web',
2775 b'prefix',
2784 b'prefix',
2776 default=b'',
2785 default=b'',
2777 )
2786 )
2778 coreconfigitem(
2787 coreconfigitem(
2779 b'web',
2788 b'web',
2780 b'push_ssl',
2789 b'push_ssl',
2781 default=True,
2790 default=True,
2782 )
2791 )
2783 coreconfigitem(
2792 coreconfigitem(
2784 b'web',
2793 b'web',
2785 b'refreshinterval',
2794 b'refreshinterval',
2786 default=20,
2795 default=20,
2787 )
2796 )
2788 coreconfigitem(
2797 coreconfigitem(
2789 b'web',
2798 b'web',
2790 b'server-header',
2799 b'server-header',
2791 default=None,
2800 default=None,
2792 )
2801 )
2793 coreconfigitem(
2802 coreconfigitem(
2794 b'web',
2803 b'web',
2795 b'static',
2804 b'static',
2796 default=None,
2805 default=None,
2797 )
2806 )
2798 coreconfigitem(
2807 coreconfigitem(
2799 b'web',
2808 b'web',
2800 b'staticurl',
2809 b'staticurl',
2801 default=None,
2810 default=None,
2802 )
2811 )
2803 coreconfigitem(
2812 coreconfigitem(
2804 b'web',
2813 b'web',
2805 b'stripes',
2814 b'stripes',
2806 default=1,
2815 default=1,
2807 )
2816 )
2808 coreconfigitem(
2817 coreconfigitem(
2809 b'web',
2818 b'web',
2810 b'style',
2819 b'style',
2811 default=b'paper',
2820 default=b'paper',
2812 )
2821 )
2813 coreconfigitem(
2822 coreconfigitem(
2814 b'web',
2823 b'web',
2815 b'templates',
2824 b'templates',
2816 default=None,
2825 default=None,
2817 )
2826 )
2818 coreconfigitem(
2827 coreconfigitem(
2819 b'web',
2828 b'web',
2820 b'view',
2829 b'view',
2821 default=b'served',
2830 default=b'served',
2822 experimental=True,
2831 experimental=True,
2823 )
2832 )
2824 coreconfigitem(
2833 coreconfigitem(
2825 b'worker',
2834 b'worker',
2826 b'backgroundclose',
2835 b'backgroundclose',
2827 default=dynamicdefault,
2836 default=dynamicdefault,
2828 )
2837 )
2829 # Windows defaults to a limit of 512 open files. A buffer of 128
2838 # Windows defaults to a limit of 512 open files. A buffer of 128
2830 # should give us enough headway.
2839 # should give us enough headway.
2831 coreconfigitem(
2840 coreconfigitem(
2832 b'worker',
2841 b'worker',
2833 b'backgroundclosemaxqueue',
2842 b'backgroundclosemaxqueue',
2834 default=384,
2843 default=384,
2835 )
2844 )
2836 coreconfigitem(
2845 coreconfigitem(
2837 b'worker',
2846 b'worker',
2838 b'backgroundcloseminfilecount',
2847 b'backgroundcloseminfilecount',
2839 default=2048,
2848 default=2048,
2840 )
2849 )
2841 coreconfigitem(
2850 coreconfigitem(
2842 b'worker',
2851 b'worker',
2843 b'backgroundclosethreadcount',
2852 b'backgroundclosethreadcount',
2844 default=4,
2853 default=4,
2845 )
2854 )
2846 coreconfigitem(
2855 coreconfigitem(
2847 b'worker',
2856 b'worker',
2848 b'enabled',
2857 b'enabled',
2849 default=True,
2858 default=True,
2850 )
2859 )
2851 coreconfigitem(
2860 coreconfigitem(
2852 b'worker',
2861 b'worker',
2853 b'numcpus',
2862 b'numcpus',
2854 default=None,
2863 default=None,
2855 )
2864 )
2856
2865
2857 # Rebase related configuration moved to core because other extension are doing
2866 # Rebase related configuration moved to core because other extension are doing
2858 # strange things. For example, shelve import the extensions to reuse some bit
2867 # strange things. For example, shelve import the extensions to reuse some bit
2859 # without formally loading it.
2868 # without formally loading it.
2860 coreconfigitem(
2869 coreconfigitem(
2861 b'commands',
2870 b'commands',
2862 b'rebase.requiredest',
2871 b'rebase.requiredest',
2863 default=False,
2872 default=False,
2864 )
2873 )
2865 coreconfigitem(
2874 coreconfigitem(
2866 b'experimental',
2875 b'experimental',
2867 b'rebaseskipobsolete',
2876 b'rebaseskipobsolete',
2868 default=True,
2877 default=True,
2869 )
2878 )
2870 coreconfigitem(
2879 coreconfigitem(
2871 b'rebase',
2880 b'rebase',
2872 b'singletransaction',
2881 b'singletransaction',
2873 default=False,
2882 default=False,
2874 )
2883 )
2875 coreconfigitem(
2884 coreconfigitem(
2876 b'rebase',
2885 b'rebase',
2877 b'experimental.inmemory',
2886 b'experimental.inmemory',
2878 default=False,
2887 default=False,
2879 )
2888 )
2880
2889
2881 # This setting controls creation of a rebase_source extra field
2890 # This setting controls creation of a rebase_source extra field
2882 # during rebase. When False, no such field is created. This is
2891 # during rebase. When False, no such field is created. This is
2883 # useful eg for incrementally converting changesets and then
2892 # useful eg for incrementally converting changesets and then
2884 # rebasing them onto an existing repo.
2893 # rebasing them onto an existing repo.
2885 # WARNING: this is an advanced setting reserved for people who know
2894 # WARNING: this is an advanced setting reserved for people who know
2886 # exactly what they are doing. Misuse of this setting can easily
2895 # exactly what they are doing. Misuse of this setting can easily
2887 # result in obsmarker cycles and a vivid headache.
2896 # result in obsmarker cycles and a vivid headache.
2888 coreconfigitem(
2897 coreconfigitem(
2889 b'rebase',
2898 b'rebase',
2890 b'store-source',
2899 b'store-source',
2891 default=True,
2900 default=True,
2892 experimental=True,
2901 experimental=True,
2893 )
2902 )
@@ -1,693 +1,704 b''
1 # dirstatemap.py
1 # dirstatemap.py
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6
6
7 from .i18n import _
7 from .i18n import _
8
8
9 from . import (
9 from . import (
10 error,
10 error,
11 pathutil,
11 pathutil,
12 policy,
12 policy,
13 txnutil,
13 txnutil,
14 util,
14 util,
15 )
15 )
16
16
17 from .dirstateutils import (
17 from .dirstateutils import (
18 docket as docketmod,
18 docket as docketmod,
19 v2,
19 v2,
20 )
20 )
21
21
22 parsers = policy.importmod('parsers')
22 parsers = policy.importmod('parsers')
23 rustmod = policy.importrust('dirstate')
23 rustmod = policy.importrust('dirstate')
24
24
25 propertycache = util.propertycache
25 propertycache = util.propertycache
26
26
27 if rustmod is None:
27 if rustmod is None:
28 DirstateItem = parsers.DirstateItem
28 DirstateItem = parsers.DirstateItem
29 else:
29 else:
30 DirstateItem = rustmod.DirstateItem
30 DirstateItem = rustmod.DirstateItem
31
31
32 rangemask = 0x7FFFFFFF
32 rangemask = 0x7FFFFFFF
33
33
34 WRITE_MODE_AUTO = 0
34 WRITE_MODE_AUTO = 0
35 WRITE_MODE_FORCE_NEW = 1
35 WRITE_MODE_FORCE_NEW = 1
36 WRITE_MODE_FORCE_APPEND = 2
36
37
37
38
38 class _dirstatemapcommon:
39 class _dirstatemapcommon:
39 """
40 """
40 Methods that are identical for both implementations of the dirstatemap
41 Methods that are identical for both implementations of the dirstatemap
41 class, with and without Rust extensions enabled.
42 class, with and without Rust extensions enabled.
42 """
43 """
43
44
44 # please pytype
45 # please pytype
45
46
46 _map = None
47 _map = None
47 copymap = None
48 copymap = None
48
49
49 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
50 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
50 self._use_dirstate_v2 = use_dirstate_v2
51 self._use_dirstate_v2 = use_dirstate_v2
51 self._nodeconstants = nodeconstants
52 self._nodeconstants = nodeconstants
52 self._ui = ui
53 self._ui = ui
53 self._opener = opener
54 self._opener = opener
54 self._root = root
55 self._root = root
55 self._filename = b'dirstate'
56 self._filename = b'dirstate'
56 self._nodelen = 20 # Also update Rust code when changing this!
57 self._nodelen = 20 # Also update Rust code when changing this!
57 self._parents = None
58 self._parents = None
58 self._dirtyparents = False
59 self._dirtyparents = False
59 self._docket = None
60 self._docket = None
61 write_mode = ui.config(b"devel", b"dirstate.v2.data_update_mode")
62 if write_mode == b"auto":
63 self._write_mode = WRITE_MODE_AUTO
64 elif write_mode == b"force-append":
65 self._write_mode = WRITE_MODE_FORCE_APPEND
66 elif write_mode == b"force-new":
67 self._write_mode = WRITE_MODE_FORCE_NEW
68 else:
69 # unknown value, fallback to default
70 self._write_mode = WRITE_MODE_AUTO
60
71
61 # for consistent view between _pl() and _read() invocations
72 # for consistent view between _pl() and _read() invocations
62 self._pendingmode = None
73 self._pendingmode = None
63
74
64 def preload(self):
75 def preload(self):
65 """Loads the underlying data, if it's not already loaded"""
76 """Loads the underlying data, if it's not already loaded"""
66 self._map
77 self._map
67
78
68 def get(self, key, default=None):
79 def get(self, key, default=None):
69 return self._map.get(key, default)
80 return self._map.get(key, default)
70
81
71 def __len__(self):
82 def __len__(self):
72 return len(self._map)
83 return len(self._map)
73
84
74 def __iter__(self):
85 def __iter__(self):
75 return iter(self._map)
86 return iter(self._map)
76
87
77 def __contains__(self, key):
88 def __contains__(self, key):
78 return key in self._map
89 return key in self._map
79
90
80 def __getitem__(self, item):
91 def __getitem__(self, item):
81 return self._map[item]
92 return self._map[item]
82
93
83 ### disk interaction
94 ### disk interaction
84
95
85 def _opendirstatefile(self):
96 def _opendirstatefile(self):
86 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
97 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
87 if self._pendingmode is not None and self._pendingmode != mode:
98 if self._pendingmode is not None and self._pendingmode != mode:
88 fp.close()
99 fp.close()
89 raise error.Abort(
100 raise error.Abort(
90 _(b'working directory state may be changed parallelly')
101 _(b'working directory state may be changed parallelly')
91 )
102 )
92 self._pendingmode = mode
103 self._pendingmode = mode
93 return fp
104 return fp
94
105
95 def _readdirstatefile(self, size=-1):
106 def _readdirstatefile(self, size=-1):
96 try:
107 try:
97 with self._opendirstatefile() as fp:
108 with self._opendirstatefile() as fp:
98 return fp.read(size)
109 return fp.read(size)
99 except FileNotFoundError:
110 except FileNotFoundError:
100 # File doesn't exist, so the current state is empty
111 # File doesn't exist, so the current state is empty
101 return b''
112 return b''
102
113
103 @property
114 @property
104 def docket(self):
115 def docket(self):
105 if not self._docket:
116 if not self._docket:
106 if not self._use_dirstate_v2:
117 if not self._use_dirstate_v2:
107 raise error.ProgrammingError(
118 raise error.ProgrammingError(
108 b'dirstate only has a docket in v2 format'
119 b'dirstate only has a docket in v2 format'
109 )
120 )
110 self._docket = docketmod.DirstateDocket.parse(
121 self._docket = docketmod.DirstateDocket.parse(
111 self._readdirstatefile(), self._nodeconstants
122 self._readdirstatefile(), self._nodeconstants
112 )
123 )
113 return self._docket
124 return self._docket
114
125
115 def write_v2_no_append(self, tr, st, meta, packed):
126 def write_v2_no_append(self, tr, st, meta, packed):
116 old_docket = self.docket
127 old_docket = self.docket
117 new_docket = docketmod.DirstateDocket.with_new_uuid(
128 new_docket = docketmod.DirstateDocket.with_new_uuid(
118 self.parents(), len(packed), meta
129 self.parents(), len(packed), meta
119 )
130 )
120 if old_docket.uuid == new_docket.uuid:
131 if old_docket.uuid == new_docket.uuid:
121 raise error.ProgrammingError(b'dirstate docket name collision')
132 raise error.ProgrammingError(b'dirstate docket name collision')
122 data_filename = new_docket.data_filename()
133 data_filename = new_docket.data_filename()
123 self._opener.write(data_filename, packed)
134 self._opener.write(data_filename, packed)
124 # Write the new docket after the new data file has been
135 # Write the new docket after the new data file has been
125 # written. Because `st` was opened with `atomictemp=True`,
136 # written. Because `st` was opened with `atomictemp=True`,
126 # the actual `.hg/dirstate` file is only affected on close.
137 # the actual `.hg/dirstate` file is only affected on close.
127 st.write(new_docket.serialize())
138 st.write(new_docket.serialize())
128 st.close()
139 st.close()
129 # Remove the old data file after the new docket pointing to
140 # Remove the old data file after the new docket pointing to
130 # the new data file was written.
141 # the new data file was written.
131 if old_docket.uuid:
142 if old_docket.uuid:
132 data_filename = old_docket.data_filename()
143 data_filename = old_docket.data_filename()
133 unlink = lambda _tr=None: self._opener.unlink(data_filename)
144 unlink = lambda _tr=None: self._opener.unlink(data_filename)
134 if tr:
145 if tr:
135 category = b"dirstate-v2-clean-" + old_docket.uuid
146 category = b"dirstate-v2-clean-" + old_docket.uuid
136 tr.addpostclose(category, unlink)
147 tr.addpostclose(category, unlink)
137 else:
148 else:
138 unlink()
149 unlink()
139 self._docket = new_docket
150 self._docket = new_docket
140
151
141 ### reading/setting parents
152 ### reading/setting parents
142
153
143 def parents(self):
154 def parents(self):
144 if not self._parents:
155 if not self._parents:
145 if self._use_dirstate_v2:
156 if self._use_dirstate_v2:
146 self._parents = self.docket.parents
157 self._parents = self.docket.parents
147 else:
158 else:
148 read_len = self._nodelen * 2
159 read_len = self._nodelen * 2
149 st = self._readdirstatefile(read_len)
160 st = self._readdirstatefile(read_len)
150 l = len(st)
161 l = len(st)
151 if l == read_len:
162 if l == read_len:
152 self._parents = (
163 self._parents = (
153 st[: self._nodelen],
164 st[: self._nodelen],
154 st[self._nodelen : 2 * self._nodelen],
165 st[self._nodelen : 2 * self._nodelen],
155 )
166 )
156 elif l == 0:
167 elif l == 0:
157 self._parents = (
168 self._parents = (
158 self._nodeconstants.nullid,
169 self._nodeconstants.nullid,
159 self._nodeconstants.nullid,
170 self._nodeconstants.nullid,
160 )
171 )
161 else:
172 else:
162 raise error.Abort(
173 raise error.Abort(
163 _(b'working directory state appears damaged!')
174 _(b'working directory state appears damaged!')
164 )
175 )
165
176
166 return self._parents
177 return self._parents
167
178
168
179
169 class dirstatemap(_dirstatemapcommon):
180 class dirstatemap(_dirstatemapcommon):
170 """Map encapsulating the dirstate's contents.
181 """Map encapsulating the dirstate's contents.
171
182
172 The dirstate contains the following state:
183 The dirstate contains the following state:
173
184
174 - `identity` is the identity of the dirstate file, which can be used to
185 - `identity` is the identity of the dirstate file, which can be used to
175 detect when changes have occurred to the dirstate file.
186 detect when changes have occurred to the dirstate file.
176
187
177 - `parents` is a pair containing the parents of the working copy. The
188 - `parents` is a pair containing the parents of the working copy. The
178 parents are updated by calling `setparents`.
189 parents are updated by calling `setparents`.
179
190
180 - the state map maps filenames to tuples of (state, mode, size, mtime),
191 - the state map maps filenames to tuples of (state, mode, size, mtime),
181 where state is a single character representing 'normal', 'added',
192 where state is a single character representing 'normal', 'added',
182 'removed', or 'merged'. It is read by treating the dirstate as a
193 'removed', or 'merged'. It is read by treating the dirstate as a
183 dict. File state is updated by calling various methods (see each
194 dict. File state is updated by calling various methods (see each
184 documentation for details):
195 documentation for details):
185
196
186 - `reset_state`,
197 - `reset_state`,
187 - `set_tracked`
198 - `set_tracked`
188 - `set_untracked`
199 - `set_untracked`
189 - `set_clean`
200 - `set_clean`
190 - `set_possibly_dirty`
201 - `set_possibly_dirty`
191
202
192 - `copymap` maps destination filenames to their source filename.
203 - `copymap` maps destination filenames to their source filename.
193
204
194 The dirstate also provides the following views onto the state:
205 The dirstate also provides the following views onto the state:
195
206
196 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
207 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
197 form that they appear as in the dirstate.
208 form that they appear as in the dirstate.
198
209
199 - `dirfoldmap` is a dict mapping normalized directory names to the
210 - `dirfoldmap` is a dict mapping normalized directory names to the
200 denormalized form that they appear as in the dirstate.
211 denormalized form that they appear as in the dirstate.
201 """
212 """
202
213
203 ### Core data storage and access
214 ### Core data storage and access
204
215
205 @propertycache
216 @propertycache
206 def _map(self):
217 def _map(self):
207 self._map = {}
218 self._map = {}
208 self.read()
219 self.read()
209 return self._map
220 return self._map
210
221
211 @propertycache
222 @propertycache
212 def copymap(self):
223 def copymap(self):
213 self.copymap = {}
224 self.copymap = {}
214 self._map
225 self._map
215 return self.copymap
226 return self.copymap
216
227
217 def clear(self):
228 def clear(self):
218 self._map.clear()
229 self._map.clear()
219 self.copymap.clear()
230 self.copymap.clear()
220 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
231 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
221 util.clearcachedproperty(self, b"_dirs")
232 util.clearcachedproperty(self, b"_dirs")
222 util.clearcachedproperty(self, b"_alldirs")
233 util.clearcachedproperty(self, b"_alldirs")
223 util.clearcachedproperty(self, b"filefoldmap")
234 util.clearcachedproperty(self, b"filefoldmap")
224 util.clearcachedproperty(self, b"dirfoldmap")
235 util.clearcachedproperty(self, b"dirfoldmap")
225
236
226 def items(self):
237 def items(self):
227 return self._map.items()
238 return self._map.items()
228
239
229 # forward for python2,3 compat
240 # forward for python2,3 compat
230 iteritems = items
241 iteritems = items
231
242
232 def debug_iter(self, all):
243 def debug_iter(self, all):
233 """
244 """
234 Return an iterator of (filename, state, mode, size, mtime) tuples
245 Return an iterator of (filename, state, mode, size, mtime) tuples
235
246
236 `all` is unused when Rust is not enabled
247 `all` is unused when Rust is not enabled
237 """
248 """
238 for (filename, item) in self.items():
249 for (filename, item) in self.items():
239 yield (filename, item.state, item.mode, item.size, item.mtime)
250 yield (filename, item.state, item.mode, item.size, item.mtime)
240
251
241 def keys(self):
252 def keys(self):
242 return self._map.keys()
253 return self._map.keys()
243
254
244 ### reading/setting parents
255 ### reading/setting parents
245
256
246 def setparents(self, p1, p2, fold_p2=False):
257 def setparents(self, p1, p2, fold_p2=False):
247 self._parents = (p1, p2)
258 self._parents = (p1, p2)
248 self._dirtyparents = True
259 self._dirtyparents = True
249 copies = {}
260 copies = {}
250 if fold_p2:
261 if fold_p2:
251 for f, s in self._map.items():
262 for f, s in self._map.items():
252 # Discard "merged" markers when moving away from a merge state
263 # Discard "merged" markers when moving away from a merge state
253 if s.p2_info:
264 if s.p2_info:
254 source = self.copymap.pop(f, None)
265 source = self.copymap.pop(f, None)
255 if source:
266 if source:
256 copies[f] = source
267 copies[f] = source
257 s.drop_merge_data()
268 s.drop_merge_data()
258 return copies
269 return copies
259
270
260 ### disk interaction
271 ### disk interaction
261
272
262 def read(self):
273 def read(self):
263 # ignore HG_PENDING because identity is used only for writing
274 # ignore HG_PENDING because identity is used only for writing
264 self.identity = util.filestat.frompath(
275 self.identity = util.filestat.frompath(
265 self._opener.join(self._filename)
276 self._opener.join(self._filename)
266 )
277 )
267
278
268 if self._use_dirstate_v2:
279 if self._use_dirstate_v2:
269 if not self.docket.uuid:
280 if not self.docket.uuid:
270 return
281 return
271 st = self._opener.read(self.docket.data_filename())
282 st = self._opener.read(self.docket.data_filename())
272 else:
283 else:
273 st = self._readdirstatefile()
284 st = self._readdirstatefile()
274
285
275 if not st:
286 if not st:
276 return
287 return
277
288
278 # TODO: adjust this estimate for dirstate-v2
289 # TODO: adjust this estimate for dirstate-v2
279 if util.safehasattr(parsers, b'dict_new_presized'):
290 if util.safehasattr(parsers, b'dict_new_presized'):
280 # Make an estimate of the number of files in the dirstate based on
291 # Make an estimate of the number of files in the dirstate based on
281 # its size. This trades wasting some memory for avoiding costly
292 # its size. This trades wasting some memory for avoiding costly
282 # resizes. Each entry have a prefix of 17 bytes followed by one or
293 # resizes. Each entry have a prefix of 17 bytes followed by one or
283 # two path names. Studies on various large-scale real-world repositories
294 # two path names. Studies on various large-scale real-world repositories
284 # found 54 bytes a reasonable upper limit for the average path names.
295 # found 54 bytes a reasonable upper limit for the average path names.
285 # Copy entries are ignored for the sake of this estimate.
296 # Copy entries are ignored for the sake of this estimate.
286 self._map = parsers.dict_new_presized(len(st) // 71)
297 self._map = parsers.dict_new_presized(len(st) // 71)
287
298
288 # Python's garbage collector triggers a GC each time a certain number
299 # Python's garbage collector triggers a GC each time a certain number
289 # of container objects (the number being defined by
300 # of container objects (the number being defined by
290 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
301 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
291 # for each file in the dirstate. The C version then immediately marks
302 # for each file in the dirstate. The C version then immediately marks
292 # them as not to be tracked by the collector. However, this has no
303 # them as not to be tracked by the collector. However, this has no
293 # effect on when GCs are triggered, only on what objects the GC looks
304 # effect on when GCs are triggered, only on what objects the GC looks
294 # into. This means that O(number of files) GCs are unavoidable.
305 # into. This means that O(number of files) GCs are unavoidable.
295 # Depending on when in the process's lifetime the dirstate is parsed,
306 # Depending on when in the process's lifetime the dirstate is parsed,
296 # this can get very expensive. As a workaround, disable GC while
307 # this can get very expensive. As a workaround, disable GC while
297 # parsing the dirstate.
308 # parsing the dirstate.
298 #
309 #
299 # (we cannot decorate the function directly since it is in a C module)
310 # (we cannot decorate the function directly since it is in a C module)
300 if self._use_dirstate_v2:
311 if self._use_dirstate_v2:
301 p = self.docket.parents
312 p = self.docket.parents
302 meta = self.docket.tree_metadata
313 meta = self.docket.tree_metadata
303 parse_dirstate = util.nogc(v2.parse_dirstate)
314 parse_dirstate = util.nogc(v2.parse_dirstate)
304 parse_dirstate(self._map, self.copymap, st, meta)
315 parse_dirstate(self._map, self.copymap, st, meta)
305 else:
316 else:
306 parse_dirstate = util.nogc(parsers.parse_dirstate)
317 parse_dirstate = util.nogc(parsers.parse_dirstate)
307 p = parse_dirstate(self._map, self.copymap, st)
318 p = parse_dirstate(self._map, self.copymap, st)
308 if not self._dirtyparents:
319 if not self._dirtyparents:
309 self.setparents(*p)
320 self.setparents(*p)
310
321
311 # Avoid excess attribute lookups by fast pathing certain checks
322 # Avoid excess attribute lookups by fast pathing certain checks
312 self.__contains__ = self._map.__contains__
323 self.__contains__ = self._map.__contains__
313 self.__getitem__ = self._map.__getitem__
324 self.__getitem__ = self._map.__getitem__
314 self.get = self._map.get
325 self.get = self._map.get
315
326
316 def write(self, tr, st):
327 def write(self, tr, st):
317 if self._use_dirstate_v2:
328 if self._use_dirstate_v2:
318 packed, meta = v2.pack_dirstate(self._map, self.copymap)
329 packed, meta = v2.pack_dirstate(self._map, self.copymap)
319 self.write_v2_no_append(tr, st, meta, packed)
330 self.write_v2_no_append(tr, st, meta, packed)
320 else:
331 else:
321 packed = parsers.pack_dirstate(
332 packed = parsers.pack_dirstate(
322 self._map, self.copymap, self.parents()
333 self._map, self.copymap, self.parents()
323 )
334 )
324 st.write(packed)
335 st.write(packed)
325 st.close()
336 st.close()
326 self._dirtyparents = False
337 self._dirtyparents = False
327
338
328 @propertycache
339 @propertycache
329 def identity(self):
340 def identity(self):
330 self._map
341 self._map
331 return self.identity
342 return self.identity
332
343
333 ### code related to maintaining and accessing "extra" property
344 ### code related to maintaining and accessing "extra" property
334 # (e.g. "has_dir")
345 # (e.g. "has_dir")
335
346
336 def _dirs_incr(self, filename, old_entry=None):
347 def _dirs_incr(self, filename, old_entry=None):
337 """increment the dirstate counter if applicable"""
348 """increment the dirstate counter if applicable"""
338 if (
349 if (
339 old_entry is None or old_entry.removed
350 old_entry is None or old_entry.removed
340 ) and "_dirs" in self.__dict__:
351 ) and "_dirs" in self.__dict__:
341 self._dirs.addpath(filename)
352 self._dirs.addpath(filename)
342 if old_entry is None and "_alldirs" in self.__dict__:
353 if old_entry is None and "_alldirs" in self.__dict__:
343 self._alldirs.addpath(filename)
354 self._alldirs.addpath(filename)
344
355
345 def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
356 def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
346 """decrement the dirstate counter if applicable"""
357 """decrement the dirstate counter if applicable"""
347 if old_entry is not None:
358 if old_entry is not None:
348 if "_dirs" in self.__dict__ and not old_entry.removed:
359 if "_dirs" in self.__dict__ and not old_entry.removed:
349 self._dirs.delpath(filename)
360 self._dirs.delpath(filename)
350 if "_alldirs" in self.__dict__ and not remove_variant:
361 if "_alldirs" in self.__dict__ and not remove_variant:
351 self._alldirs.delpath(filename)
362 self._alldirs.delpath(filename)
352 elif remove_variant and "_alldirs" in self.__dict__:
363 elif remove_variant and "_alldirs" in self.__dict__:
353 self._alldirs.addpath(filename)
364 self._alldirs.addpath(filename)
354 if "filefoldmap" in self.__dict__:
365 if "filefoldmap" in self.__dict__:
355 normed = util.normcase(filename)
366 normed = util.normcase(filename)
356 self.filefoldmap.pop(normed, None)
367 self.filefoldmap.pop(normed, None)
357
368
358 @propertycache
369 @propertycache
359 def filefoldmap(self):
370 def filefoldmap(self):
360 """Returns a dictionary mapping normalized case paths to their
371 """Returns a dictionary mapping normalized case paths to their
361 non-normalized versions.
372 non-normalized versions.
362 """
373 """
363 try:
374 try:
364 makefilefoldmap = parsers.make_file_foldmap
375 makefilefoldmap = parsers.make_file_foldmap
365 except AttributeError:
376 except AttributeError:
366 pass
377 pass
367 else:
378 else:
368 return makefilefoldmap(
379 return makefilefoldmap(
369 self._map, util.normcasespec, util.normcasefallback
380 self._map, util.normcasespec, util.normcasefallback
370 )
381 )
371
382
372 f = {}
383 f = {}
373 normcase = util.normcase
384 normcase = util.normcase
374 for name, s in self._map.items():
385 for name, s in self._map.items():
375 if not s.removed:
386 if not s.removed:
376 f[normcase(name)] = name
387 f[normcase(name)] = name
377 f[b'.'] = b'.' # prevents useless util.fspath() invocation
388 f[b'.'] = b'.' # prevents useless util.fspath() invocation
378 return f
389 return f
379
390
380 @propertycache
391 @propertycache
381 def dirfoldmap(self):
392 def dirfoldmap(self):
382 f = {}
393 f = {}
383 normcase = util.normcase
394 normcase = util.normcase
384 for name in self._dirs:
395 for name in self._dirs:
385 f[normcase(name)] = name
396 f[normcase(name)] = name
386 return f
397 return f
387
398
388 def hastrackeddir(self, d):
399 def hastrackeddir(self, d):
389 """
400 """
390 Returns True if the dirstate contains a tracked (not removed) file
401 Returns True if the dirstate contains a tracked (not removed) file
391 in this directory.
402 in this directory.
392 """
403 """
393 return d in self._dirs
404 return d in self._dirs
394
405
395 def hasdir(self, d):
406 def hasdir(self, d):
396 """
407 """
397 Returns True if the dirstate contains a file (tracked or removed)
408 Returns True if the dirstate contains a file (tracked or removed)
398 in this directory.
409 in this directory.
399 """
410 """
400 return d in self._alldirs
411 return d in self._alldirs
401
412
402 @propertycache
413 @propertycache
403 def _dirs(self):
414 def _dirs(self):
404 return pathutil.dirs(self._map, only_tracked=True)
415 return pathutil.dirs(self._map, only_tracked=True)
405
416
406 @propertycache
417 @propertycache
407 def _alldirs(self):
418 def _alldirs(self):
408 return pathutil.dirs(self._map)
419 return pathutil.dirs(self._map)
409
420
410 ### code related to manipulation of entries and copy-sources
421 ### code related to manipulation of entries and copy-sources
411
422
412 def reset_state(
423 def reset_state(
413 self,
424 self,
414 filename,
425 filename,
415 wc_tracked=False,
426 wc_tracked=False,
416 p1_tracked=False,
427 p1_tracked=False,
417 p2_info=False,
428 p2_info=False,
418 has_meaningful_mtime=True,
429 has_meaningful_mtime=True,
419 parentfiledata=None,
430 parentfiledata=None,
420 ):
431 ):
421 """Set a entry to a given state, diregarding all previous state
432 """Set a entry to a given state, diregarding all previous state
422
433
423 This is to be used by the part of the dirstate API dedicated to
434 This is to be used by the part of the dirstate API dedicated to
424 adjusting the dirstate after a update/merge.
435 adjusting the dirstate after a update/merge.
425
436
426 note: calling this might result to no entry existing at all if the
437 note: calling this might result to no entry existing at all if the
427 dirstate map does not see any point at having one for this file
438 dirstate map does not see any point at having one for this file
428 anymore.
439 anymore.
429 """
440 """
430 # copy information are now outdated
441 # copy information are now outdated
431 # (maybe new information should be in directly passed to this function)
442 # (maybe new information should be in directly passed to this function)
432 self.copymap.pop(filename, None)
443 self.copymap.pop(filename, None)
433
444
434 if not (p1_tracked or p2_info or wc_tracked):
445 if not (p1_tracked or p2_info or wc_tracked):
435 old_entry = self._map.get(filename)
446 old_entry = self._map.get(filename)
436 self._drop_entry(filename)
447 self._drop_entry(filename)
437 self._dirs_decr(filename, old_entry=old_entry)
448 self._dirs_decr(filename, old_entry=old_entry)
438 return
449 return
439
450
440 old_entry = self._map.get(filename)
451 old_entry = self._map.get(filename)
441 self._dirs_incr(filename, old_entry)
452 self._dirs_incr(filename, old_entry)
442 entry = DirstateItem(
453 entry = DirstateItem(
443 wc_tracked=wc_tracked,
454 wc_tracked=wc_tracked,
444 p1_tracked=p1_tracked,
455 p1_tracked=p1_tracked,
445 p2_info=p2_info,
456 p2_info=p2_info,
446 has_meaningful_mtime=has_meaningful_mtime,
457 has_meaningful_mtime=has_meaningful_mtime,
447 parentfiledata=parentfiledata,
458 parentfiledata=parentfiledata,
448 )
459 )
449 self._map[filename] = entry
460 self._map[filename] = entry
450
461
451 def set_tracked(self, filename):
462 def set_tracked(self, filename):
452 new = False
463 new = False
453 entry = self.get(filename)
464 entry = self.get(filename)
454 if entry is None:
465 if entry is None:
455 self._dirs_incr(filename)
466 self._dirs_incr(filename)
456 entry = DirstateItem(
467 entry = DirstateItem(
457 wc_tracked=True,
468 wc_tracked=True,
458 )
469 )
459
470
460 self._map[filename] = entry
471 self._map[filename] = entry
461 new = True
472 new = True
462 elif not entry.tracked:
473 elif not entry.tracked:
463 self._dirs_incr(filename, entry)
474 self._dirs_incr(filename, entry)
464 entry.set_tracked()
475 entry.set_tracked()
465 self._refresh_entry(filename, entry)
476 self._refresh_entry(filename, entry)
466 new = True
477 new = True
467 else:
478 else:
468 # XXX This is probably overkill for more case, but we need this to
479 # XXX This is probably overkill for more case, but we need this to
469 # fully replace the `normallookup` call with `set_tracked` one.
480 # fully replace the `normallookup` call with `set_tracked` one.
470 # Consider smoothing this in the future.
481 # Consider smoothing this in the future.
471 entry.set_possibly_dirty()
482 entry.set_possibly_dirty()
472 self._refresh_entry(filename, entry)
483 self._refresh_entry(filename, entry)
473 return new
484 return new
474
485
475 def set_untracked(self, f):
486 def set_untracked(self, f):
476 """Mark a file as no longer tracked in the dirstate map"""
487 """Mark a file as no longer tracked in the dirstate map"""
477 entry = self.get(f)
488 entry = self.get(f)
478 if entry is None:
489 if entry is None:
479 return False
490 return False
480 else:
491 else:
481 self._dirs_decr(f, old_entry=entry, remove_variant=not entry.added)
492 self._dirs_decr(f, old_entry=entry, remove_variant=not entry.added)
482 if not entry.p2_info:
493 if not entry.p2_info:
483 self.copymap.pop(f, None)
494 self.copymap.pop(f, None)
484 entry.set_untracked()
495 entry.set_untracked()
485 self._refresh_entry(f, entry)
496 self._refresh_entry(f, entry)
486 return True
497 return True
487
498
488 def set_clean(self, filename, mode, size, mtime):
499 def set_clean(self, filename, mode, size, mtime):
489 """mark a file as back to a clean state"""
500 """mark a file as back to a clean state"""
490 entry = self[filename]
501 entry = self[filename]
491 size = size & rangemask
502 size = size & rangemask
492 entry.set_clean(mode, size, mtime)
503 entry.set_clean(mode, size, mtime)
493 self._refresh_entry(filename, entry)
504 self._refresh_entry(filename, entry)
494 self.copymap.pop(filename, None)
505 self.copymap.pop(filename, None)
495
506
496 def set_possibly_dirty(self, filename):
507 def set_possibly_dirty(self, filename):
497 """record that the current state of the file on disk is unknown"""
508 """record that the current state of the file on disk is unknown"""
498 entry = self[filename]
509 entry = self[filename]
499 entry.set_possibly_dirty()
510 entry.set_possibly_dirty()
500 self._refresh_entry(filename, entry)
511 self._refresh_entry(filename, entry)
501
512
502 def _refresh_entry(self, f, entry):
513 def _refresh_entry(self, f, entry):
503 """record updated state of an entry"""
514 """record updated state of an entry"""
504 if not entry.any_tracked:
515 if not entry.any_tracked:
505 self._map.pop(f, None)
516 self._map.pop(f, None)
506
517
507 def _drop_entry(self, f):
518 def _drop_entry(self, f):
508 """remove any entry for file f
519 """remove any entry for file f
509
520
510 This should also drop associated copy information
521 This should also drop associated copy information
511
522
512 The fact we actually need to drop it is the responsability of the caller"""
523 The fact we actually need to drop it is the responsability of the caller"""
513 self._map.pop(f, None)
524 self._map.pop(f, None)
514 self.copymap.pop(f, None)
525 self.copymap.pop(f, None)
515
526
516
527
517 if rustmod is not None:
528 if rustmod is not None:
518
529
519 class dirstatemap(_dirstatemapcommon):
530 class dirstatemap(_dirstatemapcommon):
520
531
521 ### Core data storage and access
532 ### Core data storage and access
522
533
523 @propertycache
534 @propertycache
524 def _map(self):
535 def _map(self):
525 """
536 """
526 Fills the Dirstatemap when called.
537 Fills the Dirstatemap when called.
527 """
538 """
528 # ignore HG_PENDING because identity is used only for writing
539 # ignore HG_PENDING because identity is used only for writing
529 self.identity = util.filestat.frompath(
540 self.identity = util.filestat.frompath(
530 self._opener.join(self._filename)
541 self._opener.join(self._filename)
531 )
542 )
532
543
533 if self._use_dirstate_v2:
544 if self._use_dirstate_v2:
534 if self.docket.uuid:
545 if self.docket.uuid:
535 # TODO: use mmap when possible
546 # TODO: use mmap when possible
536 data = self._opener.read(self.docket.data_filename())
547 data = self._opener.read(self.docket.data_filename())
537 else:
548 else:
538 data = b''
549 data = b''
539 self._map = rustmod.DirstateMap.new_v2(
550 self._map = rustmod.DirstateMap.new_v2(
540 data, self.docket.data_size, self.docket.tree_metadata
551 data, self.docket.data_size, self.docket.tree_metadata
541 )
552 )
542 parents = self.docket.parents
553 parents = self.docket.parents
543 else:
554 else:
544 self._map, parents = rustmod.DirstateMap.new_v1(
555 self._map, parents = rustmod.DirstateMap.new_v1(
545 self._readdirstatefile()
556 self._readdirstatefile()
546 )
557 )
547
558
548 if parents and not self._dirtyparents:
559 if parents and not self._dirtyparents:
549 self.setparents(*parents)
560 self.setparents(*parents)
550
561
551 self.__contains__ = self._map.__contains__
562 self.__contains__ = self._map.__contains__
552 self.__getitem__ = self._map.__getitem__
563 self.__getitem__ = self._map.__getitem__
553 self.get = self._map.get
564 self.get = self._map.get
554 return self._map
565 return self._map
555
566
556 @property
567 @property
557 def copymap(self):
568 def copymap(self):
558 return self._map.copymap()
569 return self._map.copymap()
559
570
560 def debug_iter(self, all):
571 def debug_iter(self, all):
561 """
572 """
562 Return an iterator of (filename, state, mode, size, mtime) tuples
573 Return an iterator of (filename, state, mode, size, mtime) tuples
563
574
564 `all`: also include with `state == b' '` dirstate tree nodes that
575 `all`: also include with `state == b' '` dirstate tree nodes that
565 don't have an associated `DirstateItem`.
576 don't have an associated `DirstateItem`.
566
577
567 """
578 """
568 return self._map.debug_iter(all)
579 return self._map.debug_iter(all)
569
580
570 def clear(self):
581 def clear(self):
571 self._map.clear()
582 self._map.clear()
572 self.setparents(
583 self.setparents(
573 self._nodeconstants.nullid, self._nodeconstants.nullid
584 self._nodeconstants.nullid, self._nodeconstants.nullid
574 )
585 )
575 util.clearcachedproperty(self, b"_dirs")
586 util.clearcachedproperty(self, b"_dirs")
576 util.clearcachedproperty(self, b"_alldirs")
587 util.clearcachedproperty(self, b"_alldirs")
577 util.clearcachedproperty(self, b"dirfoldmap")
588 util.clearcachedproperty(self, b"dirfoldmap")
578
589
579 def items(self):
590 def items(self):
580 return self._map.items()
591 return self._map.items()
581
592
582 # forward for python2,3 compat
593 # forward for python2,3 compat
583 iteritems = items
594 iteritems = items
584
595
585 def keys(self):
596 def keys(self):
586 return iter(self._map)
597 return iter(self._map)
587
598
588 ### reading/setting parents
599 ### reading/setting parents
589
600
590 def setparents(self, p1, p2, fold_p2=False):
601 def setparents(self, p1, p2, fold_p2=False):
591 self._parents = (p1, p2)
602 self._parents = (p1, p2)
592 self._dirtyparents = True
603 self._dirtyparents = True
593 copies = {}
604 copies = {}
594 if fold_p2:
605 if fold_p2:
595 copies = self._map.setparents_fixup()
606 copies = self._map.setparents_fixup()
596 return copies
607 return copies
597
608
598 ### disk interaction
609 ### disk interaction
599
610
600 @propertycache
611 @propertycache
601 def identity(self):
612 def identity(self):
602 self._map
613 self._map
603 return self.identity
614 return self.identity
604
615
605 def write(self, tr, st):
616 def write(self, tr, st):
606 if not self._use_dirstate_v2:
617 if not self._use_dirstate_v2:
607 p1, p2 = self.parents()
618 p1, p2 = self.parents()
608 packed = self._map.write_v1(p1, p2)
619 packed = self._map.write_v1(p1, p2)
609 st.write(packed)
620 st.write(packed)
610 st.close()
621 st.close()
611 self._dirtyparents = False
622 self._dirtyparents = False
612 return
623 return
613
624
614 # We can only append to an existing data file if there is one
625 # We can only append to an existing data file if there is one
615 write_mode = WRITE_MODE_AUTO
626 write_mode = self._write_mode
616 if self.docket.uuid is None:
627 if self.docket.uuid is None:
617 write_mode = WRITE_MODE_FORCE_NEW
628 write_mode = WRITE_MODE_FORCE_NEW
618 packed, meta, append = self._map.write_v2(write_mode)
629 packed, meta, append = self._map.write_v2(write_mode)
619 if append:
630 if append:
620 docket = self.docket
631 docket = self.docket
621 data_filename = docket.data_filename()
632 data_filename = docket.data_filename()
622 with self._opener(data_filename, b'r+b') as fp:
633 with self._opener(data_filename, b'r+b') as fp:
623 fp.seek(docket.data_size)
634 fp.seek(docket.data_size)
624 assert fp.tell() == docket.data_size
635 assert fp.tell() == docket.data_size
625 written = fp.write(packed)
636 written = fp.write(packed)
626 if written is not None: # py2 may return None
637 if written is not None: # py2 may return None
627 assert written == len(packed), (written, len(packed))
638 assert written == len(packed), (written, len(packed))
628 docket.data_size += len(packed)
639 docket.data_size += len(packed)
629 docket.parents = self.parents()
640 docket.parents = self.parents()
630 docket.tree_metadata = meta
641 docket.tree_metadata = meta
631 st.write(docket.serialize())
642 st.write(docket.serialize())
632 st.close()
643 st.close()
633 else:
644 else:
634 self.write_v2_no_append(tr, st, meta, packed)
645 self.write_v2_no_append(tr, st, meta, packed)
635 # Reload from the newly-written file
646 # Reload from the newly-written file
636 util.clearcachedproperty(self, b"_map")
647 util.clearcachedproperty(self, b"_map")
637 self._dirtyparents = False
648 self._dirtyparents = False
638
649
639 ### code related to maintaining and accessing "extra" property
650 ### code related to maintaining and accessing "extra" property
640 # (e.g. "has_dir")
651 # (e.g. "has_dir")
641
652
642 @propertycache
653 @propertycache
643 def filefoldmap(self):
654 def filefoldmap(self):
644 """Returns a dictionary mapping normalized case paths to their
655 """Returns a dictionary mapping normalized case paths to their
645 non-normalized versions.
656 non-normalized versions.
646 """
657 """
647 return self._map.filefoldmapasdict()
658 return self._map.filefoldmapasdict()
648
659
649 def hastrackeddir(self, d):
660 def hastrackeddir(self, d):
650 return self._map.hastrackeddir(d)
661 return self._map.hastrackeddir(d)
651
662
652 def hasdir(self, d):
663 def hasdir(self, d):
653 return self._map.hasdir(d)
664 return self._map.hasdir(d)
654
665
655 @propertycache
666 @propertycache
656 def dirfoldmap(self):
667 def dirfoldmap(self):
657 f = {}
668 f = {}
658 normcase = util.normcase
669 normcase = util.normcase
659 for name in self._map.tracked_dirs():
670 for name in self._map.tracked_dirs():
660 f[normcase(name)] = name
671 f[normcase(name)] = name
661 return f
672 return f
662
673
663 ### code related to manipulation of entries and copy-sources
674 ### code related to manipulation of entries and copy-sources
664
675
665 def set_tracked(self, f):
676 def set_tracked(self, f):
666 return self._map.set_tracked(f)
677 return self._map.set_tracked(f)
667
678
668 def set_untracked(self, f):
679 def set_untracked(self, f):
669 return self._map.set_untracked(f)
680 return self._map.set_untracked(f)
670
681
671 def set_clean(self, filename, mode, size, mtime):
682 def set_clean(self, filename, mode, size, mtime):
672 self._map.set_clean(filename, mode, size, mtime)
683 self._map.set_clean(filename, mode, size, mtime)
673
684
674 def set_possibly_dirty(self, f):
685 def set_possibly_dirty(self, f):
675 self._map.set_possibly_dirty(f)
686 self._map.set_possibly_dirty(f)
676
687
677 def reset_state(
688 def reset_state(
678 self,
689 self,
679 filename,
690 filename,
680 wc_tracked=False,
691 wc_tracked=False,
681 p1_tracked=False,
692 p1_tracked=False,
682 p2_info=False,
693 p2_info=False,
683 has_meaningful_mtime=True,
694 has_meaningful_mtime=True,
684 parentfiledata=None,
695 parentfiledata=None,
685 ):
696 ):
686 return self._map.reset_state(
697 return self._map.reset_state(
687 filename,
698 filename,
688 wc_tracked,
699 wc_tracked,
689 p1_tracked,
700 p1_tracked,
690 p2_info,
701 p2_info,
691 has_meaningful_mtime,
702 has_meaningful_mtime,
692 parentfiledata,
703 parentfiledata,
693 )
704 )
@@ -1,1913 +1,1929 b''
1 use bytes_cast::BytesCast;
1 use bytes_cast::BytesCast;
2 use micro_timer::timed;
2 use micro_timer::timed;
3 use std::borrow::Cow;
3 use std::borrow::Cow;
4 use std::path::PathBuf;
4 use std::path::PathBuf;
5
5
6 use super::on_disk;
6 use super::on_disk;
7 use super::on_disk::DirstateV2ParseError;
7 use super::on_disk::DirstateV2ParseError;
8 use super::owning::OwningDirstateMap;
8 use super::owning::OwningDirstateMap;
9 use super::path_with_basename::WithBasename;
9 use super::path_with_basename::WithBasename;
10 use crate::dirstate::parsers::pack_entry;
10 use crate::dirstate::parsers::pack_entry;
11 use crate::dirstate::parsers::packed_entry_size;
11 use crate::dirstate::parsers::packed_entry_size;
12 use crate::dirstate::parsers::parse_dirstate_entries;
12 use crate::dirstate::parsers::parse_dirstate_entries;
13 use crate::dirstate::CopyMapIter;
13 use crate::dirstate::CopyMapIter;
14 use crate::dirstate::DirstateV2Data;
14 use crate::dirstate::DirstateV2Data;
15 use crate::dirstate::ParentFileData;
15 use crate::dirstate::ParentFileData;
16 use crate::dirstate::StateMapIter;
16 use crate::dirstate::StateMapIter;
17 use crate::dirstate::TruncatedTimestamp;
17 use crate::dirstate::TruncatedTimestamp;
18 use crate::matchers::Matcher;
18 use crate::matchers::Matcher;
19 use crate::utils::hg_path::{HgPath, HgPathBuf};
19 use crate::utils::hg_path::{HgPath, HgPathBuf};
20 use crate::DirstateEntry;
20 use crate::DirstateEntry;
21 use crate::DirstateError;
21 use crate::DirstateError;
22 use crate::DirstateMapError;
22 use crate::DirstateMapError;
23 use crate::DirstateParents;
23 use crate::DirstateParents;
24 use crate::DirstateStatus;
24 use crate::DirstateStatus;
25 use crate::FastHashbrownMap as FastHashMap;
25 use crate::FastHashbrownMap as FastHashMap;
26 use crate::PatternFileWarning;
26 use crate::PatternFileWarning;
27 use crate::StatusError;
27 use crate::StatusError;
28 use crate::StatusOptions;
28 use crate::StatusOptions;
29
29
30 /// Append to an existing data file if the amount of unreachable data (not used
30 /// Append to an existing data file if the amount of unreachable data (not used
31 /// anymore) is less than this fraction of the total amount of existing data.
31 /// anymore) is less than this fraction of the total amount of existing data.
32 const ACCEPTABLE_UNREACHABLE_BYTES_RATIO: f32 = 0.5;
32 const ACCEPTABLE_UNREACHABLE_BYTES_RATIO: f32 = 0.5;
33
33
34 #[derive(Debug, PartialEq, Eq)]
34 #[derive(Debug, PartialEq, Eq)]
35 /// Version of the on-disk format
35 /// Version of the on-disk format
36 pub enum DirstateVersion {
36 pub enum DirstateVersion {
37 V1,
37 V1,
38 V2,
38 V2,
39 }
39 }
40
40
41 #[derive(Debug, PartialEq, Eq)]
41 #[derive(Debug, PartialEq, Eq)]
42 pub enum DirstateMapWriteMode {
42 pub enum DirstateMapWriteMode {
43 Auto,
43 Auto,
44 ForceNewDataFile,
44 ForceNewDataFile,
45 ForceAppend,
45 }
46 }
46
47
47 #[derive(Debug)]
48 #[derive(Debug)]
48 pub struct DirstateMap<'on_disk> {
49 pub struct DirstateMap<'on_disk> {
49 /// Contents of the `.hg/dirstate` file
50 /// Contents of the `.hg/dirstate` file
50 pub(super) on_disk: &'on_disk [u8],
51 pub(super) on_disk: &'on_disk [u8],
51
52
52 pub(super) root: ChildNodes<'on_disk>,
53 pub(super) root: ChildNodes<'on_disk>,
53
54
54 /// Number of nodes anywhere in the tree that have `.entry.is_some()`.
55 /// Number of nodes anywhere in the tree that have `.entry.is_some()`.
55 pub(super) nodes_with_entry_count: u32,
56 pub(super) nodes_with_entry_count: u32,
56
57
57 /// Number of nodes anywhere in the tree that have
58 /// Number of nodes anywhere in the tree that have
58 /// `.copy_source.is_some()`.
59 /// `.copy_source.is_some()`.
59 pub(super) nodes_with_copy_source_count: u32,
60 pub(super) nodes_with_copy_source_count: u32,
60
61
61 /// See on_disk::Header
62 /// See on_disk::Header
62 pub(super) ignore_patterns_hash: on_disk::IgnorePatternsHash,
63 pub(super) ignore_patterns_hash: on_disk::IgnorePatternsHash,
63
64
64 /// How many bytes of `on_disk` are not used anymore
65 /// How many bytes of `on_disk` are not used anymore
65 pub(super) unreachable_bytes: u32,
66 pub(super) unreachable_bytes: u32,
66
67
67 /// Size of the data used to first load this `DirstateMap`. Used in case
68 /// Size of the data used to first load this `DirstateMap`. Used in case
68 /// we need to write some new metadata, but no new data on disk.
69 /// we need to write some new metadata, but no new data on disk.
69 pub(super) old_data_size: usize,
70 pub(super) old_data_size: usize,
70
71
71 pub(super) dirstate_version: DirstateVersion,
72 pub(super) dirstate_version: DirstateVersion,
73
74 /// Controlled by config option `devel.dirstate.v2.data_update_mode`
75 pub(super) write_mode: DirstateMapWriteMode,
72 }
76 }
73
77
74 /// Using a plain `HgPathBuf` of the full path from the repository root as a
78 /// Using a plain `HgPathBuf` of the full path from the repository root as a
75 /// map key would also work: all paths in a given map have the same parent
79 /// map key would also work: all paths in a given map have the same parent
76 /// path, so comparing full paths gives the same result as comparing base
80 /// path, so comparing full paths gives the same result as comparing base
77 /// names. However `HashMap` would waste time always re-hashing the same
81 /// names. However `HashMap` would waste time always re-hashing the same
78 /// string prefix.
82 /// string prefix.
79 pub(super) type NodeKey<'on_disk> = WithBasename<Cow<'on_disk, HgPath>>;
83 pub(super) type NodeKey<'on_disk> = WithBasename<Cow<'on_disk, HgPath>>;
80
84
81 /// Similar to `&'tree Cow<'on_disk, HgPath>`, but can also be returned
85 /// Similar to `&'tree Cow<'on_disk, HgPath>`, but can also be returned
82 /// for on-disk nodes that don’t actually have a `Cow` to borrow.
86 /// for on-disk nodes that don’t actually have a `Cow` to borrow.
83 #[derive(Debug)]
87 #[derive(Debug)]
84 pub(super) enum BorrowedPath<'tree, 'on_disk> {
88 pub(super) enum BorrowedPath<'tree, 'on_disk> {
85 InMemory(&'tree HgPathBuf),
89 InMemory(&'tree HgPathBuf),
86 OnDisk(&'on_disk HgPath),
90 OnDisk(&'on_disk HgPath),
87 }
91 }
88
92
89 #[derive(Debug)]
93 #[derive(Debug)]
90 pub(super) enum ChildNodes<'on_disk> {
94 pub(super) enum ChildNodes<'on_disk> {
91 InMemory(FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
95 InMemory(FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
92 OnDisk(&'on_disk [on_disk::Node]),
96 OnDisk(&'on_disk [on_disk::Node]),
93 }
97 }
94
98
95 #[derive(Debug)]
99 #[derive(Debug)]
96 pub(super) enum ChildNodesRef<'tree, 'on_disk> {
100 pub(super) enum ChildNodesRef<'tree, 'on_disk> {
97 InMemory(&'tree FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
101 InMemory(&'tree FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
98 OnDisk(&'on_disk [on_disk::Node]),
102 OnDisk(&'on_disk [on_disk::Node]),
99 }
103 }
100
104
101 #[derive(Debug)]
105 #[derive(Debug)]
102 pub(super) enum NodeRef<'tree, 'on_disk> {
106 pub(super) enum NodeRef<'tree, 'on_disk> {
103 InMemory(&'tree NodeKey<'on_disk>, &'tree Node<'on_disk>),
107 InMemory(&'tree NodeKey<'on_disk>, &'tree Node<'on_disk>),
104 OnDisk(&'on_disk on_disk::Node),
108 OnDisk(&'on_disk on_disk::Node),
105 }
109 }
106
110
107 impl<'tree, 'on_disk> BorrowedPath<'tree, 'on_disk> {
111 impl<'tree, 'on_disk> BorrowedPath<'tree, 'on_disk> {
108 pub fn detach_from_tree(&self) -> Cow<'on_disk, HgPath> {
112 pub fn detach_from_tree(&self) -> Cow<'on_disk, HgPath> {
109 match *self {
113 match *self {
110 BorrowedPath::InMemory(in_memory) => Cow::Owned(in_memory.clone()),
114 BorrowedPath::InMemory(in_memory) => Cow::Owned(in_memory.clone()),
111 BorrowedPath::OnDisk(on_disk) => Cow::Borrowed(on_disk),
115 BorrowedPath::OnDisk(on_disk) => Cow::Borrowed(on_disk),
112 }
116 }
113 }
117 }
114 }
118 }
115
119
116 impl<'tree, 'on_disk> std::ops::Deref for BorrowedPath<'tree, 'on_disk> {
120 impl<'tree, 'on_disk> std::ops::Deref for BorrowedPath<'tree, 'on_disk> {
117 type Target = HgPath;
121 type Target = HgPath;
118
122
119 fn deref(&self) -> &HgPath {
123 fn deref(&self) -> &HgPath {
120 match *self {
124 match *self {
121 BorrowedPath::InMemory(in_memory) => in_memory,
125 BorrowedPath::InMemory(in_memory) => in_memory,
122 BorrowedPath::OnDisk(on_disk) => on_disk,
126 BorrowedPath::OnDisk(on_disk) => on_disk,
123 }
127 }
124 }
128 }
125 }
129 }
126
130
127 impl Default for ChildNodes<'_> {
131 impl Default for ChildNodes<'_> {
128 fn default() -> Self {
132 fn default() -> Self {
129 ChildNodes::InMemory(Default::default())
133 ChildNodes::InMemory(Default::default())
130 }
134 }
131 }
135 }
132
136
133 impl<'on_disk> ChildNodes<'on_disk> {
137 impl<'on_disk> ChildNodes<'on_disk> {
134 pub(super) fn as_ref<'tree>(
138 pub(super) fn as_ref<'tree>(
135 &'tree self,
139 &'tree self,
136 ) -> ChildNodesRef<'tree, 'on_disk> {
140 ) -> ChildNodesRef<'tree, 'on_disk> {
137 match self {
141 match self {
138 ChildNodes::InMemory(nodes) => ChildNodesRef::InMemory(nodes),
142 ChildNodes::InMemory(nodes) => ChildNodesRef::InMemory(nodes),
139 ChildNodes::OnDisk(nodes) => ChildNodesRef::OnDisk(nodes),
143 ChildNodes::OnDisk(nodes) => ChildNodesRef::OnDisk(nodes),
140 }
144 }
141 }
145 }
142
146
143 pub(super) fn is_empty(&self) -> bool {
147 pub(super) fn is_empty(&self) -> bool {
144 match self {
148 match self {
145 ChildNodes::InMemory(nodes) => nodes.is_empty(),
149 ChildNodes::InMemory(nodes) => nodes.is_empty(),
146 ChildNodes::OnDisk(nodes) => nodes.is_empty(),
150 ChildNodes::OnDisk(nodes) => nodes.is_empty(),
147 }
151 }
148 }
152 }
149
153
150 fn make_mut(
154 fn make_mut(
151 &mut self,
155 &mut self,
152 on_disk: &'on_disk [u8],
156 on_disk: &'on_disk [u8],
153 unreachable_bytes: &mut u32,
157 unreachable_bytes: &mut u32,
154 ) -> Result<
158 ) -> Result<
155 &mut FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>,
159 &mut FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>,
156 DirstateV2ParseError,
160 DirstateV2ParseError,
157 > {
161 > {
158 match self {
162 match self {
159 ChildNodes::InMemory(nodes) => Ok(nodes),
163 ChildNodes::InMemory(nodes) => Ok(nodes),
160 ChildNodes::OnDisk(nodes) => {
164 ChildNodes::OnDisk(nodes) => {
161 *unreachable_bytes +=
165 *unreachable_bytes +=
162 std::mem::size_of_val::<[on_disk::Node]>(nodes) as u32;
166 std::mem::size_of_val::<[on_disk::Node]>(nodes) as u32;
163 let nodes = nodes
167 let nodes = nodes
164 .iter()
168 .iter()
165 .map(|node| {
169 .map(|node| {
166 Ok((
170 Ok((
167 node.path(on_disk)?,
171 node.path(on_disk)?,
168 node.to_in_memory_node(on_disk)?,
172 node.to_in_memory_node(on_disk)?,
169 ))
173 ))
170 })
174 })
171 .collect::<Result<_, _>>()?;
175 .collect::<Result<_, _>>()?;
172 *self = ChildNodes::InMemory(nodes);
176 *self = ChildNodes::InMemory(nodes);
173 match self {
177 match self {
174 ChildNodes::InMemory(nodes) => Ok(nodes),
178 ChildNodes::InMemory(nodes) => Ok(nodes),
175 ChildNodes::OnDisk(_) => unreachable!(),
179 ChildNodes::OnDisk(_) => unreachable!(),
176 }
180 }
177 }
181 }
178 }
182 }
179 }
183 }
180 }
184 }
181
185
182 impl<'tree, 'on_disk> ChildNodesRef<'tree, 'on_disk> {
186 impl<'tree, 'on_disk> ChildNodesRef<'tree, 'on_disk> {
183 pub(super) fn get(
187 pub(super) fn get(
184 &self,
188 &self,
185 base_name: &HgPath,
189 base_name: &HgPath,
186 on_disk: &'on_disk [u8],
190 on_disk: &'on_disk [u8],
187 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
191 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
188 match self {
192 match self {
189 ChildNodesRef::InMemory(nodes) => Ok(nodes
193 ChildNodesRef::InMemory(nodes) => Ok(nodes
190 .get_key_value(base_name)
194 .get_key_value(base_name)
191 .map(|(k, v)| NodeRef::InMemory(k, v))),
195 .map(|(k, v)| NodeRef::InMemory(k, v))),
192 ChildNodesRef::OnDisk(nodes) => {
196 ChildNodesRef::OnDisk(nodes) => {
193 let mut parse_result = Ok(());
197 let mut parse_result = Ok(());
194 let search_result = nodes.binary_search_by(|node| {
198 let search_result = nodes.binary_search_by(|node| {
195 match node.base_name(on_disk) {
199 match node.base_name(on_disk) {
196 Ok(node_base_name) => node_base_name.cmp(base_name),
200 Ok(node_base_name) => node_base_name.cmp(base_name),
197 Err(e) => {
201 Err(e) => {
198 parse_result = Err(e);
202 parse_result = Err(e);
199 // Dummy comparison result, `search_result` won’t
203 // Dummy comparison result, `search_result` won’t
200 // be used since `parse_result` is an error
204 // be used since `parse_result` is an error
201 std::cmp::Ordering::Equal
205 std::cmp::Ordering::Equal
202 }
206 }
203 }
207 }
204 });
208 });
205 parse_result.map(|()| {
209 parse_result.map(|()| {
206 search_result.ok().map(|i| NodeRef::OnDisk(&nodes[i]))
210 search_result.ok().map(|i| NodeRef::OnDisk(&nodes[i]))
207 })
211 })
208 }
212 }
209 }
213 }
210 }
214 }
211
215
212 /// Iterate in undefined order
216 /// Iterate in undefined order
213 pub(super) fn iter(
217 pub(super) fn iter(
214 &self,
218 &self,
215 ) -> impl Iterator<Item = NodeRef<'tree, 'on_disk>> {
219 ) -> impl Iterator<Item = NodeRef<'tree, 'on_disk>> {
216 match self {
220 match self {
217 ChildNodesRef::InMemory(nodes) => itertools::Either::Left(
221 ChildNodesRef::InMemory(nodes) => itertools::Either::Left(
218 nodes.iter().map(|(k, v)| NodeRef::InMemory(k, v)),
222 nodes.iter().map(|(k, v)| NodeRef::InMemory(k, v)),
219 ),
223 ),
220 ChildNodesRef::OnDisk(nodes) => {
224 ChildNodesRef::OnDisk(nodes) => {
221 itertools::Either::Right(nodes.iter().map(NodeRef::OnDisk))
225 itertools::Either::Right(nodes.iter().map(NodeRef::OnDisk))
222 }
226 }
223 }
227 }
224 }
228 }
225
229
226 /// Iterate in parallel in undefined order
230 /// Iterate in parallel in undefined order
227 pub(super) fn par_iter(
231 pub(super) fn par_iter(
228 &self,
232 &self,
229 ) -> impl rayon::iter::ParallelIterator<Item = NodeRef<'tree, 'on_disk>>
233 ) -> impl rayon::iter::ParallelIterator<Item = NodeRef<'tree, 'on_disk>>
230 {
234 {
231 use rayon::prelude::*;
235 use rayon::prelude::*;
232 match self {
236 match self {
233 ChildNodesRef::InMemory(nodes) => rayon::iter::Either::Left(
237 ChildNodesRef::InMemory(nodes) => rayon::iter::Either::Left(
234 nodes.par_iter().map(|(k, v)| NodeRef::InMemory(k, v)),
238 nodes.par_iter().map(|(k, v)| NodeRef::InMemory(k, v)),
235 ),
239 ),
236 ChildNodesRef::OnDisk(nodes) => rayon::iter::Either::Right(
240 ChildNodesRef::OnDisk(nodes) => rayon::iter::Either::Right(
237 nodes.par_iter().map(NodeRef::OnDisk),
241 nodes.par_iter().map(NodeRef::OnDisk),
238 ),
242 ),
239 }
243 }
240 }
244 }
241
245
242 pub(super) fn sorted(&self) -> Vec<NodeRef<'tree, 'on_disk>> {
246 pub(super) fn sorted(&self) -> Vec<NodeRef<'tree, 'on_disk>> {
243 match self {
247 match self {
244 ChildNodesRef::InMemory(nodes) => {
248 ChildNodesRef::InMemory(nodes) => {
245 let mut vec: Vec<_> = nodes
249 let mut vec: Vec<_> = nodes
246 .iter()
250 .iter()
247 .map(|(k, v)| NodeRef::InMemory(k, v))
251 .map(|(k, v)| NodeRef::InMemory(k, v))
248 .collect();
252 .collect();
249 fn sort_key<'a>(node: &'a NodeRef) -> &'a HgPath {
253 fn sort_key<'a>(node: &'a NodeRef) -> &'a HgPath {
250 match node {
254 match node {
251 NodeRef::InMemory(path, _node) => path.base_name(),
255 NodeRef::InMemory(path, _node) => path.base_name(),
252 NodeRef::OnDisk(_) => unreachable!(),
256 NodeRef::OnDisk(_) => unreachable!(),
253 }
257 }
254 }
258 }
255 // `sort_unstable_by_key` doesn’t allow keys borrowing from the
259 // `sort_unstable_by_key` doesn’t allow keys borrowing from the
256 // value: https://github.com/rust-lang/rust/issues/34162
260 // value: https://github.com/rust-lang/rust/issues/34162
257 vec.sort_unstable_by(|a, b| sort_key(a).cmp(sort_key(b)));
261 vec.sort_unstable_by(|a, b| sort_key(a).cmp(sort_key(b)));
258 vec
262 vec
259 }
263 }
260 ChildNodesRef::OnDisk(nodes) => {
264 ChildNodesRef::OnDisk(nodes) => {
261 // Nodes on disk are already sorted
265 // Nodes on disk are already sorted
262 nodes.iter().map(NodeRef::OnDisk).collect()
266 nodes.iter().map(NodeRef::OnDisk).collect()
263 }
267 }
264 }
268 }
265 }
269 }
266 }
270 }
267
271
268 impl<'tree, 'on_disk> NodeRef<'tree, 'on_disk> {
272 impl<'tree, 'on_disk> NodeRef<'tree, 'on_disk> {
269 pub(super) fn full_path(
273 pub(super) fn full_path(
270 &self,
274 &self,
271 on_disk: &'on_disk [u8],
275 on_disk: &'on_disk [u8],
272 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
276 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
273 match self {
277 match self {
274 NodeRef::InMemory(path, _node) => Ok(path.full_path()),
278 NodeRef::InMemory(path, _node) => Ok(path.full_path()),
275 NodeRef::OnDisk(node) => node.full_path(on_disk),
279 NodeRef::OnDisk(node) => node.full_path(on_disk),
276 }
280 }
277 }
281 }
278
282
279 /// Returns a `BorrowedPath`, which can be turned into a `Cow<'on_disk,
283 /// Returns a `BorrowedPath`, which can be turned into a `Cow<'on_disk,
280 /// HgPath>` detached from `'tree`
284 /// HgPath>` detached from `'tree`
281 pub(super) fn full_path_borrowed(
285 pub(super) fn full_path_borrowed(
282 &self,
286 &self,
283 on_disk: &'on_disk [u8],
287 on_disk: &'on_disk [u8],
284 ) -> Result<BorrowedPath<'tree, 'on_disk>, DirstateV2ParseError> {
288 ) -> Result<BorrowedPath<'tree, 'on_disk>, DirstateV2ParseError> {
285 match self {
289 match self {
286 NodeRef::InMemory(path, _node) => match path.full_path() {
290 NodeRef::InMemory(path, _node) => match path.full_path() {
287 Cow::Borrowed(on_disk) => Ok(BorrowedPath::OnDisk(on_disk)),
291 Cow::Borrowed(on_disk) => Ok(BorrowedPath::OnDisk(on_disk)),
288 Cow::Owned(in_memory) => Ok(BorrowedPath::InMemory(in_memory)),
292 Cow::Owned(in_memory) => Ok(BorrowedPath::InMemory(in_memory)),
289 },
293 },
290 NodeRef::OnDisk(node) => {
294 NodeRef::OnDisk(node) => {
291 Ok(BorrowedPath::OnDisk(node.full_path(on_disk)?))
295 Ok(BorrowedPath::OnDisk(node.full_path(on_disk)?))
292 }
296 }
293 }
297 }
294 }
298 }
295
299
296 pub(super) fn base_name(
300 pub(super) fn base_name(
297 &self,
301 &self,
298 on_disk: &'on_disk [u8],
302 on_disk: &'on_disk [u8],
299 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
303 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
300 match self {
304 match self {
301 NodeRef::InMemory(path, _node) => Ok(path.base_name()),
305 NodeRef::InMemory(path, _node) => Ok(path.base_name()),
302 NodeRef::OnDisk(node) => node.base_name(on_disk),
306 NodeRef::OnDisk(node) => node.base_name(on_disk),
303 }
307 }
304 }
308 }
305
309
306 pub(super) fn children(
310 pub(super) fn children(
307 &self,
311 &self,
308 on_disk: &'on_disk [u8],
312 on_disk: &'on_disk [u8],
309 ) -> Result<ChildNodesRef<'tree, 'on_disk>, DirstateV2ParseError> {
313 ) -> Result<ChildNodesRef<'tree, 'on_disk>, DirstateV2ParseError> {
310 match self {
314 match self {
311 NodeRef::InMemory(_path, node) => Ok(node.children.as_ref()),
315 NodeRef::InMemory(_path, node) => Ok(node.children.as_ref()),
312 NodeRef::OnDisk(node) => {
316 NodeRef::OnDisk(node) => {
313 Ok(ChildNodesRef::OnDisk(node.children(on_disk)?))
317 Ok(ChildNodesRef::OnDisk(node.children(on_disk)?))
314 }
318 }
315 }
319 }
316 }
320 }
317
321
318 pub(super) fn has_copy_source(&self) -> bool {
322 pub(super) fn has_copy_source(&self) -> bool {
319 match self {
323 match self {
320 NodeRef::InMemory(_path, node) => node.copy_source.is_some(),
324 NodeRef::InMemory(_path, node) => node.copy_source.is_some(),
321 NodeRef::OnDisk(node) => node.has_copy_source(),
325 NodeRef::OnDisk(node) => node.has_copy_source(),
322 }
326 }
323 }
327 }
324
328
325 pub(super) fn copy_source(
329 pub(super) fn copy_source(
326 &self,
330 &self,
327 on_disk: &'on_disk [u8],
331 on_disk: &'on_disk [u8],
328 ) -> Result<Option<&'tree HgPath>, DirstateV2ParseError> {
332 ) -> Result<Option<&'tree HgPath>, DirstateV2ParseError> {
329 match self {
333 match self {
330 NodeRef::InMemory(_path, node) => {
334 NodeRef::InMemory(_path, node) => {
331 Ok(node.copy_source.as_ref().map(|s| &**s))
335 Ok(node.copy_source.as_ref().map(|s| &**s))
332 }
336 }
333 NodeRef::OnDisk(node) => node.copy_source(on_disk),
337 NodeRef::OnDisk(node) => node.copy_source(on_disk),
334 }
338 }
335 }
339 }
336 /// Returns a `BorrowedPath`, which can be turned into a `Cow<'on_disk,
340 /// Returns a `BorrowedPath`, which can be turned into a `Cow<'on_disk,
337 /// HgPath>` detached from `'tree`
341 /// HgPath>` detached from `'tree`
338 pub(super) fn copy_source_borrowed(
342 pub(super) fn copy_source_borrowed(
339 &self,
343 &self,
340 on_disk: &'on_disk [u8],
344 on_disk: &'on_disk [u8],
341 ) -> Result<Option<BorrowedPath<'tree, 'on_disk>>, DirstateV2ParseError>
345 ) -> Result<Option<BorrowedPath<'tree, 'on_disk>>, DirstateV2ParseError>
342 {
346 {
343 Ok(match self {
347 Ok(match self {
344 NodeRef::InMemory(_path, node) => {
348 NodeRef::InMemory(_path, node) => {
345 node.copy_source.as_ref().map(|source| match source {
349 node.copy_source.as_ref().map(|source| match source {
346 Cow::Borrowed(on_disk) => BorrowedPath::OnDisk(on_disk),
350 Cow::Borrowed(on_disk) => BorrowedPath::OnDisk(on_disk),
347 Cow::Owned(in_memory) => BorrowedPath::InMemory(in_memory),
351 Cow::Owned(in_memory) => BorrowedPath::InMemory(in_memory),
348 })
352 })
349 }
353 }
350 NodeRef::OnDisk(node) => node
354 NodeRef::OnDisk(node) => node
351 .copy_source(on_disk)?
355 .copy_source(on_disk)?
352 .map(|source| BorrowedPath::OnDisk(source)),
356 .map(|source| BorrowedPath::OnDisk(source)),
353 })
357 })
354 }
358 }
355
359
356 pub(super) fn entry(
360 pub(super) fn entry(
357 &self,
361 &self,
358 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
362 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
359 match self {
363 match self {
360 NodeRef::InMemory(_path, node) => {
364 NodeRef::InMemory(_path, node) => {
361 Ok(node.data.as_entry().copied())
365 Ok(node.data.as_entry().copied())
362 }
366 }
363 NodeRef::OnDisk(node) => node.entry(),
367 NodeRef::OnDisk(node) => node.entry(),
364 }
368 }
365 }
369 }
366
370
367 pub(super) fn cached_directory_mtime(
371 pub(super) fn cached_directory_mtime(
368 &self,
372 &self,
369 ) -> Result<Option<TruncatedTimestamp>, DirstateV2ParseError> {
373 ) -> Result<Option<TruncatedTimestamp>, DirstateV2ParseError> {
370 match self {
374 match self {
371 NodeRef::InMemory(_path, node) => Ok(match node.data {
375 NodeRef::InMemory(_path, node) => Ok(match node.data {
372 NodeData::CachedDirectory { mtime } => Some(mtime),
376 NodeData::CachedDirectory { mtime } => Some(mtime),
373 _ => None,
377 _ => None,
374 }),
378 }),
375 NodeRef::OnDisk(node) => node.cached_directory_mtime(),
379 NodeRef::OnDisk(node) => node.cached_directory_mtime(),
376 }
380 }
377 }
381 }
378
382
379 pub(super) fn descendants_with_entry_count(&self) -> u32 {
383 pub(super) fn descendants_with_entry_count(&self) -> u32 {
380 match self {
384 match self {
381 NodeRef::InMemory(_path, node) => {
385 NodeRef::InMemory(_path, node) => {
382 node.descendants_with_entry_count
386 node.descendants_with_entry_count
383 }
387 }
384 NodeRef::OnDisk(node) => node.descendants_with_entry_count.get(),
388 NodeRef::OnDisk(node) => node.descendants_with_entry_count.get(),
385 }
389 }
386 }
390 }
387
391
388 pub(super) fn tracked_descendants_count(&self) -> u32 {
392 pub(super) fn tracked_descendants_count(&self) -> u32 {
389 match self {
393 match self {
390 NodeRef::InMemory(_path, node) => node.tracked_descendants_count,
394 NodeRef::InMemory(_path, node) => node.tracked_descendants_count,
391 NodeRef::OnDisk(node) => node.tracked_descendants_count.get(),
395 NodeRef::OnDisk(node) => node.tracked_descendants_count.get(),
392 }
396 }
393 }
397 }
394 }
398 }
395
399
396 /// Represents a file or a directory
400 /// Represents a file or a directory
397 #[derive(Default, Debug)]
401 #[derive(Default, Debug)]
398 pub(super) struct Node<'on_disk> {
402 pub(super) struct Node<'on_disk> {
399 pub(super) data: NodeData,
403 pub(super) data: NodeData,
400
404
401 pub(super) copy_source: Option<Cow<'on_disk, HgPath>>,
405 pub(super) copy_source: Option<Cow<'on_disk, HgPath>>,
402
406
403 pub(super) children: ChildNodes<'on_disk>,
407 pub(super) children: ChildNodes<'on_disk>,
404
408
405 /// How many (non-inclusive) descendants of this node have an entry.
409 /// How many (non-inclusive) descendants of this node have an entry.
406 pub(super) descendants_with_entry_count: u32,
410 pub(super) descendants_with_entry_count: u32,
407
411
408 /// How many (non-inclusive) descendants of this node have an entry whose
412 /// How many (non-inclusive) descendants of this node have an entry whose
409 /// state is "tracked".
413 /// state is "tracked".
410 pub(super) tracked_descendants_count: u32,
414 pub(super) tracked_descendants_count: u32,
411 }
415 }
412
416
413 #[derive(Debug)]
417 #[derive(Debug)]
414 pub(super) enum NodeData {
418 pub(super) enum NodeData {
415 Entry(DirstateEntry),
419 Entry(DirstateEntry),
416 CachedDirectory { mtime: TruncatedTimestamp },
420 CachedDirectory { mtime: TruncatedTimestamp },
417 None,
421 None,
418 }
422 }
419
423
420 impl Default for NodeData {
424 impl Default for NodeData {
421 fn default() -> Self {
425 fn default() -> Self {
422 NodeData::None
426 NodeData::None
423 }
427 }
424 }
428 }
425
429
426 impl NodeData {
430 impl NodeData {
427 fn has_entry(&self) -> bool {
431 fn has_entry(&self) -> bool {
428 match self {
432 match self {
429 NodeData::Entry(_) => true,
433 NodeData::Entry(_) => true,
430 _ => false,
434 _ => false,
431 }
435 }
432 }
436 }
433
437
434 fn as_entry(&self) -> Option<&DirstateEntry> {
438 fn as_entry(&self) -> Option<&DirstateEntry> {
435 match self {
439 match self {
436 NodeData::Entry(entry) => Some(entry),
440 NodeData::Entry(entry) => Some(entry),
437 _ => None,
441 _ => None,
438 }
442 }
439 }
443 }
440
444
441 fn as_entry_mut(&mut self) -> Option<&mut DirstateEntry> {
445 fn as_entry_mut(&mut self) -> Option<&mut DirstateEntry> {
442 match self {
446 match self {
443 NodeData::Entry(entry) => Some(entry),
447 NodeData::Entry(entry) => Some(entry),
444 _ => None,
448 _ => None,
445 }
449 }
446 }
450 }
447 }
451 }
448
452
449 impl<'on_disk> DirstateMap<'on_disk> {
453 impl<'on_disk> DirstateMap<'on_disk> {
450 pub(super) fn empty(on_disk: &'on_disk [u8]) -> Self {
454 pub(super) fn empty(on_disk: &'on_disk [u8]) -> Self {
451 Self {
455 Self {
452 on_disk,
456 on_disk,
453 root: ChildNodes::default(),
457 root: ChildNodes::default(),
454 nodes_with_entry_count: 0,
458 nodes_with_entry_count: 0,
455 nodes_with_copy_source_count: 0,
459 nodes_with_copy_source_count: 0,
456 ignore_patterns_hash: [0; on_disk::IGNORE_PATTERNS_HASH_LEN],
460 ignore_patterns_hash: [0; on_disk::IGNORE_PATTERNS_HASH_LEN],
457 unreachable_bytes: 0,
461 unreachable_bytes: 0,
458 old_data_size: 0,
462 old_data_size: 0,
459 dirstate_version: DirstateVersion::V1,
463 dirstate_version: DirstateVersion::V1,
464 write_mode: DirstateMapWriteMode::Auto,
460 }
465 }
461 }
466 }
462
467
463 #[timed]
468 #[timed]
464 pub fn new_v2(
469 pub fn new_v2(
465 on_disk: &'on_disk [u8],
470 on_disk: &'on_disk [u8],
466 data_size: usize,
471 data_size: usize,
467 metadata: &[u8],
472 metadata: &[u8],
468 ) -> Result<Self, DirstateError> {
473 ) -> Result<Self, DirstateError> {
469 if let Some(data) = on_disk.get(..data_size) {
474 if let Some(data) = on_disk.get(..data_size) {
470 Ok(on_disk::read(data, metadata)?)
475 Ok(on_disk::read(data, metadata)?)
471 } else {
476 } else {
472 Err(DirstateV2ParseError::new("not enough bytes on disk").into())
477 Err(DirstateV2ParseError::new("not enough bytes on disk").into())
473 }
478 }
474 }
479 }
475
480
476 #[timed]
481 #[timed]
477 pub fn new_v1(
482 pub fn new_v1(
478 on_disk: &'on_disk [u8],
483 on_disk: &'on_disk [u8],
479 ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
484 ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
480 let mut map = Self::empty(on_disk);
485 let mut map = Self::empty(on_disk);
481 if map.on_disk.is_empty() {
486 if map.on_disk.is_empty() {
482 return Ok((map, None));
487 return Ok((map, None));
483 }
488 }
484
489
485 let parents = parse_dirstate_entries(
490 let parents = parse_dirstate_entries(
486 map.on_disk,
491 map.on_disk,
487 |path, entry, copy_source| {
492 |path, entry, copy_source| {
488 let tracked = entry.tracked();
493 let tracked = entry.tracked();
489 let node = Self::get_or_insert_node_inner(
494 let node = Self::get_or_insert_node_inner(
490 map.on_disk,
495 map.on_disk,
491 &mut map.unreachable_bytes,
496 &mut map.unreachable_bytes,
492 &mut map.root,
497 &mut map.root,
493 path,
498 path,
494 WithBasename::to_cow_borrowed,
499 WithBasename::to_cow_borrowed,
495 |ancestor| {
500 |ancestor| {
496 if tracked {
501 if tracked {
497 ancestor.tracked_descendants_count += 1
502 ancestor.tracked_descendants_count += 1
498 }
503 }
499 ancestor.descendants_with_entry_count += 1
504 ancestor.descendants_with_entry_count += 1
500 },
505 },
501 )?;
506 )?;
502 assert!(
507 assert!(
503 !node.data.has_entry(),
508 !node.data.has_entry(),
504 "duplicate dirstate entry in read"
509 "duplicate dirstate entry in read"
505 );
510 );
506 assert!(
511 assert!(
507 node.copy_source.is_none(),
512 node.copy_source.is_none(),
508 "duplicate dirstate entry in read"
513 "duplicate dirstate entry in read"
509 );
514 );
510 node.data = NodeData::Entry(*entry);
515 node.data = NodeData::Entry(*entry);
511 node.copy_source = copy_source.map(Cow::Borrowed);
516 node.copy_source = copy_source.map(Cow::Borrowed);
512 map.nodes_with_entry_count += 1;
517 map.nodes_with_entry_count += 1;
513 if copy_source.is_some() {
518 if copy_source.is_some() {
514 map.nodes_with_copy_source_count += 1
519 map.nodes_with_copy_source_count += 1
515 }
520 }
516 Ok(())
521 Ok(())
517 },
522 },
518 )?;
523 )?;
519 let parents = Some(parents.clone());
524 let parents = Some(parents.clone());
520
525
521 Ok((map, parents))
526 Ok((map, parents))
522 }
527 }
523
528
524 /// Assuming dirstate-v2 format, returns whether the next write should
529 /// Assuming dirstate-v2 format, returns whether the next write should
525 /// append to the existing data file that contains `self.on_disk` (true),
530 /// append to the existing data file that contains `self.on_disk` (true),
526 /// or create a new data file from scratch (false).
531 /// or create a new data file from scratch (false).
527 pub(super) fn write_should_append(&self) -> bool {
532 pub(super) fn write_should_append(&self) -> bool {
528 let ratio = self.unreachable_bytes as f32 / self.on_disk.len() as f32;
533 match self.write_mode {
529 ratio < ACCEPTABLE_UNREACHABLE_BYTES_RATIO
534 DirstateMapWriteMode::ForceAppend => true,
535 DirstateMapWriteMode::ForceNewDataFile => false,
536 DirstateMapWriteMode::Auto => {
537 let ratio =
538 self.unreachable_bytes as f32 / self.on_disk.len() as f32;
539 ratio < ACCEPTABLE_UNREACHABLE_BYTES_RATIO
540 }
541 }
530 }
542 }
531
543
532 fn get_node<'tree>(
544 fn get_node<'tree>(
533 &'tree self,
545 &'tree self,
534 path: &HgPath,
546 path: &HgPath,
535 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
547 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
536 let mut children = self.root.as_ref();
548 let mut children = self.root.as_ref();
537 let mut components = path.components();
549 let mut components = path.components();
538 let mut component =
550 let mut component =
539 components.next().expect("expected at least one components");
551 components.next().expect("expected at least one components");
540 loop {
552 loop {
541 if let Some(child) = children.get(component, self.on_disk)? {
553 if let Some(child) = children.get(component, self.on_disk)? {
542 if let Some(next_component) = components.next() {
554 if let Some(next_component) = components.next() {
543 component = next_component;
555 component = next_component;
544 children = child.children(self.on_disk)?;
556 children = child.children(self.on_disk)?;
545 } else {
557 } else {
546 return Ok(Some(child));
558 return Ok(Some(child));
547 }
559 }
548 } else {
560 } else {
549 return Ok(None);
561 return Ok(None);
550 }
562 }
551 }
563 }
552 }
564 }
553
565
554 /// Returns a mutable reference to the node at `path` if it exists
566 /// Returns a mutable reference to the node at `path` if it exists
555 ///
567 ///
556 /// `each_ancestor` is a callback that is called for each ancestor node
568 /// `each_ancestor` is a callback that is called for each ancestor node
557 /// when descending the tree. It is used to keep the different counters
569 /// when descending the tree. It is used to keep the different counters
558 /// of the `DirstateMap` up-to-date.
570 /// of the `DirstateMap` up-to-date.
559 fn get_node_mut<'tree>(
571 fn get_node_mut<'tree>(
560 &'tree mut self,
572 &'tree mut self,
561 path: &HgPath,
573 path: &HgPath,
562 each_ancestor: impl FnMut(&mut Node),
574 each_ancestor: impl FnMut(&mut Node),
563 ) -> Result<Option<&'tree mut Node<'on_disk>>, DirstateV2ParseError> {
575 ) -> Result<Option<&'tree mut Node<'on_disk>>, DirstateV2ParseError> {
564 Self::get_node_mut_inner(
576 Self::get_node_mut_inner(
565 self.on_disk,
577 self.on_disk,
566 &mut self.unreachable_bytes,
578 &mut self.unreachable_bytes,
567 &mut self.root,
579 &mut self.root,
568 path,
580 path,
569 each_ancestor,
581 each_ancestor,
570 )
582 )
571 }
583 }
572
584
573 /// Lower-level version of `get_node_mut`.
585 /// Lower-level version of `get_node_mut`.
574 ///
586 ///
575 /// This takes `root` instead of `&mut self` so that callers can mutate
587 /// This takes `root` instead of `&mut self` so that callers can mutate
576 /// other fields while the returned borrow is still valid.
588 /// other fields while the returned borrow is still valid.
577 ///
589 ///
578 /// `each_ancestor` is a callback that is called for each ancestor node
590 /// `each_ancestor` is a callback that is called for each ancestor node
579 /// when descending the tree. It is used to keep the different counters
591 /// when descending the tree. It is used to keep the different counters
580 /// of the `DirstateMap` up-to-date.
592 /// of the `DirstateMap` up-to-date.
581 fn get_node_mut_inner<'tree>(
593 fn get_node_mut_inner<'tree>(
582 on_disk: &'on_disk [u8],
594 on_disk: &'on_disk [u8],
583 unreachable_bytes: &mut u32,
595 unreachable_bytes: &mut u32,
584 root: &'tree mut ChildNodes<'on_disk>,
596 root: &'tree mut ChildNodes<'on_disk>,
585 path: &HgPath,
597 path: &HgPath,
586 mut each_ancestor: impl FnMut(&mut Node),
598 mut each_ancestor: impl FnMut(&mut Node),
587 ) -> Result<Option<&'tree mut Node<'on_disk>>, DirstateV2ParseError> {
599 ) -> Result<Option<&'tree mut Node<'on_disk>>, DirstateV2ParseError> {
588 let mut children = root;
600 let mut children = root;
589 let mut components = path.components();
601 let mut components = path.components();
590 let mut component =
602 let mut component =
591 components.next().expect("expected at least one components");
603 components.next().expect("expected at least one components");
592 loop {
604 loop {
593 if let Some(child) = children
605 if let Some(child) = children
594 .make_mut(on_disk, unreachable_bytes)?
606 .make_mut(on_disk, unreachable_bytes)?
595 .get_mut(component)
607 .get_mut(component)
596 {
608 {
597 if let Some(next_component) = components.next() {
609 if let Some(next_component) = components.next() {
598 each_ancestor(child);
610 each_ancestor(child);
599 component = next_component;
611 component = next_component;
600 children = &mut child.children;
612 children = &mut child.children;
601 } else {
613 } else {
602 return Ok(Some(child));
614 return Ok(Some(child));
603 }
615 }
604 } else {
616 } else {
605 return Ok(None);
617 return Ok(None);
606 }
618 }
607 }
619 }
608 }
620 }
609
621
610 /// Get a mutable reference to the node at `path`, creating it if it does
622 /// Get a mutable reference to the node at `path`, creating it if it does
611 /// not exist.
623 /// not exist.
612 ///
624 ///
613 /// `each_ancestor` is a callback that is called for each ancestor node
625 /// `each_ancestor` is a callback that is called for each ancestor node
614 /// when descending the tree. It is used to keep the different counters
626 /// when descending the tree. It is used to keep the different counters
615 /// of the `DirstateMap` up-to-date.
627 /// of the `DirstateMap` up-to-date.
616 fn get_or_insert_node<'tree, 'path>(
628 fn get_or_insert_node<'tree, 'path>(
617 &'tree mut self,
629 &'tree mut self,
618 path: &'path HgPath,
630 path: &'path HgPath,
619 each_ancestor: impl FnMut(&mut Node),
631 each_ancestor: impl FnMut(&mut Node),
620 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
632 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
621 Self::get_or_insert_node_inner(
633 Self::get_or_insert_node_inner(
622 self.on_disk,
634 self.on_disk,
623 &mut self.unreachable_bytes,
635 &mut self.unreachable_bytes,
624 &mut self.root,
636 &mut self.root,
625 path,
637 path,
626 WithBasename::to_cow_owned,
638 WithBasename::to_cow_owned,
627 each_ancestor,
639 each_ancestor,
628 )
640 )
629 }
641 }
630
642
631 /// Lower-level version of `get_or_insert_node_inner`, which is used when
643 /// Lower-level version of `get_or_insert_node_inner`, which is used when
632 /// parsing disk data to remove allocations for new nodes.
644 /// parsing disk data to remove allocations for new nodes.
633 fn get_or_insert_node_inner<'tree, 'path>(
645 fn get_or_insert_node_inner<'tree, 'path>(
634 on_disk: &'on_disk [u8],
646 on_disk: &'on_disk [u8],
635 unreachable_bytes: &mut u32,
647 unreachable_bytes: &mut u32,
636 root: &'tree mut ChildNodes<'on_disk>,
648 root: &'tree mut ChildNodes<'on_disk>,
637 path: &'path HgPath,
649 path: &'path HgPath,
638 to_cow: impl Fn(
650 to_cow: impl Fn(
639 WithBasename<&'path HgPath>,
651 WithBasename<&'path HgPath>,
640 ) -> WithBasename<Cow<'on_disk, HgPath>>,
652 ) -> WithBasename<Cow<'on_disk, HgPath>>,
641 mut each_ancestor: impl FnMut(&mut Node),
653 mut each_ancestor: impl FnMut(&mut Node),
642 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
654 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
643 let mut child_nodes = root;
655 let mut child_nodes = root;
644 let mut inclusive_ancestor_paths =
656 let mut inclusive_ancestor_paths =
645 WithBasename::inclusive_ancestors_of(path);
657 WithBasename::inclusive_ancestors_of(path);
646 let mut ancestor_path = inclusive_ancestor_paths
658 let mut ancestor_path = inclusive_ancestor_paths
647 .next()
659 .next()
648 .expect("expected at least one inclusive ancestor");
660 .expect("expected at least one inclusive ancestor");
649 loop {
661 loop {
650 let (_, child_node) = child_nodes
662 let (_, child_node) = child_nodes
651 .make_mut(on_disk, unreachable_bytes)?
663 .make_mut(on_disk, unreachable_bytes)?
652 .raw_entry_mut()
664 .raw_entry_mut()
653 .from_key(ancestor_path.base_name())
665 .from_key(ancestor_path.base_name())
654 .or_insert_with(|| (to_cow(ancestor_path), Node::default()));
666 .or_insert_with(|| (to_cow(ancestor_path), Node::default()));
655 if let Some(next) = inclusive_ancestor_paths.next() {
667 if let Some(next) = inclusive_ancestor_paths.next() {
656 each_ancestor(child_node);
668 each_ancestor(child_node);
657 ancestor_path = next;
669 ancestor_path = next;
658 child_nodes = &mut child_node.children;
670 child_nodes = &mut child_node.children;
659 } else {
671 } else {
660 return Ok(child_node);
672 return Ok(child_node);
661 }
673 }
662 }
674 }
663 }
675 }
664
676
665 fn reset_state(
677 fn reset_state(
666 &mut self,
678 &mut self,
667 filename: &HgPath,
679 filename: &HgPath,
668 old_entry_opt: Option<DirstateEntry>,
680 old_entry_opt: Option<DirstateEntry>,
669 wc_tracked: bool,
681 wc_tracked: bool,
670 p1_tracked: bool,
682 p1_tracked: bool,
671 p2_info: bool,
683 p2_info: bool,
672 has_meaningful_mtime: bool,
684 has_meaningful_mtime: bool,
673 parent_file_data_opt: Option<ParentFileData>,
685 parent_file_data_opt: Option<ParentFileData>,
674 ) -> Result<(), DirstateError> {
686 ) -> Result<(), DirstateError> {
675 let (had_entry, was_tracked) = match old_entry_opt {
687 let (had_entry, was_tracked) = match old_entry_opt {
676 Some(old_entry) => (true, old_entry.tracked()),
688 Some(old_entry) => (true, old_entry.tracked()),
677 None => (false, false),
689 None => (false, false),
678 };
690 };
679 let node = self.get_or_insert_node(filename, |ancestor| {
691 let node = self.get_or_insert_node(filename, |ancestor| {
680 if !had_entry {
692 if !had_entry {
681 ancestor.descendants_with_entry_count += 1;
693 ancestor.descendants_with_entry_count += 1;
682 }
694 }
683 if was_tracked {
695 if was_tracked {
684 if !wc_tracked {
696 if !wc_tracked {
685 ancestor.tracked_descendants_count = ancestor
697 ancestor.tracked_descendants_count = ancestor
686 .tracked_descendants_count
698 .tracked_descendants_count
687 .checked_sub(1)
699 .checked_sub(1)
688 .expect("tracked count to be >= 0");
700 .expect("tracked count to be >= 0");
689 }
701 }
690 } else {
702 } else {
691 if wc_tracked {
703 if wc_tracked {
692 ancestor.tracked_descendants_count += 1;
704 ancestor.tracked_descendants_count += 1;
693 }
705 }
694 }
706 }
695 })?;
707 })?;
696
708
697 let v2_data = if let Some(parent_file_data) = parent_file_data_opt {
709 let v2_data = if let Some(parent_file_data) = parent_file_data_opt {
698 DirstateV2Data {
710 DirstateV2Data {
699 wc_tracked,
711 wc_tracked,
700 p1_tracked,
712 p1_tracked,
701 p2_info,
713 p2_info,
702 mode_size: parent_file_data.mode_size,
714 mode_size: parent_file_data.mode_size,
703 mtime: if has_meaningful_mtime {
715 mtime: if has_meaningful_mtime {
704 parent_file_data.mtime
716 parent_file_data.mtime
705 } else {
717 } else {
706 None
718 None
707 },
719 },
708 ..Default::default()
720 ..Default::default()
709 }
721 }
710 } else {
722 } else {
711 DirstateV2Data {
723 DirstateV2Data {
712 wc_tracked,
724 wc_tracked,
713 p1_tracked,
725 p1_tracked,
714 p2_info,
726 p2_info,
715 ..Default::default()
727 ..Default::default()
716 }
728 }
717 };
729 };
718 node.data = NodeData::Entry(DirstateEntry::from_v2_data(v2_data));
730 node.data = NodeData::Entry(DirstateEntry::from_v2_data(v2_data));
719 if !had_entry {
731 if !had_entry {
720 self.nodes_with_entry_count += 1;
732 self.nodes_with_entry_count += 1;
721 }
733 }
722 Ok(())
734 Ok(())
723 }
735 }
724
736
725 fn set_tracked(
737 fn set_tracked(
726 &mut self,
738 &mut self,
727 filename: &HgPath,
739 filename: &HgPath,
728 old_entry_opt: Option<DirstateEntry>,
740 old_entry_opt: Option<DirstateEntry>,
729 ) -> Result<bool, DirstateV2ParseError> {
741 ) -> Result<bool, DirstateV2ParseError> {
730 let was_tracked = old_entry_opt.map_or(false, |e| e.tracked());
742 let was_tracked = old_entry_opt.map_or(false, |e| e.tracked());
731 let had_entry = old_entry_opt.is_some();
743 let had_entry = old_entry_opt.is_some();
732 let tracked_count_increment = if was_tracked { 0 } else { 1 };
744 let tracked_count_increment = if was_tracked { 0 } else { 1 };
733 let mut new = false;
745 let mut new = false;
734
746
735 let node = self.get_or_insert_node(filename, |ancestor| {
747 let node = self.get_or_insert_node(filename, |ancestor| {
736 if !had_entry {
748 if !had_entry {
737 ancestor.descendants_with_entry_count += 1;
749 ancestor.descendants_with_entry_count += 1;
738 }
750 }
739
751
740 ancestor.tracked_descendants_count += tracked_count_increment;
752 ancestor.tracked_descendants_count += tracked_count_increment;
741 })?;
753 })?;
742 if let Some(old_entry) = old_entry_opt {
754 if let Some(old_entry) = old_entry_opt {
743 let mut e = old_entry.clone();
755 let mut e = old_entry.clone();
744 if e.tracked() {
756 if e.tracked() {
745 // XXX
757 // XXX
746 // This is probably overkill for more case, but we need this to
758 // This is probably overkill for more case, but we need this to
747 // fully replace the `normallookup` call with `set_tracked`
759 // fully replace the `normallookup` call with `set_tracked`
748 // one. Consider smoothing this in the future.
760 // one. Consider smoothing this in the future.
749 e.set_possibly_dirty();
761 e.set_possibly_dirty();
750 } else {
762 } else {
751 new = true;
763 new = true;
752 e.set_tracked();
764 e.set_tracked();
753 }
765 }
754 node.data = NodeData::Entry(e)
766 node.data = NodeData::Entry(e)
755 } else {
767 } else {
756 node.data = NodeData::Entry(DirstateEntry::new_tracked());
768 node.data = NodeData::Entry(DirstateEntry::new_tracked());
757 self.nodes_with_entry_count += 1;
769 self.nodes_with_entry_count += 1;
758 new = true;
770 new = true;
759 };
771 };
760 Ok(new)
772 Ok(new)
761 }
773 }
762
774
763 /// Set a node as untracked in the dirstate.
775 /// Set a node as untracked in the dirstate.
764 ///
776 ///
765 /// It is the responsibility of the caller to remove the copy source and/or
777 /// It is the responsibility of the caller to remove the copy source and/or
766 /// the entry itself if appropriate.
778 /// the entry itself if appropriate.
767 ///
779 ///
768 /// # Panics
780 /// # Panics
769 ///
781 ///
770 /// Panics if the node does not exist.
782 /// Panics if the node does not exist.
771 fn set_untracked(
783 fn set_untracked(
772 &mut self,
784 &mut self,
773 filename: &HgPath,
785 filename: &HgPath,
774 old_entry: DirstateEntry,
786 old_entry: DirstateEntry,
775 ) -> Result<(), DirstateV2ParseError> {
787 ) -> Result<(), DirstateV2ParseError> {
776 let node = self
788 let node = self
777 .get_node_mut(filename, |ancestor| {
789 .get_node_mut(filename, |ancestor| {
778 ancestor.tracked_descendants_count = ancestor
790 ancestor.tracked_descendants_count = ancestor
779 .tracked_descendants_count
791 .tracked_descendants_count
780 .checked_sub(1)
792 .checked_sub(1)
781 .expect("tracked_descendants_count should be >= 0");
793 .expect("tracked_descendants_count should be >= 0");
782 })?
794 })?
783 .expect("node should exist");
795 .expect("node should exist");
784 let mut new_entry = old_entry.clone();
796 let mut new_entry = old_entry.clone();
785 new_entry.set_untracked();
797 new_entry.set_untracked();
786 node.data = NodeData::Entry(new_entry);
798 node.data = NodeData::Entry(new_entry);
787 Ok(())
799 Ok(())
788 }
800 }
789
801
790 /// Set a node as clean in the dirstate.
802 /// Set a node as clean in the dirstate.
791 ///
803 ///
792 /// It is the responsibility of the caller to remove the copy source.
804 /// It is the responsibility of the caller to remove the copy source.
793 ///
805 ///
794 /// # Panics
806 /// # Panics
795 ///
807 ///
796 /// Panics if the node does not exist.
808 /// Panics if the node does not exist.
797 fn set_clean(
809 fn set_clean(
798 &mut self,
810 &mut self,
799 filename: &HgPath,
811 filename: &HgPath,
800 old_entry: DirstateEntry,
812 old_entry: DirstateEntry,
801 mode: u32,
813 mode: u32,
802 size: u32,
814 size: u32,
803 mtime: TruncatedTimestamp,
815 mtime: TruncatedTimestamp,
804 ) -> Result<(), DirstateError> {
816 ) -> Result<(), DirstateError> {
805 let node = self
817 let node = self
806 .get_node_mut(filename, |ancestor| {
818 .get_node_mut(filename, |ancestor| {
807 if !old_entry.tracked() {
819 if !old_entry.tracked() {
808 ancestor.tracked_descendants_count += 1;
820 ancestor.tracked_descendants_count += 1;
809 }
821 }
810 })?
822 })?
811 .expect("node should exist");
823 .expect("node should exist");
812 let mut new_entry = old_entry.clone();
824 let mut new_entry = old_entry.clone();
813 new_entry.set_clean(mode, size, mtime);
825 new_entry.set_clean(mode, size, mtime);
814 node.data = NodeData::Entry(new_entry);
826 node.data = NodeData::Entry(new_entry);
815 Ok(())
827 Ok(())
816 }
828 }
817
829
818 /// Set a node as possibly dirty in the dirstate.
830 /// Set a node as possibly dirty in the dirstate.
819 ///
831 ///
820 /// # Panics
832 /// # Panics
821 ///
833 ///
822 /// Panics if the node does not exist.
834 /// Panics if the node does not exist.
823 fn set_possibly_dirty(
835 fn set_possibly_dirty(
824 &mut self,
836 &mut self,
825 filename: &HgPath,
837 filename: &HgPath,
826 ) -> Result<(), DirstateError> {
838 ) -> Result<(), DirstateError> {
827 let node = self
839 let node = self
828 .get_node_mut(filename, |_ancestor| {})?
840 .get_node_mut(filename, |_ancestor| {})?
829 .expect("node should exist");
841 .expect("node should exist");
830 let entry = node.data.as_entry_mut().expect("entry should exist");
842 let entry = node.data.as_entry_mut().expect("entry should exist");
831 entry.set_possibly_dirty();
843 entry.set_possibly_dirty();
832 node.data = NodeData::Entry(*entry);
844 node.data = NodeData::Entry(*entry);
833 Ok(())
845 Ok(())
834 }
846 }
835
847
836 /// Clears the cached mtime for the (potential) folder at `path`.
848 /// Clears the cached mtime for the (potential) folder at `path`.
837 pub(super) fn clear_cached_mtime(
849 pub(super) fn clear_cached_mtime(
838 &mut self,
850 &mut self,
839 path: &HgPath,
851 path: &HgPath,
840 ) -> Result<(), DirstateV2ParseError> {
852 ) -> Result<(), DirstateV2ParseError> {
841 let node = match self.get_node_mut(path, |_ancestor| {})? {
853 let node = match self.get_node_mut(path, |_ancestor| {})? {
842 Some(node) => node,
854 Some(node) => node,
843 None => return Ok(()),
855 None => return Ok(()),
844 };
856 };
845 if let NodeData::CachedDirectory { .. } = &node.data {
857 if let NodeData::CachedDirectory { .. } = &node.data {
846 node.data = NodeData::None
858 node.data = NodeData::None
847 }
859 }
848 Ok(())
860 Ok(())
849 }
861 }
850
862
851 /// Sets the cached mtime for the (potential) folder at `path`.
863 /// Sets the cached mtime for the (potential) folder at `path`.
852 pub(super) fn set_cached_mtime(
864 pub(super) fn set_cached_mtime(
853 &mut self,
865 &mut self,
854 path: &HgPath,
866 path: &HgPath,
855 mtime: TruncatedTimestamp,
867 mtime: TruncatedTimestamp,
856 ) -> Result<(), DirstateV2ParseError> {
868 ) -> Result<(), DirstateV2ParseError> {
857 let node = match self.get_node_mut(path, |_ancestor| {})? {
869 let node = match self.get_node_mut(path, |_ancestor| {})? {
858 Some(node) => node,
870 Some(node) => node,
859 None => return Ok(()),
871 None => return Ok(()),
860 };
872 };
861 match &node.data {
873 match &node.data {
862 NodeData::Entry(_) => {} // Don’t overwrite an entry
874 NodeData::Entry(_) => {} // Don’t overwrite an entry
863 NodeData::CachedDirectory { .. } | NodeData::None => {
875 NodeData::CachedDirectory { .. } | NodeData::None => {
864 node.data = NodeData::CachedDirectory { mtime }
876 node.data = NodeData::CachedDirectory { mtime }
865 }
877 }
866 }
878 }
867 Ok(())
879 Ok(())
868 }
880 }
869
881
870 fn iter_nodes<'tree>(
882 fn iter_nodes<'tree>(
871 &'tree self,
883 &'tree self,
872 ) -> impl Iterator<
884 ) -> impl Iterator<
873 Item = Result<NodeRef<'tree, 'on_disk>, DirstateV2ParseError>,
885 Item = Result<NodeRef<'tree, 'on_disk>, DirstateV2ParseError>,
874 > + 'tree {
886 > + 'tree {
875 // Depth first tree traversal.
887 // Depth first tree traversal.
876 //
888 //
877 // If we could afford internal iteration and recursion,
889 // If we could afford internal iteration and recursion,
878 // this would look like:
890 // this would look like:
879 //
891 //
880 // ```
892 // ```
881 // fn traverse_children(
893 // fn traverse_children(
882 // children: &ChildNodes,
894 // children: &ChildNodes,
883 // each: &mut impl FnMut(&Node),
895 // each: &mut impl FnMut(&Node),
884 // ) {
896 // ) {
885 // for child in children.values() {
897 // for child in children.values() {
886 // traverse_children(&child.children, each);
898 // traverse_children(&child.children, each);
887 // each(child);
899 // each(child);
888 // }
900 // }
889 // }
901 // }
890 // ```
902 // ```
891 //
903 //
892 // However we want an external iterator and therefore can’t use the
904 // However we want an external iterator and therefore can’t use the
893 // call stack. Use an explicit stack instead:
905 // call stack. Use an explicit stack instead:
894 let mut stack = Vec::new();
906 let mut stack = Vec::new();
895 let mut iter = self.root.as_ref().iter();
907 let mut iter = self.root.as_ref().iter();
896 std::iter::from_fn(move || {
908 std::iter::from_fn(move || {
897 while let Some(child_node) = iter.next() {
909 while let Some(child_node) = iter.next() {
898 let children = match child_node.children(self.on_disk) {
910 let children = match child_node.children(self.on_disk) {
899 Ok(children) => children,
911 Ok(children) => children,
900 Err(error) => return Some(Err(error)),
912 Err(error) => return Some(Err(error)),
901 };
913 };
902 // Pseudo-recursion
914 // Pseudo-recursion
903 let new_iter = children.iter();
915 let new_iter = children.iter();
904 let old_iter = std::mem::replace(&mut iter, new_iter);
916 let old_iter = std::mem::replace(&mut iter, new_iter);
905 stack.push((child_node, old_iter));
917 stack.push((child_node, old_iter));
906 }
918 }
907 // Found the end of a `children.iter()` iterator.
919 // Found the end of a `children.iter()` iterator.
908 if let Some((child_node, next_iter)) = stack.pop() {
920 if let Some((child_node, next_iter)) = stack.pop() {
909 // "Return" from pseudo-recursion by restoring state from the
921 // "Return" from pseudo-recursion by restoring state from the
910 // explicit stack
922 // explicit stack
911 iter = next_iter;
923 iter = next_iter;
912
924
913 Some(Ok(child_node))
925 Some(Ok(child_node))
914 } else {
926 } else {
915 // Reached the bottom of the stack, we’re done
927 // Reached the bottom of the stack, we’re done
916 None
928 None
917 }
929 }
918 })
930 })
919 }
931 }
920
932
921 fn count_dropped_path(unreachable_bytes: &mut u32, path: &Cow<HgPath>) {
933 fn count_dropped_path(unreachable_bytes: &mut u32, path: &Cow<HgPath>) {
922 if let Cow::Borrowed(path) = path {
934 if let Cow::Borrowed(path) = path {
923 *unreachable_bytes += path.len() as u32
935 *unreachable_bytes += path.len() as u32
924 }
936 }
925 }
937 }
938
939 pub(crate) fn set_write_mode(&mut self, write_mode: DirstateMapWriteMode) {
940 self.write_mode = write_mode;
941 }
926 }
942 }
927
943
928 /// Like `Iterator::filter_map`, but over a fallible iterator of `Result`s.
944 /// Like `Iterator::filter_map`, but over a fallible iterator of `Result`s.
929 ///
945 ///
930 /// The callback is only called for incoming `Ok` values. Errors are passed
946 /// The callback is only called for incoming `Ok` values. Errors are passed
931 /// through as-is. In order to let it use the `?` operator the callback is
947 /// through as-is. In order to let it use the `?` operator the callback is
932 /// expected to return a `Result` of `Option`, instead of an `Option` of
948 /// expected to return a `Result` of `Option`, instead of an `Option` of
933 /// `Result`.
949 /// `Result`.
934 fn filter_map_results<'a, I, F, A, B, E>(
950 fn filter_map_results<'a, I, F, A, B, E>(
935 iter: I,
951 iter: I,
936 f: F,
952 f: F,
937 ) -> impl Iterator<Item = Result<B, E>> + 'a
953 ) -> impl Iterator<Item = Result<B, E>> + 'a
938 where
954 where
939 I: Iterator<Item = Result<A, E>> + 'a,
955 I: Iterator<Item = Result<A, E>> + 'a,
940 F: Fn(A) -> Result<Option<B>, E> + 'a,
956 F: Fn(A) -> Result<Option<B>, E> + 'a,
941 {
957 {
942 iter.filter_map(move |result| match result {
958 iter.filter_map(move |result| match result {
943 Ok(node) => f(node).transpose(),
959 Ok(node) => f(node).transpose(),
944 Err(e) => Some(Err(e)),
960 Err(e) => Some(Err(e)),
945 })
961 })
946 }
962 }
947
963
948 impl OwningDirstateMap {
964 impl OwningDirstateMap {
949 pub fn clear(&mut self) {
965 pub fn clear(&mut self) {
950 self.with_dmap_mut(|map| {
966 self.with_dmap_mut(|map| {
951 map.root = Default::default();
967 map.root = Default::default();
952 map.nodes_with_entry_count = 0;
968 map.nodes_with_entry_count = 0;
953 map.nodes_with_copy_source_count = 0;
969 map.nodes_with_copy_source_count = 0;
954 });
970 });
955 }
971 }
956
972
957 pub fn set_tracked(
973 pub fn set_tracked(
958 &mut self,
974 &mut self,
959 filename: &HgPath,
975 filename: &HgPath,
960 ) -> Result<bool, DirstateV2ParseError> {
976 ) -> Result<bool, DirstateV2ParseError> {
961 let old_entry_opt = self.get(filename)?;
977 let old_entry_opt = self.get(filename)?;
962 self.with_dmap_mut(|map| map.set_tracked(filename, old_entry_opt))
978 self.with_dmap_mut(|map| map.set_tracked(filename, old_entry_opt))
963 }
979 }
964
980
965 pub fn set_untracked(
981 pub fn set_untracked(
966 &mut self,
982 &mut self,
967 filename: &HgPath,
983 filename: &HgPath,
968 ) -> Result<bool, DirstateError> {
984 ) -> Result<bool, DirstateError> {
969 let old_entry_opt = self.get(filename)?;
985 let old_entry_opt = self.get(filename)?;
970 match old_entry_opt {
986 match old_entry_opt {
971 None => Ok(false),
987 None => Ok(false),
972 Some(old_entry) => {
988 Some(old_entry) => {
973 if !old_entry.tracked() {
989 if !old_entry.tracked() {
974 // `DirstateMap::set_untracked` is not a noop if
990 // `DirstateMap::set_untracked` is not a noop if
975 // already not tracked as it will decrement the
991 // already not tracked as it will decrement the
976 // tracked counters while going down.
992 // tracked counters while going down.
977 return Ok(true);
993 return Ok(true);
978 }
994 }
979 if old_entry.added() {
995 if old_entry.added() {
980 // Untracking an "added" entry will just result in a
996 // Untracking an "added" entry will just result in a
981 // worthless entry (and other parts of the code will
997 // worthless entry (and other parts of the code will
982 // complain about it), just drop it entirely.
998 // complain about it), just drop it entirely.
983 self.drop_entry_and_copy_source(filename)?;
999 self.drop_entry_and_copy_source(filename)?;
984 return Ok(true);
1000 return Ok(true);
985 }
1001 }
986 if !old_entry.p2_info() {
1002 if !old_entry.p2_info() {
987 self.copy_map_remove(filename)?;
1003 self.copy_map_remove(filename)?;
988 }
1004 }
989
1005
990 self.with_dmap_mut(|map| {
1006 self.with_dmap_mut(|map| {
991 map.set_untracked(filename, old_entry)?;
1007 map.set_untracked(filename, old_entry)?;
992 Ok(true)
1008 Ok(true)
993 })
1009 })
994 }
1010 }
995 }
1011 }
996 }
1012 }
997
1013
998 pub fn set_clean(
1014 pub fn set_clean(
999 &mut self,
1015 &mut self,
1000 filename: &HgPath,
1016 filename: &HgPath,
1001 mode: u32,
1017 mode: u32,
1002 size: u32,
1018 size: u32,
1003 mtime: TruncatedTimestamp,
1019 mtime: TruncatedTimestamp,
1004 ) -> Result<(), DirstateError> {
1020 ) -> Result<(), DirstateError> {
1005 let old_entry = match self.get(filename)? {
1021 let old_entry = match self.get(filename)? {
1006 None => {
1022 None => {
1007 return Err(
1023 return Err(
1008 DirstateMapError::PathNotFound(filename.into()).into()
1024 DirstateMapError::PathNotFound(filename.into()).into()
1009 )
1025 )
1010 }
1026 }
1011 Some(e) => e,
1027 Some(e) => e,
1012 };
1028 };
1013 self.copy_map_remove(filename)?;
1029 self.copy_map_remove(filename)?;
1014 self.with_dmap_mut(|map| {
1030 self.with_dmap_mut(|map| {
1015 map.set_clean(filename, old_entry, mode, size, mtime)
1031 map.set_clean(filename, old_entry, mode, size, mtime)
1016 })
1032 })
1017 }
1033 }
1018
1034
1019 pub fn set_possibly_dirty(
1035 pub fn set_possibly_dirty(
1020 &mut self,
1036 &mut self,
1021 filename: &HgPath,
1037 filename: &HgPath,
1022 ) -> Result<(), DirstateError> {
1038 ) -> Result<(), DirstateError> {
1023 if self.get(filename)?.is_none() {
1039 if self.get(filename)?.is_none() {
1024 return Err(DirstateMapError::PathNotFound(filename.into()).into());
1040 return Err(DirstateMapError::PathNotFound(filename.into()).into());
1025 }
1041 }
1026 self.with_dmap_mut(|map| map.set_possibly_dirty(filename))
1042 self.with_dmap_mut(|map| map.set_possibly_dirty(filename))
1027 }
1043 }
1028
1044
1029 pub fn reset_state(
1045 pub fn reset_state(
1030 &mut self,
1046 &mut self,
1031 filename: &HgPath,
1047 filename: &HgPath,
1032 wc_tracked: bool,
1048 wc_tracked: bool,
1033 p1_tracked: bool,
1049 p1_tracked: bool,
1034 p2_info: bool,
1050 p2_info: bool,
1035 has_meaningful_mtime: bool,
1051 has_meaningful_mtime: bool,
1036 parent_file_data_opt: Option<ParentFileData>,
1052 parent_file_data_opt: Option<ParentFileData>,
1037 ) -> Result<(), DirstateError> {
1053 ) -> Result<(), DirstateError> {
1038 if !(p1_tracked || p2_info || wc_tracked) {
1054 if !(p1_tracked || p2_info || wc_tracked) {
1039 self.drop_entry_and_copy_source(filename)?;
1055 self.drop_entry_and_copy_source(filename)?;
1040 return Ok(());
1056 return Ok(());
1041 }
1057 }
1042 self.copy_map_remove(filename)?;
1058 self.copy_map_remove(filename)?;
1043 let old_entry_opt = self.get(filename)?;
1059 let old_entry_opt = self.get(filename)?;
1044 self.with_dmap_mut(|map| {
1060 self.with_dmap_mut(|map| {
1045 map.reset_state(
1061 map.reset_state(
1046 filename,
1062 filename,
1047 old_entry_opt,
1063 old_entry_opt,
1048 wc_tracked,
1064 wc_tracked,
1049 p1_tracked,
1065 p1_tracked,
1050 p2_info,
1066 p2_info,
1051 has_meaningful_mtime,
1067 has_meaningful_mtime,
1052 parent_file_data_opt,
1068 parent_file_data_opt,
1053 )
1069 )
1054 })
1070 })
1055 }
1071 }
1056
1072
1057 pub fn drop_entry_and_copy_source(
1073 pub fn drop_entry_and_copy_source(
1058 &mut self,
1074 &mut self,
1059 filename: &HgPath,
1075 filename: &HgPath,
1060 ) -> Result<(), DirstateError> {
1076 ) -> Result<(), DirstateError> {
1061 let was_tracked = self.get(filename)?.map_or(false, |e| e.tracked());
1077 let was_tracked = self.get(filename)?.map_or(false, |e| e.tracked());
1062 struct Dropped {
1078 struct Dropped {
1063 was_tracked: bool,
1079 was_tracked: bool,
1064 had_entry: bool,
1080 had_entry: bool,
1065 had_copy_source: bool,
1081 had_copy_source: bool,
1066 }
1082 }
1067
1083
1068 /// If this returns `Ok(Some((dropped, removed)))`, then
1084 /// If this returns `Ok(Some((dropped, removed)))`, then
1069 ///
1085 ///
1070 /// * `dropped` is about the leaf node that was at `filename`
1086 /// * `dropped` is about the leaf node that was at `filename`
1071 /// * `removed` is whether this particular level of recursion just
1087 /// * `removed` is whether this particular level of recursion just
1072 /// removed a node in `nodes`.
1088 /// removed a node in `nodes`.
1073 fn recur<'on_disk>(
1089 fn recur<'on_disk>(
1074 on_disk: &'on_disk [u8],
1090 on_disk: &'on_disk [u8],
1075 unreachable_bytes: &mut u32,
1091 unreachable_bytes: &mut u32,
1076 nodes: &mut ChildNodes<'on_disk>,
1092 nodes: &mut ChildNodes<'on_disk>,
1077 path: &HgPath,
1093 path: &HgPath,
1078 ) -> Result<Option<(Dropped, bool)>, DirstateV2ParseError> {
1094 ) -> Result<Option<(Dropped, bool)>, DirstateV2ParseError> {
1079 let (first_path_component, rest_of_path) =
1095 let (first_path_component, rest_of_path) =
1080 path.split_first_component();
1096 path.split_first_component();
1081 let nodes = nodes.make_mut(on_disk, unreachable_bytes)?;
1097 let nodes = nodes.make_mut(on_disk, unreachable_bytes)?;
1082 let node = if let Some(node) = nodes.get_mut(first_path_component)
1098 let node = if let Some(node) = nodes.get_mut(first_path_component)
1083 {
1099 {
1084 node
1100 node
1085 } else {
1101 } else {
1086 return Ok(None);
1102 return Ok(None);
1087 };
1103 };
1088 let dropped;
1104 let dropped;
1089 if let Some(rest) = rest_of_path {
1105 if let Some(rest) = rest_of_path {
1090 if let Some((d, removed)) = recur(
1106 if let Some((d, removed)) = recur(
1091 on_disk,
1107 on_disk,
1092 unreachable_bytes,
1108 unreachable_bytes,
1093 &mut node.children,
1109 &mut node.children,
1094 rest,
1110 rest,
1095 )? {
1111 )? {
1096 dropped = d;
1112 dropped = d;
1097 if dropped.had_entry {
1113 if dropped.had_entry {
1098 node.descendants_with_entry_count = node
1114 node.descendants_with_entry_count = node
1099 .descendants_with_entry_count
1115 .descendants_with_entry_count
1100 .checked_sub(1)
1116 .checked_sub(1)
1101 .expect(
1117 .expect(
1102 "descendants_with_entry_count should be >= 0",
1118 "descendants_with_entry_count should be >= 0",
1103 );
1119 );
1104 }
1120 }
1105 if dropped.was_tracked {
1121 if dropped.was_tracked {
1106 node.tracked_descendants_count = node
1122 node.tracked_descendants_count = node
1107 .tracked_descendants_count
1123 .tracked_descendants_count
1108 .checked_sub(1)
1124 .checked_sub(1)
1109 .expect(
1125 .expect(
1110 "tracked_descendants_count should be >= 0",
1126 "tracked_descendants_count should be >= 0",
1111 );
1127 );
1112 }
1128 }
1113
1129
1114 // Directory caches must be invalidated when removing a
1130 // Directory caches must be invalidated when removing a
1115 // child node
1131 // child node
1116 if removed {
1132 if removed {
1117 if let NodeData::CachedDirectory { .. } = &node.data {
1133 if let NodeData::CachedDirectory { .. } = &node.data {
1118 node.data = NodeData::None
1134 node.data = NodeData::None
1119 }
1135 }
1120 }
1136 }
1121 } else {
1137 } else {
1122 return Ok(None);
1138 return Ok(None);
1123 }
1139 }
1124 } else {
1140 } else {
1125 let entry = node.data.as_entry();
1141 let entry = node.data.as_entry();
1126 let was_tracked = entry.map_or(false, |entry| entry.tracked());
1142 let was_tracked = entry.map_or(false, |entry| entry.tracked());
1127 let had_entry = entry.is_some();
1143 let had_entry = entry.is_some();
1128 if had_entry {
1144 if had_entry {
1129 node.data = NodeData::None
1145 node.data = NodeData::None
1130 }
1146 }
1131 let mut had_copy_source = false;
1147 let mut had_copy_source = false;
1132 if let Some(source) = &node.copy_source {
1148 if let Some(source) = &node.copy_source {
1133 DirstateMap::count_dropped_path(unreachable_bytes, source);
1149 DirstateMap::count_dropped_path(unreachable_bytes, source);
1134 had_copy_source = true;
1150 had_copy_source = true;
1135 node.copy_source = None
1151 node.copy_source = None
1136 }
1152 }
1137 dropped = Dropped {
1153 dropped = Dropped {
1138 was_tracked,
1154 was_tracked,
1139 had_entry,
1155 had_entry,
1140 had_copy_source,
1156 had_copy_source,
1141 };
1157 };
1142 }
1158 }
1143 // After recursion, for both leaf (rest_of_path is None) nodes and
1159 // After recursion, for both leaf (rest_of_path is None) nodes and
1144 // parent nodes, remove a node if it just became empty.
1160 // parent nodes, remove a node if it just became empty.
1145 let remove = !node.data.has_entry()
1161 let remove = !node.data.has_entry()
1146 && node.copy_source.is_none()
1162 && node.copy_source.is_none()
1147 && node.children.is_empty();
1163 && node.children.is_empty();
1148 if remove {
1164 if remove {
1149 let (key, _) =
1165 let (key, _) =
1150 nodes.remove_entry(first_path_component).unwrap();
1166 nodes.remove_entry(first_path_component).unwrap();
1151 DirstateMap::count_dropped_path(
1167 DirstateMap::count_dropped_path(
1152 unreachable_bytes,
1168 unreachable_bytes,
1153 key.full_path(),
1169 key.full_path(),
1154 )
1170 )
1155 }
1171 }
1156 Ok(Some((dropped, remove)))
1172 Ok(Some((dropped, remove)))
1157 }
1173 }
1158
1174
1159 self.with_dmap_mut(|map| {
1175 self.with_dmap_mut(|map| {
1160 if let Some((dropped, _removed)) = recur(
1176 if let Some((dropped, _removed)) = recur(
1161 map.on_disk,
1177 map.on_disk,
1162 &mut map.unreachable_bytes,
1178 &mut map.unreachable_bytes,
1163 &mut map.root,
1179 &mut map.root,
1164 filename,
1180 filename,
1165 )? {
1181 )? {
1166 if dropped.had_entry {
1182 if dropped.had_entry {
1167 map.nodes_with_entry_count = map
1183 map.nodes_with_entry_count = map
1168 .nodes_with_entry_count
1184 .nodes_with_entry_count
1169 .checked_sub(1)
1185 .checked_sub(1)
1170 .expect("nodes_with_entry_count should be >= 0");
1186 .expect("nodes_with_entry_count should be >= 0");
1171 }
1187 }
1172 if dropped.had_copy_source {
1188 if dropped.had_copy_source {
1173 map.nodes_with_copy_source_count = map
1189 map.nodes_with_copy_source_count = map
1174 .nodes_with_copy_source_count
1190 .nodes_with_copy_source_count
1175 .checked_sub(1)
1191 .checked_sub(1)
1176 .expect("nodes_with_copy_source_count should be >= 0");
1192 .expect("nodes_with_copy_source_count should be >= 0");
1177 }
1193 }
1178 } else {
1194 } else {
1179 debug_assert!(!was_tracked);
1195 debug_assert!(!was_tracked);
1180 }
1196 }
1181 Ok(())
1197 Ok(())
1182 })
1198 })
1183 }
1199 }
1184
1200
1185 pub fn has_tracked_dir(
1201 pub fn has_tracked_dir(
1186 &mut self,
1202 &mut self,
1187 directory: &HgPath,
1203 directory: &HgPath,
1188 ) -> Result<bool, DirstateError> {
1204 ) -> Result<bool, DirstateError> {
1189 self.with_dmap_mut(|map| {
1205 self.with_dmap_mut(|map| {
1190 if let Some(node) = map.get_node(directory)? {
1206 if let Some(node) = map.get_node(directory)? {
1191 // A node without a `DirstateEntry` was created to hold child
1207 // A node without a `DirstateEntry` was created to hold child
1192 // nodes, and is therefore a directory.
1208 // nodes, and is therefore a directory.
1193 let is_dir = node.entry()?.is_none();
1209 let is_dir = node.entry()?.is_none();
1194 Ok(is_dir && node.tracked_descendants_count() > 0)
1210 Ok(is_dir && node.tracked_descendants_count() > 0)
1195 } else {
1211 } else {
1196 Ok(false)
1212 Ok(false)
1197 }
1213 }
1198 })
1214 })
1199 }
1215 }
1200
1216
1201 pub fn has_dir(
1217 pub fn has_dir(
1202 &mut self,
1218 &mut self,
1203 directory: &HgPath,
1219 directory: &HgPath,
1204 ) -> Result<bool, DirstateError> {
1220 ) -> Result<bool, DirstateError> {
1205 self.with_dmap_mut(|map| {
1221 self.with_dmap_mut(|map| {
1206 if let Some(node) = map.get_node(directory)? {
1222 if let Some(node) = map.get_node(directory)? {
1207 // A node without a `DirstateEntry` was created to hold child
1223 // A node without a `DirstateEntry` was created to hold child
1208 // nodes, and is therefore a directory.
1224 // nodes, and is therefore a directory.
1209 let is_dir = node.entry()?.is_none();
1225 let is_dir = node.entry()?.is_none();
1210 Ok(is_dir && node.descendants_with_entry_count() > 0)
1226 Ok(is_dir && node.descendants_with_entry_count() > 0)
1211 } else {
1227 } else {
1212 Ok(false)
1228 Ok(false)
1213 }
1229 }
1214 })
1230 })
1215 }
1231 }
1216
1232
1217 #[timed]
1233 #[timed]
1218 pub fn pack_v1(
1234 pub fn pack_v1(
1219 &self,
1235 &self,
1220 parents: DirstateParents,
1236 parents: DirstateParents,
1221 ) -> Result<Vec<u8>, DirstateError> {
1237 ) -> Result<Vec<u8>, DirstateError> {
1222 let map = self.get_map();
1238 let map = self.get_map();
1223 // Optizimation (to be measured?): pre-compute size to avoid `Vec`
1239 // Optizimation (to be measured?): pre-compute size to avoid `Vec`
1224 // reallocations
1240 // reallocations
1225 let mut size = parents.as_bytes().len();
1241 let mut size = parents.as_bytes().len();
1226 for node in map.iter_nodes() {
1242 for node in map.iter_nodes() {
1227 let node = node?;
1243 let node = node?;
1228 if node.entry()?.is_some() {
1244 if node.entry()?.is_some() {
1229 size += packed_entry_size(
1245 size += packed_entry_size(
1230 node.full_path(map.on_disk)?,
1246 node.full_path(map.on_disk)?,
1231 node.copy_source(map.on_disk)?,
1247 node.copy_source(map.on_disk)?,
1232 );
1248 );
1233 }
1249 }
1234 }
1250 }
1235
1251
1236 let mut packed = Vec::with_capacity(size);
1252 let mut packed = Vec::with_capacity(size);
1237 packed.extend(parents.as_bytes());
1253 packed.extend(parents.as_bytes());
1238
1254
1239 for node in map.iter_nodes() {
1255 for node in map.iter_nodes() {
1240 let node = node?;
1256 let node = node?;
1241 if let Some(entry) = node.entry()? {
1257 if let Some(entry) = node.entry()? {
1242 pack_entry(
1258 pack_entry(
1243 node.full_path(map.on_disk)?,
1259 node.full_path(map.on_disk)?,
1244 &entry,
1260 &entry,
1245 node.copy_source(map.on_disk)?,
1261 node.copy_source(map.on_disk)?,
1246 &mut packed,
1262 &mut packed,
1247 );
1263 );
1248 }
1264 }
1249 }
1265 }
1250 Ok(packed)
1266 Ok(packed)
1251 }
1267 }
1252
1268
1253 /// Returns new data and metadata together with whether that data should be
1269 /// Returns new data and metadata together with whether that data should be
1254 /// appended to the existing data file whose content is at
1270 /// appended to the existing data file whose content is at
1255 /// `map.on_disk` (true), instead of written to a new data file
1271 /// `map.on_disk` (true), instead of written to a new data file
1256 /// (false), and the previous size of data on disk.
1272 /// (false), and the previous size of data on disk.
1257 #[timed]
1273 #[timed]
1258 pub fn pack_v2(
1274 pub fn pack_v2(
1259 &self,
1275 &self,
1260 write_mode: DirstateMapWriteMode,
1276 write_mode: DirstateMapWriteMode,
1261 ) -> Result<(Vec<u8>, on_disk::TreeMetadata, bool, usize), DirstateError>
1277 ) -> Result<(Vec<u8>, on_disk::TreeMetadata, bool, usize), DirstateError>
1262 {
1278 {
1263 let map = self.get_map();
1279 let map = self.get_map();
1264 on_disk::write(map, write_mode)
1280 on_disk::write(map, write_mode)
1265 }
1281 }
1266
1282
1267 /// `callback` allows the caller to process and do something with the
1283 /// `callback` allows the caller to process and do something with the
1268 /// results of the status. This is needed to do so efficiently (i.e.
1284 /// results of the status. This is needed to do so efficiently (i.e.
1269 /// without cloning the `DirstateStatus` object with its paths) because
1285 /// without cloning the `DirstateStatus` object with its paths) because
1270 /// we need to borrow from `Self`.
1286 /// we need to borrow from `Self`.
1271 pub fn with_status<R>(
1287 pub fn with_status<R>(
1272 &mut self,
1288 &mut self,
1273 matcher: &(dyn Matcher + Sync),
1289 matcher: &(dyn Matcher + Sync),
1274 root_dir: PathBuf,
1290 root_dir: PathBuf,
1275 ignore_files: Vec<PathBuf>,
1291 ignore_files: Vec<PathBuf>,
1276 options: StatusOptions,
1292 options: StatusOptions,
1277 callback: impl for<'r> FnOnce(
1293 callback: impl for<'r> FnOnce(
1278 Result<(DirstateStatus<'r>, Vec<PatternFileWarning>), StatusError>,
1294 Result<(DirstateStatus<'r>, Vec<PatternFileWarning>), StatusError>,
1279 ) -> R,
1295 ) -> R,
1280 ) -> R {
1296 ) -> R {
1281 self.with_dmap_mut(|map| {
1297 self.with_dmap_mut(|map| {
1282 callback(super::status::status(
1298 callback(super::status::status(
1283 map,
1299 map,
1284 matcher,
1300 matcher,
1285 root_dir,
1301 root_dir,
1286 ignore_files,
1302 ignore_files,
1287 options,
1303 options,
1288 ))
1304 ))
1289 })
1305 })
1290 }
1306 }
1291
1307
1292 pub fn copy_map_len(&self) -> usize {
1308 pub fn copy_map_len(&self) -> usize {
1293 let map = self.get_map();
1309 let map = self.get_map();
1294 map.nodes_with_copy_source_count as usize
1310 map.nodes_with_copy_source_count as usize
1295 }
1311 }
1296
1312
1297 pub fn copy_map_iter(&self) -> CopyMapIter<'_> {
1313 pub fn copy_map_iter(&self) -> CopyMapIter<'_> {
1298 let map = self.get_map();
1314 let map = self.get_map();
1299 Box::new(filter_map_results(map.iter_nodes(), move |node| {
1315 Box::new(filter_map_results(map.iter_nodes(), move |node| {
1300 Ok(if let Some(source) = node.copy_source(map.on_disk)? {
1316 Ok(if let Some(source) = node.copy_source(map.on_disk)? {
1301 Some((node.full_path(map.on_disk)?, source))
1317 Some((node.full_path(map.on_disk)?, source))
1302 } else {
1318 } else {
1303 None
1319 None
1304 })
1320 })
1305 }))
1321 }))
1306 }
1322 }
1307
1323
1308 pub fn copy_map_contains_key(
1324 pub fn copy_map_contains_key(
1309 &self,
1325 &self,
1310 key: &HgPath,
1326 key: &HgPath,
1311 ) -> Result<bool, DirstateV2ParseError> {
1327 ) -> Result<bool, DirstateV2ParseError> {
1312 let map = self.get_map();
1328 let map = self.get_map();
1313 Ok(if let Some(node) = map.get_node(key)? {
1329 Ok(if let Some(node) = map.get_node(key)? {
1314 node.has_copy_source()
1330 node.has_copy_source()
1315 } else {
1331 } else {
1316 false
1332 false
1317 })
1333 })
1318 }
1334 }
1319
1335
1320 pub fn copy_map_get(
1336 pub fn copy_map_get(
1321 &self,
1337 &self,
1322 key: &HgPath,
1338 key: &HgPath,
1323 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
1339 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
1324 let map = self.get_map();
1340 let map = self.get_map();
1325 if let Some(node) = map.get_node(key)? {
1341 if let Some(node) = map.get_node(key)? {
1326 if let Some(source) = node.copy_source(map.on_disk)? {
1342 if let Some(source) = node.copy_source(map.on_disk)? {
1327 return Ok(Some(source));
1343 return Ok(Some(source));
1328 }
1344 }
1329 }
1345 }
1330 Ok(None)
1346 Ok(None)
1331 }
1347 }
1332
1348
1333 pub fn copy_map_remove(
1349 pub fn copy_map_remove(
1334 &mut self,
1350 &mut self,
1335 key: &HgPath,
1351 key: &HgPath,
1336 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1352 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1337 self.with_dmap_mut(|map| {
1353 self.with_dmap_mut(|map| {
1338 let count = &mut map.nodes_with_copy_source_count;
1354 let count = &mut map.nodes_with_copy_source_count;
1339 let unreachable_bytes = &mut map.unreachable_bytes;
1355 let unreachable_bytes = &mut map.unreachable_bytes;
1340 Ok(DirstateMap::get_node_mut_inner(
1356 Ok(DirstateMap::get_node_mut_inner(
1341 map.on_disk,
1357 map.on_disk,
1342 unreachable_bytes,
1358 unreachable_bytes,
1343 &mut map.root,
1359 &mut map.root,
1344 key,
1360 key,
1345 |_ancestor| {},
1361 |_ancestor| {},
1346 )?
1362 )?
1347 .and_then(|node| {
1363 .and_then(|node| {
1348 if let Some(source) = &node.copy_source {
1364 if let Some(source) = &node.copy_source {
1349 *count = count
1365 *count = count
1350 .checked_sub(1)
1366 .checked_sub(1)
1351 .expect("nodes_with_copy_source_count should be >= 0");
1367 .expect("nodes_with_copy_source_count should be >= 0");
1352 DirstateMap::count_dropped_path(unreachable_bytes, source);
1368 DirstateMap::count_dropped_path(unreachable_bytes, source);
1353 }
1369 }
1354 node.copy_source.take().map(Cow::into_owned)
1370 node.copy_source.take().map(Cow::into_owned)
1355 }))
1371 }))
1356 })
1372 })
1357 }
1373 }
1358
1374
1359 pub fn copy_map_insert(
1375 pub fn copy_map_insert(
1360 &mut self,
1376 &mut self,
1361 key: &HgPath,
1377 key: &HgPath,
1362 value: &HgPath,
1378 value: &HgPath,
1363 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1379 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1364 self.with_dmap_mut(|map| {
1380 self.with_dmap_mut(|map| {
1365 let node = map.get_or_insert_node(&key, |_ancestor| {})?;
1381 let node = map.get_or_insert_node(&key, |_ancestor| {})?;
1366 let had_copy_source = node.copy_source.is_none();
1382 let had_copy_source = node.copy_source.is_none();
1367 let old = node
1383 let old = node
1368 .copy_source
1384 .copy_source
1369 .replace(value.to_owned().into())
1385 .replace(value.to_owned().into())
1370 .map(Cow::into_owned);
1386 .map(Cow::into_owned);
1371 if had_copy_source {
1387 if had_copy_source {
1372 map.nodes_with_copy_source_count += 1
1388 map.nodes_with_copy_source_count += 1
1373 }
1389 }
1374 Ok(old)
1390 Ok(old)
1375 })
1391 })
1376 }
1392 }
1377
1393
1378 pub fn len(&self) -> usize {
1394 pub fn len(&self) -> usize {
1379 let map = self.get_map();
1395 let map = self.get_map();
1380 map.nodes_with_entry_count as usize
1396 map.nodes_with_entry_count as usize
1381 }
1397 }
1382
1398
1383 pub fn contains_key(
1399 pub fn contains_key(
1384 &self,
1400 &self,
1385 key: &HgPath,
1401 key: &HgPath,
1386 ) -> Result<bool, DirstateV2ParseError> {
1402 ) -> Result<bool, DirstateV2ParseError> {
1387 Ok(self.get(key)?.is_some())
1403 Ok(self.get(key)?.is_some())
1388 }
1404 }
1389
1405
1390 pub fn get(
1406 pub fn get(
1391 &self,
1407 &self,
1392 key: &HgPath,
1408 key: &HgPath,
1393 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
1409 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
1394 let map = self.get_map();
1410 let map = self.get_map();
1395 Ok(if let Some(node) = map.get_node(key)? {
1411 Ok(if let Some(node) = map.get_node(key)? {
1396 node.entry()?
1412 node.entry()?
1397 } else {
1413 } else {
1398 None
1414 None
1399 })
1415 })
1400 }
1416 }
1401
1417
1402 pub fn iter(&self) -> StateMapIter<'_> {
1418 pub fn iter(&self) -> StateMapIter<'_> {
1403 let map = self.get_map();
1419 let map = self.get_map();
1404 Box::new(filter_map_results(map.iter_nodes(), move |node| {
1420 Box::new(filter_map_results(map.iter_nodes(), move |node| {
1405 Ok(if let Some(entry) = node.entry()? {
1421 Ok(if let Some(entry) = node.entry()? {
1406 Some((node.full_path(map.on_disk)?, entry))
1422 Some((node.full_path(map.on_disk)?, entry))
1407 } else {
1423 } else {
1408 None
1424 None
1409 })
1425 })
1410 }))
1426 }))
1411 }
1427 }
1412
1428
1413 pub fn iter_tracked_dirs(
1429 pub fn iter_tracked_dirs(
1414 &mut self,
1430 &mut self,
1415 ) -> Result<
1431 ) -> Result<
1416 Box<
1432 Box<
1417 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>>
1433 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>>
1418 + Send
1434 + Send
1419 + '_,
1435 + '_,
1420 >,
1436 >,
1421 DirstateError,
1437 DirstateError,
1422 > {
1438 > {
1423 let map = self.get_map();
1439 let map = self.get_map();
1424 let on_disk = map.on_disk;
1440 let on_disk = map.on_disk;
1425 Ok(Box::new(filter_map_results(
1441 Ok(Box::new(filter_map_results(
1426 map.iter_nodes(),
1442 map.iter_nodes(),
1427 move |node| {
1443 move |node| {
1428 Ok(if node.tracked_descendants_count() > 0 {
1444 Ok(if node.tracked_descendants_count() > 0 {
1429 Some(node.full_path(on_disk)?)
1445 Some(node.full_path(on_disk)?)
1430 } else {
1446 } else {
1431 None
1447 None
1432 })
1448 })
1433 },
1449 },
1434 )))
1450 )))
1435 }
1451 }
1436
1452
1437 /// Only public because it needs to be exposed to the Python layer.
1453 /// Only public because it needs to be exposed to the Python layer.
1438 /// It is not the full `setparents` logic, only the parts that mutate the
1454 /// It is not the full `setparents` logic, only the parts that mutate the
1439 /// entries.
1455 /// entries.
1440 pub fn setparents_fixup(
1456 pub fn setparents_fixup(
1441 &mut self,
1457 &mut self,
1442 ) -> Result<Vec<(HgPathBuf, HgPathBuf)>, DirstateV2ParseError> {
1458 ) -> Result<Vec<(HgPathBuf, HgPathBuf)>, DirstateV2ParseError> {
1443 // XXX
1459 // XXX
1444 // All the copying and re-querying is quite inefficient, but this is
1460 // All the copying and re-querying is quite inefficient, but this is
1445 // still a lot better than doing it from Python.
1461 // still a lot better than doing it from Python.
1446 //
1462 //
1447 // The better solution is to develop a mechanism for `iter_mut`,
1463 // The better solution is to develop a mechanism for `iter_mut`,
1448 // which will be a lot more involved: we're dealing with a lazy,
1464 // which will be a lot more involved: we're dealing with a lazy,
1449 // append-mostly, tree-like data structure. This will do for now.
1465 // append-mostly, tree-like data structure. This will do for now.
1450 let mut copies = vec![];
1466 let mut copies = vec![];
1451 let mut files_with_p2_info = vec![];
1467 let mut files_with_p2_info = vec![];
1452 for res in self.iter() {
1468 for res in self.iter() {
1453 let (path, entry) = res?;
1469 let (path, entry) = res?;
1454 if entry.p2_info() {
1470 if entry.p2_info() {
1455 files_with_p2_info.push(path.to_owned())
1471 files_with_p2_info.push(path.to_owned())
1456 }
1472 }
1457 }
1473 }
1458 self.with_dmap_mut(|map| {
1474 self.with_dmap_mut(|map| {
1459 for path in files_with_p2_info.iter() {
1475 for path in files_with_p2_info.iter() {
1460 let node = map.get_or_insert_node(path, |_| {})?;
1476 let node = map.get_or_insert_node(path, |_| {})?;
1461 let entry =
1477 let entry =
1462 node.data.as_entry_mut().expect("entry should exist");
1478 node.data.as_entry_mut().expect("entry should exist");
1463 entry.drop_merge_data();
1479 entry.drop_merge_data();
1464 if let Some(source) = node.copy_source.take().as_deref() {
1480 if let Some(source) = node.copy_source.take().as_deref() {
1465 copies.push((path.to_owned(), source.to_owned()));
1481 copies.push((path.to_owned(), source.to_owned()));
1466 }
1482 }
1467 }
1483 }
1468 Ok(copies)
1484 Ok(copies)
1469 })
1485 })
1470 }
1486 }
1471
1487
1472 pub fn debug_iter(
1488 pub fn debug_iter(
1473 &self,
1489 &self,
1474 all: bool,
1490 all: bool,
1475 ) -> Box<
1491 ) -> Box<
1476 dyn Iterator<
1492 dyn Iterator<
1477 Item = Result<
1493 Item = Result<
1478 (&HgPath, (u8, i32, i32, i32)),
1494 (&HgPath, (u8, i32, i32, i32)),
1479 DirstateV2ParseError,
1495 DirstateV2ParseError,
1480 >,
1496 >,
1481 > + Send
1497 > + Send
1482 + '_,
1498 + '_,
1483 > {
1499 > {
1484 let map = self.get_map();
1500 let map = self.get_map();
1485 Box::new(filter_map_results(map.iter_nodes(), move |node| {
1501 Box::new(filter_map_results(map.iter_nodes(), move |node| {
1486 let debug_tuple = if let Some(entry) = node.entry()? {
1502 let debug_tuple = if let Some(entry) = node.entry()? {
1487 entry.debug_tuple()
1503 entry.debug_tuple()
1488 } else if !all {
1504 } else if !all {
1489 return Ok(None);
1505 return Ok(None);
1490 } else if let Some(mtime) = node.cached_directory_mtime()? {
1506 } else if let Some(mtime) = node.cached_directory_mtime()? {
1491 (b' ', 0, -1, mtime.truncated_seconds() as i32)
1507 (b' ', 0, -1, mtime.truncated_seconds() as i32)
1492 } else {
1508 } else {
1493 (b' ', 0, -1, -1)
1509 (b' ', 0, -1, -1)
1494 };
1510 };
1495 Ok(Some((node.full_path(map.on_disk)?, debug_tuple)))
1511 Ok(Some((node.full_path(map.on_disk)?, debug_tuple)))
1496 }))
1512 }))
1497 }
1513 }
1498 }
1514 }
1499 #[cfg(test)]
1515 #[cfg(test)]
1500 mod tests {
1516 mod tests {
1501 use super::*;
1517 use super::*;
1502
1518
1503 /// Shortcut to return tracked descendants of a path.
1519 /// Shortcut to return tracked descendants of a path.
1504 /// Panics if the path does not exist.
1520 /// Panics if the path does not exist.
1505 fn tracked_descendants(map: &OwningDirstateMap, path: &[u8]) -> u32 {
1521 fn tracked_descendants(map: &OwningDirstateMap, path: &[u8]) -> u32 {
1506 let path = dbg!(HgPath::new(path));
1522 let path = dbg!(HgPath::new(path));
1507 let node = map.get_map().get_node(path);
1523 let node = map.get_map().get_node(path);
1508 node.unwrap().unwrap().tracked_descendants_count()
1524 node.unwrap().unwrap().tracked_descendants_count()
1509 }
1525 }
1510
1526
1511 /// Shortcut to return descendants with an entry.
1527 /// Shortcut to return descendants with an entry.
1512 /// Panics if the path does not exist.
1528 /// Panics if the path does not exist.
1513 fn descendants_with_an_entry(map: &OwningDirstateMap, path: &[u8]) -> u32 {
1529 fn descendants_with_an_entry(map: &OwningDirstateMap, path: &[u8]) -> u32 {
1514 let path = dbg!(HgPath::new(path));
1530 let path = dbg!(HgPath::new(path));
1515 let node = map.get_map().get_node(path);
1531 let node = map.get_map().get_node(path);
1516 node.unwrap().unwrap().descendants_with_entry_count()
1532 node.unwrap().unwrap().descendants_with_entry_count()
1517 }
1533 }
1518
1534
1519 fn assert_does_not_exist(map: &OwningDirstateMap, path: &[u8]) {
1535 fn assert_does_not_exist(map: &OwningDirstateMap, path: &[u8]) {
1520 let path = dbg!(HgPath::new(path));
1536 let path = dbg!(HgPath::new(path));
1521 let node = map.get_map().get_node(path);
1537 let node = map.get_map().get_node(path);
1522 assert!(node.unwrap().is_none());
1538 assert!(node.unwrap().is_none());
1523 }
1539 }
1524
1540
1525 /// Shortcut for path creation in tests
1541 /// Shortcut for path creation in tests
1526 fn p(b: &[u8]) -> &HgPath {
1542 fn p(b: &[u8]) -> &HgPath {
1527 HgPath::new(b)
1543 HgPath::new(b)
1528 }
1544 }
1529
1545
1530 /// Test the very simple case a single tracked file
1546 /// Test the very simple case a single tracked file
1531 #[test]
1547 #[test]
1532 fn test_tracked_descendants_simple() -> Result<(), DirstateError> {
1548 fn test_tracked_descendants_simple() -> Result<(), DirstateError> {
1533 let mut map = OwningDirstateMap::new_empty(vec![]);
1549 let mut map = OwningDirstateMap::new_empty(vec![]);
1534 assert_eq!(map.len(), 0);
1550 assert_eq!(map.len(), 0);
1535
1551
1536 map.set_tracked(p(b"some/nested/path"))?;
1552 map.set_tracked(p(b"some/nested/path"))?;
1537
1553
1538 assert_eq!(map.len(), 1);
1554 assert_eq!(map.len(), 1);
1539 assert_eq!(tracked_descendants(&map, b"some"), 1);
1555 assert_eq!(tracked_descendants(&map, b"some"), 1);
1540 assert_eq!(tracked_descendants(&map, b"some/nested"), 1);
1556 assert_eq!(tracked_descendants(&map, b"some/nested"), 1);
1541 assert_eq!(tracked_descendants(&map, b"some/nested/path"), 0);
1557 assert_eq!(tracked_descendants(&map, b"some/nested/path"), 0);
1542
1558
1543 map.set_untracked(p(b"some/nested/path"))?;
1559 map.set_untracked(p(b"some/nested/path"))?;
1544 assert_eq!(map.len(), 0);
1560 assert_eq!(map.len(), 0);
1545 assert!(map.get_map().get_node(p(b"some"))?.is_none());
1561 assert!(map.get_map().get_node(p(b"some"))?.is_none());
1546
1562
1547 Ok(())
1563 Ok(())
1548 }
1564 }
1549
1565
1550 /// Test the simple case of all tracked, but multiple files
1566 /// Test the simple case of all tracked, but multiple files
1551 #[test]
1567 #[test]
1552 fn test_tracked_descendants_multiple() -> Result<(), DirstateError> {
1568 fn test_tracked_descendants_multiple() -> Result<(), DirstateError> {
1553 let mut map = OwningDirstateMap::new_empty(vec![]);
1569 let mut map = OwningDirstateMap::new_empty(vec![]);
1554
1570
1555 map.set_tracked(p(b"some/nested/path"))?;
1571 map.set_tracked(p(b"some/nested/path"))?;
1556 map.set_tracked(p(b"some/nested/file"))?;
1572 map.set_tracked(p(b"some/nested/file"))?;
1557 // one layer without any files to test deletion cascade
1573 // one layer without any files to test deletion cascade
1558 map.set_tracked(p(b"some/other/nested/path"))?;
1574 map.set_tracked(p(b"some/other/nested/path"))?;
1559 map.set_tracked(p(b"root_file"))?;
1575 map.set_tracked(p(b"root_file"))?;
1560 map.set_tracked(p(b"some/file"))?;
1576 map.set_tracked(p(b"some/file"))?;
1561 map.set_tracked(p(b"some/file2"))?;
1577 map.set_tracked(p(b"some/file2"))?;
1562 map.set_tracked(p(b"some/file3"))?;
1578 map.set_tracked(p(b"some/file3"))?;
1563
1579
1564 assert_eq!(map.len(), 7);
1580 assert_eq!(map.len(), 7);
1565 assert_eq!(tracked_descendants(&map, b"some"), 6);
1581 assert_eq!(tracked_descendants(&map, b"some"), 6);
1566 assert_eq!(tracked_descendants(&map, b"some/nested"), 2);
1582 assert_eq!(tracked_descendants(&map, b"some/nested"), 2);
1567 assert_eq!(tracked_descendants(&map, b"some/other"), 1);
1583 assert_eq!(tracked_descendants(&map, b"some/other"), 1);
1568 assert_eq!(tracked_descendants(&map, b"some/other/nested"), 1);
1584 assert_eq!(tracked_descendants(&map, b"some/other/nested"), 1);
1569 assert_eq!(tracked_descendants(&map, b"some/nested/path"), 0);
1585 assert_eq!(tracked_descendants(&map, b"some/nested/path"), 0);
1570
1586
1571 map.set_untracked(p(b"some/nested/path"))?;
1587 map.set_untracked(p(b"some/nested/path"))?;
1572 assert_eq!(map.len(), 6);
1588 assert_eq!(map.len(), 6);
1573 assert_eq!(tracked_descendants(&map, b"some"), 5);
1589 assert_eq!(tracked_descendants(&map, b"some"), 5);
1574 assert_eq!(tracked_descendants(&map, b"some/nested"), 1);
1590 assert_eq!(tracked_descendants(&map, b"some/nested"), 1);
1575 assert_eq!(tracked_descendants(&map, b"some/other"), 1);
1591 assert_eq!(tracked_descendants(&map, b"some/other"), 1);
1576 assert_eq!(tracked_descendants(&map, b"some/other/nested"), 1);
1592 assert_eq!(tracked_descendants(&map, b"some/other/nested"), 1);
1577
1593
1578 map.set_untracked(p(b"some/nested/file"))?;
1594 map.set_untracked(p(b"some/nested/file"))?;
1579 assert_eq!(map.len(), 5);
1595 assert_eq!(map.len(), 5);
1580 assert_eq!(tracked_descendants(&map, b"some"), 4);
1596 assert_eq!(tracked_descendants(&map, b"some"), 4);
1581 assert_eq!(tracked_descendants(&map, b"some/other"), 1);
1597 assert_eq!(tracked_descendants(&map, b"some/other"), 1);
1582 assert_eq!(tracked_descendants(&map, b"some/other/nested"), 1);
1598 assert_eq!(tracked_descendants(&map, b"some/other/nested"), 1);
1583 assert_does_not_exist(&map, b"some_nested");
1599 assert_does_not_exist(&map, b"some_nested");
1584
1600
1585 map.set_untracked(p(b"some/other/nested/path"))?;
1601 map.set_untracked(p(b"some/other/nested/path"))?;
1586 assert_eq!(map.len(), 4);
1602 assert_eq!(map.len(), 4);
1587 assert_eq!(tracked_descendants(&map, b"some"), 3);
1603 assert_eq!(tracked_descendants(&map, b"some"), 3);
1588 assert_does_not_exist(&map, b"some/other");
1604 assert_does_not_exist(&map, b"some/other");
1589
1605
1590 map.set_untracked(p(b"root_file"))?;
1606 map.set_untracked(p(b"root_file"))?;
1591 assert_eq!(map.len(), 3);
1607 assert_eq!(map.len(), 3);
1592 assert_eq!(tracked_descendants(&map, b"some"), 3);
1608 assert_eq!(tracked_descendants(&map, b"some"), 3);
1593 assert_does_not_exist(&map, b"root_file");
1609 assert_does_not_exist(&map, b"root_file");
1594
1610
1595 map.set_untracked(p(b"some/file"))?;
1611 map.set_untracked(p(b"some/file"))?;
1596 assert_eq!(map.len(), 2);
1612 assert_eq!(map.len(), 2);
1597 assert_eq!(tracked_descendants(&map, b"some"), 2);
1613 assert_eq!(tracked_descendants(&map, b"some"), 2);
1598 assert_does_not_exist(&map, b"some/file");
1614 assert_does_not_exist(&map, b"some/file");
1599
1615
1600 map.set_untracked(p(b"some/file2"))?;
1616 map.set_untracked(p(b"some/file2"))?;
1601 assert_eq!(map.len(), 1);
1617 assert_eq!(map.len(), 1);
1602 assert_eq!(tracked_descendants(&map, b"some"), 1);
1618 assert_eq!(tracked_descendants(&map, b"some"), 1);
1603 assert_does_not_exist(&map, b"some/file2");
1619 assert_does_not_exist(&map, b"some/file2");
1604
1620
1605 map.set_untracked(p(b"some/file3"))?;
1621 map.set_untracked(p(b"some/file3"))?;
1606 assert_eq!(map.len(), 0);
1622 assert_eq!(map.len(), 0);
1607 assert_does_not_exist(&map, b"some/file3");
1623 assert_does_not_exist(&map, b"some/file3");
1608
1624
1609 Ok(())
1625 Ok(())
1610 }
1626 }
1611
1627
1612 /// Check with a mix of tracked and non-tracked items
1628 /// Check with a mix of tracked and non-tracked items
1613 #[test]
1629 #[test]
1614 fn test_tracked_descendants_different() -> Result<(), DirstateError> {
1630 fn test_tracked_descendants_different() -> Result<(), DirstateError> {
1615 let mut map = OwningDirstateMap::new_empty(vec![]);
1631 let mut map = OwningDirstateMap::new_empty(vec![]);
1616
1632
1617 // A file that was just added
1633 // A file that was just added
1618 map.set_tracked(p(b"some/nested/path"))?;
1634 map.set_tracked(p(b"some/nested/path"))?;
1619 // This has no information, the dirstate should ignore it
1635 // This has no information, the dirstate should ignore it
1620 map.reset_state(p(b"some/file"), false, false, false, false, None)?;
1636 map.reset_state(p(b"some/file"), false, false, false, false, None)?;
1621 assert_does_not_exist(&map, b"some/file");
1637 assert_does_not_exist(&map, b"some/file");
1622
1638
1623 // A file that was removed
1639 // A file that was removed
1624 map.reset_state(
1640 map.reset_state(
1625 p(b"some/nested/file"),
1641 p(b"some/nested/file"),
1626 false,
1642 false,
1627 true,
1643 true,
1628 false,
1644 false,
1629 false,
1645 false,
1630 None,
1646 None,
1631 )?;
1647 )?;
1632 assert!(!map.get(p(b"some/nested/file"))?.unwrap().tracked());
1648 assert!(!map.get(p(b"some/nested/file"))?.unwrap().tracked());
1633 // Only present in p2
1649 // Only present in p2
1634 map.reset_state(p(b"some/file3"), false, false, true, false, None)?;
1650 map.reset_state(p(b"some/file3"), false, false, true, false, None)?;
1635 assert!(!map.get(p(b"some/file3"))?.unwrap().tracked());
1651 assert!(!map.get(p(b"some/file3"))?.unwrap().tracked());
1636 // A file that was merged
1652 // A file that was merged
1637 map.reset_state(p(b"root_file"), true, true, true, false, None)?;
1653 map.reset_state(p(b"root_file"), true, true, true, false, None)?;
1638 assert!(map.get(p(b"root_file"))?.unwrap().tracked());
1654 assert!(map.get(p(b"root_file"))?.unwrap().tracked());
1639 // A file that is added, with info from p2
1655 // A file that is added, with info from p2
1640 // XXX is that actually possible?
1656 // XXX is that actually possible?
1641 map.reset_state(p(b"some/file2"), true, false, true, false, None)?;
1657 map.reset_state(p(b"some/file2"), true, false, true, false, None)?;
1642 assert!(map.get(p(b"some/file2"))?.unwrap().tracked());
1658 assert!(map.get(p(b"some/file2"))?.unwrap().tracked());
1643 // A clean file
1659 // A clean file
1644 // One layer without any files to test deletion cascade
1660 // One layer without any files to test deletion cascade
1645 map.reset_state(
1661 map.reset_state(
1646 p(b"some/other/nested/path"),
1662 p(b"some/other/nested/path"),
1647 true,
1663 true,
1648 true,
1664 true,
1649 false,
1665 false,
1650 false,
1666 false,
1651 None,
1667 None,
1652 )?;
1668 )?;
1653 assert!(map.get(p(b"some/other/nested/path"))?.unwrap().tracked());
1669 assert!(map.get(p(b"some/other/nested/path"))?.unwrap().tracked());
1654
1670
1655 assert_eq!(map.len(), 6);
1671 assert_eq!(map.len(), 6);
1656 assert_eq!(tracked_descendants(&map, b"some"), 3);
1672 assert_eq!(tracked_descendants(&map, b"some"), 3);
1657 assert_eq!(descendants_with_an_entry(&map, b"some"), 5);
1673 assert_eq!(descendants_with_an_entry(&map, b"some"), 5);
1658 assert_eq!(tracked_descendants(&map, b"some/other/nested"), 1);
1674 assert_eq!(tracked_descendants(&map, b"some/other/nested"), 1);
1659 assert_eq!(descendants_with_an_entry(&map, b"some/other/nested"), 1);
1675 assert_eq!(descendants_with_an_entry(&map, b"some/other/nested"), 1);
1660 assert_eq!(tracked_descendants(&map, b"some/other/nested/path"), 0);
1676 assert_eq!(tracked_descendants(&map, b"some/other/nested/path"), 0);
1661 assert_eq!(
1677 assert_eq!(
1662 descendants_with_an_entry(&map, b"some/other/nested/path"),
1678 descendants_with_an_entry(&map, b"some/other/nested/path"),
1663 0
1679 0
1664 );
1680 );
1665 assert_eq!(tracked_descendants(&map, b"some/nested"), 1);
1681 assert_eq!(tracked_descendants(&map, b"some/nested"), 1);
1666 assert_eq!(descendants_with_an_entry(&map, b"some/nested"), 2);
1682 assert_eq!(descendants_with_an_entry(&map, b"some/nested"), 2);
1667
1683
1668 // might as well check this
1684 // might as well check this
1669 map.set_untracked(p(b"path/does/not/exist"))?;
1685 map.set_untracked(p(b"path/does/not/exist"))?;
1670 assert_eq!(map.len(), 6);
1686 assert_eq!(map.len(), 6);
1671
1687
1672 map.set_untracked(p(b"some/other/nested/path"))?;
1688 map.set_untracked(p(b"some/other/nested/path"))?;
1673 // It is set untracked but not deleted since it held other information
1689 // It is set untracked but not deleted since it held other information
1674 assert_eq!(map.len(), 6);
1690 assert_eq!(map.len(), 6);
1675 assert_eq!(tracked_descendants(&map, b"some"), 2);
1691 assert_eq!(tracked_descendants(&map, b"some"), 2);
1676 assert_eq!(descendants_with_an_entry(&map, b"some"), 5);
1692 assert_eq!(descendants_with_an_entry(&map, b"some"), 5);
1677 assert_eq!(descendants_with_an_entry(&map, b"some/other"), 1);
1693 assert_eq!(descendants_with_an_entry(&map, b"some/other"), 1);
1678 assert_eq!(descendants_with_an_entry(&map, b"some/other/nested"), 1);
1694 assert_eq!(descendants_with_an_entry(&map, b"some/other/nested"), 1);
1679 assert_eq!(tracked_descendants(&map, b"some/nested"), 1);
1695 assert_eq!(tracked_descendants(&map, b"some/nested"), 1);
1680 assert_eq!(descendants_with_an_entry(&map, b"some/nested"), 2);
1696 assert_eq!(descendants_with_an_entry(&map, b"some/nested"), 2);
1681
1697
1682 map.set_untracked(p(b"some/nested/path"))?;
1698 map.set_untracked(p(b"some/nested/path"))?;
1683 // It is set untracked *and* deleted since it was only added
1699 // It is set untracked *and* deleted since it was only added
1684 assert_eq!(map.len(), 5);
1700 assert_eq!(map.len(), 5);
1685 assert_eq!(tracked_descendants(&map, b"some"), 1);
1701 assert_eq!(tracked_descendants(&map, b"some"), 1);
1686 assert_eq!(descendants_with_an_entry(&map, b"some"), 4);
1702 assert_eq!(descendants_with_an_entry(&map, b"some"), 4);
1687 assert_eq!(tracked_descendants(&map, b"some/nested"), 0);
1703 assert_eq!(tracked_descendants(&map, b"some/nested"), 0);
1688 assert_eq!(descendants_with_an_entry(&map, b"some/nested"), 1);
1704 assert_eq!(descendants_with_an_entry(&map, b"some/nested"), 1);
1689 assert_does_not_exist(&map, b"some/nested/path");
1705 assert_does_not_exist(&map, b"some/nested/path");
1690
1706
1691 map.set_untracked(p(b"root_file"))?;
1707 map.set_untracked(p(b"root_file"))?;
1692 // Untracked but not deleted
1708 // Untracked but not deleted
1693 assert_eq!(map.len(), 5);
1709 assert_eq!(map.len(), 5);
1694 assert!(map.get(p(b"root_file"))?.is_some());
1710 assert!(map.get(p(b"root_file"))?.is_some());
1695
1711
1696 map.set_untracked(p(b"some/file2"))?;
1712 map.set_untracked(p(b"some/file2"))?;
1697 assert_eq!(map.len(), 5);
1713 assert_eq!(map.len(), 5);
1698 assert_eq!(tracked_descendants(&map, b"some"), 0);
1714 assert_eq!(tracked_descendants(&map, b"some"), 0);
1699 assert!(map.get(p(b"some/file2"))?.is_some());
1715 assert!(map.get(p(b"some/file2"))?.is_some());
1700
1716
1701 map.set_untracked(p(b"some/file3"))?;
1717 map.set_untracked(p(b"some/file3"))?;
1702 assert_eq!(map.len(), 5);
1718 assert_eq!(map.len(), 5);
1703 assert_eq!(tracked_descendants(&map, b"some"), 0);
1719 assert_eq!(tracked_descendants(&map, b"some"), 0);
1704 assert!(map.get(p(b"some/file3"))?.is_some());
1720 assert!(map.get(p(b"some/file3"))?.is_some());
1705
1721
1706 Ok(())
1722 Ok(())
1707 }
1723 }
1708
1724
1709 /// Check that copies counter is correctly updated
1725 /// Check that copies counter is correctly updated
1710 #[test]
1726 #[test]
1711 fn test_copy_source() -> Result<(), DirstateError> {
1727 fn test_copy_source() -> Result<(), DirstateError> {
1712 let mut map = OwningDirstateMap::new_empty(vec![]);
1728 let mut map = OwningDirstateMap::new_empty(vec![]);
1713
1729
1714 // Clean file
1730 // Clean file
1715 map.reset_state(p(b"files/clean"), true, true, false, false, None)?;
1731 map.reset_state(p(b"files/clean"), true, true, false, false, None)?;
1716 // Merged file
1732 // Merged file
1717 map.reset_state(p(b"files/from_p2"), true, true, true, false, None)?;
1733 map.reset_state(p(b"files/from_p2"), true, true, true, false, None)?;
1718 // Removed file
1734 // Removed file
1719 map.reset_state(p(b"removed"), false, true, false, false, None)?;
1735 map.reset_state(p(b"removed"), false, true, false, false, None)?;
1720 // Added file
1736 // Added file
1721 map.reset_state(p(b"files/added"), true, false, false, false, None)?;
1737 map.reset_state(p(b"files/added"), true, false, false, false, None)?;
1722 // Add copy
1738 // Add copy
1723 map.copy_map_insert(p(b"files/clean"), p(b"clean_copy_source"))?;
1739 map.copy_map_insert(p(b"files/clean"), p(b"clean_copy_source"))?;
1724 assert_eq!(map.copy_map_len(), 1);
1740 assert_eq!(map.copy_map_len(), 1);
1725
1741
1726 // Copy override
1742 // Copy override
1727 map.copy_map_insert(p(b"files/clean"), p(b"other_clean_copy_source"))?;
1743 map.copy_map_insert(p(b"files/clean"), p(b"other_clean_copy_source"))?;
1728 assert_eq!(map.copy_map_len(), 1);
1744 assert_eq!(map.copy_map_len(), 1);
1729
1745
1730 // Multiple copies
1746 // Multiple copies
1731 map.copy_map_insert(p(b"removed"), p(b"removed_copy_source"))?;
1747 map.copy_map_insert(p(b"removed"), p(b"removed_copy_source"))?;
1732 assert_eq!(map.copy_map_len(), 2);
1748 assert_eq!(map.copy_map_len(), 2);
1733
1749
1734 map.copy_map_insert(p(b"files/added"), p(b"added_copy_source"))?;
1750 map.copy_map_insert(p(b"files/added"), p(b"added_copy_source"))?;
1735 assert_eq!(map.copy_map_len(), 3);
1751 assert_eq!(map.copy_map_len(), 3);
1736
1752
1737 // Added, so the entry is completely removed
1753 // Added, so the entry is completely removed
1738 map.set_untracked(p(b"files/added"))?;
1754 map.set_untracked(p(b"files/added"))?;
1739 assert_does_not_exist(&map, b"files/added");
1755 assert_does_not_exist(&map, b"files/added");
1740 assert_eq!(map.copy_map_len(), 2);
1756 assert_eq!(map.copy_map_len(), 2);
1741
1757
1742 // Removed, so the entry is kept around, so is its copy
1758 // Removed, so the entry is kept around, so is its copy
1743 map.set_untracked(p(b"removed"))?;
1759 map.set_untracked(p(b"removed"))?;
1744 assert!(map.get(p(b"removed"))?.is_some());
1760 assert!(map.get(p(b"removed"))?.is_some());
1745 assert_eq!(map.copy_map_len(), 2);
1761 assert_eq!(map.copy_map_len(), 2);
1746
1762
1747 // Clean, so the entry is kept around, but not its copy
1763 // Clean, so the entry is kept around, but not its copy
1748 map.set_untracked(p(b"files/clean"))?;
1764 map.set_untracked(p(b"files/clean"))?;
1749 assert!(map.get(p(b"files/clean"))?.is_some());
1765 assert!(map.get(p(b"files/clean"))?.is_some());
1750 assert_eq!(map.copy_map_len(), 1);
1766 assert_eq!(map.copy_map_len(), 1);
1751
1767
1752 map.copy_map_insert(p(b"files/from_p2"), p(b"from_p2_copy_source"))?;
1768 map.copy_map_insert(p(b"files/from_p2"), p(b"from_p2_copy_source"))?;
1753 assert_eq!(map.copy_map_len(), 2);
1769 assert_eq!(map.copy_map_len(), 2);
1754
1770
1755 // Info from p2, so its copy source info is kept around
1771 // Info from p2, so its copy source info is kept around
1756 map.set_untracked(p(b"files/from_p2"))?;
1772 map.set_untracked(p(b"files/from_p2"))?;
1757 assert!(map.get(p(b"files/from_p2"))?.is_some());
1773 assert!(map.get(p(b"files/from_p2"))?.is_some());
1758 assert_eq!(map.copy_map_len(), 2);
1774 assert_eq!(map.copy_map_len(), 2);
1759
1775
1760 Ok(())
1776 Ok(())
1761 }
1777 }
1762
1778
1763 /// Test with "on disk" data. For the sake of this test, the "on disk" data
1779 /// Test with "on disk" data. For the sake of this test, the "on disk" data
1764 /// does not actually come from the disk, but it's opaque to the code being
1780 /// does not actually come from the disk, but it's opaque to the code being
1765 /// tested.
1781 /// tested.
1766 #[test]
1782 #[test]
1767 fn test_on_disk() -> Result<(), DirstateError> {
1783 fn test_on_disk() -> Result<(), DirstateError> {
1768 // First let's create some data to put "on disk"
1784 // First let's create some data to put "on disk"
1769 let mut map = OwningDirstateMap::new_empty(vec![]);
1785 let mut map = OwningDirstateMap::new_empty(vec![]);
1770
1786
1771 // A file that was just added
1787 // A file that was just added
1772 map.set_tracked(p(b"some/nested/added"))?;
1788 map.set_tracked(p(b"some/nested/added"))?;
1773 map.copy_map_insert(p(b"some/nested/added"), p(b"added_copy_source"))?;
1789 map.copy_map_insert(p(b"some/nested/added"), p(b"added_copy_source"))?;
1774
1790
1775 // A file that was removed
1791 // A file that was removed
1776 map.reset_state(
1792 map.reset_state(
1777 p(b"some/nested/removed"),
1793 p(b"some/nested/removed"),
1778 false,
1794 false,
1779 true,
1795 true,
1780 false,
1796 false,
1781 false,
1797 false,
1782 None,
1798 None,
1783 )?;
1799 )?;
1784 // Only present in p2
1800 // Only present in p2
1785 map.reset_state(
1801 map.reset_state(
1786 p(b"other/p2_info_only"),
1802 p(b"other/p2_info_only"),
1787 false,
1803 false,
1788 false,
1804 false,
1789 true,
1805 true,
1790 false,
1806 false,
1791 None,
1807 None,
1792 )?;
1808 )?;
1793 map.copy_map_insert(
1809 map.copy_map_insert(
1794 p(b"other/p2_info_only"),
1810 p(b"other/p2_info_only"),
1795 p(b"other/p2_info_copy_source"),
1811 p(b"other/p2_info_copy_source"),
1796 )?;
1812 )?;
1797 // A file that was merged
1813 // A file that was merged
1798 map.reset_state(p(b"merged"), true, true, true, false, None)?;
1814 map.reset_state(p(b"merged"), true, true, true, false, None)?;
1799 // A file that is added, with info from p2
1815 // A file that is added, with info from p2
1800 // XXX is that actually possible?
1816 // XXX is that actually possible?
1801 map.reset_state(
1817 map.reset_state(
1802 p(b"other/added_with_p2"),
1818 p(b"other/added_with_p2"),
1803 true,
1819 true,
1804 false,
1820 false,
1805 true,
1821 true,
1806 false,
1822 false,
1807 None,
1823 None,
1808 )?;
1824 )?;
1809 // One layer without any files to test deletion cascade
1825 // One layer without any files to test deletion cascade
1810 // A clean file
1826 // A clean file
1811 map.reset_state(
1827 map.reset_state(
1812 p(b"some/other/nested/clean"),
1828 p(b"some/other/nested/clean"),
1813 true,
1829 true,
1814 true,
1830 true,
1815 false,
1831 false,
1816 false,
1832 false,
1817 None,
1833 None,
1818 )?;
1834 )?;
1819
1835
1820 let (packed, metadata, _should_append, _old_data_size) =
1836 let (packed, metadata, _should_append, _old_data_size) =
1821 map.pack_v2(DirstateMapWriteMode::ForceNewDataFile)?;
1837 map.pack_v2(DirstateMapWriteMode::ForceNewDataFile)?;
1822 let packed_len = packed.len();
1838 let packed_len = packed.len();
1823 assert!(packed_len > 0);
1839 assert!(packed_len > 0);
1824
1840
1825 // Recreate "from disk"
1841 // Recreate "from disk"
1826 let mut map = OwningDirstateMap::new_v2(
1842 let mut map = OwningDirstateMap::new_v2(
1827 packed,
1843 packed,
1828 packed_len,
1844 packed_len,
1829 metadata.as_bytes(),
1845 metadata.as_bytes(),
1830 )?;
1846 )?;
1831
1847
1832 // Check that everything is accounted for
1848 // Check that everything is accounted for
1833 assert!(map.contains_key(p(b"some/nested/added"))?);
1849 assert!(map.contains_key(p(b"some/nested/added"))?);
1834 assert!(map.contains_key(p(b"some/nested/removed"))?);
1850 assert!(map.contains_key(p(b"some/nested/removed"))?);
1835 assert!(map.contains_key(p(b"merged"))?);
1851 assert!(map.contains_key(p(b"merged"))?);
1836 assert!(map.contains_key(p(b"other/p2_info_only"))?);
1852 assert!(map.contains_key(p(b"other/p2_info_only"))?);
1837 assert!(map.contains_key(p(b"other/added_with_p2"))?);
1853 assert!(map.contains_key(p(b"other/added_with_p2"))?);
1838 assert!(map.contains_key(p(b"some/other/nested/clean"))?);
1854 assert!(map.contains_key(p(b"some/other/nested/clean"))?);
1839 assert_eq!(
1855 assert_eq!(
1840 map.copy_map_get(p(b"some/nested/added"))?,
1856 map.copy_map_get(p(b"some/nested/added"))?,
1841 Some(p(b"added_copy_source"))
1857 Some(p(b"added_copy_source"))
1842 );
1858 );
1843 assert_eq!(
1859 assert_eq!(
1844 map.copy_map_get(p(b"other/p2_info_only"))?,
1860 map.copy_map_get(p(b"other/p2_info_only"))?,
1845 Some(p(b"other/p2_info_copy_source"))
1861 Some(p(b"other/p2_info_copy_source"))
1846 );
1862 );
1847 assert_eq!(tracked_descendants(&map, b"some"), 2);
1863 assert_eq!(tracked_descendants(&map, b"some"), 2);
1848 assert_eq!(descendants_with_an_entry(&map, b"some"), 3);
1864 assert_eq!(descendants_with_an_entry(&map, b"some"), 3);
1849 assert_eq!(tracked_descendants(&map, b"other"), 1);
1865 assert_eq!(tracked_descendants(&map, b"other"), 1);
1850 assert_eq!(descendants_with_an_entry(&map, b"other"), 2);
1866 assert_eq!(descendants_with_an_entry(&map, b"other"), 2);
1851 assert_eq!(tracked_descendants(&map, b"some/other"), 1);
1867 assert_eq!(tracked_descendants(&map, b"some/other"), 1);
1852 assert_eq!(descendants_with_an_entry(&map, b"some/other"), 1);
1868 assert_eq!(descendants_with_an_entry(&map, b"some/other"), 1);
1853 assert_eq!(tracked_descendants(&map, b"some/other/nested"), 1);
1869 assert_eq!(tracked_descendants(&map, b"some/other/nested"), 1);
1854 assert_eq!(descendants_with_an_entry(&map, b"some/other/nested"), 1);
1870 assert_eq!(descendants_with_an_entry(&map, b"some/other/nested"), 1);
1855 assert_eq!(tracked_descendants(&map, b"some/nested"), 1);
1871 assert_eq!(tracked_descendants(&map, b"some/nested"), 1);
1856 assert_eq!(descendants_with_an_entry(&map, b"some/nested"), 2);
1872 assert_eq!(descendants_with_an_entry(&map, b"some/nested"), 2);
1857 assert_eq!(map.len(), 6);
1873 assert_eq!(map.len(), 6);
1858 assert_eq!(map.get_map().unreachable_bytes, 0);
1874 assert_eq!(map.get_map().unreachable_bytes, 0);
1859 assert_eq!(map.copy_map_len(), 2);
1875 assert_eq!(map.copy_map_len(), 2);
1860
1876
1861 // Shouldn't change anything since it's already not tracked
1877 // Shouldn't change anything since it's already not tracked
1862 map.set_untracked(p(b"some/nested/removed"))?;
1878 map.set_untracked(p(b"some/nested/removed"))?;
1863 assert_eq!(map.get_map().unreachable_bytes, 0);
1879 assert_eq!(map.get_map().unreachable_bytes, 0);
1864
1880
1865 match map.get_map().root {
1881 match map.get_map().root {
1866 ChildNodes::InMemory(_) => {
1882 ChildNodes::InMemory(_) => {
1867 panic!("root should not have been mutated")
1883 panic!("root should not have been mutated")
1868 }
1884 }
1869 _ => (),
1885 _ => (),
1870 }
1886 }
1871 // We haven't mutated enough (nothing, actually), we should still be in
1887 // We haven't mutated enough (nothing, actually), we should still be in
1872 // the append strategy
1888 // the append strategy
1873 assert!(map.get_map().write_should_append());
1889 assert!(map.get_map().write_should_append());
1874
1890
1875 // But this mutates the structure, so there should be unreachable_bytes
1891 // But this mutates the structure, so there should be unreachable_bytes
1876 assert!(map.set_untracked(p(b"some/nested/added"))?);
1892 assert!(map.set_untracked(p(b"some/nested/added"))?);
1877 let unreachable_bytes = map.get_map().unreachable_bytes;
1893 let unreachable_bytes = map.get_map().unreachable_bytes;
1878 assert!(unreachable_bytes > 0);
1894 assert!(unreachable_bytes > 0);
1879
1895
1880 match map.get_map().root {
1896 match map.get_map().root {
1881 ChildNodes::OnDisk(_) => panic!("root should have been mutated"),
1897 ChildNodes::OnDisk(_) => panic!("root should have been mutated"),
1882 _ => (),
1898 _ => (),
1883 }
1899 }
1884
1900
1885 // This should not mutate the structure either, since `root` has
1901 // This should not mutate the structure either, since `root` has
1886 // already been mutated along with its direct children.
1902 // already been mutated along with its direct children.
1887 map.set_untracked(p(b"merged"))?;
1903 map.set_untracked(p(b"merged"))?;
1888 assert_eq!(map.get_map().unreachable_bytes, unreachable_bytes);
1904 assert_eq!(map.get_map().unreachable_bytes, unreachable_bytes);
1889
1905
1890 match map.get_map().get_node(p(b"other/added_with_p2"))?.unwrap() {
1906 match map.get_map().get_node(p(b"other/added_with_p2"))?.unwrap() {
1891 NodeRef::InMemory(_, _) => {
1907 NodeRef::InMemory(_, _) => {
1892 panic!("'other/added_with_p2' should not have been mutated")
1908 panic!("'other/added_with_p2' should not have been mutated")
1893 }
1909 }
1894 _ => (),
1910 _ => (),
1895 }
1911 }
1896 // But this should, since it's in a different path
1912 // But this should, since it's in a different path
1897 // than `<root>some/nested/add`
1913 // than `<root>some/nested/add`
1898 map.set_untracked(p(b"other/added_with_p2"))?;
1914 map.set_untracked(p(b"other/added_with_p2"))?;
1899 assert!(map.get_map().unreachable_bytes > unreachable_bytes);
1915 assert!(map.get_map().unreachable_bytes > unreachable_bytes);
1900
1916
1901 match map.get_map().get_node(p(b"other/added_with_p2"))?.unwrap() {
1917 match map.get_map().get_node(p(b"other/added_with_p2"))?.unwrap() {
1902 NodeRef::OnDisk(_) => {
1918 NodeRef::OnDisk(_) => {
1903 panic!("'other/added_with_p2' should have been mutated")
1919 panic!("'other/added_with_p2' should have been mutated")
1904 }
1920 }
1905 _ => (),
1921 _ => (),
1906 }
1922 }
1907
1923
1908 // We have rewritten most of the tree, we should create a new file
1924 // We have rewritten most of the tree, we should create a new file
1909 assert!(!map.get_map().write_should_append());
1925 assert!(!map.get_map().write_should_append());
1910
1926
1911 Ok(())
1927 Ok(())
1912 }
1928 }
1913 }
1929 }
@@ -1,888 +1,890 b''
1 //! The "version 2" disk representation of the dirstate
1 //! The "version 2" disk representation of the dirstate
2 //!
2 //!
3 //! See `mercurial/helptext/internals/dirstate-v2.txt`
3 //! See `mercurial/helptext/internals/dirstate-v2.txt`
4
4
5 use crate::dirstate::{DirstateV2Data, TruncatedTimestamp};
5 use crate::dirstate::{DirstateV2Data, TruncatedTimestamp};
6 use crate::dirstate_tree::dirstate_map::DirstateVersion;
6 use crate::dirstate_tree::dirstate_map::DirstateVersion;
7 use crate::dirstate_tree::dirstate_map::{
7 use crate::dirstate_tree::dirstate_map::{
8 self, DirstateMap, DirstateMapWriteMode, NodeRef,
8 self, DirstateMap, DirstateMapWriteMode, NodeRef,
9 };
9 };
10 use crate::dirstate_tree::path_with_basename::WithBasename;
10 use crate::dirstate_tree::path_with_basename::WithBasename;
11 use crate::errors::HgError;
11 use crate::errors::HgError;
12 use crate::utils::hg_path::HgPath;
12 use crate::utils::hg_path::HgPath;
13 use crate::DirstateEntry;
13 use crate::DirstateEntry;
14 use crate::DirstateError;
14 use crate::DirstateError;
15 use crate::DirstateParents;
15 use crate::DirstateParents;
16 use bitflags::bitflags;
16 use bitflags::bitflags;
17 use bytes_cast::unaligned::{U16Be, U32Be};
17 use bytes_cast::unaligned::{U16Be, U32Be};
18 use bytes_cast::BytesCast;
18 use bytes_cast::BytesCast;
19 use format_bytes::format_bytes;
19 use format_bytes::format_bytes;
20 use rand::Rng;
20 use rand::Rng;
21 use std::borrow::Cow;
21 use std::borrow::Cow;
22 use std::convert::{TryFrom, TryInto};
22 use std::convert::{TryFrom, TryInto};
23 use std::fmt::Write;
23 use std::fmt::Write;
24
24
25 /// Added at the start of `.hg/dirstate` when the "v2" format is used.
25 /// Added at the start of `.hg/dirstate` when the "v2" format is used.
26 /// This a redundant sanity check more than an actual "magic number" since
26 /// This a redundant sanity check more than an actual "magic number" since
27 /// `.hg/requires` already governs which format should be used.
27 /// `.hg/requires` already governs which format should be used.
28 pub const V2_FORMAT_MARKER: &[u8; 12] = b"dirstate-v2\n";
28 pub const V2_FORMAT_MARKER: &[u8; 12] = b"dirstate-v2\n";
29
29
30 /// Keep space for 256-bit hashes
30 /// Keep space for 256-bit hashes
31 const STORED_NODE_ID_BYTES: usize = 32;
31 const STORED_NODE_ID_BYTES: usize = 32;
32
32
33 /// … even though only 160 bits are used for now, with SHA-1
33 /// … even though only 160 bits are used for now, with SHA-1
34 const USED_NODE_ID_BYTES: usize = 20;
34 const USED_NODE_ID_BYTES: usize = 20;
35
35
36 pub(super) const IGNORE_PATTERNS_HASH_LEN: usize = 20;
36 pub(super) const IGNORE_PATTERNS_HASH_LEN: usize = 20;
37 pub(super) type IgnorePatternsHash = [u8; IGNORE_PATTERNS_HASH_LEN];
37 pub(super) type IgnorePatternsHash = [u8; IGNORE_PATTERNS_HASH_LEN];
38
38
39 /// Must match constants of the same names in `mercurial/dirstateutils/v2.py`
39 /// Must match constants of the same names in `mercurial/dirstateutils/v2.py`
40 const TREE_METADATA_SIZE: usize = 44;
40 const TREE_METADATA_SIZE: usize = 44;
41 const NODE_SIZE: usize = 44;
41 const NODE_SIZE: usize = 44;
42
42
43 /// Make sure that size-affecting changes are made knowingly
43 /// Make sure that size-affecting changes are made knowingly
44 #[allow(unused)]
44 #[allow(unused)]
45 fn static_assert_size_of() {
45 fn static_assert_size_of() {
46 let _ = std::mem::transmute::<TreeMetadata, [u8; TREE_METADATA_SIZE]>;
46 let _ = std::mem::transmute::<TreeMetadata, [u8; TREE_METADATA_SIZE]>;
47 let _ = std::mem::transmute::<DocketHeader, [u8; TREE_METADATA_SIZE + 81]>;
47 let _ = std::mem::transmute::<DocketHeader, [u8; TREE_METADATA_SIZE + 81]>;
48 let _ = std::mem::transmute::<Node, [u8; NODE_SIZE]>;
48 let _ = std::mem::transmute::<Node, [u8; NODE_SIZE]>;
49 }
49 }
50
50
51 // Must match `HEADER` in `mercurial/dirstateutils/docket.py`
51 // Must match `HEADER` in `mercurial/dirstateutils/docket.py`
52 #[derive(BytesCast)]
52 #[derive(BytesCast)]
53 #[repr(C)]
53 #[repr(C)]
54 struct DocketHeader {
54 struct DocketHeader {
55 marker: [u8; V2_FORMAT_MARKER.len()],
55 marker: [u8; V2_FORMAT_MARKER.len()],
56 parent_1: [u8; STORED_NODE_ID_BYTES],
56 parent_1: [u8; STORED_NODE_ID_BYTES],
57 parent_2: [u8; STORED_NODE_ID_BYTES],
57 parent_2: [u8; STORED_NODE_ID_BYTES],
58
58
59 metadata: TreeMetadata,
59 metadata: TreeMetadata,
60
60
61 /// Counted in bytes
61 /// Counted in bytes
62 data_size: Size,
62 data_size: Size,
63
63
64 uuid_size: u8,
64 uuid_size: u8,
65 }
65 }
66
66
67 pub struct Docket<'on_disk> {
67 pub struct Docket<'on_disk> {
68 header: &'on_disk DocketHeader,
68 header: &'on_disk DocketHeader,
69 pub uuid: &'on_disk [u8],
69 pub uuid: &'on_disk [u8],
70 }
70 }
71
71
72 /// Fields are documented in the *Tree metadata in the docket file*
72 /// Fields are documented in the *Tree metadata in the docket file*
73 /// section of `mercurial/helptext/internals/dirstate-v2.txt`
73 /// section of `mercurial/helptext/internals/dirstate-v2.txt`
74 #[derive(BytesCast)]
74 #[derive(BytesCast)]
75 #[repr(C)]
75 #[repr(C)]
76 pub struct TreeMetadata {
76 pub struct TreeMetadata {
77 root_nodes: ChildNodes,
77 root_nodes: ChildNodes,
78 nodes_with_entry_count: Size,
78 nodes_with_entry_count: Size,
79 nodes_with_copy_source_count: Size,
79 nodes_with_copy_source_count: Size,
80 unreachable_bytes: Size,
80 unreachable_bytes: Size,
81 unused: [u8; 4],
81 unused: [u8; 4],
82
82
83 /// See *Optional hash of ignore patterns* section of
83 /// See *Optional hash of ignore patterns* section of
84 /// `mercurial/helptext/internals/dirstate-v2.txt`
84 /// `mercurial/helptext/internals/dirstate-v2.txt`
85 ignore_patterns_hash: IgnorePatternsHash,
85 ignore_patterns_hash: IgnorePatternsHash,
86 }
86 }
87
87
88 /// Fields are documented in the *The data file format*
88 /// Fields are documented in the *The data file format*
89 /// section of `mercurial/helptext/internals/dirstate-v2.txt`
89 /// section of `mercurial/helptext/internals/dirstate-v2.txt`
90 #[derive(BytesCast, Debug)]
90 #[derive(BytesCast, Debug)]
91 #[repr(C)]
91 #[repr(C)]
92 pub(super) struct Node {
92 pub(super) struct Node {
93 full_path: PathSlice,
93 full_path: PathSlice,
94
94
95 /// In bytes from `self.full_path.start`
95 /// In bytes from `self.full_path.start`
96 base_name_start: PathSize,
96 base_name_start: PathSize,
97
97
98 copy_source: OptPathSlice,
98 copy_source: OptPathSlice,
99 children: ChildNodes,
99 children: ChildNodes,
100 pub(super) descendants_with_entry_count: Size,
100 pub(super) descendants_with_entry_count: Size,
101 pub(super) tracked_descendants_count: Size,
101 pub(super) tracked_descendants_count: Size,
102 flags: U16Be,
102 flags: U16Be,
103 size: U32Be,
103 size: U32Be,
104 mtime: PackedTruncatedTimestamp,
104 mtime: PackedTruncatedTimestamp,
105 }
105 }
106
106
107 bitflags! {
107 bitflags! {
108 #[repr(C)]
108 #[repr(C)]
109 struct Flags: u16 {
109 struct Flags: u16 {
110 const WDIR_TRACKED = 1 << 0;
110 const WDIR_TRACKED = 1 << 0;
111 const P1_TRACKED = 1 << 1;
111 const P1_TRACKED = 1 << 1;
112 const P2_INFO = 1 << 2;
112 const P2_INFO = 1 << 2;
113 const MODE_EXEC_PERM = 1 << 3;
113 const MODE_EXEC_PERM = 1 << 3;
114 const MODE_IS_SYMLINK = 1 << 4;
114 const MODE_IS_SYMLINK = 1 << 4;
115 const HAS_FALLBACK_EXEC = 1 << 5;
115 const HAS_FALLBACK_EXEC = 1 << 5;
116 const FALLBACK_EXEC = 1 << 6;
116 const FALLBACK_EXEC = 1 << 6;
117 const HAS_FALLBACK_SYMLINK = 1 << 7;
117 const HAS_FALLBACK_SYMLINK = 1 << 7;
118 const FALLBACK_SYMLINK = 1 << 8;
118 const FALLBACK_SYMLINK = 1 << 8;
119 const EXPECTED_STATE_IS_MODIFIED = 1 << 9;
119 const EXPECTED_STATE_IS_MODIFIED = 1 << 9;
120 const HAS_MODE_AND_SIZE = 1 <<10;
120 const HAS_MODE_AND_SIZE = 1 <<10;
121 const HAS_MTIME = 1 <<11;
121 const HAS_MTIME = 1 <<11;
122 const MTIME_SECOND_AMBIGUOUS = 1 << 12;
122 const MTIME_SECOND_AMBIGUOUS = 1 << 12;
123 const DIRECTORY = 1 <<13;
123 const DIRECTORY = 1 <<13;
124 const ALL_UNKNOWN_RECORDED = 1 <<14;
124 const ALL_UNKNOWN_RECORDED = 1 <<14;
125 const ALL_IGNORED_RECORDED = 1 <<15;
125 const ALL_IGNORED_RECORDED = 1 <<15;
126 }
126 }
127 }
127 }
128
128
129 /// Duration since the Unix epoch
129 /// Duration since the Unix epoch
130 #[derive(BytesCast, Copy, Clone, Debug)]
130 #[derive(BytesCast, Copy, Clone, Debug)]
131 #[repr(C)]
131 #[repr(C)]
132 struct PackedTruncatedTimestamp {
132 struct PackedTruncatedTimestamp {
133 truncated_seconds: U32Be,
133 truncated_seconds: U32Be,
134 nanoseconds: U32Be,
134 nanoseconds: U32Be,
135 }
135 }
136
136
137 /// Counted in bytes from the start of the file
137 /// Counted in bytes from the start of the file
138 ///
138 ///
139 /// NOTE: not supporting `.hg/dirstate` files larger than 4 GiB.
139 /// NOTE: not supporting `.hg/dirstate` files larger than 4 GiB.
140 type Offset = U32Be;
140 type Offset = U32Be;
141
141
142 /// Counted in number of items
142 /// Counted in number of items
143 ///
143 ///
144 /// NOTE: we choose not to support counting more than 4 billion nodes anywhere.
144 /// NOTE: we choose not to support counting more than 4 billion nodes anywhere.
145 type Size = U32Be;
145 type Size = U32Be;
146
146
147 /// Counted in bytes
147 /// Counted in bytes
148 ///
148 ///
149 /// NOTE: we choose not to support file names/paths longer than 64 KiB.
149 /// NOTE: we choose not to support file names/paths longer than 64 KiB.
150 type PathSize = U16Be;
150 type PathSize = U16Be;
151
151
152 /// A contiguous sequence of `len` times `Node`, representing the child nodes
152 /// A contiguous sequence of `len` times `Node`, representing the child nodes
153 /// of either some other node or of the repository root.
153 /// of either some other node or of the repository root.
154 ///
154 ///
155 /// Always sorted by ascending `full_path`, to allow binary search.
155 /// Always sorted by ascending `full_path`, to allow binary search.
156 /// Since nodes with the same parent nodes also have the same parent path,
156 /// Since nodes with the same parent nodes also have the same parent path,
157 /// only the `base_name`s need to be compared during binary search.
157 /// only the `base_name`s need to be compared during binary search.
158 #[derive(BytesCast, Copy, Clone, Debug)]
158 #[derive(BytesCast, Copy, Clone, Debug)]
159 #[repr(C)]
159 #[repr(C)]
160 struct ChildNodes {
160 struct ChildNodes {
161 start: Offset,
161 start: Offset,
162 len: Size,
162 len: Size,
163 }
163 }
164
164
165 /// A `HgPath` of `len` bytes
165 /// A `HgPath` of `len` bytes
166 #[derive(BytesCast, Copy, Clone, Debug)]
166 #[derive(BytesCast, Copy, Clone, Debug)]
167 #[repr(C)]
167 #[repr(C)]
168 struct PathSlice {
168 struct PathSlice {
169 start: Offset,
169 start: Offset,
170 len: PathSize,
170 len: PathSize,
171 }
171 }
172
172
173 /// Either nothing if `start == 0`, or a `HgPath` of `len` bytes
173 /// Either nothing if `start == 0`, or a `HgPath` of `len` bytes
174 type OptPathSlice = PathSlice;
174 type OptPathSlice = PathSlice;
175
175
176 /// Unexpected file format found in `.hg/dirstate` with the "v2" format.
176 /// Unexpected file format found in `.hg/dirstate` with the "v2" format.
177 ///
177 ///
178 /// This should only happen if Mercurial is buggy or a repository is corrupted.
178 /// This should only happen if Mercurial is buggy or a repository is corrupted.
179 #[derive(Debug)]
179 #[derive(Debug)]
180 pub struct DirstateV2ParseError {
180 pub struct DirstateV2ParseError {
181 message: String,
181 message: String,
182 }
182 }
183
183
184 impl DirstateV2ParseError {
184 impl DirstateV2ParseError {
185 pub fn new<S: Into<String>>(message: S) -> Self {
185 pub fn new<S: Into<String>>(message: S) -> Self {
186 Self {
186 Self {
187 message: message.into(),
187 message: message.into(),
188 }
188 }
189 }
189 }
190 }
190 }
191
191
192 impl From<DirstateV2ParseError> for HgError {
192 impl From<DirstateV2ParseError> for HgError {
193 fn from(e: DirstateV2ParseError) -> Self {
193 fn from(e: DirstateV2ParseError) -> Self {
194 HgError::corrupted(format!("dirstate-v2 parse error: {}", e.message))
194 HgError::corrupted(format!("dirstate-v2 parse error: {}", e.message))
195 }
195 }
196 }
196 }
197
197
198 impl From<DirstateV2ParseError> for crate::DirstateError {
198 impl From<DirstateV2ParseError> for crate::DirstateError {
199 fn from(error: DirstateV2ParseError) -> Self {
199 fn from(error: DirstateV2ParseError) -> Self {
200 HgError::from(error).into()
200 HgError::from(error).into()
201 }
201 }
202 }
202 }
203
203
204 impl TreeMetadata {
204 impl TreeMetadata {
205 pub fn as_bytes(&self) -> &[u8] {
205 pub fn as_bytes(&self) -> &[u8] {
206 BytesCast::as_bytes(self)
206 BytesCast::as_bytes(self)
207 }
207 }
208 }
208 }
209
209
210 impl<'on_disk> Docket<'on_disk> {
210 impl<'on_disk> Docket<'on_disk> {
211 /// Generate the identifier for a new data file
211 /// Generate the identifier for a new data file
212 ///
212 ///
213 /// TODO: support the `HGTEST_UUIDFILE` environment variable.
213 /// TODO: support the `HGTEST_UUIDFILE` environment variable.
214 /// See `mercurial/revlogutils/docket.py`
214 /// See `mercurial/revlogutils/docket.py`
215 pub fn new_uid() -> String {
215 pub fn new_uid() -> String {
216 const ID_LENGTH: usize = 8;
216 const ID_LENGTH: usize = 8;
217 let mut id = String::with_capacity(ID_LENGTH);
217 let mut id = String::with_capacity(ID_LENGTH);
218 let mut rng = rand::thread_rng();
218 let mut rng = rand::thread_rng();
219 for _ in 0..ID_LENGTH {
219 for _ in 0..ID_LENGTH {
220 // One random hexadecimal digit.
220 // One random hexadecimal digit.
221 // `unwrap` never panics because `impl Write for String`
221 // `unwrap` never panics because `impl Write for String`
222 // never returns an error.
222 // never returns an error.
223 write!(&mut id, "{:x}", rng.gen_range(0..16)).unwrap();
223 write!(&mut id, "{:x}", rng.gen_range(0..16)).unwrap();
224 }
224 }
225 id
225 id
226 }
226 }
227
227
228 pub fn serialize(
228 pub fn serialize(
229 parents: DirstateParents,
229 parents: DirstateParents,
230 tree_metadata: TreeMetadata,
230 tree_metadata: TreeMetadata,
231 data_size: u64,
231 data_size: u64,
232 uuid: &[u8],
232 uuid: &[u8],
233 ) -> Result<Vec<u8>, std::num::TryFromIntError> {
233 ) -> Result<Vec<u8>, std::num::TryFromIntError> {
234 let header = DocketHeader {
234 let header = DocketHeader {
235 marker: *V2_FORMAT_MARKER,
235 marker: *V2_FORMAT_MARKER,
236 parent_1: parents.p1.pad_to_256_bits(),
236 parent_1: parents.p1.pad_to_256_bits(),
237 parent_2: parents.p2.pad_to_256_bits(),
237 parent_2: parents.p2.pad_to_256_bits(),
238 metadata: tree_metadata,
238 metadata: tree_metadata,
239 data_size: u32::try_from(data_size)?.into(),
239 data_size: u32::try_from(data_size)?.into(),
240 uuid_size: uuid.len().try_into()?,
240 uuid_size: uuid.len().try_into()?,
241 };
241 };
242 let header = header.as_bytes();
242 let header = header.as_bytes();
243 let mut docket = Vec::with_capacity(header.len() + uuid.len());
243 let mut docket = Vec::with_capacity(header.len() + uuid.len());
244 docket.extend_from_slice(header);
244 docket.extend_from_slice(header);
245 docket.extend_from_slice(uuid);
245 docket.extend_from_slice(uuid);
246 Ok(docket)
246 Ok(docket)
247 }
247 }
248
248
249 pub fn parents(&self) -> DirstateParents {
249 pub fn parents(&self) -> DirstateParents {
250 use crate::Node;
250 use crate::Node;
251 let p1 = Node::try_from(&self.header.parent_1[..USED_NODE_ID_BYTES])
251 let p1 = Node::try_from(&self.header.parent_1[..USED_NODE_ID_BYTES])
252 .unwrap()
252 .unwrap()
253 .clone();
253 .clone();
254 let p2 = Node::try_from(&self.header.parent_2[..USED_NODE_ID_BYTES])
254 let p2 = Node::try_from(&self.header.parent_2[..USED_NODE_ID_BYTES])
255 .unwrap()
255 .unwrap()
256 .clone();
256 .clone();
257 DirstateParents { p1, p2 }
257 DirstateParents { p1, p2 }
258 }
258 }
259
259
260 pub fn tree_metadata(&self) -> &[u8] {
260 pub fn tree_metadata(&self) -> &[u8] {
261 self.header.metadata.as_bytes()
261 self.header.metadata.as_bytes()
262 }
262 }
263
263
264 pub fn data_size(&self) -> usize {
264 pub fn data_size(&self) -> usize {
265 // This `unwrap` could only panic on a 16-bit CPU
265 // This `unwrap` could only panic on a 16-bit CPU
266 self.header.data_size.get().try_into().unwrap()
266 self.header.data_size.get().try_into().unwrap()
267 }
267 }
268
268
269 pub fn data_filename(&self) -> String {
269 pub fn data_filename(&self) -> String {
270 String::from_utf8(format_bytes!(b"dirstate.{}", self.uuid)).unwrap()
270 String::from_utf8(format_bytes!(b"dirstate.{}", self.uuid)).unwrap()
271 }
271 }
272 }
272 }
273
273
274 pub fn read_docket(
274 pub fn read_docket(
275 on_disk: &[u8],
275 on_disk: &[u8],
276 ) -> Result<Docket<'_>, DirstateV2ParseError> {
276 ) -> Result<Docket<'_>, DirstateV2ParseError> {
277 let (header, uuid) = DocketHeader::from_bytes(on_disk).map_err(|e| {
277 let (header, uuid) = DocketHeader::from_bytes(on_disk).map_err(|e| {
278 DirstateV2ParseError::new(format!("when reading docket, {}", e))
278 DirstateV2ParseError::new(format!("when reading docket, {}", e))
279 })?;
279 })?;
280 let uuid_size = header.uuid_size as usize;
280 let uuid_size = header.uuid_size as usize;
281 if header.marker == *V2_FORMAT_MARKER && uuid.len() == uuid_size {
281 if header.marker == *V2_FORMAT_MARKER && uuid.len() == uuid_size {
282 Ok(Docket { header, uuid })
282 Ok(Docket { header, uuid })
283 } else {
283 } else {
284 Err(DirstateV2ParseError::new(
284 Err(DirstateV2ParseError::new(
285 "invalid format marker or uuid size",
285 "invalid format marker or uuid size",
286 ))
286 ))
287 }
287 }
288 }
288 }
289
289
290 pub(super) fn read<'on_disk>(
290 pub(super) fn read<'on_disk>(
291 on_disk: &'on_disk [u8],
291 on_disk: &'on_disk [u8],
292 metadata: &[u8],
292 metadata: &[u8],
293 ) -> Result<DirstateMap<'on_disk>, DirstateV2ParseError> {
293 ) -> Result<DirstateMap<'on_disk>, DirstateV2ParseError> {
294 if on_disk.is_empty() {
294 if on_disk.is_empty() {
295 let mut map = DirstateMap::empty(on_disk);
295 let mut map = DirstateMap::empty(on_disk);
296 map.dirstate_version = DirstateVersion::V2;
296 map.dirstate_version = DirstateVersion::V2;
297 return Ok(map);
297 return Ok(map);
298 }
298 }
299 let (meta, _) = TreeMetadata::from_bytes(metadata).map_err(|e| {
299 let (meta, _) = TreeMetadata::from_bytes(metadata).map_err(|e| {
300 DirstateV2ParseError::new(format!("when parsing tree metadata, {}", e))
300 DirstateV2ParseError::new(format!("when parsing tree metadata, {}", e))
301 })?;
301 })?;
302 let dirstate_map = DirstateMap {
302 let dirstate_map = DirstateMap {
303 on_disk,
303 on_disk,
304 root: dirstate_map::ChildNodes::OnDisk(
304 root: dirstate_map::ChildNodes::OnDisk(
305 read_nodes(on_disk, meta.root_nodes).map_err(|mut e| {
305 read_nodes(on_disk, meta.root_nodes).map_err(|mut e| {
306 e.message = format!("{}, when reading root notes", e.message);
306 e.message = format!("{}, when reading root notes", e.message);
307 e
307 e
308 })?,
308 })?,
309 ),
309 ),
310 nodes_with_entry_count: meta.nodes_with_entry_count.get(),
310 nodes_with_entry_count: meta.nodes_with_entry_count.get(),
311 nodes_with_copy_source_count: meta.nodes_with_copy_source_count.get(),
311 nodes_with_copy_source_count: meta.nodes_with_copy_source_count.get(),
312 ignore_patterns_hash: meta.ignore_patterns_hash,
312 ignore_patterns_hash: meta.ignore_patterns_hash,
313 unreachable_bytes: meta.unreachable_bytes.get(),
313 unreachable_bytes: meta.unreachable_bytes.get(),
314 old_data_size: on_disk.len(),
314 old_data_size: on_disk.len(),
315 dirstate_version: DirstateVersion::V2,
315 dirstate_version: DirstateVersion::V2,
316 write_mode: DirstateMapWriteMode::Auto,
316 };
317 };
317 Ok(dirstate_map)
318 Ok(dirstate_map)
318 }
319 }
319
320
320 impl Node {
321 impl Node {
321 pub(super) fn full_path<'on_disk>(
322 pub(super) fn full_path<'on_disk>(
322 &self,
323 &self,
323 on_disk: &'on_disk [u8],
324 on_disk: &'on_disk [u8],
324 ) -> Result<&'on_disk HgPath, DirstateV2ParseError> {
325 ) -> Result<&'on_disk HgPath, DirstateV2ParseError> {
325 read_hg_path(on_disk, self.full_path)
326 read_hg_path(on_disk, self.full_path)
326 }
327 }
327
328
328 pub(super) fn base_name_start<'on_disk>(
329 pub(super) fn base_name_start<'on_disk>(
329 &self,
330 &self,
330 ) -> Result<usize, DirstateV2ParseError> {
331 ) -> Result<usize, DirstateV2ParseError> {
331 let start = self.base_name_start.get();
332 let start = self.base_name_start.get();
332 if start < self.full_path.len.get() {
333 if start < self.full_path.len.get() {
333 let start = usize::try_from(start)
334 let start = usize::try_from(start)
334 // u32 -> usize, could only panic on a 16-bit CPU
335 // u32 -> usize, could only panic on a 16-bit CPU
335 .expect("dirstate-v2 base_name_start out of bounds");
336 .expect("dirstate-v2 base_name_start out of bounds");
336 Ok(start)
337 Ok(start)
337 } else {
338 } else {
338 Err(DirstateV2ParseError::new("not enough bytes for base name"))
339 Err(DirstateV2ParseError::new("not enough bytes for base name"))
339 }
340 }
340 }
341 }
341
342
342 pub(super) fn base_name<'on_disk>(
343 pub(super) fn base_name<'on_disk>(
343 &self,
344 &self,
344 on_disk: &'on_disk [u8],
345 on_disk: &'on_disk [u8],
345 ) -> Result<&'on_disk HgPath, DirstateV2ParseError> {
346 ) -> Result<&'on_disk HgPath, DirstateV2ParseError> {
346 let full_path = self.full_path(on_disk)?;
347 let full_path = self.full_path(on_disk)?;
347 let base_name_start = self.base_name_start()?;
348 let base_name_start = self.base_name_start()?;
348 Ok(HgPath::new(&full_path.as_bytes()[base_name_start..]))
349 Ok(HgPath::new(&full_path.as_bytes()[base_name_start..]))
349 }
350 }
350
351
351 pub(super) fn path<'on_disk>(
352 pub(super) fn path<'on_disk>(
352 &self,
353 &self,
353 on_disk: &'on_disk [u8],
354 on_disk: &'on_disk [u8],
354 ) -> Result<dirstate_map::NodeKey<'on_disk>, DirstateV2ParseError> {
355 ) -> Result<dirstate_map::NodeKey<'on_disk>, DirstateV2ParseError> {
355 Ok(WithBasename::from_raw_parts(
356 Ok(WithBasename::from_raw_parts(
356 Cow::Borrowed(self.full_path(on_disk)?),
357 Cow::Borrowed(self.full_path(on_disk)?),
357 self.base_name_start()?,
358 self.base_name_start()?,
358 ))
359 ))
359 }
360 }
360
361
361 pub(super) fn has_copy_source<'on_disk>(&self) -> bool {
362 pub(super) fn has_copy_source<'on_disk>(&self) -> bool {
362 self.copy_source.start.get() != 0
363 self.copy_source.start.get() != 0
363 }
364 }
364
365
365 pub(super) fn copy_source<'on_disk>(
366 pub(super) fn copy_source<'on_disk>(
366 &self,
367 &self,
367 on_disk: &'on_disk [u8],
368 on_disk: &'on_disk [u8],
368 ) -> Result<Option<&'on_disk HgPath>, DirstateV2ParseError> {
369 ) -> Result<Option<&'on_disk HgPath>, DirstateV2ParseError> {
369 Ok(if self.has_copy_source() {
370 Ok(if self.has_copy_source() {
370 Some(read_hg_path(on_disk, self.copy_source)?)
371 Some(read_hg_path(on_disk, self.copy_source)?)
371 } else {
372 } else {
372 None
373 None
373 })
374 })
374 }
375 }
375
376
376 fn flags(&self) -> Flags {
377 fn flags(&self) -> Flags {
377 Flags::from_bits_truncate(self.flags.get())
378 Flags::from_bits_truncate(self.flags.get())
378 }
379 }
379
380
380 fn has_entry(&self) -> bool {
381 fn has_entry(&self) -> bool {
381 self.flags().intersects(
382 self.flags().intersects(
382 Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO,
383 Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO,
383 )
384 )
384 }
385 }
385
386
386 pub(super) fn node_data(
387 pub(super) fn node_data(
387 &self,
388 &self,
388 ) -> Result<dirstate_map::NodeData, DirstateV2ParseError> {
389 ) -> Result<dirstate_map::NodeData, DirstateV2ParseError> {
389 if self.has_entry() {
390 if self.has_entry() {
390 Ok(dirstate_map::NodeData::Entry(self.assume_entry()?))
391 Ok(dirstate_map::NodeData::Entry(self.assume_entry()?))
391 } else if let Some(mtime) = self.cached_directory_mtime()? {
392 } else if let Some(mtime) = self.cached_directory_mtime()? {
392 Ok(dirstate_map::NodeData::CachedDirectory { mtime })
393 Ok(dirstate_map::NodeData::CachedDirectory { mtime })
393 } else {
394 } else {
394 Ok(dirstate_map::NodeData::None)
395 Ok(dirstate_map::NodeData::None)
395 }
396 }
396 }
397 }
397
398
398 pub(super) fn cached_directory_mtime(
399 pub(super) fn cached_directory_mtime(
399 &self,
400 &self,
400 ) -> Result<Option<TruncatedTimestamp>, DirstateV2ParseError> {
401 ) -> Result<Option<TruncatedTimestamp>, DirstateV2ParseError> {
401 // For now we do not have code to handle the absence of
402 // For now we do not have code to handle the absence of
402 // ALL_UNKNOWN_RECORDED, so we ignore the mtime if the flag is
403 // ALL_UNKNOWN_RECORDED, so we ignore the mtime if the flag is
403 // unset.
404 // unset.
404 if self.flags().contains(Flags::DIRECTORY)
405 if self.flags().contains(Flags::DIRECTORY)
405 && self.flags().contains(Flags::HAS_MTIME)
406 && self.flags().contains(Flags::HAS_MTIME)
406 && self.flags().contains(Flags::ALL_UNKNOWN_RECORDED)
407 && self.flags().contains(Flags::ALL_UNKNOWN_RECORDED)
407 {
408 {
408 Ok(Some(self.mtime()?))
409 Ok(Some(self.mtime()?))
409 } else {
410 } else {
410 Ok(None)
411 Ok(None)
411 }
412 }
412 }
413 }
413
414
414 fn synthesize_unix_mode(&self) -> u32 {
415 fn synthesize_unix_mode(&self) -> u32 {
415 let file_type = if self.flags().contains(Flags::MODE_IS_SYMLINK) {
416 let file_type = if self.flags().contains(Flags::MODE_IS_SYMLINK) {
416 libc::S_IFLNK
417 libc::S_IFLNK
417 } else {
418 } else {
418 libc::S_IFREG
419 libc::S_IFREG
419 };
420 };
420 let permisions = if self.flags().contains(Flags::MODE_EXEC_PERM) {
421 let permisions = if self.flags().contains(Flags::MODE_EXEC_PERM) {
421 0o755
422 0o755
422 } else {
423 } else {
423 0o644
424 0o644
424 };
425 };
425 (file_type | permisions).into()
426 (file_type | permisions).into()
426 }
427 }
427
428
428 fn mtime(&self) -> Result<TruncatedTimestamp, DirstateV2ParseError> {
429 fn mtime(&self) -> Result<TruncatedTimestamp, DirstateV2ParseError> {
429 let mut m: TruncatedTimestamp = self.mtime.try_into()?;
430 let mut m: TruncatedTimestamp = self.mtime.try_into()?;
430 if self.flags().contains(Flags::MTIME_SECOND_AMBIGUOUS) {
431 if self.flags().contains(Flags::MTIME_SECOND_AMBIGUOUS) {
431 m.second_ambiguous = true;
432 m.second_ambiguous = true;
432 }
433 }
433 Ok(m)
434 Ok(m)
434 }
435 }
435
436
436 fn assume_entry(&self) -> Result<DirstateEntry, DirstateV2ParseError> {
437 fn assume_entry(&self) -> Result<DirstateEntry, DirstateV2ParseError> {
437 // TODO: convert through raw bits instead?
438 // TODO: convert through raw bits instead?
438 let wc_tracked = self.flags().contains(Flags::WDIR_TRACKED);
439 let wc_tracked = self.flags().contains(Flags::WDIR_TRACKED);
439 let p1_tracked = self.flags().contains(Flags::P1_TRACKED);
440 let p1_tracked = self.flags().contains(Flags::P1_TRACKED);
440 let p2_info = self.flags().contains(Flags::P2_INFO);
441 let p2_info = self.flags().contains(Flags::P2_INFO);
441 let mode_size = if self.flags().contains(Flags::HAS_MODE_AND_SIZE)
442 let mode_size = if self.flags().contains(Flags::HAS_MODE_AND_SIZE)
442 && !self.flags().contains(Flags::EXPECTED_STATE_IS_MODIFIED)
443 && !self.flags().contains(Flags::EXPECTED_STATE_IS_MODIFIED)
443 {
444 {
444 Some((self.synthesize_unix_mode(), self.size.into()))
445 Some((self.synthesize_unix_mode(), self.size.into()))
445 } else {
446 } else {
446 None
447 None
447 };
448 };
448 let mtime = if self.flags().contains(Flags::HAS_MTIME)
449 let mtime = if self.flags().contains(Flags::HAS_MTIME)
449 && !self.flags().contains(Flags::DIRECTORY)
450 && !self.flags().contains(Flags::DIRECTORY)
450 && !self.flags().contains(Flags::EXPECTED_STATE_IS_MODIFIED)
451 && !self.flags().contains(Flags::EXPECTED_STATE_IS_MODIFIED)
451 {
452 {
452 Some(self.mtime()?)
453 Some(self.mtime()?)
453 } else {
454 } else {
454 None
455 None
455 };
456 };
456 let fallback_exec = if self.flags().contains(Flags::HAS_FALLBACK_EXEC)
457 let fallback_exec = if self.flags().contains(Flags::HAS_FALLBACK_EXEC)
457 {
458 {
458 Some(self.flags().contains(Flags::FALLBACK_EXEC))
459 Some(self.flags().contains(Flags::FALLBACK_EXEC))
459 } else {
460 } else {
460 None
461 None
461 };
462 };
462 let fallback_symlink =
463 let fallback_symlink =
463 if self.flags().contains(Flags::HAS_FALLBACK_SYMLINK) {
464 if self.flags().contains(Flags::HAS_FALLBACK_SYMLINK) {
464 Some(self.flags().contains(Flags::FALLBACK_SYMLINK))
465 Some(self.flags().contains(Flags::FALLBACK_SYMLINK))
465 } else {
466 } else {
466 None
467 None
467 };
468 };
468 Ok(DirstateEntry::from_v2_data(DirstateV2Data {
469 Ok(DirstateEntry::from_v2_data(DirstateV2Data {
469 wc_tracked,
470 wc_tracked,
470 p1_tracked,
471 p1_tracked,
471 p2_info,
472 p2_info,
472 mode_size,
473 mode_size,
473 mtime,
474 mtime,
474 fallback_exec,
475 fallback_exec,
475 fallback_symlink,
476 fallback_symlink,
476 }))
477 }))
477 }
478 }
478
479
479 pub(super) fn entry(
480 pub(super) fn entry(
480 &self,
481 &self,
481 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
482 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
482 if self.has_entry() {
483 if self.has_entry() {
483 Ok(Some(self.assume_entry()?))
484 Ok(Some(self.assume_entry()?))
484 } else {
485 } else {
485 Ok(None)
486 Ok(None)
486 }
487 }
487 }
488 }
488
489
489 pub(super) fn children<'on_disk>(
490 pub(super) fn children<'on_disk>(
490 &self,
491 &self,
491 on_disk: &'on_disk [u8],
492 on_disk: &'on_disk [u8],
492 ) -> Result<&'on_disk [Node], DirstateV2ParseError> {
493 ) -> Result<&'on_disk [Node], DirstateV2ParseError> {
493 read_nodes(on_disk, self.children)
494 read_nodes(on_disk, self.children)
494 }
495 }
495
496
496 pub(super) fn to_in_memory_node<'on_disk>(
497 pub(super) fn to_in_memory_node<'on_disk>(
497 &self,
498 &self,
498 on_disk: &'on_disk [u8],
499 on_disk: &'on_disk [u8],
499 ) -> Result<dirstate_map::Node<'on_disk>, DirstateV2ParseError> {
500 ) -> Result<dirstate_map::Node<'on_disk>, DirstateV2ParseError> {
500 Ok(dirstate_map::Node {
501 Ok(dirstate_map::Node {
501 children: dirstate_map::ChildNodes::OnDisk(
502 children: dirstate_map::ChildNodes::OnDisk(
502 self.children(on_disk)?,
503 self.children(on_disk)?,
503 ),
504 ),
504 copy_source: self.copy_source(on_disk)?.map(Cow::Borrowed),
505 copy_source: self.copy_source(on_disk)?.map(Cow::Borrowed),
505 data: self.node_data()?,
506 data: self.node_data()?,
506 descendants_with_entry_count: self
507 descendants_with_entry_count: self
507 .descendants_with_entry_count
508 .descendants_with_entry_count
508 .get(),
509 .get(),
509 tracked_descendants_count: self.tracked_descendants_count.get(),
510 tracked_descendants_count: self.tracked_descendants_count.get(),
510 })
511 })
511 }
512 }
512
513
513 fn from_dirstate_entry(
514 fn from_dirstate_entry(
514 entry: &DirstateEntry,
515 entry: &DirstateEntry,
515 ) -> (Flags, U32Be, PackedTruncatedTimestamp) {
516 ) -> (Flags, U32Be, PackedTruncatedTimestamp) {
516 let DirstateV2Data {
517 let DirstateV2Data {
517 wc_tracked,
518 wc_tracked,
518 p1_tracked,
519 p1_tracked,
519 p2_info,
520 p2_info,
520 mode_size: mode_size_opt,
521 mode_size: mode_size_opt,
521 mtime: mtime_opt,
522 mtime: mtime_opt,
522 fallback_exec,
523 fallback_exec,
523 fallback_symlink,
524 fallback_symlink,
524 } = entry.v2_data();
525 } = entry.v2_data();
525 // TODO: convert through raw flag bits instead?
526 // TODO: convert through raw flag bits instead?
526 let mut flags = Flags::empty();
527 let mut flags = Flags::empty();
527 flags.set(Flags::WDIR_TRACKED, wc_tracked);
528 flags.set(Flags::WDIR_TRACKED, wc_tracked);
528 flags.set(Flags::P1_TRACKED, p1_tracked);
529 flags.set(Flags::P1_TRACKED, p1_tracked);
529 flags.set(Flags::P2_INFO, p2_info);
530 flags.set(Flags::P2_INFO, p2_info);
530 let size = if let Some((m, s)) = mode_size_opt {
531 let size = if let Some((m, s)) = mode_size_opt {
531 let exec_perm = m & (libc::S_IXUSR as u32) != 0;
532 let exec_perm = m & (libc::S_IXUSR as u32) != 0;
532 let is_symlink = m & (libc::S_IFMT as u32) == libc::S_IFLNK as u32;
533 let is_symlink = m & (libc::S_IFMT as u32) == libc::S_IFLNK as u32;
533 flags.set(Flags::MODE_EXEC_PERM, exec_perm);
534 flags.set(Flags::MODE_EXEC_PERM, exec_perm);
534 flags.set(Flags::MODE_IS_SYMLINK, is_symlink);
535 flags.set(Flags::MODE_IS_SYMLINK, is_symlink);
535 flags.insert(Flags::HAS_MODE_AND_SIZE);
536 flags.insert(Flags::HAS_MODE_AND_SIZE);
536 s.into()
537 s.into()
537 } else {
538 } else {
538 0.into()
539 0.into()
539 };
540 };
540 let mtime = if let Some(m) = mtime_opt {
541 let mtime = if let Some(m) = mtime_opt {
541 flags.insert(Flags::HAS_MTIME);
542 flags.insert(Flags::HAS_MTIME);
542 if m.second_ambiguous {
543 if m.second_ambiguous {
543 flags.insert(Flags::MTIME_SECOND_AMBIGUOUS);
544 flags.insert(Flags::MTIME_SECOND_AMBIGUOUS);
544 };
545 };
545 m.into()
546 m.into()
546 } else {
547 } else {
547 PackedTruncatedTimestamp::null()
548 PackedTruncatedTimestamp::null()
548 };
549 };
549 if let Some(f_exec) = fallback_exec {
550 if let Some(f_exec) = fallback_exec {
550 flags.insert(Flags::HAS_FALLBACK_EXEC);
551 flags.insert(Flags::HAS_FALLBACK_EXEC);
551 if f_exec {
552 if f_exec {
552 flags.insert(Flags::FALLBACK_EXEC);
553 flags.insert(Flags::FALLBACK_EXEC);
553 }
554 }
554 }
555 }
555 if let Some(f_symlink) = fallback_symlink {
556 if let Some(f_symlink) = fallback_symlink {
556 flags.insert(Flags::HAS_FALLBACK_SYMLINK);
557 flags.insert(Flags::HAS_FALLBACK_SYMLINK);
557 if f_symlink {
558 if f_symlink {
558 flags.insert(Flags::FALLBACK_SYMLINK);
559 flags.insert(Flags::FALLBACK_SYMLINK);
559 }
560 }
560 }
561 }
561 (flags, size, mtime)
562 (flags, size, mtime)
562 }
563 }
563 }
564 }
564
565
565 fn read_hg_path(
566 fn read_hg_path(
566 on_disk: &[u8],
567 on_disk: &[u8],
567 slice: PathSlice,
568 slice: PathSlice,
568 ) -> Result<&HgPath, DirstateV2ParseError> {
569 ) -> Result<&HgPath, DirstateV2ParseError> {
569 read_slice(on_disk, slice.start, slice.len.get()).map(HgPath::new)
570 read_slice(on_disk, slice.start, slice.len.get()).map(HgPath::new)
570 }
571 }
571
572
572 fn read_nodes(
573 fn read_nodes(
573 on_disk: &[u8],
574 on_disk: &[u8],
574 slice: ChildNodes,
575 slice: ChildNodes,
575 ) -> Result<&[Node], DirstateV2ParseError> {
576 ) -> Result<&[Node], DirstateV2ParseError> {
576 read_slice(on_disk, slice.start, slice.len.get())
577 read_slice(on_disk, slice.start, slice.len.get())
577 }
578 }
578
579
579 fn read_slice<T, Len>(
580 fn read_slice<T, Len>(
580 on_disk: &[u8],
581 on_disk: &[u8],
581 start: Offset,
582 start: Offset,
582 len: Len,
583 len: Len,
583 ) -> Result<&[T], DirstateV2ParseError>
584 ) -> Result<&[T], DirstateV2ParseError>
584 where
585 where
585 T: BytesCast,
586 T: BytesCast,
586 Len: TryInto<usize>,
587 Len: TryInto<usize>,
587 {
588 {
588 // Either `usize::MAX` would result in "out of bounds" error since a single
589 // Either `usize::MAX` would result in "out of bounds" error since a single
589 // `&[u8]` cannot occupy the entire addess space.
590 // `&[u8]` cannot occupy the entire addess space.
590 let start = start.get().try_into().unwrap_or(std::usize::MAX);
591 let start = start.get().try_into().unwrap_or(std::usize::MAX);
591 let len = len.try_into().unwrap_or(std::usize::MAX);
592 let len = len.try_into().unwrap_or(std::usize::MAX);
592 let bytes = match on_disk.get(start..) {
593 let bytes = match on_disk.get(start..) {
593 Some(bytes) => bytes,
594 Some(bytes) => bytes,
594 None => {
595 None => {
595 return Err(DirstateV2ParseError::new(
596 return Err(DirstateV2ParseError::new(
596 "not enough bytes from disk",
597 "not enough bytes from disk",
597 ))
598 ))
598 }
599 }
599 };
600 };
600 T::slice_from_bytes(bytes, len)
601 T::slice_from_bytes(bytes, len)
601 .map_err(|e| {
602 .map_err(|e| {
602 DirstateV2ParseError::new(format!("when reading a slice, {}", e))
603 DirstateV2ParseError::new(format!("when reading a slice, {}", e))
603 })
604 })
604 .map(|(slice, _rest)| slice)
605 .map(|(slice, _rest)| slice)
605 }
606 }
606
607
607 pub(crate) fn for_each_tracked_path<'on_disk>(
608 pub(crate) fn for_each_tracked_path<'on_disk>(
608 on_disk: &'on_disk [u8],
609 on_disk: &'on_disk [u8],
609 metadata: &[u8],
610 metadata: &[u8],
610 mut f: impl FnMut(&'on_disk HgPath),
611 mut f: impl FnMut(&'on_disk HgPath),
611 ) -> Result<(), DirstateV2ParseError> {
612 ) -> Result<(), DirstateV2ParseError> {
612 let (meta, _) = TreeMetadata::from_bytes(metadata).map_err(|e| {
613 let (meta, _) = TreeMetadata::from_bytes(metadata).map_err(|e| {
613 DirstateV2ParseError::new(format!("when parsing tree metadata, {}", e))
614 DirstateV2ParseError::new(format!("when parsing tree metadata, {}", e))
614 })?;
615 })?;
615 fn recur<'on_disk>(
616 fn recur<'on_disk>(
616 on_disk: &'on_disk [u8],
617 on_disk: &'on_disk [u8],
617 nodes: ChildNodes,
618 nodes: ChildNodes,
618 f: &mut impl FnMut(&'on_disk HgPath),
619 f: &mut impl FnMut(&'on_disk HgPath),
619 ) -> Result<(), DirstateV2ParseError> {
620 ) -> Result<(), DirstateV2ParseError> {
620 for node in read_nodes(on_disk, nodes)? {
621 for node in read_nodes(on_disk, nodes)? {
621 if let Some(entry) = node.entry()? {
622 if let Some(entry) = node.entry()? {
622 if entry.tracked() {
623 if entry.tracked() {
623 f(node.full_path(on_disk)?)
624 f(node.full_path(on_disk)?)
624 }
625 }
625 }
626 }
626 recur(on_disk, node.children, f)?
627 recur(on_disk, node.children, f)?
627 }
628 }
628 Ok(())
629 Ok(())
629 }
630 }
630 recur(on_disk, meta.root_nodes, &mut f)
631 recur(on_disk, meta.root_nodes, &mut f)
631 }
632 }
632
633
633 /// Returns new data and metadata, together with whether that data should be
634 /// Returns new data and metadata, together with whether that data should be
634 /// appended to the existing data file whose content is at
635 /// appended to the existing data file whose content is at
635 /// `dirstate_map.on_disk` (true), instead of written to a new data file
636 /// `dirstate_map.on_disk` (true), instead of written to a new data file
636 /// (false), and the previous size of data on disk.
637 /// (false), and the previous size of data on disk.
637 pub(super) fn write(
638 pub(super) fn write(
638 dirstate_map: &DirstateMap,
639 dirstate_map: &DirstateMap,
639 write_mode: DirstateMapWriteMode,
640 write_mode: DirstateMapWriteMode,
640 ) -> Result<(Vec<u8>, TreeMetadata, bool, usize), DirstateError> {
641 ) -> Result<(Vec<u8>, TreeMetadata, bool, usize), DirstateError> {
641 let append = match write_mode {
642 let append = match write_mode {
642 DirstateMapWriteMode::Auto => dirstate_map.write_should_append(),
643 DirstateMapWriteMode::Auto => dirstate_map.write_should_append(),
643 DirstateMapWriteMode::ForceNewDataFile => false,
644 DirstateMapWriteMode::ForceNewDataFile => false,
645 DirstateMapWriteMode::ForceAppend => true,
644 };
646 };
645 if append {
647 if append {
646 log::trace!("appending to the dirstate data file");
648 log::trace!("appending to the dirstate data file");
647 } else {
649 } else {
648 log::trace!("creating new dirstate data file");
650 log::trace!("creating new dirstate data file");
649 }
651 }
650
652
651 // This ignores the space for paths, and for nodes without an entry.
653 // This ignores the space for paths, and for nodes without an entry.
652 // TODO: better estimate? Skip the `Vec` and write to a file directly?
654 // TODO: better estimate? Skip the `Vec` and write to a file directly?
653 let size_guess = std::mem::size_of::<Node>()
655 let size_guess = std::mem::size_of::<Node>()
654 * dirstate_map.nodes_with_entry_count as usize;
656 * dirstate_map.nodes_with_entry_count as usize;
655
657
656 let mut writer = Writer {
658 let mut writer = Writer {
657 dirstate_map,
659 dirstate_map,
658 append,
660 append,
659 out: Vec::with_capacity(size_guess),
661 out: Vec::with_capacity(size_guess),
660 };
662 };
661
663
662 let root_nodes = writer.write_nodes(dirstate_map.root.as_ref())?;
664 let root_nodes = writer.write_nodes(dirstate_map.root.as_ref())?;
663
665
664 let unreachable_bytes = if append {
666 let unreachable_bytes = if append {
665 dirstate_map.unreachable_bytes
667 dirstate_map.unreachable_bytes
666 } else {
668 } else {
667 0
669 0
668 };
670 };
669 let meta = TreeMetadata {
671 let meta = TreeMetadata {
670 root_nodes,
672 root_nodes,
671 nodes_with_entry_count: dirstate_map.nodes_with_entry_count.into(),
673 nodes_with_entry_count: dirstate_map.nodes_with_entry_count.into(),
672 nodes_with_copy_source_count: dirstate_map
674 nodes_with_copy_source_count: dirstate_map
673 .nodes_with_copy_source_count
675 .nodes_with_copy_source_count
674 .into(),
676 .into(),
675 unreachable_bytes: unreachable_bytes.into(),
677 unreachable_bytes: unreachable_bytes.into(),
676 unused: [0; 4],
678 unused: [0; 4],
677 ignore_patterns_hash: dirstate_map.ignore_patterns_hash,
679 ignore_patterns_hash: dirstate_map.ignore_patterns_hash,
678 };
680 };
679 Ok((writer.out, meta, append, dirstate_map.old_data_size))
681 Ok((writer.out, meta, append, dirstate_map.old_data_size))
680 }
682 }
681
683
682 struct Writer<'dmap, 'on_disk> {
684 struct Writer<'dmap, 'on_disk> {
683 dirstate_map: &'dmap DirstateMap<'on_disk>,
685 dirstate_map: &'dmap DirstateMap<'on_disk>,
684 append: bool,
686 append: bool,
685 out: Vec<u8>,
687 out: Vec<u8>,
686 }
688 }
687
689
688 impl Writer<'_, '_> {
690 impl Writer<'_, '_> {
689 fn write_nodes(
691 fn write_nodes(
690 &mut self,
692 &mut self,
691 nodes: dirstate_map::ChildNodesRef,
693 nodes: dirstate_map::ChildNodesRef,
692 ) -> Result<ChildNodes, DirstateError> {
694 ) -> Result<ChildNodes, DirstateError> {
693 // Reuse already-written nodes if possible
695 // Reuse already-written nodes if possible
694 if self.append {
696 if self.append {
695 if let dirstate_map::ChildNodesRef::OnDisk(nodes_slice) = nodes {
697 if let dirstate_map::ChildNodesRef::OnDisk(nodes_slice) = nodes {
696 let start = self.on_disk_offset_of(nodes_slice).expect(
698 let start = self.on_disk_offset_of(nodes_slice).expect(
697 "dirstate-v2 OnDisk nodes not found within on_disk",
699 "dirstate-v2 OnDisk nodes not found within on_disk",
698 );
700 );
699 let len = child_nodes_len_from_usize(nodes_slice.len());
701 let len = child_nodes_len_from_usize(nodes_slice.len());
700 return Ok(ChildNodes { start, len });
702 return Ok(ChildNodes { start, len });
701 }
703 }
702 }
704 }
703
705
704 // `dirstate_map::ChildNodes::InMemory` contains a `HashMap` which has
706 // `dirstate_map::ChildNodes::InMemory` contains a `HashMap` which has
705 // undefined iteration order. Sort to enable binary search in the
707 // undefined iteration order. Sort to enable binary search in the
706 // written file.
708 // written file.
707 let nodes = nodes.sorted();
709 let nodes = nodes.sorted();
708 let nodes_len = nodes.len();
710 let nodes_len = nodes.len();
709
711
710 // First accumulate serialized nodes in a `Vec`
712 // First accumulate serialized nodes in a `Vec`
711 let mut on_disk_nodes = Vec::with_capacity(nodes_len);
713 let mut on_disk_nodes = Vec::with_capacity(nodes_len);
712 for node in nodes {
714 for node in nodes {
713 let children =
715 let children =
714 self.write_nodes(node.children(self.dirstate_map.on_disk)?)?;
716 self.write_nodes(node.children(self.dirstate_map.on_disk)?)?;
715 let full_path = node.full_path(self.dirstate_map.on_disk)?;
717 let full_path = node.full_path(self.dirstate_map.on_disk)?;
716 let full_path = self.write_path(full_path.as_bytes());
718 let full_path = self.write_path(full_path.as_bytes());
717 let copy_source = if let Some(source) =
719 let copy_source = if let Some(source) =
718 node.copy_source(self.dirstate_map.on_disk)?
720 node.copy_source(self.dirstate_map.on_disk)?
719 {
721 {
720 self.write_path(source.as_bytes())
722 self.write_path(source.as_bytes())
721 } else {
723 } else {
722 PathSlice {
724 PathSlice {
723 start: 0.into(),
725 start: 0.into(),
724 len: 0.into(),
726 len: 0.into(),
725 }
727 }
726 };
728 };
727 on_disk_nodes.push(match node {
729 on_disk_nodes.push(match node {
728 NodeRef::InMemory(path, node) => {
730 NodeRef::InMemory(path, node) => {
729 let (flags, size, mtime) = match &node.data {
731 let (flags, size, mtime) = match &node.data {
730 dirstate_map::NodeData::Entry(entry) => {
732 dirstate_map::NodeData::Entry(entry) => {
731 Node::from_dirstate_entry(entry)
733 Node::from_dirstate_entry(entry)
732 }
734 }
733 dirstate_map::NodeData::CachedDirectory { mtime } => {
735 dirstate_map::NodeData::CachedDirectory { mtime } => {
734 // we currently never set a mtime if unknown file
736 // we currently never set a mtime if unknown file
735 // are present.
737 // are present.
736 // So if we have a mtime for a directory, we know
738 // So if we have a mtime for a directory, we know
737 // they are no unknown
739 // they are no unknown
738 // files and we
740 // files and we
739 // blindly set ALL_UNKNOWN_RECORDED.
741 // blindly set ALL_UNKNOWN_RECORDED.
740 //
742 //
741 // We never set ALL_IGNORED_RECORDED since we
743 // We never set ALL_IGNORED_RECORDED since we
742 // don't track that case
744 // don't track that case
743 // currently.
745 // currently.
744 let mut flags = Flags::DIRECTORY
746 let mut flags = Flags::DIRECTORY
745 | Flags::HAS_MTIME
747 | Flags::HAS_MTIME
746 | Flags::ALL_UNKNOWN_RECORDED;
748 | Flags::ALL_UNKNOWN_RECORDED;
747 if mtime.second_ambiguous {
749 if mtime.second_ambiguous {
748 flags.insert(Flags::MTIME_SECOND_AMBIGUOUS)
750 flags.insert(Flags::MTIME_SECOND_AMBIGUOUS)
749 }
751 }
750 (flags, 0.into(), (*mtime).into())
752 (flags, 0.into(), (*mtime).into())
751 }
753 }
752 dirstate_map::NodeData::None => (
754 dirstate_map::NodeData::None => (
753 Flags::DIRECTORY,
755 Flags::DIRECTORY,
754 0.into(),
756 0.into(),
755 PackedTruncatedTimestamp::null(),
757 PackedTruncatedTimestamp::null(),
756 ),
758 ),
757 };
759 };
758 Node {
760 Node {
759 children,
761 children,
760 copy_source,
762 copy_source,
761 full_path,
763 full_path,
762 base_name_start: u16::try_from(path.base_name_start())
764 base_name_start: u16::try_from(path.base_name_start())
763 // Could only panic for paths over 64 KiB
765 // Could only panic for paths over 64 KiB
764 .expect("dirstate-v2 path length overflow")
766 .expect("dirstate-v2 path length overflow")
765 .into(),
767 .into(),
766 descendants_with_entry_count: node
768 descendants_with_entry_count: node
767 .descendants_with_entry_count
769 .descendants_with_entry_count
768 .into(),
770 .into(),
769 tracked_descendants_count: node
771 tracked_descendants_count: node
770 .tracked_descendants_count
772 .tracked_descendants_count
771 .into(),
773 .into(),
772 flags: flags.bits().into(),
774 flags: flags.bits().into(),
773 size,
775 size,
774 mtime,
776 mtime,
775 }
777 }
776 }
778 }
777 NodeRef::OnDisk(node) => Node {
779 NodeRef::OnDisk(node) => Node {
778 children,
780 children,
779 copy_source,
781 copy_source,
780 full_path,
782 full_path,
781 ..*node
783 ..*node
782 },
784 },
783 })
785 })
784 }
786 }
785 // … so we can write them contiguously, after writing everything else
787 // … so we can write them contiguously, after writing everything else
786 // they refer to.
788 // they refer to.
787 let start = self.current_offset();
789 let start = self.current_offset();
788 let len = child_nodes_len_from_usize(nodes_len);
790 let len = child_nodes_len_from_usize(nodes_len);
789 self.out.extend(on_disk_nodes.as_bytes());
791 self.out.extend(on_disk_nodes.as_bytes());
790 Ok(ChildNodes { start, len })
792 Ok(ChildNodes { start, len })
791 }
793 }
792
794
793 /// If the given slice of items is within `on_disk`, returns its offset
795 /// If the given slice of items is within `on_disk`, returns its offset
794 /// from the start of `on_disk`.
796 /// from the start of `on_disk`.
795 fn on_disk_offset_of<T>(&self, slice: &[T]) -> Option<Offset>
797 fn on_disk_offset_of<T>(&self, slice: &[T]) -> Option<Offset>
796 where
798 where
797 T: BytesCast,
799 T: BytesCast,
798 {
800 {
799 fn address_range(slice: &[u8]) -> std::ops::RangeInclusive<usize> {
801 fn address_range(slice: &[u8]) -> std::ops::RangeInclusive<usize> {
800 let start = slice.as_ptr() as usize;
802 let start = slice.as_ptr() as usize;
801 let end = start + slice.len();
803 let end = start + slice.len();
802 start..=end
804 start..=end
803 }
805 }
804 let slice_addresses = address_range(slice.as_bytes());
806 let slice_addresses = address_range(slice.as_bytes());
805 let on_disk_addresses = address_range(self.dirstate_map.on_disk);
807 let on_disk_addresses = address_range(self.dirstate_map.on_disk);
806 if on_disk_addresses.contains(slice_addresses.start())
808 if on_disk_addresses.contains(slice_addresses.start())
807 && on_disk_addresses.contains(slice_addresses.end())
809 && on_disk_addresses.contains(slice_addresses.end())
808 {
810 {
809 let offset = slice_addresses.start() - on_disk_addresses.start();
811 let offset = slice_addresses.start() - on_disk_addresses.start();
810 Some(offset_from_usize(offset))
812 Some(offset_from_usize(offset))
811 } else {
813 } else {
812 None
814 None
813 }
815 }
814 }
816 }
815
817
816 fn current_offset(&mut self) -> Offset {
818 fn current_offset(&mut self) -> Offset {
817 let mut offset = self.out.len();
819 let mut offset = self.out.len();
818 if self.append {
820 if self.append {
819 offset += self.dirstate_map.on_disk.len()
821 offset += self.dirstate_map.on_disk.len()
820 }
822 }
821 offset_from_usize(offset)
823 offset_from_usize(offset)
822 }
824 }
823
825
824 fn write_path(&mut self, slice: &[u8]) -> PathSlice {
826 fn write_path(&mut self, slice: &[u8]) -> PathSlice {
825 let len = path_len_from_usize(slice.len());
827 let len = path_len_from_usize(slice.len());
826 // Reuse an already-written path if possible
828 // Reuse an already-written path if possible
827 if self.append {
829 if self.append {
828 if let Some(start) = self.on_disk_offset_of(slice) {
830 if let Some(start) = self.on_disk_offset_of(slice) {
829 return PathSlice { start, len };
831 return PathSlice { start, len };
830 }
832 }
831 }
833 }
832 let start = self.current_offset();
834 let start = self.current_offset();
833 self.out.extend(slice.as_bytes());
835 self.out.extend(slice.as_bytes());
834 PathSlice { start, len }
836 PathSlice { start, len }
835 }
837 }
836 }
838 }
837
839
838 fn offset_from_usize(x: usize) -> Offset {
840 fn offset_from_usize(x: usize) -> Offset {
839 u32::try_from(x)
841 u32::try_from(x)
840 // Could only panic for a dirstate file larger than 4 GiB
842 // Could only panic for a dirstate file larger than 4 GiB
841 .expect("dirstate-v2 offset overflow")
843 .expect("dirstate-v2 offset overflow")
842 .into()
844 .into()
843 }
845 }
844
846
845 fn child_nodes_len_from_usize(x: usize) -> Size {
847 fn child_nodes_len_from_usize(x: usize) -> Size {
846 u32::try_from(x)
848 u32::try_from(x)
847 // Could only panic with over 4 billion nodes
849 // Could only panic with over 4 billion nodes
848 .expect("dirstate-v2 slice length overflow")
850 .expect("dirstate-v2 slice length overflow")
849 .into()
851 .into()
850 }
852 }
851
853
852 fn path_len_from_usize(x: usize) -> PathSize {
854 fn path_len_from_usize(x: usize) -> PathSize {
853 u16::try_from(x)
855 u16::try_from(x)
854 // Could only panic for paths over 64 KiB
856 // Could only panic for paths over 64 KiB
855 .expect("dirstate-v2 path length overflow")
857 .expect("dirstate-v2 path length overflow")
856 .into()
858 .into()
857 }
859 }
858
860
859 impl From<TruncatedTimestamp> for PackedTruncatedTimestamp {
861 impl From<TruncatedTimestamp> for PackedTruncatedTimestamp {
860 fn from(timestamp: TruncatedTimestamp) -> Self {
862 fn from(timestamp: TruncatedTimestamp) -> Self {
861 Self {
863 Self {
862 truncated_seconds: timestamp.truncated_seconds().into(),
864 truncated_seconds: timestamp.truncated_seconds().into(),
863 nanoseconds: timestamp.nanoseconds().into(),
865 nanoseconds: timestamp.nanoseconds().into(),
864 }
866 }
865 }
867 }
866 }
868 }
867
869
868 impl TryFrom<PackedTruncatedTimestamp> for TruncatedTimestamp {
870 impl TryFrom<PackedTruncatedTimestamp> for TruncatedTimestamp {
869 type Error = DirstateV2ParseError;
871 type Error = DirstateV2ParseError;
870
872
871 fn try_from(
873 fn try_from(
872 timestamp: PackedTruncatedTimestamp,
874 timestamp: PackedTruncatedTimestamp,
873 ) -> Result<Self, Self::Error> {
875 ) -> Result<Self, Self::Error> {
874 Self::from_already_truncated(
876 Self::from_already_truncated(
875 timestamp.truncated_seconds.get(),
877 timestamp.truncated_seconds.get(),
876 timestamp.nanoseconds.get(),
878 timestamp.nanoseconds.get(),
877 false,
879 false,
878 )
880 )
879 }
881 }
880 }
882 }
881 impl PackedTruncatedTimestamp {
883 impl PackedTruncatedTimestamp {
882 fn null() -> Self {
884 fn null() -> Self {
883 Self {
885 Self {
884 truncated_seconds: 0.into(),
886 truncated_seconds: 0.into(),
885 nanoseconds: 0.into(),
887 nanoseconds: 0.into(),
886 }
888 }
887 }
889 }
888 }
890 }
@@ -1,582 +1,599 b''
1 use crate::changelog::Changelog;
1 use crate::changelog::Changelog;
2 use crate::config::{Config, ConfigError, ConfigParseError};
2 use crate::config::{Config, ConfigError, ConfigParseError};
3 use crate::dirstate::DirstateParents;
3 use crate::dirstate::DirstateParents;
4 use crate::dirstate_tree::dirstate_map::DirstateMapWriteMode;
4 use crate::dirstate_tree::dirstate_map::DirstateMapWriteMode;
5 use crate::dirstate_tree::on_disk::Docket as DirstateDocket;
5 use crate::dirstate_tree::on_disk::Docket as DirstateDocket;
6 use crate::dirstate_tree::owning::OwningDirstateMap;
6 use crate::dirstate_tree::owning::OwningDirstateMap;
7 use crate::errors::HgResultExt;
7 use crate::errors::HgResultExt;
8 use crate::errors::{HgError, IoResultExt};
8 use crate::errors::{HgError, IoResultExt};
9 use crate::lock::{try_with_lock_no_wait, LockError};
9 use crate::lock::{try_with_lock_no_wait, LockError};
10 use crate::manifest::{Manifest, Manifestlog};
10 use crate::manifest::{Manifest, Manifestlog};
11 use crate::revlog::filelog::Filelog;
11 use crate::revlog::filelog::Filelog;
12 use crate::revlog::revlog::RevlogError;
12 use crate::revlog::revlog::RevlogError;
13 use crate::utils::files::get_path_from_bytes;
13 use crate::utils::files::get_path_from_bytes;
14 use crate::utils::hg_path::HgPath;
14 use crate::utils::hg_path::HgPath;
15 use crate::utils::SliceExt;
15 use crate::utils::SliceExt;
16 use crate::vfs::{is_dir, is_file, Vfs};
16 use crate::vfs::{is_dir, is_file, Vfs};
17 use crate::{requirements, NodePrefix};
17 use crate::{requirements, NodePrefix};
18 use crate::{DirstateError, Revision};
18 use crate::{DirstateError, Revision};
19 use std::cell::{Ref, RefCell, RefMut};
19 use std::cell::{Ref, RefCell, RefMut};
20 use std::collections::HashSet;
20 use std::collections::HashSet;
21 use std::io::Seek;
21 use std::io::Seek;
22 use std::io::SeekFrom;
22 use std::io::SeekFrom;
23 use std::io::Write as IoWrite;
23 use std::io::Write as IoWrite;
24 use std::path::{Path, PathBuf};
24 use std::path::{Path, PathBuf};
25
25
26 /// A repository on disk
26 /// A repository on disk
27 pub struct Repo {
27 pub struct Repo {
28 working_directory: PathBuf,
28 working_directory: PathBuf,
29 dot_hg: PathBuf,
29 dot_hg: PathBuf,
30 store: PathBuf,
30 store: PathBuf,
31 requirements: HashSet<String>,
31 requirements: HashSet<String>,
32 config: Config,
32 config: Config,
33 dirstate_parents: LazyCell<DirstateParents>,
33 dirstate_parents: LazyCell<DirstateParents>,
34 dirstate_data_file_uuid: LazyCell<Option<Vec<u8>>>,
34 dirstate_data_file_uuid: LazyCell<Option<Vec<u8>>>,
35 dirstate_map: LazyCell<OwningDirstateMap>,
35 dirstate_map: LazyCell<OwningDirstateMap>,
36 changelog: LazyCell<Changelog>,
36 changelog: LazyCell<Changelog>,
37 manifestlog: LazyCell<Manifestlog>,
37 manifestlog: LazyCell<Manifestlog>,
38 }
38 }
39
39
40 #[derive(Debug, derive_more::From)]
40 #[derive(Debug, derive_more::From)]
41 pub enum RepoError {
41 pub enum RepoError {
42 NotFound {
42 NotFound {
43 at: PathBuf,
43 at: PathBuf,
44 },
44 },
45 #[from]
45 #[from]
46 ConfigParseError(ConfigParseError),
46 ConfigParseError(ConfigParseError),
47 #[from]
47 #[from]
48 Other(HgError),
48 Other(HgError),
49 }
49 }
50
50
51 impl From<ConfigError> for RepoError {
51 impl From<ConfigError> for RepoError {
52 fn from(error: ConfigError) -> Self {
52 fn from(error: ConfigError) -> Self {
53 match error {
53 match error {
54 ConfigError::Parse(error) => error.into(),
54 ConfigError::Parse(error) => error.into(),
55 ConfigError::Other(error) => error.into(),
55 ConfigError::Other(error) => error.into(),
56 }
56 }
57 }
57 }
58 }
58 }
59
59
60 impl Repo {
60 impl Repo {
61 /// tries to find nearest repository root in current working directory or
61 /// tries to find nearest repository root in current working directory or
62 /// its ancestors
62 /// its ancestors
63 pub fn find_repo_root() -> Result<PathBuf, RepoError> {
63 pub fn find_repo_root() -> Result<PathBuf, RepoError> {
64 let current_directory = crate::utils::current_dir()?;
64 let current_directory = crate::utils::current_dir()?;
65 // ancestors() is inclusive: it first yields `current_directory`
65 // ancestors() is inclusive: it first yields `current_directory`
66 // as-is.
66 // as-is.
67 for ancestor in current_directory.ancestors() {
67 for ancestor in current_directory.ancestors() {
68 if is_dir(ancestor.join(".hg"))? {
68 if is_dir(ancestor.join(".hg"))? {
69 return Ok(ancestor.to_path_buf());
69 return Ok(ancestor.to_path_buf());
70 }
70 }
71 }
71 }
72 return Err(RepoError::NotFound {
72 return Err(RepoError::NotFound {
73 at: current_directory,
73 at: current_directory,
74 });
74 });
75 }
75 }
76
76
77 /// Find a repository, either at the given path (which must contain a `.hg`
77 /// Find a repository, either at the given path (which must contain a `.hg`
78 /// sub-directory) or by searching the current directory and its
78 /// sub-directory) or by searching the current directory and its
79 /// ancestors.
79 /// ancestors.
80 ///
80 ///
81 /// A method with two very different "modes" like this usually a code smell
81 /// A method with two very different "modes" like this usually a code smell
82 /// to make two methods instead, but in this case an `Option` is what rhg
82 /// to make two methods instead, but in this case an `Option` is what rhg
83 /// sub-commands get from Clap for the `-R` / `--repository` CLI argument.
83 /// sub-commands get from Clap for the `-R` / `--repository` CLI argument.
84 /// Having two methods would just move that `if` to almost all callers.
84 /// Having two methods would just move that `if` to almost all callers.
85 pub fn find(
85 pub fn find(
86 config: &Config,
86 config: &Config,
87 explicit_path: Option<PathBuf>,
87 explicit_path: Option<PathBuf>,
88 ) -> Result<Self, RepoError> {
88 ) -> Result<Self, RepoError> {
89 if let Some(root) = explicit_path {
89 if let Some(root) = explicit_path {
90 if is_dir(root.join(".hg"))? {
90 if is_dir(root.join(".hg"))? {
91 Self::new_at_path(root.to_owned(), config)
91 Self::new_at_path(root.to_owned(), config)
92 } else if is_file(&root)? {
92 } else if is_file(&root)? {
93 Err(HgError::unsupported("bundle repository").into())
93 Err(HgError::unsupported("bundle repository").into())
94 } else {
94 } else {
95 Err(RepoError::NotFound {
95 Err(RepoError::NotFound {
96 at: root.to_owned(),
96 at: root.to_owned(),
97 })
97 })
98 }
98 }
99 } else {
99 } else {
100 let root = Self::find_repo_root()?;
100 let root = Self::find_repo_root()?;
101 Self::new_at_path(root, config)
101 Self::new_at_path(root, config)
102 }
102 }
103 }
103 }
104
104
105 /// To be called after checking that `.hg` is a sub-directory
105 /// To be called after checking that `.hg` is a sub-directory
106 fn new_at_path(
106 fn new_at_path(
107 working_directory: PathBuf,
107 working_directory: PathBuf,
108 config: &Config,
108 config: &Config,
109 ) -> Result<Self, RepoError> {
109 ) -> Result<Self, RepoError> {
110 let dot_hg = working_directory.join(".hg");
110 let dot_hg = working_directory.join(".hg");
111
111
112 let mut repo_config_files = Vec::new();
112 let mut repo_config_files = Vec::new();
113 repo_config_files.push(dot_hg.join("hgrc"));
113 repo_config_files.push(dot_hg.join("hgrc"));
114 repo_config_files.push(dot_hg.join("hgrc-not-shared"));
114 repo_config_files.push(dot_hg.join("hgrc-not-shared"));
115
115
116 let hg_vfs = Vfs { base: &dot_hg };
116 let hg_vfs = Vfs { base: &dot_hg };
117 let mut reqs = requirements::load_if_exists(hg_vfs)?;
117 let mut reqs = requirements::load_if_exists(hg_vfs)?;
118 let relative =
118 let relative =
119 reqs.contains(requirements::RELATIVE_SHARED_REQUIREMENT);
119 reqs.contains(requirements::RELATIVE_SHARED_REQUIREMENT);
120 let shared =
120 let shared =
121 reqs.contains(requirements::SHARED_REQUIREMENT) || relative;
121 reqs.contains(requirements::SHARED_REQUIREMENT) || relative;
122
122
123 // From `mercurial/localrepo.py`:
123 // From `mercurial/localrepo.py`:
124 //
124 //
125 // if .hg/requires contains the sharesafe requirement, it means
125 // if .hg/requires contains the sharesafe requirement, it means
126 // there exists a `.hg/store/requires` too and we should read it
126 // there exists a `.hg/store/requires` too and we should read it
127 // NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
127 // NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
128 // is present. We never write SHARESAFE_REQUIREMENT for a repo if store
128 // is present. We never write SHARESAFE_REQUIREMENT for a repo if store
129 // is not present, refer checkrequirementscompat() for that
129 // is not present, refer checkrequirementscompat() for that
130 //
130 //
131 // However, if SHARESAFE_REQUIREMENT is not present, it means that the
131 // However, if SHARESAFE_REQUIREMENT is not present, it means that the
132 // repository was shared the old way. We check the share source
132 // repository was shared the old way. We check the share source
133 // .hg/requires for SHARESAFE_REQUIREMENT to detect whether the
133 // .hg/requires for SHARESAFE_REQUIREMENT to detect whether the
134 // current repository needs to be reshared
134 // current repository needs to be reshared
135 let share_safe = reqs.contains(requirements::SHARESAFE_REQUIREMENT);
135 let share_safe = reqs.contains(requirements::SHARESAFE_REQUIREMENT);
136
136
137 let store_path;
137 let store_path;
138 if !shared {
138 if !shared {
139 store_path = dot_hg.join("store");
139 store_path = dot_hg.join("store");
140 } else {
140 } else {
141 let bytes = hg_vfs.read("sharedpath")?;
141 let bytes = hg_vfs.read("sharedpath")?;
142 let mut shared_path =
142 let mut shared_path =
143 get_path_from_bytes(bytes.trim_end_matches(|b| b == b'\n'))
143 get_path_from_bytes(bytes.trim_end_matches(|b| b == b'\n'))
144 .to_owned();
144 .to_owned();
145 if relative {
145 if relative {
146 shared_path = dot_hg.join(shared_path)
146 shared_path = dot_hg.join(shared_path)
147 }
147 }
148 if !is_dir(&shared_path)? {
148 if !is_dir(&shared_path)? {
149 return Err(HgError::corrupted(format!(
149 return Err(HgError::corrupted(format!(
150 ".hg/sharedpath points to nonexistent directory {}",
150 ".hg/sharedpath points to nonexistent directory {}",
151 shared_path.display()
151 shared_path.display()
152 ))
152 ))
153 .into());
153 .into());
154 }
154 }
155
155
156 store_path = shared_path.join("store");
156 store_path = shared_path.join("store");
157
157
158 let source_is_share_safe =
158 let source_is_share_safe =
159 requirements::load(Vfs { base: &shared_path })?
159 requirements::load(Vfs { base: &shared_path })?
160 .contains(requirements::SHARESAFE_REQUIREMENT);
160 .contains(requirements::SHARESAFE_REQUIREMENT);
161
161
162 if share_safe != source_is_share_safe {
162 if share_safe != source_is_share_safe {
163 return Err(HgError::unsupported("share-safe mismatch").into());
163 return Err(HgError::unsupported("share-safe mismatch").into());
164 }
164 }
165
165
166 if share_safe {
166 if share_safe {
167 repo_config_files.insert(0, shared_path.join("hgrc"))
167 repo_config_files.insert(0, shared_path.join("hgrc"))
168 }
168 }
169 }
169 }
170 if share_safe {
170 if share_safe {
171 reqs.extend(requirements::load(Vfs { base: &store_path })?);
171 reqs.extend(requirements::load(Vfs { base: &store_path })?);
172 }
172 }
173
173
174 let repo_config = if std::env::var_os("HGRCSKIPREPO").is_none() {
174 let repo_config = if std::env::var_os("HGRCSKIPREPO").is_none() {
175 config.combine_with_repo(&repo_config_files)?
175 config.combine_with_repo(&repo_config_files)?
176 } else {
176 } else {
177 config.clone()
177 config.clone()
178 };
178 };
179
179
180 let repo = Self {
180 let repo = Self {
181 requirements: reqs,
181 requirements: reqs,
182 working_directory,
182 working_directory,
183 store: store_path,
183 store: store_path,
184 dot_hg,
184 dot_hg,
185 config: repo_config,
185 config: repo_config,
186 dirstate_parents: LazyCell::new(),
186 dirstate_parents: LazyCell::new(),
187 dirstate_data_file_uuid: LazyCell::new(),
187 dirstate_data_file_uuid: LazyCell::new(),
188 dirstate_map: LazyCell::new(),
188 dirstate_map: LazyCell::new(),
189 changelog: LazyCell::new(),
189 changelog: LazyCell::new(),
190 manifestlog: LazyCell::new(),
190 manifestlog: LazyCell::new(),
191 };
191 };
192
192
193 requirements::check(&repo)?;
193 requirements::check(&repo)?;
194
194
195 Ok(repo)
195 Ok(repo)
196 }
196 }
197
197
198 pub fn working_directory_path(&self) -> &Path {
198 pub fn working_directory_path(&self) -> &Path {
199 &self.working_directory
199 &self.working_directory
200 }
200 }
201
201
202 pub fn requirements(&self) -> &HashSet<String> {
202 pub fn requirements(&self) -> &HashSet<String> {
203 &self.requirements
203 &self.requirements
204 }
204 }
205
205
206 pub fn config(&self) -> &Config {
206 pub fn config(&self) -> &Config {
207 &self.config
207 &self.config
208 }
208 }
209
209
210 /// For accessing repository files (in `.hg`), except for the store
210 /// For accessing repository files (in `.hg`), except for the store
211 /// (`.hg/store`).
211 /// (`.hg/store`).
212 pub fn hg_vfs(&self) -> Vfs<'_> {
212 pub fn hg_vfs(&self) -> Vfs<'_> {
213 Vfs { base: &self.dot_hg }
213 Vfs { base: &self.dot_hg }
214 }
214 }
215
215
216 /// For accessing repository store files (in `.hg/store`)
216 /// For accessing repository store files (in `.hg/store`)
217 pub fn store_vfs(&self) -> Vfs<'_> {
217 pub fn store_vfs(&self) -> Vfs<'_> {
218 Vfs { base: &self.store }
218 Vfs { base: &self.store }
219 }
219 }
220
220
221 /// For accessing the working copy
221 /// For accessing the working copy
222 pub fn working_directory_vfs(&self) -> Vfs<'_> {
222 pub fn working_directory_vfs(&self) -> Vfs<'_> {
223 Vfs {
223 Vfs {
224 base: &self.working_directory,
224 base: &self.working_directory,
225 }
225 }
226 }
226 }
227
227
228 pub fn try_with_wlock_no_wait<R>(
228 pub fn try_with_wlock_no_wait<R>(
229 &self,
229 &self,
230 f: impl FnOnce() -> R,
230 f: impl FnOnce() -> R,
231 ) -> Result<R, LockError> {
231 ) -> Result<R, LockError> {
232 try_with_lock_no_wait(self.hg_vfs(), "wlock", f)
232 try_with_lock_no_wait(self.hg_vfs(), "wlock", f)
233 }
233 }
234
234
235 pub fn has_dirstate_v2(&self) -> bool {
235 pub fn has_dirstate_v2(&self) -> bool {
236 self.requirements
236 self.requirements
237 .contains(requirements::DIRSTATE_V2_REQUIREMENT)
237 .contains(requirements::DIRSTATE_V2_REQUIREMENT)
238 }
238 }
239
239
240 pub fn has_sparse(&self) -> bool {
240 pub fn has_sparse(&self) -> bool {
241 self.requirements.contains(requirements::SPARSE_REQUIREMENT)
241 self.requirements.contains(requirements::SPARSE_REQUIREMENT)
242 }
242 }
243
243
244 pub fn has_narrow(&self) -> bool {
244 pub fn has_narrow(&self) -> bool {
245 self.requirements.contains(requirements::NARROW_REQUIREMENT)
245 self.requirements.contains(requirements::NARROW_REQUIREMENT)
246 }
246 }
247
247
248 pub fn has_nodemap(&self) -> bool {
248 pub fn has_nodemap(&self) -> bool {
249 self.requirements
249 self.requirements
250 .contains(requirements::NODEMAP_REQUIREMENT)
250 .contains(requirements::NODEMAP_REQUIREMENT)
251 }
251 }
252
252
253 fn dirstate_file_contents(&self) -> Result<Vec<u8>, HgError> {
253 fn dirstate_file_contents(&self) -> Result<Vec<u8>, HgError> {
254 Ok(self
254 Ok(self
255 .hg_vfs()
255 .hg_vfs()
256 .read("dirstate")
256 .read("dirstate")
257 .io_not_found_as_none()?
257 .io_not_found_as_none()?
258 .unwrap_or(Vec::new()))
258 .unwrap_or(Vec::new()))
259 }
259 }
260
260
261 pub fn dirstate_parents(&self) -> Result<DirstateParents, HgError> {
261 pub fn dirstate_parents(&self) -> Result<DirstateParents, HgError> {
262 Ok(*self
262 Ok(*self
263 .dirstate_parents
263 .dirstate_parents
264 .get_or_init(|| self.read_dirstate_parents())?)
264 .get_or_init(|| self.read_dirstate_parents())?)
265 }
265 }
266
266
267 fn read_dirstate_parents(&self) -> Result<DirstateParents, HgError> {
267 fn read_dirstate_parents(&self) -> Result<DirstateParents, HgError> {
268 let dirstate = self.dirstate_file_contents()?;
268 let dirstate = self.dirstate_file_contents()?;
269 let parents = if dirstate.is_empty() {
269 let parents = if dirstate.is_empty() {
270 if self.has_dirstate_v2() {
270 if self.has_dirstate_v2() {
271 self.dirstate_data_file_uuid.set(None);
271 self.dirstate_data_file_uuid.set(None);
272 }
272 }
273 DirstateParents::NULL
273 DirstateParents::NULL
274 } else if self.has_dirstate_v2() {
274 } else if self.has_dirstate_v2() {
275 let docket =
275 let docket =
276 crate::dirstate_tree::on_disk::read_docket(&dirstate)?;
276 crate::dirstate_tree::on_disk::read_docket(&dirstate)?;
277 self.dirstate_data_file_uuid
277 self.dirstate_data_file_uuid
278 .set(Some(docket.uuid.to_owned()));
278 .set(Some(docket.uuid.to_owned()));
279 docket.parents()
279 docket.parents()
280 } else {
280 } else {
281 crate::dirstate::parsers::parse_dirstate_parents(&dirstate)?
281 crate::dirstate::parsers::parse_dirstate_parents(&dirstate)?
282 .clone()
282 .clone()
283 };
283 };
284 self.dirstate_parents.set(parents);
284 self.dirstate_parents.set(parents);
285 Ok(parents)
285 Ok(parents)
286 }
286 }
287
287
288 fn read_dirstate_data_file_uuid(
288 fn read_dirstate_data_file_uuid(
289 &self,
289 &self,
290 ) -> Result<Option<Vec<u8>>, HgError> {
290 ) -> Result<Option<Vec<u8>>, HgError> {
291 assert!(
291 assert!(
292 self.has_dirstate_v2(),
292 self.has_dirstate_v2(),
293 "accessing dirstate data file ID without dirstate-v2"
293 "accessing dirstate data file ID without dirstate-v2"
294 );
294 );
295 let dirstate = self.dirstate_file_contents()?;
295 let dirstate = self.dirstate_file_contents()?;
296 if dirstate.is_empty() {
296 if dirstate.is_empty() {
297 self.dirstate_parents.set(DirstateParents::NULL);
297 self.dirstate_parents.set(DirstateParents::NULL);
298 Ok(None)
298 Ok(None)
299 } else {
299 } else {
300 let docket =
300 let docket =
301 crate::dirstate_tree::on_disk::read_docket(&dirstate)?;
301 crate::dirstate_tree::on_disk::read_docket(&dirstate)?;
302 self.dirstate_parents.set(docket.parents());
302 self.dirstate_parents.set(docket.parents());
303 Ok(Some(docket.uuid.to_owned()))
303 Ok(Some(docket.uuid.to_owned()))
304 }
304 }
305 }
305 }
306
306
307 fn new_dirstate_map(&self) -> Result<OwningDirstateMap, DirstateError> {
307 fn new_dirstate_map(&self) -> Result<OwningDirstateMap, DirstateError> {
308 let dirstate_file_contents = self.dirstate_file_contents()?;
308 let dirstate_file_contents = self.dirstate_file_contents()?;
309 if dirstate_file_contents.is_empty() {
309 if dirstate_file_contents.is_empty() {
310 self.dirstate_parents.set(DirstateParents::NULL);
310 self.dirstate_parents.set(DirstateParents::NULL);
311 if self.has_dirstate_v2() {
311 if self.has_dirstate_v2() {
312 self.dirstate_data_file_uuid.set(None);
312 self.dirstate_data_file_uuid.set(None);
313 }
313 }
314 Ok(OwningDirstateMap::new_empty(Vec::new()))
314 Ok(OwningDirstateMap::new_empty(Vec::new()))
315 } else if self.has_dirstate_v2() {
315 } else if self.has_dirstate_v2() {
316 let docket = crate::dirstate_tree::on_disk::read_docket(
316 let docket = crate::dirstate_tree::on_disk::read_docket(
317 &dirstate_file_contents,
317 &dirstate_file_contents,
318 )?;
318 )?;
319 self.dirstate_parents.set(docket.parents());
319 self.dirstate_parents.set(docket.parents());
320 self.dirstate_data_file_uuid
320 self.dirstate_data_file_uuid
321 .set(Some(docket.uuid.to_owned()));
321 .set(Some(docket.uuid.to_owned()));
322 let data_size = docket.data_size();
322 let data_size = docket.data_size();
323 let metadata = docket.tree_metadata();
323 let metadata = docket.tree_metadata();
324 if crate::vfs::is_on_nfs_mount(docket.data_filename()) {
324 let mut map =
325 // Don't mmap on NFS to prevent `SIGBUS` error on deletion
325 if crate::vfs::is_on_nfs_mount(docket.data_filename()) {
326 OwningDirstateMap::new_v2(
326 // Don't mmap on NFS to prevent `SIGBUS` error on deletion
327 self.hg_vfs().read(docket.data_filename())?,
327 OwningDirstateMap::new_v2(
328 data_size,
328 self.hg_vfs().read(docket.data_filename())?,
329 metadata,
329 data_size,
330 )
330 metadata,
331 } else if let Some(data_mmap) = self
331 )
332 .hg_vfs()
332 } else if let Some(data_mmap) = self
333 .mmap_open(docket.data_filename())
333 .hg_vfs()
334 .io_not_found_as_none()?
334 .mmap_open(docket.data_filename())
335 {
335 .io_not_found_as_none()?
336 OwningDirstateMap::new_v2(data_mmap, data_size, metadata)
336 {
337 } else {
337 OwningDirstateMap::new_v2(data_mmap, data_size, metadata)
338 OwningDirstateMap::new_v2(Vec::new(), data_size, metadata)
338 } else {
339 }
339 OwningDirstateMap::new_v2(Vec::new(), data_size, metadata)
340 }?;
341
342 let write_mode_config = self
343 .config()
344 .get_str(b"devel", b"dirstate.v2.data_update_mode")
345 .unwrap_or(Some("auto"))
346 .unwrap_or("auto"); // don't bother for devel options
347 let write_mode = match write_mode_config {
348 "auto" => DirstateMapWriteMode::Auto,
349 "force-new" => DirstateMapWriteMode::ForceNewDataFile,
350 "force-append" => DirstateMapWriteMode::ForceAppend,
351 _ => DirstateMapWriteMode::Auto,
352 };
353
354 map.with_dmap_mut(|m| m.set_write_mode(write_mode));
355
356 Ok(map)
340 } else {
357 } else {
341 let (map, parents) =
358 let (map, parents) =
342 OwningDirstateMap::new_v1(dirstate_file_contents)?;
359 OwningDirstateMap::new_v1(dirstate_file_contents)?;
343 self.dirstate_parents.set(parents);
360 self.dirstate_parents.set(parents);
344 Ok(map)
361 Ok(map)
345 }
362 }
346 }
363 }
347
364
348 pub fn dirstate_map(
365 pub fn dirstate_map(
349 &self,
366 &self,
350 ) -> Result<Ref<OwningDirstateMap>, DirstateError> {
367 ) -> Result<Ref<OwningDirstateMap>, DirstateError> {
351 self.dirstate_map.get_or_init(|| self.new_dirstate_map())
368 self.dirstate_map.get_or_init(|| self.new_dirstate_map())
352 }
369 }
353
370
354 pub fn dirstate_map_mut(
371 pub fn dirstate_map_mut(
355 &self,
372 &self,
356 ) -> Result<RefMut<OwningDirstateMap>, DirstateError> {
373 ) -> Result<RefMut<OwningDirstateMap>, DirstateError> {
357 self.dirstate_map
374 self.dirstate_map
358 .get_mut_or_init(|| self.new_dirstate_map())
375 .get_mut_or_init(|| self.new_dirstate_map())
359 }
376 }
360
377
361 fn new_changelog(&self) -> Result<Changelog, HgError> {
378 fn new_changelog(&self) -> Result<Changelog, HgError> {
362 Changelog::open(&self.store_vfs(), self.has_nodemap())
379 Changelog::open(&self.store_vfs(), self.has_nodemap())
363 }
380 }
364
381
365 pub fn changelog(&self) -> Result<Ref<Changelog>, HgError> {
382 pub fn changelog(&self) -> Result<Ref<Changelog>, HgError> {
366 self.changelog.get_or_init(|| self.new_changelog())
383 self.changelog.get_or_init(|| self.new_changelog())
367 }
384 }
368
385
369 pub fn changelog_mut(&self) -> Result<RefMut<Changelog>, HgError> {
386 pub fn changelog_mut(&self) -> Result<RefMut<Changelog>, HgError> {
370 self.changelog.get_mut_or_init(|| self.new_changelog())
387 self.changelog.get_mut_or_init(|| self.new_changelog())
371 }
388 }
372
389
373 fn new_manifestlog(&self) -> Result<Manifestlog, HgError> {
390 fn new_manifestlog(&self) -> Result<Manifestlog, HgError> {
374 Manifestlog::open(&self.store_vfs(), self.has_nodemap())
391 Manifestlog::open(&self.store_vfs(), self.has_nodemap())
375 }
392 }
376
393
377 pub fn manifestlog(&self) -> Result<Ref<Manifestlog>, HgError> {
394 pub fn manifestlog(&self) -> Result<Ref<Manifestlog>, HgError> {
378 self.manifestlog.get_or_init(|| self.new_manifestlog())
395 self.manifestlog.get_or_init(|| self.new_manifestlog())
379 }
396 }
380
397
381 pub fn manifestlog_mut(&self) -> Result<RefMut<Manifestlog>, HgError> {
398 pub fn manifestlog_mut(&self) -> Result<RefMut<Manifestlog>, HgError> {
382 self.manifestlog.get_mut_or_init(|| self.new_manifestlog())
399 self.manifestlog.get_mut_or_init(|| self.new_manifestlog())
383 }
400 }
384
401
385 /// Returns the manifest of the *changeset* with the given node ID
402 /// Returns the manifest of the *changeset* with the given node ID
386 pub fn manifest_for_node(
403 pub fn manifest_for_node(
387 &self,
404 &self,
388 node: impl Into<NodePrefix>,
405 node: impl Into<NodePrefix>,
389 ) -> Result<Manifest, RevlogError> {
406 ) -> Result<Manifest, RevlogError> {
390 self.manifestlog()?.data_for_node(
407 self.manifestlog()?.data_for_node(
391 self.changelog()?
408 self.changelog()?
392 .data_for_node(node.into())?
409 .data_for_node(node.into())?
393 .manifest_node()?
410 .manifest_node()?
394 .into(),
411 .into(),
395 )
412 )
396 }
413 }
397
414
398 /// Returns the manifest of the *changeset* with the given revision number
415 /// Returns the manifest of the *changeset* with the given revision number
399 pub fn manifest_for_rev(
416 pub fn manifest_for_rev(
400 &self,
417 &self,
401 revision: Revision,
418 revision: Revision,
402 ) -> Result<Manifest, RevlogError> {
419 ) -> Result<Manifest, RevlogError> {
403 self.manifestlog()?.data_for_node(
420 self.manifestlog()?.data_for_node(
404 self.changelog()?
421 self.changelog()?
405 .data_for_rev(revision)?
422 .data_for_rev(revision)?
406 .manifest_node()?
423 .manifest_node()?
407 .into(),
424 .into(),
408 )
425 )
409 }
426 }
410
427
411 pub fn has_subrepos(&self) -> Result<bool, DirstateError> {
428 pub fn has_subrepos(&self) -> Result<bool, DirstateError> {
412 if let Some(entry) = self.dirstate_map()?.get(HgPath::new(".hgsub"))? {
429 if let Some(entry) = self.dirstate_map()?.get(HgPath::new(".hgsub"))? {
413 Ok(entry.tracked())
430 Ok(entry.tracked())
414 } else {
431 } else {
415 Ok(false)
432 Ok(false)
416 }
433 }
417 }
434 }
418
435
    /// Open the filelog (per-file revision log) for `path`, given relative
    /// to the repository root.
    pub fn filelog(&self, path: &HgPath) -> Result<Filelog, HgError> {
        Filelog::open(self, path)
    }
422
439
    /// Write to disk any updates that were made through `dirstate_map_mut`.
    ///
    /// The "wlock" must be held while calling this.
    /// See for example `try_with_wlock_no_wait`.
    ///
    /// TODO: have a `WritableRepo` type only accessible while holding the
    /// lock?
    pub fn write_dirstate(&self) -> Result<(), DirstateError> {
        let map = self.dirstate_map()?;
        // TODO: Maintain a `DirstateMap::dirty` flag, and return early here if
        // it’s unset
        let parents = self.dirstate_parents()?;
        let (packed_dirstate, old_uuid_to_remove) = if self.has_dirstate_v2() {
            let uuid_opt = self
                .dirstate_data_file_uuid
                .get_or_init(|| self.read_dirstate_data_file_uuid())?;
            let uuid_opt = uuid_opt.as_ref();
            // With an existing data file (uuid present), let `pack_v2` decide
            // whether to append or rewrite; without one, a new data file must
            // be created.
            let write_mode = if uuid_opt.is_some() {
                DirstateMapWriteMode::Auto
            } else {
                DirstateMapWriteMode::ForceNewDataFile
            };
            let (data, tree_metadata, append, old_data_size) =
                map.pack_v2(write_mode)?;

            // Reuse the uuid, or generate a new one, keeping the old for
            // deletion.
            let (uuid, old_uuid) = match uuid_opt {
                Some(uuid) => {
                    let as_str = std::str::from_utf8(uuid)
                        .map_err(|_| {
                            HgError::corrupted(
                                "non-UTF-8 dirstate data file ID",
                            )
                        })?
                        .to_owned();
                    if append {
                        (as_str, None)
                    } else {
                        // Rewriting: new file, schedule the old for removal.
                        (DirstateDocket::new_uid(), Some(as_str))
                    }
                }
                None => (DirstateDocket::new_uid(), None),
            };

            let data_filename = format!("dirstate.{}", uuid);
            let data_filename = self.hg_vfs().join(data_filename);
            let mut options = std::fs::OpenOptions::new();
            options.write(true);

            // Why are we not using the O_APPEND flag when appending?
            //
            // - O_APPEND makes it trickier to deal with garbage at the end of
            //   the file, left by a previous uncommitted transaction. By
            //   starting the write at [old_data_size] we make sure we erase
            //   all such garbage.
            //
            // - O_APPEND requires to special-case 0-byte writes, whereas we
            //   don't need that.
            //
            // - Some OSes have bugs in implementation O_APPEND:
            //   revlog.py talks about a Solaris bug, but we also saw some ZFS
            //   bug: https://github.com/openzfs/zfs/pull/3124,
            //   https://github.com/openzfs/zfs/issues/13370
            //
            if !append {
                log::trace!("creating a new dirstate data file");
                options.create_new(true);
            } else {
                log::trace!("appending to the dirstate data file");
            }

            let data_size = (|| {
                // TODO: loop and try another random ID if !append and this
                // returns `ErrorKind::AlreadyExists`? Collision chance of two
                // random IDs is one in 2**32
                let mut file = options.open(&data_filename)?;
                if append {
                    file.seek(SeekFrom::Start(old_data_size as u64))?;
                }
                file.write_all(&data)?;
                file.flush()?;
                // The current offset is the resulting total data size,
                // recorded in the docket below.
                file.seek(SeekFrom::Current(0))
            })()
            .when_writing_file(&data_filename)?;

            let packed_dirstate = DirstateDocket::serialize(
                parents,
                tree_metadata,
                data_size,
                uuid.as_bytes(),
            )
            .map_err(|_: std::num::TryFromIntError| {
                HgError::corrupted("overflow in dirstate docket serialization")
            })?;

            (packed_dirstate, old_uuid)
        } else {
            (map.pack_v1(parents)?, None)
        };

        let vfs = self.hg_vfs();
        vfs.atomic_write("dirstate", &packed_dirstate)?;
        if let Some(uuid) = old_uuid_to_remove {
            // Remove the old data file after the new docket pointing to the
            // new data file was written.
            vfs.remove_file(format!("dirstate.{}", uuid))?;
        }
        Ok(())
    }
533 }
550 }
534
551
/// Lazily-initialized component of `Repo` with interior mutability
///
/// This differs from `OnceCell` in that the value can still be "deinitialized"
/// later by setting its inner `Option` to `None`. It also takes the
/// initialization function as an argument when the value is requested, not
/// when the instance is created.
struct LazyCell<T> {
    // `None` means "not initialized yet" (or deinitialized again later).
    value: RefCell<Option<T>>,
}
544
561
impl<T> LazyCell<T> {
    /// Creates an empty cell; the value is produced on first access.
    fn new() -> Self {
        Self {
            value: RefCell::new(None),
        }
    }

    /// Sets (or replaces) the contained value.
    fn set(&self, value: T) {
        *self.value.borrow_mut() = Some(value)
    }

    /// Returns a shared borrow of the value, running `init` first if the
    /// cell is still empty. `init` errors are propagated unchanged.
    fn get_or_init<E>(
        &self,
        init: impl Fn() -> Result<T, E>,
    ) -> Result<Ref<T>, E> {
        let mut borrowed = self.value.borrow();
        if borrowed.is_none() {
            // Must drop the shared borrow before taking the mutable one.
            drop(borrowed);
            // Only use `borrow_mut` if it is really needed to avoid panic in
            // case there is another outstanding borrow but mutation is not
            // needed.
            *self.value.borrow_mut() = Some(init()?);
            borrowed = self.value.borrow()
        }
        // `unwrap` is safe: the `Option` was just filled if it was empty.
        Ok(Ref::map(borrowed, |option| option.as_ref().unwrap()))
    }

    /// Returns a mutable borrow of the value, running `init` first if the
    /// cell is still empty. `init` errors are propagated unchanged.
    fn get_mut_or_init<E>(
        &self,
        init: impl Fn() -> Result<T, E>,
    ) -> Result<RefMut<T>, E> {
        let mut borrowed = self.value.borrow_mut();
        if borrowed.is_none() {
            *borrowed = Some(init()?);
        }
        // `unwrap` is safe: the `Option` was just filled if it was empty.
        Ok(RefMut::map(borrowed, |option| option.as_mut().unwrap()))
    }
}
@@ -1,550 +1,551 b''
1 // dirstate_map.rs
1 // dirstate_map.rs
2 //
2 //
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
4 //
4 //
5 // This software may be used and distributed according to the terms of the
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
6 // GNU General Public License version 2 or any later version.
7
7
8 //! Bindings for the `hg::dirstate::dirstate_map` file provided by the
8 //! Bindings for the `hg::dirstate::dirstate_map` file provided by the
9 //! `hg-core` package.
9 //! `hg-core` package.
10
10
11 use std::cell::{RefCell, RefMut};
11 use std::cell::{RefCell, RefMut};
12 use std::convert::TryInto;
12 use std::convert::TryInto;
13
13
14 use cpython::{
14 use cpython::{
15 exc, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList, PyNone, PyObject,
15 exc, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList, PyNone, PyObject,
16 PyResult, Python, PythonObject, ToPyObject, UnsafePyLeaked,
16 PyResult, Python, PythonObject, ToPyObject, UnsafePyLeaked,
17 };
17 };
18 use hg::dirstate::{ParentFileData, TruncatedTimestamp};
18 use hg::dirstate::{ParentFileData, TruncatedTimestamp};
19
19
20 use crate::{
20 use crate::{
21 dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator},
21 dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator},
22 dirstate::item::DirstateItem,
22 dirstate::item::DirstateItem,
23 pybytes_deref::PyBytesDeref,
23 pybytes_deref::PyBytesDeref,
24 };
24 };
25 use hg::{
25 use hg::{
26 dirstate::StateMapIter, dirstate_tree::dirstate_map::DirstateMapWriteMode,
26 dirstate::StateMapIter, dirstate_tree::dirstate_map::DirstateMapWriteMode,
27 dirstate_tree::on_disk::DirstateV2ParseError,
27 dirstate_tree::on_disk::DirstateV2ParseError,
28 dirstate_tree::owning::OwningDirstateMap, revlog::Node,
28 dirstate_tree::owning::OwningDirstateMap, revlog::Node,
29 utils::files::normalize_case, utils::hg_path::HgPath, DirstateEntry,
29 utils::files::normalize_case, utils::hg_path::HgPath, DirstateEntry,
30 DirstateError, DirstateParents,
30 DirstateError, DirstateParents,
31 };
31 };
32
32
33 // TODO
33 // TODO
34 // This object needs to share references to multiple members of its Rust
34 // This object needs to share references to multiple members of its Rust
35 // inner struct, namely `copy_map`, `dirs` and `all_dirs`.
35 // inner struct, namely `copy_map`, `dirs` and `all_dirs`.
36 // Right now `CopyMap` is done, but it needs to have an explicit reference
36 // Right now `CopyMap` is done, but it needs to have an explicit reference
37 // to `RustDirstateMap` which itself needs to have an encapsulation for
37 // to `RustDirstateMap` which itself needs to have an encapsulation for
38 // every method in `CopyMap` (copymapcopy, etc.).
38 // every method in `CopyMap` (copymapcopy, etc.).
39 // This is ugly and hard to maintain.
39 // This is ugly and hard to maintain.
40 // The same logic applies to `dirs` and `all_dirs`, however the `Dirs`
40 // The same logic applies to `dirs` and `all_dirs`, however the `Dirs`
41 // `py_class!` is already implemented and does not mention
41 // `py_class!` is already implemented and does not mention
42 // `RustDirstateMap`, rightfully so.
42 // `RustDirstateMap`, rightfully so.
43 // All attributes also have to have a separate refcount data attribute for
43 // All attributes also have to have a separate refcount data attribute for
44 // leaks, with all methods that go along for reference sharing.
44 // leaks, with all methods that go along for reference sharing.
45 py_class!(pub class DirstateMap |py| {
45 py_class!(pub class DirstateMap |py| {
46 @shared data inner: OwningDirstateMap;
46 @shared data inner: OwningDirstateMap;
47
47
    /// Returns a `(dirstate_map, parents)` tuple
    @staticmethod
    def new_v1(
        on_disk: PyBytes,
    ) -> PyResult<PyObject> {
        // Keep the Python buffer alive for as long as the map borrows it.
        let on_disk = PyBytesDeref::new(py, on_disk);
        let (map, parents) = OwningDirstateMap::new_v1(on_disk)
            .map_err(|e| dirstate_error(py, e))?;
        let map = Self::create_instance(py, map)?;
        // Expose the parent nodes to Python as raw bytes.
        let p1 = PyBytes::new(py, parents.p1.as_bytes());
        let p2 = PyBytes::new(py, parents.p2.as_bytes());
        let parents = (p1, p2);
        Ok((map, parents).to_py_object(py).into_object())
    }
62
62
    /// Returns a DirstateMap
    @staticmethod
    def new_v2(
        on_disk: PyBytes,
        data_size: usize,
        tree_metadata: PyBytes,
    ) -> PyResult<PyObject> {
        // Surface any dirstate error to Python as an OSError.
        let dirstate_error = |e: DirstateError| {
            PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
        };
        // Keep the Python buffer alive for as long as the map borrows it.
        let on_disk = PyBytesDeref::new(py, on_disk);
        let map = OwningDirstateMap::new_v2(
            on_disk, data_size, tree_metadata.data(py),
        ).map_err(dirstate_error)?;
        let map = Self::create_instance(py, map)?;
        Ok(map.into_object())
    }
80
80
81 def clear(&self) -> PyResult<PyObject> {
81 def clear(&self) -> PyResult<PyObject> {
82 self.inner(py).borrow_mut().clear();
82 self.inner(py).borrow_mut().clear();
83 Ok(py.None())
83 Ok(py.None())
84 }
84 }
85
85
    /// Return the dirstate entry for `key`, or `default` when the path has
    /// no entry.
    def get(
        &self,
        key: PyObject,
        default: Option<PyObject> = None
    ) -> PyResult<Option<PyObject>> {
        let key = key.extract::<PyBytes>(py)?;
        match self
            .inner(py)
            .borrow()
            .get(HgPath::new(key.data(py)))
            .map_err(|e| v2_error(py, e))?
        {
            Some(entry) => {
                Ok(Some(DirstateItem::new_as_pyobject(py, entry)?))
            },
            None => Ok(default)
        }
    }
104
104
105 def set_tracked(&self, f: PyObject) -> PyResult<PyBool> {
105 def set_tracked(&self, f: PyObject) -> PyResult<PyBool> {
106 let bytes = f.extract::<PyBytes>(py)?;
106 let bytes = f.extract::<PyBytes>(py)?;
107 let path = HgPath::new(bytes.data(py));
107 let path = HgPath::new(bytes.data(py));
108 let res = self.inner(py).borrow_mut().set_tracked(path);
108 let res = self.inner(py).borrow_mut().set_tracked(path);
109 let was_tracked = res.or_else(|_| {
109 let was_tracked = res.or_else(|_| {
110 Err(PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))
110 Err(PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))
111 })?;
111 })?;
112 Ok(was_tracked.to_py_object(py))
112 Ok(was_tracked.to_py_object(py))
113 }
113 }
114
114
115 def set_untracked(&self, f: PyObject) -> PyResult<PyBool> {
115 def set_untracked(&self, f: PyObject) -> PyResult<PyBool> {
116 let bytes = f.extract::<PyBytes>(py)?;
116 let bytes = f.extract::<PyBytes>(py)?;
117 let path = HgPath::new(bytes.data(py));
117 let path = HgPath::new(bytes.data(py));
118 let res = self.inner(py).borrow_mut().set_untracked(path);
118 let res = self.inner(py).borrow_mut().set_untracked(path);
119 let was_tracked = res.or_else(|_| {
119 let was_tracked = res.or_else(|_| {
120 Err(PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))
120 Err(PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))
121 })?;
121 })?;
122 Ok(was_tracked.to_py_object(py))
122 Ok(was_tracked.to_py_object(py))
123 }
123 }
124
124
    /// Mark `f` as clean, caching the given mode, size and truncated mtime
    /// as its last-known-good filesystem metadata.
    def set_clean(
        &self,
        f: PyObject,
        mode: u32,
        size: u32,
        mtime: (i64, u32, bool)
    ) -> PyResult<PyNone> {
        let (mtime_s, mtime_ns, second_ambiguous) = mtime;
        let timestamp = TruncatedTimestamp::new_truncate(
            mtime_s, mtime_ns, second_ambiguous
        );
        let bytes = f.extract::<PyBytes>(py)?;
        let path = HgPath::new(bytes.data(py));
        let res = self.inner(py).borrow_mut().set_clean(
            path, mode, size, timestamp,
        );
        // Any failure is surfaced to Python as a generic OSError.
        res.or_else(|_| {
            Err(PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))
        })?;
        Ok(PyNone)
    }
146
146
147 def set_possibly_dirty(&self, f: PyObject) -> PyResult<PyNone> {
147 def set_possibly_dirty(&self, f: PyObject) -> PyResult<PyNone> {
148 let bytes = f.extract::<PyBytes>(py)?;
148 let bytes = f.extract::<PyBytes>(py)?;
149 let path = HgPath::new(bytes.data(py));
149 let path = HgPath::new(bytes.data(py));
150 let res = self.inner(py).borrow_mut().set_possibly_dirty(path);
150 let res = self.inner(py).borrow_mut().set_possibly_dirty(path);
151 res.or_else(|_| {
151 res.or_else(|_| {
152 Err(PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))
152 Err(PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))
153 })?;
153 })?;
154 Ok(PyNone)
154 Ok(PyNone)
155 }
155 }
156
156
    /// Reset the dirstate entry for `f` from the given tracked/p1/p2 flags
    /// and optional `(mode, size, mtime)` parent file data.
    def reset_state(
        &self,
        f: PyObject,
        wc_tracked: bool,
        p1_tracked: bool,
        p2_info: bool,
        has_meaningful_mtime: bool,
        parentfiledata: Option<(u32, u32, Option<(i64, u32, bool)>)>,
    ) -> PyResult<PyNone> {
        // An mtime can only be meaningful if file data was provided at all;
        // downgrade the flag when the data (or its mtime part) is missing.
        let mut has_meaningful_mtime = has_meaningful_mtime;
        let parent_file_data = match parentfiledata {
            None => {
                has_meaningful_mtime = false;
                None
            },
            Some(data) => {
                let (mode, size, mtime_info) = data;
                let mtime = if let Some(mtime_info) = mtime_info {
                    let (mtime_s, mtime_ns, second_ambiguous) = mtime_info;
                    let timestamp = TruncatedTimestamp::new_truncate(
                        mtime_s, mtime_ns, second_ambiguous
                    );
                    Some(timestamp)
                } else {
                    has_meaningful_mtime = false;
                    None
                };
                Some(ParentFileData {
                    mode_size: Some((mode, size)),
                    mtime,
                })
            }
        };
        let bytes = f.extract::<PyBytes>(py)?;
        let path = HgPath::new(bytes.data(py));
        let res = self.inner(py).borrow_mut().reset_state(
            path,
            wc_tracked,
            p1_tracked,
            p2_info,
            has_meaningful_mtime,
            parent_file_data,
        );
        // Any failure is surfaced to Python as a generic OSError.
        res.or_else(|_| {
            Err(PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string()))
        })?;
        Ok(PyNone)
    }
205
205
206 def hastrackeddir(&self, d: PyObject) -> PyResult<PyBool> {
206 def hastrackeddir(&self, d: PyObject) -> PyResult<PyBool> {
207 let d = d.extract::<PyBytes>(py)?;
207 let d = d.extract::<PyBytes>(py)?;
208 Ok(self.inner(py).borrow_mut()
208 Ok(self.inner(py).borrow_mut()
209 .has_tracked_dir(HgPath::new(d.data(py)))
209 .has_tracked_dir(HgPath::new(d.data(py)))
210 .map_err(|e| {
210 .map_err(|e| {
211 PyErr::new::<exc::ValueError, _>(py, e.to_string())
211 PyErr::new::<exc::ValueError, _>(py, e.to_string())
212 })?
212 })?
213 .to_py_object(py))
213 .to_py_object(py))
214 }
214 }
215
215
216 def hasdir(&self, d: PyObject) -> PyResult<PyBool> {
216 def hasdir(&self, d: PyObject) -> PyResult<PyBool> {
217 let d = d.extract::<PyBytes>(py)?;
217 let d = d.extract::<PyBytes>(py)?;
218 Ok(self.inner(py).borrow_mut()
218 Ok(self.inner(py).borrow_mut()
219 .has_dir(HgPath::new(d.data(py)))
219 .has_dir(HgPath::new(d.data(py)))
220 .map_err(|e| {
220 .map_err(|e| {
221 PyErr::new::<exc::ValueError, _>(py, e.to_string())
221 PyErr::new::<exc::ValueError, _>(py, e.to_string())
222 })?
222 })?
223 .to_py_object(py))
223 .to_py_object(py))
224 }
224 }
225
225
226 def write_v1(
226 def write_v1(
227 &self,
227 &self,
228 p1: PyObject,
228 p1: PyObject,
229 p2: PyObject,
229 p2: PyObject,
230 ) -> PyResult<PyBytes> {
230 ) -> PyResult<PyBytes> {
231 let inner = self.inner(py).borrow();
231 let inner = self.inner(py).borrow();
232 let parents = DirstateParents {
232 let parents = DirstateParents {
233 p1: extract_node_id(py, &p1)?,
233 p1: extract_node_id(py, &p1)?,
234 p2: extract_node_id(py, &p2)?,
234 p2: extract_node_id(py, &p2)?,
235 };
235 };
236 let result = inner.pack_v1(parents);
236 let result = inner.pack_v1(parents);
237 match result {
237 match result {
238 Ok(packed) => Ok(PyBytes::new(py, &packed)),
238 Ok(packed) => Ok(PyBytes::new(py, &packed)),
239 Err(_) => Err(PyErr::new::<exc::OSError, _>(
239 Err(_) => Err(PyErr::new::<exc::OSError, _>(
240 py,
240 py,
241 "Dirstate error".to_string(),
241 "Dirstate error".to_string(),
242 )),
242 )),
243 }
243 }
244 }
244 }
245
245
246 /// Returns new data together with whether that data should be appended to
246 /// Returns new data together with whether that data should be appended to
247 /// the existing data file whose content is at `self.on_disk` (True),
247 /// the existing data file whose content is at `self.on_disk` (True),
248 /// instead of written to a new data file (False).
248 /// instead of written to a new data file (False).
249 def write_v2(
249 def write_v2(
250 &self,
250 &self,
251 write_mode: usize,
251 write_mode: usize,
252 ) -> PyResult<PyObject> {
252 ) -> PyResult<PyObject> {
253 let inner = self.inner(py).borrow();
253 let inner = self.inner(py).borrow();
254 let rust_write_mode = match write_mode {
254 let rust_write_mode = match write_mode {
255 0 => DirstateMapWriteMode::Auto,
255 0 => DirstateMapWriteMode::Auto,
256 1 => DirstateMapWriteMode::ForceNewDataFile,
256 1 => DirstateMapWriteMode::ForceNewDataFile,
257 2 => DirstateMapWriteMode::ForceAppend,
257 _ => DirstateMapWriteMode::Auto, // XXX should we error out?
258 _ => DirstateMapWriteMode::Auto, // XXX should we error out?
258 };
259 };
259 let result = inner.pack_v2(rust_write_mode);
260 let result = inner.pack_v2(rust_write_mode);
260 match result {
261 match result {
261 Ok((packed, tree_metadata, append, _old_data_size)) => {
262 Ok((packed, tree_metadata, append, _old_data_size)) => {
262 let packed = PyBytes::new(py, &packed);
263 let packed = PyBytes::new(py, &packed);
263 let tree_metadata = PyBytes::new(py, tree_metadata.as_bytes());
264 let tree_metadata = PyBytes::new(py, tree_metadata.as_bytes());
264 let tuple = (packed, tree_metadata, append);
265 let tuple = (packed, tree_metadata, append);
265 Ok(tuple.to_py_object(py).into_object())
266 Ok(tuple.to_py_object(py).into_object())
266 },
267 },
267 Err(_) => Err(PyErr::new::<exc::OSError, _>(
268 Err(_) => Err(PyErr::new::<exc::OSError, _>(
268 py,
269 py,
269 "Dirstate error".to_string(),
270 "Dirstate error".to_string(),
270 )),
271 )),
271 }
272 }
272 }
273 }
273
274
    /// Build a dict mapping each case-normalized path to its actual path in
    /// the dirstate, skipping removed entries.
    def filefoldmapasdict(&self) -> PyResult<PyDict> {
        let dict = PyDict::new(py);
        for item in self.inner(py).borrow_mut().iter() {
            let (path, entry) = item.map_err(|e| v2_error(py, e))?;
            if !entry.removed() {
                let key = normalize_case(path);
                let value = path;
                dict.set_item(
                    py,
                    PyBytes::new(py, key.as_bytes()).into_object(),
                    PyBytes::new(py, value.as_bytes()).into_object(),
                )?;
            }
        }
        Ok(dict)
    }
290
291
291 def __len__(&self) -> PyResult<usize> {
292 def __len__(&self) -> PyResult<usize> {
292 Ok(self.inner(py).borrow().len())
293 Ok(self.inner(py).borrow().len())
293 }
294 }
294
295
295 def __contains__(&self, key: PyObject) -> PyResult<bool> {
296 def __contains__(&self, key: PyObject) -> PyResult<bool> {
296 let key = key.extract::<PyBytes>(py)?;
297 let key = key.extract::<PyBytes>(py)?;
297 self.inner(py)
298 self.inner(py)
298 .borrow()
299 .borrow()
299 .contains_key(HgPath::new(key.data(py)))
300 .contains_key(HgPath::new(key.data(py)))
300 .map_err(|e| v2_error(py, e))
301 .map_err(|e| v2_error(py, e))
301 }
302 }
302
303
    /// Return the dirstate entry for `key`, raising `KeyError` when absent.
    def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
        let key = key.extract::<PyBytes>(py)?;
        let key = HgPath::new(key.data(py));
        match self
            .inner(py)
            .borrow()
            .get(key)
            .map_err(|e| v2_error(py, e))?
        {
            Some(entry) => {
                Ok(DirstateItem::new_as_pyobject(py, entry)?)
            },
            None => Err(PyErr::new::<exc::KeyError, _>(
                py,
                String::from_utf8_lossy(key.as_bytes()),
            )),
        }
    }
321
322
322 def keys(&self) -> PyResult<DirstateMapKeysIterator> {
323 def keys(&self) -> PyResult<DirstateMapKeysIterator> {
323 let leaked_ref = self.inner(py).leak_immutable();
324 let leaked_ref = self.inner(py).leak_immutable();
324 DirstateMapKeysIterator::from_inner(
325 DirstateMapKeysIterator::from_inner(
325 py,
326 py,
326 unsafe { leaked_ref.map(py, |o| o.iter()) },
327 unsafe { leaked_ref.map(py, |o| o.iter()) },
327 )
328 )
328 }
329 }
329
330
330 def items(&self) -> PyResult<DirstateMapItemsIterator> {
331 def items(&self) -> PyResult<DirstateMapItemsIterator> {
331 let leaked_ref = self.inner(py).leak_immutable();
332 let leaked_ref = self.inner(py).leak_immutable();
332 DirstateMapItemsIterator::from_inner(
333 DirstateMapItemsIterator::from_inner(
333 py,
334 py,
334 unsafe { leaked_ref.map(py, |o| o.iter()) },
335 unsafe { leaked_ref.map(py, |o| o.iter()) },
335 )
336 )
336 }
337 }
337
338
338 def __iter__(&self) -> PyResult<DirstateMapKeysIterator> {
339 def __iter__(&self) -> PyResult<DirstateMapKeysIterator> {
339 let leaked_ref = self.inner(py).leak_immutable();
340 let leaked_ref = self.inner(py).leak_immutable();
340 DirstateMapKeysIterator::from_inner(
341 DirstateMapKeysIterator::from_inner(
341 py,
342 py,
342 unsafe { leaked_ref.map(py, |o| o.iter()) },
343 unsafe { leaked_ref.map(py, |o| o.iter()) },
343 )
344 )
344 }
345 }
345
346
346 // TODO all copymap* methods, see docstring above
347 // TODO all copymap* methods, see docstring above
347 def copymapcopy(&self) -> PyResult<PyDict> {
348 def copymapcopy(&self) -> PyResult<PyDict> {
348 let dict = PyDict::new(py);
349 let dict = PyDict::new(py);
349 for item in self.inner(py).borrow().copy_map_iter() {
350 for item in self.inner(py).borrow().copy_map_iter() {
350 let (key, value) = item.map_err(|e| v2_error(py, e))?;
351 let (key, value) = item.map_err(|e| v2_error(py, e))?;
351 dict.set_item(
352 dict.set_item(
352 py,
353 py,
353 PyBytes::new(py, key.as_bytes()),
354 PyBytes::new(py, key.as_bytes()),
354 PyBytes::new(py, value.as_bytes()),
355 PyBytes::new(py, value.as_bytes()),
355 )?;
356 )?;
356 }
357 }
357 Ok(dict)
358 Ok(dict)
358 }
359 }
359
360
360 def copymapgetitem(&self, key: PyObject) -> PyResult<PyBytes> {
361 def copymapgetitem(&self, key: PyObject) -> PyResult<PyBytes> {
361 let key = key.extract::<PyBytes>(py)?;
362 let key = key.extract::<PyBytes>(py)?;
362 match self
363 match self
363 .inner(py)
364 .inner(py)
364 .borrow()
365 .borrow()
365 .copy_map_get(HgPath::new(key.data(py)))
366 .copy_map_get(HgPath::new(key.data(py)))
366 .map_err(|e| v2_error(py, e))?
367 .map_err(|e| v2_error(py, e))?
367 {
368 {
368 Some(copy) => Ok(PyBytes::new(py, copy.as_bytes())),
369 Some(copy) => Ok(PyBytes::new(py, copy.as_bytes())),
369 None => Err(PyErr::new::<exc::KeyError, _>(
370 None => Err(PyErr::new::<exc::KeyError, _>(
370 py,
371 py,
371 String::from_utf8_lossy(key.data(py)),
372 String::from_utf8_lossy(key.data(py)),
372 )),
373 )),
373 }
374 }
374 }
375 }
375 def copymap(&self) -> PyResult<CopyMap> {
376 def copymap(&self) -> PyResult<CopyMap> {
376 CopyMap::from_inner(py, self.clone_ref(py))
377 CopyMap::from_inner(py, self.clone_ref(py))
377 }
378 }
378
379
379 def copymaplen(&self) -> PyResult<usize> {
380 def copymaplen(&self) -> PyResult<usize> {
380 Ok(self.inner(py).borrow().copy_map_len())
381 Ok(self.inner(py).borrow().copy_map_len())
381 }
382 }
382 def copymapcontains(&self, key: PyObject) -> PyResult<bool> {
383 def copymapcontains(&self, key: PyObject) -> PyResult<bool> {
383 let key = key.extract::<PyBytes>(py)?;
384 let key = key.extract::<PyBytes>(py)?;
384 self.inner(py)
385 self.inner(py)
385 .borrow()
386 .borrow()
386 .copy_map_contains_key(HgPath::new(key.data(py)))
387 .copy_map_contains_key(HgPath::new(key.data(py)))
387 .map_err(|e| v2_error(py, e))
388 .map_err(|e| v2_error(py, e))
388 }
389 }
389 def copymapget(
390 def copymapget(
390 &self,
391 &self,
391 key: PyObject,
392 key: PyObject,
392 default: Option<PyObject>
393 default: Option<PyObject>
393 ) -> PyResult<Option<PyObject>> {
394 ) -> PyResult<Option<PyObject>> {
394 let key = key.extract::<PyBytes>(py)?;
395 let key = key.extract::<PyBytes>(py)?;
395 match self
396 match self
396 .inner(py)
397 .inner(py)
397 .borrow()
398 .borrow()
398 .copy_map_get(HgPath::new(key.data(py)))
399 .copy_map_get(HgPath::new(key.data(py)))
399 .map_err(|e| v2_error(py, e))?
400 .map_err(|e| v2_error(py, e))?
400 {
401 {
401 Some(copy) => Ok(Some(
402 Some(copy) => Ok(Some(
402 PyBytes::new(py, copy.as_bytes()).into_object(),
403 PyBytes::new(py, copy.as_bytes()).into_object(),
403 )),
404 )),
404 None => Ok(default),
405 None => Ok(default),
405 }
406 }
406 }
407 }
407 def copymapsetitem(
408 def copymapsetitem(
408 &self,
409 &self,
409 key: PyObject,
410 key: PyObject,
410 value: PyObject
411 value: PyObject
411 ) -> PyResult<PyObject> {
412 ) -> PyResult<PyObject> {
412 let key = key.extract::<PyBytes>(py)?;
413 let key = key.extract::<PyBytes>(py)?;
413 let value = value.extract::<PyBytes>(py)?;
414 let value = value.extract::<PyBytes>(py)?;
414 self.inner(py)
415 self.inner(py)
415 .borrow_mut()
416 .borrow_mut()
416 .copy_map_insert(
417 .copy_map_insert(
417 HgPath::new(key.data(py)),
418 HgPath::new(key.data(py)),
418 HgPath::new(value.data(py)),
419 HgPath::new(value.data(py)),
419 )
420 )
420 .map_err(|e| v2_error(py, e))?;
421 .map_err(|e| v2_error(py, e))?;
421 Ok(py.None())
422 Ok(py.None())
422 }
423 }
423 def copymappop(
424 def copymappop(
424 &self,
425 &self,
425 key: PyObject,
426 key: PyObject,
426 default: Option<PyObject>
427 default: Option<PyObject>
427 ) -> PyResult<Option<PyObject>> {
428 ) -> PyResult<Option<PyObject>> {
428 let key = key.extract::<PyBytes>(py)?;
429 let key = key.extract::<PyBytes>(py)?;
429 match self
430 match self
430 .inner(py)
431 .inner(py)
431 .borrow_mut()
432 .borrow_mut()
432 .copy_map_remove(HgPath::new(key.data(py)))
433 .copy_map_remove(HgPath::new(key.data(py)))
433 .map_err(|e| v2_error(py, e))?
434 .map_err(|e| v2_error(py, e))?
434 {
435 {
435 Some(copy) => Ok(Some(
436 Some(copy) => Ok(Some(
436 PyBytes::new(py, copy.as_bytes()).into_object(),
437 PyBytes::new(py, copy.as_bytes()).into_object(),
437 )),
438 )),
438 None => Ok(default),
439 None => Ok(default),
439 }
440 }
440 }
441 }
441
442
442 def copymapiter(&self) -> PyResult<CopyMapKeysIterator> {
443 def copymapiter(&self) -> PyResult<CopyMapKeysIterator> {
443 let leaked_ref = self.inner(py).leak_immutable();
444 let leaked_ref = self.inner(py).leak_immutable();
444 CopyMapKeysIterator::from_inner(
445 CopyMapKeysIterator::from_inner(
445 py,
446 py,
446 unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
447 unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
447 )
448 )
448 }
449 }
449
450
450 def copymapitemsiter(&self) -> PyResult<CopyMapItemsIterator> {
451 def copymapitemsiter(&self) -> PyResult<CopyMapItemsIterator> {
451 let leaked_ref = self.inner(py).leak_immutable();
452 let leaked_ref = self.inner(py).leak_immutable();
452 CopyMapItemsIterator::from_inner(
453 CopyMapItemsIterator::from_inner(
453 py,
454 py,
454 unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
455 unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
455 )
456 )
456 }
457 }
457
458
458 def tracked_dirs(&self) -> PyResult<PyList> {
459 def tracked_dirs(&self) -> PyResult<PyList> {
459 let dirs = PyList::new(py, &[]);
460 let dirs = PyList::new(py, &[]);
460 for path in self.inner(py).borrow_mut().iter_tracked_dirs()
461 for path in self.inner(py).borrow_mut().iter_tracked_dirs()
461 .map_err(|e |dirstate_error(py, e))?
462 .map_err(|e |dirstate_error(py, e))?
462 {
463 {
463 let path = path.map_err(|e| v2_error(py, e))?;
464 let path = path.map_err(|e| v2_error(py, e))?;
464 let path = PyBytes::new(py, path.as_bytes());
465 let path = PyBytes::new(py, path.as_bytes());
465 dirs.append(py, path.into_object())
466 dirs.append(py, path.into_object())
466 }
467 }
467 Ok(dirs)
468 Ok(dirs)
468 }
469 }
469
470
470 def setparents_fixup(&self) -> PyResult<PyDict> {
471 def setparents_fixup(&self) -> PyResult<PyDict> {
471 let dict = PyDict::new(py);
472 let dict = PyDict::new(py);
472 let copies = self.inner(py).borrow_mut().setparents_fixup();
473 let copies = self.inner(py).borrow_mut().setparents_fixup();
473 for (key, value) in copies.map_err(|e| v2_error(py, e))? {
474 for (key, value) in copies.map_err(|e| v2_error(py, e))? {
474 dict.set_item(
475 dict.set_item(
475 py,
476 py,
476 PyBytes::new(py, key.as_bytes()),
477 PyBytes::new(py, key.as_bytes()),
477 PyBytes::new(py, value.as_bytes()),
478 PyBytes::new(py, value.as_bytes()),
478 )?;
479 )?;
479 }
480 }
480 Ok(dict)
481 Ok(dict)
481 }
482 }
482
483
483 def debug_iter(&self, all: bool) -> PyResult<PyList> {
484 def debug_iter(&self, all: bool) -> PyResult<PyList> {
484 let dirs = PyList::new(py, &[]);
485 let dirs = PyList::new(py, &[]);
485 for item in self.inner(py).borrow().debug_iter(all) {
486 for item in self.inner(py).borrow().debug_iter(all) {
486 let (path, (state, mode, size, mtime)) =
487 let (path, (state, mode, size, mtime)) =
487 item.map_err(|e| v2_error(py, e))?;
488 item.map_err(|e| v2_error(py, e))?;
488 let path = PyBytes::new(py, path.as_bytes());
489 let path = PyBytes::new(py, path.as_bytes());
489 let item = (path, state, mode, size, mtime);
490 let item = (path, state, mode, size, mtime);
490 dirs.append(py, item.to_py_object(py).into_object())
491 dirs.append(py, item.to_py_object(py).into_object())
491 }
492 }
492 Ok(dirs)
493 Ok(dirs)
493 }
494 }
494 });
495 });
495
496
496 impl DirstateMap {
497 impl DirstateMap {
497 pub fn get_inner_mut<'a>(
498 pub fn get_inner_mut<'a>(
498 &'a self,
499 &'a self,
499 py: Python<'a>,
500 py: Python<'a>,
500 ) -> RefMut<'a, OwningDirstateMap> {
501 ) -> RefMut<'a, OwningDirstateMap> {
501 self.inner(py).borrow_mut()
502 self.inner(py).borrow_mut()
502 }
503 }
503 fn translate_key(
504 fn translate_key(
504 py: Python,
505 py: Python,
505 res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
506 res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
506 ) -> PyResult<Option<PyBytes>> {
507 ) -> PyResult<Option<PyBytes>> {
507 let (f, _entry) = res.map_err(|e| v2_error(py, e))?;
508 let (f, _entry) = res.map_err(|e| v2_error(py, e))?;
508 Ok(Some(PyBytes::new(py, f.as_bytes())))
509 Ok(Some(PyBytes::new(py, f.as_bytes())))
509 }
510 }
510 fn translate_key_value(
511 fn translate_key_value(
511 py: Python,
512 py: Python,
512 res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
513 res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
513 ) -> PyResult<Option<(PyBytes, PyObject)>> {
514 ) -> PyResult<Option<(PyBytes, PyObject)>> {
514 let (f, entry) = res.map_err(|e| v2_error(py, e))?;
515 let (f, entry) = res.map_err(|e| v2_error(py, e))?;
515 Ok(Some((
516 Ok(Some((
516 PyBytes::new(py, f.as_bytes()),
517 PyBytes::new(py, f.as_bytes()),
517 DirstateItem::new_as_pyobject(py, entry)?,
518 DirstateItem::new_as_pyobject(py, entry)?,
518 )))
519 )))
519 }
520 }
520 }
521 }
521
522
522 py_shared_iterator!(
523 py_shared_iterator!(
523 DirstateMapKeysIterator,
524 DirstateMapKeysIterator,
524 UnsafePyLeaked<StateMapIter<'static>>,
525 UnsafePyLeaked<StateMapIter<'static>>,
525 DirstateMap::translate_key,
526 DirstateMap::translate_key,
526 Option<PyBytes>
527 Option<PyBytes>
527 );
528 );
528
529
529 py_shared_iterator!(
530 py_shared_iterator!(
530 DirstateMapItemsIterator,
531 DirstateMapItemsIterator,
531 UnsafePyLeaked<StateMapIter<'static>>,
532 UnsafePyLeaked<StateMapIter<'static>>,
532 DirstateMap::translate_key_value,
533 DirstateMap::translate_key_value,
533 Option<(PyBytes, PyObject)>
534 Option<(PyBytes, PyObject)>
534 );
535 );
535
536
536 fn extract_node_id(py: Python, obj: &PyObject) -> PyResult<Node> {
537 fn extract_node_id(py: Python, obj: &PyObject) -> PyResult<Node> {
537 let bytes = obj.extract::<PyBytes>(py)?;
538 let bytes = obj.extract::<PyBytes>(py)?;
538 match bytes.data(py).try_into() {
539 match bytes.data(py).try_into() {
539 Ok(s) => Ok(s),
540 Ok(s) => Ok(s),
540 Err(e) => Err(PyErr::new::<exc::ValueError, _>(py, e.to_string())),
541 Err(e) => Err(PyErr::new::<exc::ValueError, _>(py, e.to_string())),
541 }
542 }
542 }
543 }
543
544
544 pub(super) fn v2_error(py: Python<'_>, _: DirstateV2ParseError) -> PyErr {
545 pub(super) fn v2_error(py: Python<'_>, _: DirstateV2ParseError) -> PyErr {
545 PyErr::new::<exc::ValueError, _>(py, "corrupted dirstate-v2")
546 PyErr::new::<exc::ValueError, _>(py, "corrupted dirstate-v2")
546 }
547 }
547
548
548 fn dirstate_error(py: Python<'_>, e: DirstateError) -> PyErr {
549 fn dirstate_error(py: Python<'_>, e: DirstateError) -> PyErr {
549 PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
550 PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
550 }
551 }
@@ -1,433 +1,433 b''
1 =====================================================================
1 =====================================================================
2 Check potential race conditions between a status and other operations
2 Check potential race conditions between a status and other operations
3 =====================================================================
3 =====================================================================
4
4
5 #testcases dirstate-v1 dirstate-v2
5 #testcases dirstate-v1 dirstate-v2
6
6
7 The `hg status` command can run without the wlock, however it might end up
7 The `hg status` command can run without the wlock, however it might end up
8 having to update the on-disk dirstate files, for example to mark ambiguous
8 having to update the on-disk dirstate files, for example to mark ambiguous
9 files as clean, or to update directory caches information with dirstate-v2.
9 files as clean, or to update directory caches information with dirstate-v2.
10
10
11
11
12 If another process updates the dirstate in the meantime we might run into
12 If another process updates the dirstate in the meantime we might run into
13 trouble. Especially, commands doing semantic changes like `hg add` or
13 trouble. Especially, commands doing semantic changes like `hg add` or
14 `hg commit` should not see their update erased by a concurrent status.
14 `hg commit` should not see their update erased by a concurrent status.
15
15
16 Unlike commands like `add` or `commit`, `status` only writes the dirstate
16 Unlike commands like `add` or `commit`, `status` only writes the dirstate
17 to update caches, no actual information is lost if we fail to write to disk.
17 to update caches, no actual information is lost if we fail to write to disk.
18
18
19
19
20 This test file is meant to test various cases where such parallel operations
20 This test file is meant to test various cases where such parallel operations
21 between a status with reasons to update the dirstate and another semantic
21 between a status with reasons to update the dirstate and another semantic
22 changes happen.
22 changes happen.
23
23
24
24
25 Setup
25 Setup
26 =====
26 =====
27
27
28 $ cat >> $HGRCPATH << EOF
28 $ cat >> $HGRCPATH << EOF
29 > [storage]
29 > [storage]
30 > dirstate-v2.slow-path=allow
30 > dirstate-v2.slow-path=allow
31 > EOF
31 > EOF
32
32
33 #if dirstate-v2
33 #if dirstate-v2
34 $ cat >> $HGRCPATH << EOF
34 $ cat >> $HGRCPATH << EOF
35 > [format]
35 > [format]
36 > use-dirstate-v2=yes
36 > use-dirstate-v2=yes
37 > EOF
37 > EOF
38 #else
38 #else
39 $ cat >> $HGRCPATH << EOF
39 $ cat >> $HGRCPATH << EOF
40 > [format]
40 > [format]
41 > use-dirstate-v2=no
41 > use-dirstate-v2=no
42 > EOF
42 > EOF
43 #endif
43 #endif
44
44
45 $ directories="dir dir/nested dir2"
45 $ directories="dir dir/nested dir2"
46 $ first_files="dir/nested/a dir/b dir/c dir/d dir2/e f"
46 $ first_files="dir/nested/a dir/b dir/c dir/d dir2/e f"
47 $ second_files="g dir/nested/h dir/i dir/j dir2/k dir2/l dir/nested/m"
47 $ second_files="g dir/nested/h dir/i dir/j dir2/k dir2/l dir/nested/m"
48 $ extra_files="dir/n dir/o p q"
48 $ extra_files="dir/n dir/o p q"
49
49
50 $ hg init reference-repo
50 $ hg init reference-repo
51 $ cd reference-repo
51 $ cd reference-repo
52 $ mkdir -p dir/nested dir2
52 $ mkdir -p dir/nested dir2
53 $ touch -t 200001010000 $first_files $directories
53 $ touch -t 200001010000 $first_files $directories
54 $ hg commit -Aqm "recreate a bunch of files to facilitate dirstate-v2 append"
54 $ hg commit -Aqm "recreate a bunch of files to facilitate dirstate-v2 append"
55 $ touch -t 200001010010 $second_files $directories
55 $ touch -t 200001010010 $second_files $directories
56 $ hg commit -Aqm "more files to have two commits"
56 $ hg commit -Aqm "more files to have two commits"
57 $ hg log -G -v
57 $ hg log -G -v
58 @ changeset: 1:c349430a1631
58 @ changeset: 1:c349430a1631
59 | tag: tip
59 | tag: tip
60 | user: test
60 | user: test
61 | date: Thu Jan 01 00:00:00 1970 +0000
61 | date: Thu Jan 01 00:00:00 1970 +0000
62 | files: dir/i dir/j dir/nested/h dir/nested/m dir2/k dir2/l g
62 | files: dir/i dir/j dir/nested/h dir/nested/m dir2/k dir2/l g
63 | description:
63 | description:
64 | more files to have two commits
64 | more files to have two commits
65 |
65 |
66 |
66 |
67 o changeset: 0:4f23db756b09
67 o changeset: 0:4f23db756b09
68 user: test
68 user: test
69 date: Thu Jan 01 00:00:00 1970 +0000
69 date: Thu Jan 01 00:00:00 1970 +0000
70 files: dir/b dir/c dir/d dir/nested/a dir2/e f
70 files: dir/b dir/c dir/d dir/nested/a dir2/e f
71 description:
71 description:
72 recreate a bunch of files to facilitate dirstate-v2 append
72 recreate a bunch of files to facilitate dirstate-v2 append
73
73
74
74
75 $ hg manifest
75 $ hg manifest
76 dir/b
76 dir/b
77 dir/c
77 dir/c
78 dir/d
78 dir/d
79 dir/i
79 dir/i
80 dir/j
80 dir/j
81 dir/nested/a
81 dir/nested/a
82 dir/nested/h
82 dir/nested/h
83 dir/nested/m
83 dir/nested/m
84 dir2/e
84 dir2/e
85 dir2/k
85 dir2/k
86 dir2/l
86 dir2/l
87 f
87 f
88 g
88 g
89
89
90 Add some unknown files and refresh the dirstate
90 Add some unknown files and refresh the dirstate
91
91
92 $ touch -t 200001010020 $extra_files
92 $ touch -t 200001010020 $extra_files
93 $ hg add dir/o
93 $ hg add dir/o
94 $ hg remove dir/nested/m
94 $ hg remove dir/nested/m
95
95
96 $ hg st
96 $ hg st --config devel.dirstate.v2.data_update_mode=force-new
97 A dir/o
97 A dir/o
98 R dir/nested/m
98 R dir/nested/m
99 ? dir/n
99 ? dir/n
100 ? p
100 ? p
101 ? q
101 ? q
102 $ hg debugstate
102 $ hg debugstate
103 n 644 0 2000-01-01 00:00:00 dir/b
103 n 644 0 2000-01-01 00:00:00 dir/b
104 n 644 0 2000-01-01 00:00:00 dir/c
104 n 644 0 2000-01-01 00:00:00 dir/c
105 n 644 0 2000-01-01 00:00:00 dir/d
105 n 644 0 2000-01-01 00:00:00 dir/d
106 n 644 0 2000-01-01 00:10:00 dir/i
106 n 644 0 2000-01-01 00:10:00 dir/i
107 n 644 0 2000-01-01 00:10:00 dir/j
107 n 644 0 2000-01-01 00:10:00 dir/j
108 n 644 0 2000-01-01 00:00:00 dir/nested/a
108 n 644 0 2000-01-01 00:00:00 dir/nested/a
109 n 644 0 2000-01-01 00:10:00 dir/nested/h
109 n 644 0 2000-01-01 00:10:00 dir/nested/h
110 r ?????????????????????????????????? dir/nested/m (glob)
110 r ?????????????????????????????????? dir/nested/m (glob)
111 a ?????????????????????????????????? dir/o (glob)
111 a ?????????????????????????????????? dir/o (glob)
112 n 644 0 2000-01-01 00:00:00 dir2/e
112 n 644 0 2000-01-01 00:00:00 dir2/e
113 n 644 0 2000-01-01 00:10:00 dir2/k
113 n 644 0 2000-01-01 00:10:00 dir2/k
114 n 644 0 2000-01-01 00:10:00 dir2/l
114 n 644 0 2000-01-01 00:10:00 dir2/l
115 n 644 0 2000-01-01 00:00:00 f
115 n 644 0 2000-01-01 00:00:00 f
116 n 644 0 2000-01-01 00:10:00 g
116 n 644 0 2000-01-01 00:10:00 g
117 $ hg debugstate > ../reference
117 $ hg debugstate > ../reference
118 $ cd ..
118 $ cd ..
119
119
120 Explain / verify the test principles
120 Explain / verify the test principles
121 ------------------------------------
121 ------------------------------------
122
122
123 First, we can properly copy the reference
123 First, we can properly copy the reference
124
124
125 $ cp -a reference-repo sanity-check
125 $ cp -a reference-repo sanity-check
126 $ cd sanity-check
126 $ cd sanity-check
127 $ hg debugstate
127 $ hg debugstate
128 n 644 0 2000-01-01 00:00:00 dir/b
128 n 644 0 2000-01-01 00:00:00 dir/b
129 n 644 0 2000-01-01 00:00:00 dir/c
129 n 644 0 2000-01-01 00:00:00 dir/c
130 n 644 0 2000-01-01 00:00:00 dir/d
130 n 644 0 2000-01-01 00:00:00 dir/d
131 n 644 0 2000-01-01 00:10:00 dir/i
131 n 644 0 2000-01-01 00:10:00 dir/i
132 n 644 0 2000-01-01 00:10:00 dir/j
132 n 644 0 2000-01-01 00:10:00 dir/j
133 n 644 0 2000-01-01 00:00:00 dir/nested/a
133 n 644 0 2000-01-01 00:00:00 dir/nested/a
134 n 644 0 2000-01-01 00:10:00 dir/nested/h
134 n 644 0 2000-01-01 00:10:00 dir/nested/h
135 r ?????????????????????????????????? dir/nested/m (glob)
135 r ?????????????????????????????????? dir/nested/m (glob)
136 a ?????????????????????????????????? dir/o (glob)
136 a ?????????????????????????????????? dir/o (glob)
137 n 644 0 2000-01-01 00:00:00 dir2/e
137 n 644 0 2000-01-01 00:00:00 dir2/e
138 n 644 0 2000-01-01 00:10:00 dir2/k
138 n 644 0 2000-01-01 00:10:00 dir2/k
139 n 644 0 2000-01-01 00:10:00 dir2/l
139 n 644 0 2000-01-01 00:10:00 dir2/l
140 n 644 0 2000-01-01 00:00:00 f
140 n 644 0 2000-01-01 00:00:00 f
141 n 644 0 2000-01-01 00:10:00 g
141 n 644 0 2000-01-01 00:10:00 g
142 $ hg debugstate > ../post-copy
142 $ hg debugstate > ../post-copy
143 $ diff ../reference ../post-copy
143 $ diff ../reference ../post-copy
144
144
145 And status thinks the cache is in a proper state
145 And status thinks the cache is in a proper state
146
146
147 $ hg st
147 $ hg st
148 A dir/o
148 A dir/o
149 R dir/nested/m
149 R dir/nested/m
150 ? dir/n
150 ? dir/n
151 ? p
151 ? p
152 ? q
152 ? q
153 $ hg debugstate
153 $ hg debugstate
154 n 644 0 2000-01-01 00:00:00 dir/b
154 n 644 0 2000-01-01 00:00:00 dir/b
155 n 644 0 2000-01-01 00:00:00 dir/c
155 n 644 0 2000-01-01 00:00:00 dir/c
156 n 644 0 2000-01-01 00:00:00 dir/d
156 n 644 0 2000-01-01 00:00:00 dir/d
157 n 644 0 2000-01-01 00:10:00 dir/i
157 n 644 0 2000-01-01 00:10:00 dir/i
158 n 644 0 2000-01-01 00:10:00 dir/j
158 n 644 0 2000-01-01 00:10:00 dir/j
159 n 644 0 2000-01-01 00:00:00 dir/nested/a
159 n 644 0 2000-01-01 00:00:00 dir/nested/a
160 n 644 0 2000-01-01 00:10:00 dir/nested/h
160 n 644 0 2000-01-01 00:10:00 dir/nested/h
161 r ?????????????????????????????????? dir/nested/m (glob)
161 r ?????????????????????????????????? dir/nested/m (glob)
162 a ?????????????????????????????????? dir/o (glob)
162 a ?????????????????????????????????? dir/o (glob)
163 n 644 0 2000-01-01 00:00:00 dir2/e
163 n 644 0 2000-01-01 00:00:00 dir2/e
164 n 644 0 2000-01-01 00:10:00 dir2/k
164 n 644 0 2000-01-01 00:10:00 dir2/k
165 n 644 0 2000-01-01 00:10:00 dir2/l
165 n 644 0 2000-01-01 00:10:00 dir2/l
166 n 644 0 2000-01-01 00:00:00 f
166 n 644 0 2000-01-01 00:00:00 f
167 n 644 0 2000-01-01 00:10:00 g
167 n 644 0 2000-01-01 00:10:00 g
168 $ hg debugstate > ../post-status
168 $ hg debugstate > ../post-status
169 $ diff ../reference ../post-status
169 $ diff ../reference ../post-status
170
170
171 Then we can start a status that:
171 Then we can start a status that:
172 - has some update to do (the touch call)
172 - has some update to do (the touch call)
173 - will wait AFTER running status, but before updating the cache on disk
173 - will wait AFTER running status, but before updating the cache on disk
174
174
175 $ touch -t 200001010001 dir/c
175 $ touch -t 200001010001 dir/c
176 $ hg st >$TESTTMP/status-race-lock.out 2>$TESTTMP/status-race-lock.log \
176 $ hg st >$TESTTMP/status-race-lock.out 2>$TESTTMP/status-race-lock.log \
177 > --config rhg.on-unsupported=abort \
177 > --config rhg.on-unsupported=abort \
178 > --config devel.sync.status.pre-dirstate-write-file=$TESTTMP/status-race-lock \
178 > --config devel.sync.status.pre-dirstate-write-file=$TESTTMP/status-race-lock \
179 > &
179 > &
180 $ $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/status-race-lock.waiting
180 $ $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/status-race-lock.waiting
181
181
182 We check it runs the status first by modifying a file and updating another timestamp
182 We check it runs the status first by modifying a file and updating another timestamp
183
183
184 $ touch -t 200001010003 dir/i
184 $ touch -t 200001010003 dir/i
185 $ echo babar > dir/j
185 $ echo babar > dir/j
186 $ touch $TESTTMP/status-race-lock
186 $ touch $TESTTMP/status-race-lock
187 $ wait
187 $ wait
188
188
189 The test process should have reported a status before the change we made,
189 The test process should have reported a status before the change we made,
190 and should have missed the timestamp update
190 and should have missed the timestamp update
191
191
192 $ cat $TESTTMP/status-race-lock.out
192 $ cat $TESTTMP/status-race-lock.out
193 A dir/o
193 A dir/o
194 R dir/nested/m
194 R dir/nested/m
195 ? dir/n
195 ? dir/n
196 ? p
196 ? p
197 ? q
197 ? q
198 $ cat $TESTTMP/status-race-lock.log
198 $ cat $TESTTMP/status-race-lock.log
199 $ hg debugstate | grep dir/c
199 $ hg debugstate | grep dir/c
200 n 644 0 2000-01-01 00:01:00 dir/c
200 n 644 0 2000-01-01 00:01:00 dir/c
201 $ hg debugstate | grep dir/i
201 $ hg debugstate | grep dir/i
202 n 644 0 2000-01-01 00:10:00 dir/i
202 n 644 0 2000-01-01 00:10:00 dir/i
203 $ hg debugstate | grep dir/j
203 $ hg debugstate | grep dir/j
204 n 644 0 2000-01-01 00:10:00 dir/j
204 n 644 0 2000-01-01 00:10:00 dir/j
205
205
206 final cleanup
206 final cleanup
207
207
208 $ rm $TESTTMP/status-race-lock $TESTTMP/status-race-lock.waiting
208 $ rm $TESTTMP/status-race-lock $TESTTMP/status-race-lock.waiting
209 $ cd ..
209 $ cd ..
210
210
211 Actual Testing
211 Actual Testing
212 ==============
212 ==============
213
213
214 Race with a `hg add`
214 Race with a `hg add`
215 -------------------
215 -------------------
216
216
217 $ cp -a reference-repo race-with-add
217 $ cp -a reference-repo race-with-add
218 $ cd race-with-add
218 $ cd race-with-add
219
219
220 spin a `hg status` with some caches to update
220 spin a `hg status` with some caches to update
221
221
222 $ touch -t 200001020001 f
222 $ touch -t 200001020001 f
223 $ hg st >$TESTTMP/status-race-lock.out 2>$TESTTMP/status-race-lock.log \
223 $ hg st >$TESTTMP/status-race-lock.out 2>$TESTTMP/status-race-lock.log \
224 > --config rhg.on-unsupported=abort \
224 > --config rhg.on-unsupported=abort \
225 > --config devel.sync.status.pre-dirstate-write-file=$TESTTMP/status-race-lock \
225 > --config devel.sync.status.pre-dirstate-write-file=$TESTTMP/status-race-lock \
226 > &
226 > &
227 $ $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/status-race-lock.waiting
227 $ $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/status-race-lock.waiting
228
228
229 Add a file
229 Add a file
230
230
231 $ hg add dir/n
231 $ hg add dir/n
232 $ touch $TESTTMP/status-race-lock
232 $ touch $TESTTMP/status-race-lock
233 $ wait
233 $ wait
234
234
235 The file should in a "added" state
235 The file should in a "added" state
236
236
237 $ hg status
237 $ hg status
238 A dir/n (no-rhg !)
238 A dir/n (no-rhg !)
239 A dir/n (rhg no-dirstate-v1 !)
239 A dir/n (rhg no-dirstate-v1 !)
240 A dir/n (missing-correct-output rhg dirstate-v1 !)
240 A dir/n (missing-correct-output rhg dirstate-v1 !)
241 A dir/o
241 A dir/o
242 R dir/nested/m
242 R dir/nested/m
243 ? dir/n (known-bad-output rhg dirstate-v1 !)
243 ? dir/n (known-bad-output rhg dirstate-v1 !)
244 ? p
244 ? p
245 ? q
245 ? q
246
246
247 The status process should return a consistent result and not crash.
247 The status process should return a consistent result and not crash.
248
248
249 $ cat $TESTTMP/status-race-lock.out
249 $ cat $TESTTMP/status-race-lock.out
250 A dir/o
250 A dir/o
251 R dir/nested/m
251 R dir/nested/m
252 ? dir/n
252 ? dir/n
253 ? p
253 ? p
254 ? q
254 ? q
255 $ cat $TESTTMP/status-race-lock.log
255 $ cat $TESTTMP/status-race-lock.log
256 abort: when writing $TESTTMP/race-with-add/.hg/dirstate.*: $ENOENT$ (glob) (known-bad-output rhg dirstate-v2 !)
256 abort: when writing $TESTTMP/race-with-add/.hg/dirstate.*: $ENOENT$ (glob) (known-bad-output rhg dirstate-v2 !)
257
257
258 final cleanup
258 final cleanup
259
259
260 $ rm $TESTTMP/status-race-lock $TESTTMP/status-race-lock.waiting
260 $ rm $TESTTMP/status-race-lock $TESTTMP/status-race-lock.waiting
261 $ cd ..
261 $ cd ..
262
262
263 Race with a `hg commit`
263 Race with a `hg commit`
264 ----------------------
264 ----------------------
265
265
266 $ cp -a reference-repo race-with-commit
266 $ cp -a reference-repo race-with-commit
267 $ cd race-with-commit
267 $ cd race-with-commit
268
268
269 spin a `hg status` with some caches to update
269 spin a `hg status` with some caches to update
270
270
271 $ touch -t 200001020001 dir/j
271 $ touch -t 200001020001 dir/j
272 $ hg st >$TESTTMP/status-race-lock.out 2>$TESTTMP/status-race-lock.log \
272 $ hg st >$TESTTMP/status-race-lock.out 2>$TESTTMP/status-race-lock.log \
273 > --config rhg.on-unsupported=abort \
273 > --config rhg.on-unsupported=abort \
274 > --config devel.sync.status.pre-dirstate-write-file=$TESTTMP/status-race-lock \
274 > --config devel.sync.status.pre-dirstate-write-file=$TESTTMP/status-race-lock \
275 > &
275 > &
276 $ $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/status-race-lock.waiting
276 $ $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/status-race-lock.waiting
277
277
278 Add a file and force the data file rewrite
278 Add a file and force the data file rewrite
279
279
280 $ hg commit -m created-during-status dir/o
280 $ hg commit -m created-during-status dir/o
281 $ touch $TESTTMP/status-race-lock
281 $ touch $TESTTMP/status-race-lock
282 $ wait
282 $ wait
283
283
284 The parent must change and the status should be clean
284 The parent must change and the status should be clean
285
285
286 # XXX rhg misbehaves here
286 # XXX rhg misbehaves here
287 #if no-rhg
287 #if no-rhg
288 $ hg summary
288 $ hg summary
289 parent: 2:2e3b442a2fd4 tip
289 parent: 2:2e3b442a2fd4 tip
290 created-during-status
290 created-during-status
291 branch: default
291 branch: default
292 commit: 1 removed, 3 unknown
292 commit: 1 removed, 3 unknown
293 update: (current)
293 update: (current)
294 phases: 3 draft
294 phases: 3 draft
295 $ hg status
295 $ hg status
296 R dir/nested/m
296 R dir/nested/m
297 ? dir/n
297 ? dir/n
298 ? p
298 ? p
299 ? q
299 ? q
300 #else
300 #else
301 $ hg summary
301 $ hg summary
302 parent: 1:c349430a1631
302 parent: 1:c349430a1631
303 more files to have two commits
303 more files to have two commits
304 branch: default
304 branch: default
305 commit: 1 added, 1 removed, 3 unknown (new branch head)
305 commit: 1 added, 1 removed, 3 unknown (new branch head)
306 update: 1 new changesets (update)
306 update: 1 new changesets (update)
307 phases: 3 draft
307 phases: 3 draft
308 $ hg status
308 $ hg status
309 A dir/o
309 A dir/o
310 R dir/nested/m
310 R dir/nested/m
311 ? dir/n
311 ? dir/n
312 ? p
312 ? p
313 ? q
313 ? q
314 #endif
314 #endif
315
315
316 The status process should return a consistent result and not crash.
316 The status process should return a consistent result and not crash.
317
317
318 $ cat $TESTTMP/status-race-lock.out
318 $ cat $TESTTMP/status-race-lock.out
319 A dir/o
319 A dir/o
320 R dir/nested/m
320 R dir/nested/m
321 ? dir/n
321 ? dir/n
322 ? p
322 ? p
323 ? q
323 ? q
324 $ cat $TESTTMP/status-race-lock.log
324 $ cat $TESTTMP/status-race-lock.log
325 abort: when removing $TESTTMP/race-with-commit/.hg/dirstate.*: $ENOENT$ (glob) (known-bad-output rhg dirstate-v2 !)
325 abort: when removing $TESTTMP/race-with-commit/.hg/dirstate.*: $ENOENT$ (glob) (known-bad-output rhg dirstate-v2 !)
326
326
327 final cleanup
327 final cleanup
328
328
329 $ rm $TESTTMP/status-race-lock $TESTTMP/status-race-lock.waiting
329 $ rm $TESTTMP/status-race-lock $TESTTMP/status-race-lock.waiting
330 $ cd ..
330 $ cd ..
331
331
332 Race with a `hg update`
332 Race with a `hg update`
333 ----------------------
333 ----------------------
334
334
335 $ cp -a reference-repo race-with-update
335 $ cp -a reference-repo race-with-update
336 $ cd race-with-update
336 $ cd race-with-update
337
337
338 spin a `hg status` with some caches to update
338 spin a `hg status` with some caches to update
339
339
340 $ touch -t 200001020001 dir2/k
340 $ touch -t 200001020001 dir2/k
341 $ hg st >$TESTTMP/status-race-lock.out 2>$TESTTMP/status-race-lock.log \
341 $ hg st >$TESTTMP/status-race-lock.out 2>$TESTTMP/status-race-lock.log \
342 > --config rhg.on-unsupported=abort \
342 > --config rhg.on-unsupported=abort \
343 > --config devel.sync.status.pre-dirstate-write-file=$TESTTMP/status-race-lock \
343 > --config devel.sync.status.pre-dirstate-write-file=$TESTTMP/status-race-lock \
344 > &
344 > &
345 $ $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/status-race-lock.waiting
345 $ $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/status-race-lock.waiting
346
346
347 Add a file and force the data file rewrite
347 Add a file and force the data file rewrite
348
348
349 $ hg update ".~1"
349 $ hg update ".~1"
350 0 files updated, 0 files merged, 6 files removed, 0 files unresolved
350 0 files updated, 0 files merged, 6 files removed, 0 files unresolved
351 $ touch $TESTTMP/status-race-lock
351 $ touch $TESTTMP/status-race-lock
352 $ wait
352 $ wait
353
353
354 The parent must change and the status should be clean
354 The parent must change and the status should be clean
355
355
356 $ hg summary
356 $ hg summary
357 parent: 0:4f23db756b09
357 parent: 0:4f23db756b09
358 recreate a bunch of files to facilitate dirstate-v2 append
358 recreate a bunch of files to facilitate dirstate-v2 append
359 branch: default
359 branch: default
360 commit: 1 added, 3 unknown (new branch head)
360 commit: 1 added, 3 unknown (new branch head)
361 update: 1 new changesets (update)
361 update: 1 new changesets (update)
362 phases: 2 draft
362 phases: 2 draft
363 $ hg status
363 $ hg status
364 A dir/o
364 A dir/o
365 ? dir/n
365 ? dir/n
366 ? p
366 ? p
367 ? q
367 ? q
368
368
369 The status process should return a consistent result and not crash.
369 The status process should return a consistent result and not crash.
370
370
371 $ cat $TESTTMP/status-race-lock.out
371 $ cat $TESTTMP/status-race-lock.out
372 A dir/o
372 A dir/o
373 R dir/nested/m
373 R dir/nested/m
374 ? dir/n
374 ? dir/n
375 ? p
375 ? p
376 ? q
376 ? q
377 $ cat $TESTTMP/status-race-lock.log
377 $ cat $TESTTMP/status-race-lock.log
378 abort: when reading $TESTTMP/race-with-update/dir2/k: $ENOENT$ (known-bad-output rhg !)
378 abort: when reading $TESTTMP/race-with-update/dir2/k: $ENOENT$ (known-bad-output rhg !)
379
379
380 final cleanup
380 final cleanup
381
381
382 $ rm $TESTTMP/status-race-lock $TESTTMP/status-race-lock.waiting
382 $ rm $TESTTMP/status-race-lock $TESTTMP/status-race-lock.waiting
383 $ cd ..
383 $ cd ..
384
384
385 Race with another status
385 Race with another status
386 ------------------------
386 ------------------------
387
387
388 $ cp -a reference-repo race-with-status
388 $ cp -a reference-repo race-with-status
389 $ cd race-with-status
389 $ cd race-with-status
390
390
391 spin a `hg status` with some caches to update
391 spin a `hg status` with some caches to update
392
392
393 $ touch -t 200001010030 dir/nested/h
393 $ touch -t 200001010030 dir/nested/h
394 $ hg st >$TESTTMP/status-race-lock.out 2>$TESTTMP/status-race-lock.log \
394 $ hg st >$TESTTMP/status-race-lock.out 2>$TESTTMP/status-race-lock.log \
395 > --config rhg.on-unsupported=abort \
395 > --config rhg.on-unsupported=abort \
396 > --config devel.sync.status.pre-dirstate-write-file=$TESTTMP/status-race-lock \
396 > --config devel.sync.status.pre-dirstate-write-file=$TESTTMP/status-race-lock \
397 > &
397 > &
398 $ $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/status-race-lock.waiting
398 $ $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/status-race-lock.waiting
399
399
400 touch g
400 touch g
401
401
402 $ touch -t 200001010025 g
402 $ touch -t 200001010025 g
403 $ hg status
403 $ hg status
404 A dir/o
404 A dir/o
405 R dir/nested/m
405 R dir/nested/m
406 ? dir/n
406 ? dir/n
407 ? p
407 ? p
408 ? q
408 ? q
409 $ touch $TESTTMP/status-race-lock
409 $ touch $TESTTMP/status-race-lock
410 $ wait
410 $ wait
411
411
412 the first update should be on disk
412 the first update should be on disk
413
413
414 $ hg debugstate --all | grep "g"
414 $ hg debugstate --all | grep "g"
415 n 644 0 2000-01-01 00:25:00 g (no-rhg !)
415 n 644 0 2000-01-01 00:25:00 g (no-rhg !)
416 n 644 0 2000-01-01 00:25:00 g (missing-correct-output rhg !)
416 n 644 0 2000-01-01 00:25:00 g (missing-correct-output rhg !)
417 n 644 0 2000-01-01 00:10:00 g (known-bad-output rhg !)
417 n 644 0 2000-01-01 00:10:00 g (known-bad-output rhg !)
418
418
419 The status process should return a consistent result and not crash.
419 The status process should return a consistent result and not crash.
420
420
421 $ cat $TESTTMP/status-race-lock.out
421 $ cat $TESTTMP/status-race-lock.out
422 A dir/o
422 A dir/o
423 R dir/nested/m
423 R dir/nested/m
424 ? dir/n
424 ? dir/n
425 ? p
425 ? p
426 ? q
426 ? q
427 $ cat $TESTTMP/status-race-lock.log
427 $ cat $TESTTMP/status-race-lock.log
428 abort: when removing $TESTTMP/race-with-status/.hg/dirstate.*: $ENOENT$ (glob) (known-bad-output rhg dirstate-v2 !)
428 abort: when removing $TESTTMP/race-with-status/.hg/dirstate.*: $ENOENT$ (glob) (known-bad-output rhg dirstate-v2 !)
429
429
430 final cleanup
430 final cleanup
431
431
432 $ rm $TESTTMP/status-race-lock $TESTTMP/status-race-lock.waiting
432 $ rm $TESTTMP/status-race-lock $TESTTMP/status-race-lock.waiting
433 $ cd ..
433 $ cd ..
@@ -1,259 +1,474 b''
1 #testcases dirstate-v1 dirstate-v2
1 #testcases dirstate-v1 dirstate-v2
2
2
3 #if dirstate-v2
3 #if dirstate-v2
4 $ cat >> $HGRCPATH << EOF
4 $ cat >> $HGRCPATH << EOF
5 > [format]
5 > [format]
6 > use-dirstate-v2=1
6 > use-dirstate-v2=1
7 > [storage]
7 > [storage]
8 > dirstate-v2.slow-path=allow
8 > dirstate-v2.slow-path=allow
9 > EOF
9 > EOF
10 #endif
10 #endif
11
11
12 ------ Test dirstate._dirs refcounting
12 ------ Test dirstate._dirs refcounting
13
13
14 $ hg init t
14 $ hg init t
15 $ cd t
15 $ cd t
16 $ mkdir -p a/b/c/d
16 $ mkdir -p a/b/c/d
17 $ touch a/b/c/d/x
17 $ touch a/b/c/d/x
18 $ touch a/b/c/d/y
18 $ touch a/b/c/d/y
19 $ touch a/b/c/d/z
19 $ touch a/b/c/d/z
20 $ hg ci -Am m
20 $ hg ci -Am m
21 adding a/b/c/d/x
21 adding a/b/c/d/x
22 adding a/b/c/d/y
22 adding a/b/c/d/y
23 adding a/b/c/d/z
23 adding a/b/c/d/z
24 $ hg mv a z
24 $ hg mv a z
25 moving a/b/c/d/x to z/b/c/d/x
25 moving a/b/c/d/x to z/b/c/d/x
26 moving a/b/c/d/y to z/b/c/d/y
26 moving a/b/c/d/y to z/b/c/d/y
27 moving a/b/c/d/z to z/b/c/d/z
27 moving a/b/c/d/z to z/b/c/d/z
28
28
29 Test name collisions
29 Test name collisions
30
30
31 $ rm z/b/c/d/x
31 $ rm z/b/c/d/x
32 $ mkdir z/b/c/d/x
32 $ mkdir z/b/c/d/x
33 $ touch z/b/c/d/x/y
33 $ touch z/b/c/d/x/y
34 $ hg add z/b/c/d/x/y
34 $ hg add z/b/c/d/x/y
35 abort: file 'z/b/c/d/x' in dirstate clashes with 'z/b/c/d/x/y'
35 abort: file 'z/b/c/d/x' in dirstate clashes with 'z/b/c/d/x/y'
36 [255]
36 [255]
37 $ rm -rf z/b/c/d
37 $ rm -rf z/b/c/d
38 $ touch z/b/c/d
38 $ touch z/b/c/d
39 $ hg add z/b/c/d
39 $ hg add z/b/c/d
40 abort: directory 'z/b/c/d' already in dirstate
40 abort: directory 'z/b/c/d' already in dirstate
41 [255]
41 [255]
42
42
43 $ cd ..
43 $ cd ..
44
44
45 Issue1790: dirstate entry locked into unset if file mtime is set into
45 Issue1790: dirstate entry locked into unset if file mtime is set into
46 the future
46 the future
47
47
48 Prepare test repo:
48 Prepare test repo:
49
49
50 $ hg init u
50 $ hg init u
51 $ cd u
51 $ cd u
52 $ echo a > a
52 $ echo a > a
53 $ hg add
53 $ hg add
54 adding a
54 adding a
55 $ hg ci -m1
55 $ hg ci -m1
56
56
57 Set mtime of a into the future:
57 Set mtime of a into the future:
58
58
59 $ touch -t 203101011200 a
59 $ touch -t 203101011200 a
60
60
61 Status must not set a's entry to unset (issue1790):
61 Status must not set a's entry to unset (issue1790):
62
62
63 $ hg status
63 $ hg status
64 $ hg debugstate
64 $ hg debugstate
65 n 644 2 2031-01-01 12:00:00 a
65 n 644 2 2031-01-01 12:00:00 a
66
66
67 Test modulo storage/comparison of absurd dates:
67 Test modulo storage/comparison of absurd dates:
68
68
69 #if no-aix
69 #if no-aix
70 $ touch -t 195001011200 a
70 $ touch -t 195001011200 a
71 $ hg st
71 $ hg st
72 $ hg debugstate
72 $ hg debugstate
73 n 644 2 2018-01-19 15:14:08 a
73 n 644 2 2018-01-19 15:14:08 a
74 #endif
74 #endif
75
75
76 Verify that exceptions during a dirstate change leave the dirstate
76 Verify that exceptions during a dirstate change leave the dirstate
77 coherent (issue4353)
77 coherent (issue4353)
78
78
79 $ cat > ../dirstateexception.py <<EOF
79 $ cat > ../dirstateexception.py <<EOF
80 > from mercurial import (
80 > from mercurial import (
81 > error,
81 > error,
82 > extensions,
82 > extensions,
83 > mergestate as mergestatemod,
83 > mergestate as mergestatemod,
84 > )
84 > )
85 >
85 >
86 > def wraprecordupdates(*args):
86 > def wraprecordupdates(*args):
87 > raise error.Abort(b"simulated error while recording dirstateupdates")
87 > raise error.Abort(b"simulated error while recording dirstateupdates")
88 >
88 >
89 > def reposetup(ui, repo):
89 > def reposetup(ui, repo):
90 > extensions.wrapfunction(mergestatemod, 'recordupdates',
90 > extensions.wrapfunction(mergestatemod, 'recordupdates',
91 > wraprecordupdates)
91 > wraprecordupdates)
92 > EOF
92 > EOF
93
93
94 $ hg rm a
94 $ hg rm a
95 $ hg commit -m 'rm a'
95 $ hg commit -m 'rm a'
96 $ echo "[extensions]" >> .hg/hgrc
96 $ echo "[extensions]" >> .hg/hgrc
97 $ echo "dirstateex=../dirstateexception.py" >> .hg/hgrc
97 $ echo "dirstateex=../dirstateexception.py" >> .hg/hgrc
98 $ hg up 0
98 $ hg up 0
99 abort: simulated error while recording dirstateupdates
99 abort: simulated error while recording dirstateupdates
100 [255]
100 [255]
101 $ hg log -r . -T '{rev}\n'
101 $ hg log -r . -T '{rev}\n'
102 1
102 1
103 $ hg status
103 $ hg status
104 ? a
104 ? a
105
105
106 #if dirstate-v2
106 #if dirstate-v2
107 Check that folders that are prefixes of others do not throw the packer into an
107 Check that folders that are prefixes of others do not throw the packer into an
108 infinite loop.
108 infinite loop.
109
109
110 $ cd ..
110 $ cd ..
111 $ hg init infinite-loop
111 $ hg init infinite-loop
112 $ cd infinite-loop
112 $ cd infinite-loop
113 $ mkdir hgext3rd hgext
113 $ mkdir hgext3rd hgext
114 $ touch hgext3rd/__init__.py hgext/zeroconf.py
114 $ touch hgext3rd/__init__.py hgext/zeroconf.py
115 $ hg commit -Aqm0
115 $ hg commit -Aqm0
116
116
117 $ hg st -c
117 $ hg st -c
118 C hgext/zeroconf.py
118 C hgext/zeroconf.py
119 C hgext3rd/__init__.py
119 C hgext3rd/__init__.py
120
120
121 $ cd ..
121 $ cd ..
122
122
123 Check that the old dirstate data file is removed correctly and the new one is
123 Check that the old dirstate data file is removed correctly and the new one is
124 valid.
124 valid.
125
125
126 $ dirstate_data_files () {
126 $ dirstate_data_files () {
127 > find .hg -maxdepth 1 -name "dirstate.*"
127 > find .hg -maxdepth 1 -name "dirstate.*"
128 > }
128 > }
129
129
130 $ find_dirstate_uuid () {
130 $ find_dirstate_uuid () {
131 > hg debugstate --docket | grep uuid | sed 's/.*uuid: \(.*\)/\1/'
131 > hg debugstate --docket | grep uuid | sed 's/.*uuid: \(.*\)/\1/'
132 > }
132 > }
133
133
134 $ find_dirstate_data_size () {
134 $ find_dirstate_data_size () {
135 > hg debugstate --docket | grep 'size of dirstate data' | sed 's/.*size of dirstate data: \(.*\)/\1/'
135 > hg debugstate --docket | grep 'size of dirstate data' | sed 's/.*size of dirstate data: \(.*\)/\1/'
136 > }
136 > }
137
137
138 $ dirstate_uuid_has_not_changed () {
138 $ dirstate_uuid_has_not_changed () {
139 > # Non-Rust always rewrites the whole dirstate
139 > # Non-Rust always rewrites the whole dirstate
140 > if [ $# -eq 1 ] || ([ -n "$HGMODULEPOLICY" ] && [ -z "${HGMODULEPOLICY##*rust*}" ]) || [ -n "$RHG_INSTALLED_AS_HG" ]; then
140 > if [ $# -eq 1 ] || ([ -n "$HGMODULEPOLICY" ] && [ -z "${HGMODULEPOLICY##*rust*}" ]) || [ -n "$RHG_INSTALLED_AS_HG" ]; then
141 > test $current_uid = $(find_dirstate_uuid)
141 > test $current_uid = $(find_dirstate_uuid)
142 > else
142 > else
143 > echo "not testing because using Python implementation"
143 > echo "not testing because using Python implementation"
144 > fi
144 > fi
145 > }
145 > }
146
146
147 $ cd ..
147 $ cd ..
148 $ hg init append-mostly
148 $ hg init append-mostly
149 $ cd append-mostly
149 $ cd append-mostly
150 $ mkdir dir dir2
150 $ mkdir dir dir2
151 $ touch -t 200001010000 dir/a dir/b dir/c dir/d dir/e dir2/f dir dir2
151 $ touch -t 200001010000 dir/a dir/b dir/c dir/d dir/e dir2/f dir dir2
152 $ hg commit -Aqm initial
152 $ hg commit -Aqm initial
153 $ hg st
153 $ hg st
154 $ dirstate_data_files | wc -l
154 $ dirstate_data_files | wc -l
155 *1 (re)
155 *1 (re)
156 $ current_uid=$(find_dirstate_uuid)
156 $ current_uid=$(find_dirstate_uuid)
157
157
158 Nothing changes here
158 Nothing changes here
159
159
160 $ hg st
160 $ hg st
161 $ dirstate_data_files | wc -l
161 $ dirstate_data_files | wc -l
162 *1 (re)
162 *1 (re)
163 $ dirstate_uuid_has_not_changed
163 $ dirstate_uuid_has_not_changed
164 not testing because using Python implementation (no-rust no-rhg !)
164 not testing because using Python implementation (no-rust no-rhg !)
165
165
166 Trigger an append with a small change to directory mtime
166 Trigger an append with a small change to directory mtime
167
167
168 $ current_data_size=$(find_dirstate_data_size)
168 $ current_data_size=$(find_dirstate_data_size)
169 $ touch -t 201001010000 dir2
169 $ touch -t 201001010000 dir2
170 $ hg st
170 $ hg st
171 $ dirstate_data_files | wc -l
171 $ dirstate_data_files | wc -l
172 *1 (re)
172 *1 (re)
173 $ dirstate_uuid_has_not_changed
173 $ dirstate_uuid_has_not_changed
174 not testing because using Python implementation (no-rust no-rhg !)
174 not testing because using Python implementation (no-rust no-rhg !)
175 $ new_data_size=$(find_dirstate_data_size)
175 $ new_data_size=$(find_dirstate_data_size)
176 $ [ "$current_data_size" -eq "$new_data_size" ]; echo $?
176 $ [ "$current_data_size" -eq "$new_data_size" ]; echo $?
177 0 (no-rust no-rhg !)
177 0 (no-rust no-rhg !)
178 1 (rust !)
178 1 (rust !)
179 1 (no-rust rhg !)
179 1 (no-rust rhg !)
180
180
181 Unused bytes counter is non-0 when appending
181 Unused bytes counter is non-0 when appending
182 $ touch file
182 $ touch file
183 $ hg add file
183 $ hg add file
184 $ current_uid=$(find_dirstate_uuid)
184 $ current_uid=$(find_dirstate_uuid)
185
185
186 Trigger a rust/rhg run which updates the unused bytes value
186 Trigger a rust/rhg run which updates the unused bytes value
187 $ hg st
187 $ hg st
188 A file
188 A file
189 $ dirstate_data_files | wc -l
189 $ dirstate_data_files | wc -l
190 *1 (re)
190 *1 (re)
191 $ dirstate_uuid_has_not_changed
191 $ dirstate_uuid_has_not_changed
192 not testing because using Python implementation (no-rust no-rhg !)
192 not testing because using Python implementation (no-rust no-rhg !)
193
193
194 $ hg debugstate --docket | grep unused
194 $ hg debugstate --docket | grep unused
195 number of unused bytes: 0 (no-rust no-rhg !)
195 number of unused bytes: 0 (no-rust no-rhg !)
196 number of unused bytes: [1-9]\d* (re) (rhg no-rust !)
196 number of unused bytes: [1-9]\d* (re) (rhg no-rust !)
197 number of unused bytes: [1-9]\d* (re) (rust no-rhg !)
197 number of unused bytes: [1-9]\d* (re) (rust no-rhg !)
198 number of unused bytes: [1-9]\d* (re) (rust rhg !)
198 number of unused bytes: [1-9]\d* (re) (rust rhg !)
199
199
200 Delete most of the dirstate to trigger a non-append
200 Delete most of the dirstate to trigger a non-append
201 $ hg rm dir/a dir/b dir/c dir/d
201 $ hg rm dir/a dir/b dir/c dir/d
202 $ dirstate_data_files | wc -l
202 $ dirstate_data_files | wc -l
203 *1 (re)
203 *1 (re)
204 $ dirstate_uuid_has_not_changed also-if-python
204 $ dirstate_uuid_has_not_changed also-if-python
205 [1]
205 [1]
206
206
207 Check that unused bytes counter is reset when creating a new docket
207 Check that unused bytes counter is reset when creating a new docket
208
208
209 $ hg debugstate --docket | grep unused
209 $ hg debugstate --docket | grep unused
210 number of unused bytes: 0
210 number of unused bytes: 0
211
211
212 #endif
212 #endif
213
213
214 (non-Rust always rewrites)
215
216 Test the devel option to control write behavior
217 ===============================================
218
219 Sometimes, debugging or testing the dirstate requires making sure that we have
220 done a complete rewrite of the data file and have no unreachable data around,
221 sometimes it requires we ensure we don't.
222
223 We test the option to force this rewrite by creating the situation where an
224 append would happen and check that it doesn't happen.
225
226 $ cd ..
227 $ hg init force-base
228 $ cd force-base
229 $ mkdir -p dir/nested dir2
230 $ touch -t 200001010000 f dir/nested/a dir/b dir/c dir/d dir2/e dir/nested dir dir2
231 $ hg commit -Aqm "recreate a bunch of files to facilitate append"
232 $ hg st --config devel.dirstate.v2.data_update_mode=force-new
233 $ cd ..
234
235 #if dirstate-v2
236 $ hg -R force-base debugstate --docket | grep unused
237 number of unused bytes: 0
238
239 Check with the option in "auto" mode
240 ------------------------------------
241 $ cp -a force-base append-mostly-no-force-rewrite
242 $ cd append-mostly-no-force-rewrite
243 $ current_uid=$(find_dirstate_uuid)
244
245 Change mtime of dir on disk which will be recorded, causing a small enough change
246 to warrant only an append
247
248 $ touch -t 202212010000 dir2
249 $ hg st \
250 > --config rhg.on-unsupported=abort \
251 > --config devel.dirstate.v2.data_update_mode=auto
252
253 UUID hasn't changed and a non-zero number of unused bytes means we've appended
254
255 $ dirstate_uuid_has_not_changed
256 not testing because using Python implementation (no-rust no-rhg !)
257
258 #if no-rust no-rhg
259 The pure python implementation never appends at the time this is written.
260 $ hg debugstate --docket | grep unused
261 number of unused bytes: 0 (known-bad-output !)
262 #else
263 $ hg debugstate --docket | grep unused
264 number of unused bytes: [1-9]\d* (re)
265 #endif
266 $ cd ..
267
268 Check the same scenario with the option set to "force-new"
269 ---------------------------------------------------------
270
271 $ cp -a force-base append-mostly-force-rewrite
272 $ cd append-mostly-force-rewrite
273 $ current_uid=$(find_dirstate_uuid)
274
275 Change mtime of dir on disk which will be recorded, causing a small enough change
276 to warrant only an append, but we force the rewrite
277
278 $ touch -t 202212010000 dir2
279 $ hg st \
280 > --config rhg.on-unsupported=abort \
281 > --config devel.dirstate.v2.data_update_mode=force-new
282
283 UUID has changed and zero unused bytes means a full-rewrite happened
284
285
286 #if no-rust no-rhg
287 $ dirstate_uuid_has_not_changed
288 not testing because using Python implementation
289 #else
290 $ dirstate_uuid_has_not_changed
291 [1]
292 #endif
293 $ hg debugstate --docket | grep unused
294 number of unused bytes: 0
295 $ cd ..
296
297
298 Check the same scenario with the option set to "force-append"
299 -------------------------------------------------------------
300
301 (should behave the same as "auto" here)
302
303 $ cp -a force-base append-mostly-force-append
304 $ cd append-mostly-force-append
305 $ current_uid=$(find_dirstate_uuid)
306
307 Change mtime of dir on disk which will be recorded, causing a small enough change
308 to warrant only an append, which we are forcing here anyway.
309
310 $ touch -t 202212010000 dir2
311 $ hg st \
312 > --config rhg.on-unsupported=abort \
313 > --config devel.dirstate.v2.data_update_mode=force-append
314
315 UUID has not changed and some unused bytes exist in the data file
316
317 $ dirstate_uuid_has_not_changed
318 not testing because using Python implementation (no-rust no-rhg !)
319
320 #if no-rust no-rhg
321 The pure python implementation never appends at the time this is written.
322 $ hg debugstate --docket | grep unused
323 number of unused bytes: 0 (known-bad-output !)
324 #else
325 $ hg debugstate --docket | grep unused
326 number of unused bytes: [1-9]\d* (re)
327 #endif
328 $ cd ..
329
330 Check with the option in "auto" mode
331 ------------------------------------
332 $ cp -a force-base append-mostly-no-force-rewrite
333 $ cd append-mostly-no-force-rewrite
334 $ current_uid=$(find_dirstate_uuid)
335
336 Change mtime of everything on disk causing a full rewrite
337
338 $ touch -t 202212010005 `hg files`
339 $ hg st \
340 > --config rhg.on-unsupported=abort \
341 > --config devel.dirstate.v2.data_update_mode=auto
342
343 UUID has changed and zero unused bytes means we've rewritten.
344
345 #if no-rust no-rhg
346 $ dirstate_uuid_has_not_changed
347 not testing because using Python implementation
348 #else
349 $ dirstate_uuid_has_not_changed
350 [1]
351 #endif
352
353 $ hg debugstate --docket | grep unused
354 number of unused bytes: 0 (known-bad-output !)
355 $ cd ..
356
357 Check the same scenario with the option set to "force-new"
358 ---------------------------------------------------------
359
360 (should be the same as auto)
361
362 $ cp -a force-base append-mostly-force-rewrite
363 $ cd append-mostly-force-rewrite
364 $ current_uid=$(find_dirstate_uuid)
365
366 Change mtime of everything on disk causing a full rewrite
367
368 $ touch -t 202212010005 `hg files`
369 $ hg st \
370 > --config rhg.on-unsupported=abort \
371 > --config devel.dirstate.v2.data_update_mode=force-new
372
373 UUID has changed and a zero number of unused bytes means we've rewritten.
374
375
376 #if no-rust no-rhg
377 $ dirstate_uuid_has_not_changed
378 not testing because using Python implementation
379 #else
380 $ dirstate_uuid_has_not_changed
381 [1]
382 #endif
383 $ hg debugstate --docket | grep unused
384 number of unused bytes: 0
385 $ cd ..
386
387
388 Check the same scenario with the option set to "force-append"
389 -------------------------------------------------------------
390
391 Should append even if "auto" did not
392
393 $ cp -a force-base append-mostly-force-append
394 $ cd append-mostly-force-append
395 $ current_uid=$(find_dirstate_uuid)
396
397 Change mtime of everything on disk causing a full rewrite
398
399 $ touch -t 202212010005 `hg files`
400 $ hg st \
401 > --config rhg.on-unsupported=abort \
402 > --config devel.dirstate.v2.data_update_mode=force-append
403
404 UUID has not changed and some unused bytes exist in the data file
405
406 $ dirstate_uuid_has_not_changed
407 not testing because using Python implementation (no-rust no-rhg !)
408
409 #if no-rust no-rhg
410 The pure python implementation never appends at the time this is written.
411 $ hg debugstate --docket | grep unused
412 number of unused bytes: 0 (known-bad-output !)
413 #else
414 $ hg debugstate --docket | grep unused
415 number of unused bytes: [1-9]\d* (re)
416 #endif
417 $ cd ..
418
419
420
421 Get back into a state suitable for the test of the file.
422
423 $ cd ./append-mostly
424
425 #else
426 $ cd ./u
427 #endif
428
214 Transaction compatibility
429 Transaction compatibility
215 -------------------------
430 =========================
216
431
217 The transaction preserves the dirstate.
432 The transaction preserves the dirstate.
218 We should make sure all of it (docket + data) is preserved
433 We should make sure all of it (docket + data) is preserved
219
434
220 #if dirstate-v2
435 #if dirstate-v2
221 $ hg commit -m 'bli'
436 $ hg commit -m 'bli'
222 #endif
437 #endif
223
438
224 $ hg update --quiet
439 $ hg update --quiet
225 $ hg revert --all --quiet
440 $ hg revert --all --quiet
226 $ rm -f a
441 $ rm -f a
227 $ echo foo > foo
442 $ echo foo > foo
228 $ hg add foo
443 $ hg add foo
229 $ hg commit -m foo
444 $ hg commit -m foo
230
445
231 #if dirstate-v2
446 #if dirstate-v2
232 $ uid=$(find_dirstate_uuid)
447 $ uid=$(find_dirstate_uuid)
233 $ touch bar
448 $ touch bar
234 $ while [ uid = $(find_dirstate_uuid) ]; do
449 $ while [ uid = $(find_dirstate_uuid) ]; do
235 > hg add bar;
450 > hg add bar;
236 > hg remove bar;
451 > hg remove bar;
237 > done;
452 > done;
238 $ rm bar
453 $ rm bar
239 #endif
454 #endif
240 $ hg rollback
455 $ hg rollback
241 repository tip rolled back to revision 1 (undo commit)
456 repository tip rolled back to revision 1 (undo commit)
242 working directory now based on revision 1
457 working directory now based on revision 1
243
458
244 $ hg status
459 $ hg status
245 A foo
460 A foo
246 $ cd ..
461 $ cd ..
247
462
248 Check dirstate ordering
463 Check dirstate ordering
249 (e.g. `src/dirstate/` and `src/dirstate.rs` shouldn't cause issues)
464 (e.g. `src/dirstate/` and `src/dirstate.rs` shouldn't cause issues)
250
465
251 $ hg init repro
466 $ hg init repro
252 $ cd repro
467 $ cd repro
253 $ mkdir src
468 $ mkdir src
254 $ mkdir src/dirstate
469 $ mkdir src/dirstate
255 $ touch src/dirstate/file1 src/dirstate/file2 src/dirstate.rs
470 $ touch src/dirstate/file1 src/dirstate/file2 src/dirstate.rs
256 $ touch file1 file2
471 $ touch file1 file2
257 $ hg commit -Aqm1
472 $ hg commit -Aqm1
258 $ hg st
473 $ hg st
259 $ cd ..
474 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now