##// END OF EJS Templates
test-lock: use synchronisation file instead of sleep...
marmoute -
r52390:9da3fcc5 stable
parent child Browse files
Show More
@@ -1,2942 +1,2947 b''
1 # configitems.toml - centralized declaration of configuration options
1 # configitems.toml - centralized declaration of configuration options
2 #
2 #
3 # This file contains declarations of the core Mercurial configuration options.
3 # This file contains declarations of the core Mercurial configuration options.
4 #
4 #
5 # # Structure
5 # # Structure
6 #
6 #
7 # items: array of config items
7 # items: array of config items
8 # templates: mapping of template name to template declaration
8 # templates: mapping of template name to template declaration
9 # template-applications: array of template applications
9 # template-applications: array of template applications
10 #
10 #
11 # # Elements
11 # # Elements
12 #
12 #
13 # ## Item
13 # ## Item
14 #
14 #
15 # Declares a core Mercurial option.
15 # Declares a core Mercurial option.
16 #
16 #
17 # - section: string (required)
17 # - section: string (required)
18 # - name: string (required)
18 # - name: string (required)
19 # - default-type: boolean, changes how `default` is read
19 # - default-type: boolean, changes how `default` is read
20 # - default: any
20 # - default: any
21 # - generic: boolean
21 # - generic: boolean
22 # - priority: integer, only if `generic` is true
22 # - priority: integer, only if `generic` is true
23 # - alias: list of 2-tuples of strings
23 # - alias: list of 2-tuples of strings
24 # - experimental: boolean
24 # - experimental: boolean
25 # - documentation: string
25 # - documentation: string
26 # - in_core_extension: string
26 # - in_core_extension: string
27 #
27 #
28 # ## Template
28 # ## Template
29 #
29 #
30 # Declares a group of options to be re-used for multiple sections.
30 # Declares a group of options to be re-used for multiple sections.
31 #
31 #
32 # - all the same fields as `Item`, except `section` and `name`
32 # - all the same fields as `Item`, except `section` and `name`
33 # - `suffix` (string, required)
33 # - `suffix` (string, required)
34 #
34 #
35 # ## Template applications
35 # ## Template applications
36 #
36 #
37 # Uses a `Template` to instanciate its options in a given section.
37 # Uses a `Template` to instanciate its options in a given section.
38 #
38 #
39 # - template: string (required, must match a `Template` name)
39 # - template: string (required, must match a `Template` name)
40 # - section: string (required)
40 # - section: string (required)
41
41
42 [[items]]
42 [[items]]
43 section = "alias"
43 section = "alias"
44 name = ".*"
44 name = ".*"
45 default-type = "dynamic"
45 default-type = "dynamic"
46 generic = true
46 generic = true
47
47
48 [[items]]
48 [[items]]
49 section = "auth"
49 section = "auth"
50 name = "cookiefile"
50 name = "cookiefile"
51
51
52 # bookmarks.pushing: internal hack for discovery
52 # bookmarks.pushing: internal hack for discovery
53 [[items]]
53 [[items]]
54 section = "bookmarks"
54 section = "bookmarks"
55 name = "pushing"
55 name = "pushing"
56 default-type = "list_type"
56 default-type = "list_type"
57
57
58 # bundle.mainreporoot: internal hack for bundlerepo
58 # bundle.mainreporoot: internal hack for bundlerepo
59 [[items]]
59 [[items]]
60 section = "bundle"
60 section = "bundle"
61 name = "mainreporoot"
61 name = "mainreporoot"
62 default = ""
62 default = ""
63
63
64 [[items]]
64 [[items]]
65 section = "censor"
65 section = "censor"
66 name = "policy"
66 name = "policy"
67 default = "abort"
67 default = "abort"
68 experimental = true
68 experimental = true
69
69
70 [[items]]
70 [[items]]
71 section = "chgserver"
71 section = "chgserver"
72 name = "idletimeout"
72 name = "idletimeout"
73 default = 3600
73 default = 3600
74
74
75 [[items]]
75 [[items]]
76 section = "chgserver"
76 section = "chgserver"
77 name = "skiphash"
77 name = "skiphash"
78 default = false
78 default = false
79
79
80 [[items]]
80 [[items]]
81 section = "cmdserver"
81 section = "cmdserver"
82 name = "log"
82 name = "log"
83
83
84 [[items]]
84 [[items]]
85 section = "cmdserver"
85 section = "cmdserver"
86 name = "max-log-files"
86 name = "max-log-files"
87 default = 7
87 default = 7
88
88
89 [[items]]
89 [[items]]
90 section = "cmdserver"
90 section = "cmdserver"
91 name = "max-log-size"
91 name = "max-log-size"
92 default = "1 MB"
92 default = "1 MB"
93
93
94 [[items]]
94 [[items]]
95 section = "cmdserver"
95 section = "cmdserver"
96 name = "max-repo-cache"
96 name = "max-repo-cache"
97 default = 0
97 default = 0
98 experimental = true
98 experimental = true
99
99
100 [[items]]
100 [[items]]
101 section = "cmdserver"
101 section = "cmdserver"
102 name = "message-encodings"
102 name = "message-encodings"
103 default-type = "list_type"
103 default-type = "list_type"
104
104
105 [[items]]
105 [[items]]
106 section = "cmdserver"
106 section = "cmdserver"
107 name = "shutdown-on-interrupt"
107 name = "shutdown-on-interrupt"
108 default = true
108 default = true
109
109
110 [[items]]
110 [[items]]
111 section = "cmdserver"
111 section = "cmdserver"
112 name = "track-log"
112 name = "track-log"
113 default-type = "lambda"
113 default-type = "lambda"
114 default = [ "chgserver", "cmdserver", "repocache",]
114 default = [ "chgserver", "cmdserver", "repocache",]
115
115
116 [[items]]
116 [[items]]
117 section = "color"
117 section = "color"
118 name = ".*"
118 name = ".*"
119 generic = true
119 generic = true
120
120
121 [[items]]
121 [[items]]
122 section = "color"
122 section = "color"
123 name = "mode"
123 name = "mode"
124 default = "auto"
124 default = "auto"
125
125
126 [[items]]
126 [[items]]
127 section = "color"
127 section = "color"
128 name = "pagermode"
128 name = "pagermode"
129 default-type = "dynamic"
129 default-type = "dynamic"
130
130
131 [[items]]
131 [[items]]
132 section = "command-templates"
132 section = "command-templates"
133 name = "graphnode"
133 name = "graphnode"
134 alias = [["ui", "graphnodetemplate"]]
134 alias = [["ui", "graphnodetemplate"]]
135
135
136 [[items]]
136 [[items]]
137 section = "command-templates"
137 section = "command-templates"
138 name = "log"
138 name = "log"
139 alias = [["ui", "logtemplate"]]
139 alias = [["ui", "logtemplate"]]
140
140
141 [[items]]
141 [[items]]
142 section = "command-templates"
142 section = "command-templates"
143 name = "mergemarker"
143 name = "mergemarker"
144 default = '{node|short} {ifeq(tags, "tip", "", ifeq(tags, "", "", "{tags} "))}{if(bookmarks, "{bookmarks} ")}{ifeq(branch, "default", "", "{branch} ")}- {author|user}: {desc|firstline}'
144 default = '{node|short} {ifeq(tags, "tip", "", ifeq(tags, "", "", "{tags} "))}{if(bookmarks, "{bookmarks} ")}{ifeq(branch, "default", "", "{branch} ")}- {author|user}: {desc|firstline}'
145 alias = [["ui", "mergemarkertemplate"]]
145 alias = [["ui", "mergemarkertemplate"]]
146
146
147 [[items]]
147 [[items]]
148 section = "command-templates"
148 section = "command-templates"
149 name = "oneline-summary"
149 name = "oneline-summary"
150
150
151 [[items]]
151 [[items]]
152 section = "command-templates"
152 section = "command-templates"
153 name = "oneline-summary.*"
153 name = "oneline-summary.*"
154 default-type = "dynamic"
154 default-type = "dynamic"
155 generic = true
155 generic = true
156
156
157 [[items]]
157 [[items]]
158 section = "command-templates"
158 section = "command-templates"
159 name = "pre-merge-tool-output"
159 name = "pre-merge-tool-output"
160 alias = [["ui", "pre-merge-tool-output-template"]]
160 alias = [["ui", "pre-merge-tool-output-template"]]
161
161
162 [[items]]
162 [[items]]
163 section = "commands"
163 section = "commands"
164 name = "commit.post-status"
164 name = "commit.post-status"
165 default = false
165 default = false
166
166
167 [[items]]
167 [[items]]
168 section = "commands"
168 section = "commands"
169 name = "grep.all-files"
169 name = "grep.all-files"
170 default = false
170 default = false
171 experimental = true
171 experimental = true
172
172
173 [[items]]
173 [[items]]
174 section = "commands"
174 section = "commands"
175 name = "merge.require-rev"
175 name = "merge.require-rev"
176 default = false
176 default = false
177
177
178 [[items]]
178 [[items]]
179 section = "commands"
179 section = "commands"
180 name = "push.require-revs"
180 name = "push.require-revs"
181 default = false
181 default = false
182
182
183 # Rebase related configuration moved to core because other extension are doing
183 # Rebase related configuration moved to core because other extension are doing
184 # strange things. For example, shelve import the extensions to reuse some bit
184 # strange things. For example, shelve import the extensions to reuse some bit
185 # without formally loading it.
185 # without formally loading it.
186 [[items]]
186 [[items]]
187 section = "commands"
187 section = "commands"
188 name = "rebase.requiredest"
188 name = "rebase.requiredest"
189 default = false
189 default = false
190
190
191 [[items]]
191 [[items]]
192 section = "commands"
192 section = "commands"
193 name = "resolve.confirm"
193 name = "resolve.confirm"
194 default = false
194 default = false
195
195
196 [[items]]
196 [[items]]
197 section = "commands"
197 section = "commands"
198 name = "resolve.explicit-re-merge"
198 name = "resolve.explicit-re-merge"
199 default = false
199 default = false
200
200
201 [[items]]
201 [[items]]
202 section = "commands"
202 section = "commands"
203 name = "resolve.mark-check"
203 name = "resolve.mark-check"
204 default = "none"
204 default = "none"
205
205
206 [[items]]
206 [[items]]
207 section = "commands"
207 section = "commands"
208 name = "show.aliasprefix"
208 name = "show.aliasprefix"
209 default-type = "list_type"
209 default-type = "list_type"
210
210
211 [[items]]
211 [[items]]
212 section = "commands"
212 section = "commands"
213 name = "status.relative"
213 name = "status.relative"
214 default = false
214 default = false
215
215
216 [[items]]
216 [[items]]
217 section = "commands"
217 section = "commands"
218 name = "status.skipstates"
218 name = "status.skipstates"
219 default = []
219 default = []
220 experimental = true
220 experimental = true
221
221
222 [[items]]
222 [[items]]
223 section = "commands"
223 section = "commands"
224 name = "status.terse"
224 name = "status.terse"
225 default = ""
225 default = ""
226
226
227 [[items]]
227 [[items]]
228 section = "commands"
228 section = "commands"
229 name = "status.verbose"
229 name = "status.verbose"
230 default = false
230 default = false
231
231
232 [[items]]
232 [[items]]
233 section = "commands"
233 section = "commands"
234 name = "update.check"
234 name = "update.check"
235
235
236 [[items]]
236 [[items]]
237 section = "commands"
237 section = "commands"
238 name = "update.requiredest"
238 name = "update.requiredest"
239 default = false
239 default = false
240
240
241 [[items]]
241 [[items]]
242 section = "committemplate"
242 section = "committemplate"
243 name = ".*"
243 name = ".*"
244 generic = true
244 generic = true
245
245
246 [[items]]
246 [[items]]
247 section = "convert"
247 section = "convert"
248 name = "bzr.saverev"
248 name = "bzr.saverev"
249 default = true
249 default = true
250
250
251 [[items]]
251 [[items]]
252 section = "convert"
252 section = "convert"
253 name = "cvsps.cache"
253 name = "cvsps.cache"
254 default = true
254 default = true
255
255
256 [[items]]
256 [[items]]
257 section = "convert"
257 section = "convert"
258 name = "cvsps.fuzz"
258 name = "cvsps.fuzz"
259 default = 60
259 default = 60
260
260
261 [[items]]
261 [[items]]
262 section = "convert"
262 section = "convert"
263 name = "cvsps.logencoding"
263 name = "cvsps.logencoding"
264
264
265 [[items]]
265 [[items]]
266 section = "convert"
266 section = "convert"
267 name = "cvsps.mergefrom"
267 name = "cvsps.mergefrom"
268
268
269 [[items]]
269 [[items]]
270 section = "convert"
270 section = "convert"
271 name = "cvsps.mergeto"
271 name = "cvsps.mergeto"
272
272
273 [[items]]
273 [[items]]
274 section = "convert"
274 section = "convert"
275 name = "git.committeractions"
275 name = "git.committeractions"
276 default-type = "lambda"
276 default-type = "lambda"
277 default = [ "messagedifferent",]
277 default = [ "messagedifferent",]
278
278
279 [[items]]
279 [[items]]
280 section = "convert"
280 section = "convert"
281 name = "git.extrakeys"
281 name = "git.extrakeys"
282 default-type = "list_type"
282 default-type = "list_type"
283
283
284 [[items]]
284 [[items]]
285 section = "convert"
285 section = "convert"
286 name = "git.findcopiesharder"
286 name = "git.findcopiesharder"
287 default = false
287 default = false
288
288
289 [[items]]
289 [[items]]
290 section = "convert"
290 section = "convert"
291 name = "git.remoteprefix"
291 name = "git.remoteprefix"
292 default = "remote"
292 default = "remote"
293
293
294 [[items]]
294 [[items]]
295 section = "convert"
295 section = "convert"
296 name = "git.renamelimit"
296 name = "git.renamelimit"
297 default = 400
297 default = 400
298
298
299 [[items]]
299 [[items]]
300 section = "convert"
300 section = "convert"
301 name = "git.saverev"
301 name = "git.saverev"
302 default = true
302 default = true
303
303
304 [[items]]
304 [[items]]
305 section = "convert"
305 section = "convert"
306 name = "git.similarity"
306 name = "git.similarity"
307 default = 50
307 default = 50
308
308
309 [[items]]
309 [[items]]
310 section = "convert"
310 section = "convert"
311 name = "git.skipsubmodules"
311 name = "git.skipsubmodules"
312 default = false
312 default = false
313
313
314 [[items]]
314 [[items]]
315 section = "convert"
315 section = "convert"
316 name = "hg.clonebranches"
316 name = "hg.clonebranches"
317 default = false
317 default = false
318
318
319 [[items]]
319 [[items]]
320 section = "convert"
320 section = "convert"
321 name = "hg.ignoreerrors"
321 name = "hg.ignoreerrors"
322 default = false
322 default = false
323
323
324 [[items]]
324 [[items]]
325 section = "convert"
325 section = "convert"
326 name = "hg.preserve-hash"
326 name = "hg.preserve-hash"
327 default = false
327 default = false
328
328
329 [[items]]
329 [[items]]
330 section = "convert"
330 section = "convert"
331 name = "hg.revs"
331 name = "hg.revs"
332
332
333 [[items]]
333 [[items]]
334 section = "convert"
334 section = "convert"
335 name = "hg.saverev"
335 name = "hg.saverev"
336 default = false
336 default = false
337
337
338 [[items]]
338 [[items]]
339 section = "convert"
339 section = "convert"
340 name = "hg.sourcename"
340 name = "hg.sourcename"
341
341
342 [[items]]
342 [[items]]
343 section = "convert"
343 section = "convert"
344 name = "hg.startrev"
344 name = "hg.startrev"
345
345
346 [[items]]
346 [[items]]
347 section = "convert"
347 section = "convert"
348 name = "hg.tagsbranch"
348 name = "hg.tagsbranch"
349 default = "default"
349 default = "default"
350
350
351 [[items]]
351 [[items]]
352 section = "convert"
352 section = "convert"
353 name = "hg.usebranchnames"
353 name = "hg.usebranchnames"
354 default = true
354 default = true
355
355
356 [[items]]
356 [[items]]
357 section = "convert"
357 section = "convert"
358 name = "ignoreancestorcheck"
358 name = "ignoreancestorcheck"
359 default = false
359 default = false
360 experimental = true
360 experimental = true
361
361
362 [[items]]
362 [[items]]
363 section = "convert"
363 section = "convert"
364 name = "localtimezone"
364 name = "localtimezone"
365 default = false
365 default = false
366
366
367 [[items]]
367 [[items]]
368 section = "convert"
368 section = "convert"
369 name = "p4.encoding"
369 name = "p4.encoding"
370 default-type = "dynamic"
370 default-type = "dynamic"
371
371
372 [[items]]
372 [[items]]
373 section = "convert"
373 section = "convert"
374 name = "p4.startrev"
374 name = "p4.startrev"
375 default = 0
375 default = 0
376
376
377 [[items]]
377 [[items]]
378 section = "convert"
378 section = "convert"
379 name = "skiptags"
379 name = "skiptags"
380 default = false
380 default = false
381
381
382 [[items]]
382 [[items]]
383 section = "convert"
383 section = "convert"
384 name = "svn.branches"
384 name = "svn.branches"
385
385
386 [[items]]
386 [[items]]
387 section = "convert"
387 section = "convert"
388 name = "svn.dangerous-set-commit-dates"
388 name = "svn.dangerous-set-commit-dates"
389 default = false
389 default = false
390
390
391 [[items]]
391 [[items]]
392 section = "convert"
392 section = "convert"
393 name = "svn.debugsvnlog"
393 name = "svn.debugsvnlog"
394 default = true
394 default = true
395
395
396 [[items]]
396 [[items]]
397 section = "convert"
397 section = "convert"
398 name = "svn.startrev"
398 name = "svn.startrev"
399 default = 0
399 default = 0
400
400
401 [[items]]
401 [[items]]
402 section = "convert"
402 section = "convert"
403 name = "svn.tags"
403 name = "svn.tags"
404
404
405 [[items]]
405 [[items]]
406 section = "convert"
406 section = "convert"
407 name = "svn.trunk"
407 name = "svn.trunk"
408
408
409 [[items]]
409 [[items]]
410 section = "debug"
410 section = "debug"
411 name = "bundling-stats"
411 name = "bundling-stats"
412 default = false
412 default = false
413 documentation = "Display extra information about the bundling process."
413 documentation = "Display extra information about the bundling process."
414
414
415 [[items]]
415 [[items]]
416 section = "debug"
416 section = "debug"
417 name = "dirstate.delaywrite"
417 name = "dirstate.delaywrite"
418 default = 0
418 default = 0
419
419
420 [[items]]
420 [[items]]
421 section = "debug"
421 section = "debug"
422 name = "revlog.debug-delta"
422 name = "revlog.debug-delta"
423 default = false
423 default = false
424
424
425 [[items]]
425 [[items]]
426 section = "debug"
426 section = "debug"
427 name = "revlog.verifyposition.changelog"
427 name = "revlog.verifyposition.changelog"
428 default = ""
428 default = ""
429
429
430 [[items]]
430 [[items]]
431 section = "debug"
431 section = "debug"
432 name = "unbundling-stats"
432 name = "unbundling-stats"
433 default = false
433 default = false
434 documentation = "Display extra information about the unbundling process."
434 documentation = "Display extra information about the unbundling process."
435
435
436 [[items]]
436 [[items]]
437 section = "defaults"
437 section = "defaults"
438 name = ".*"
438 name = ".*"
439 generic = true
439 generic = true
440
440
441 [[items]]
441 [[items]]
442 section = "devel"
442 section = "devel"
443 name = "all-warnings"
443 name = "all-warnings"
444 default = false
444 default = false
445
445
446 [[items]]
446 [[items]]
447 section = "devel"
447 section = "devel"
448 name = "bundle.delta"
448 name = "bundle.delta"
449 default = ""
449 default = ""
450
450
451 [[items]]
451 [[items]]
452 section = "devel"
452 section = "devel"
453 name = "bundle2.debug"
453 name = "bundle2.debug"
454 default = false
454 default = false
455
455
456 [[items]]
456 [[items]]
457 section = "devel"
457 section = "devel"
458 name = "cache-vfs"
458 name = "cache-vfs"
459
459
460 [[items]]
460 [[items]]
461 section = "devel"
461 section = "devel"
462 name = "check-locks"
462 name = "check-locks"
463 default = false
463 default = false
464
464
465 [[items]]
465 [[items]]
466 section = "devel"
466 section = "devel"
467 name = "check-relroot"
467 name = "check-relroot"
468 default = false
468 default = false
469
469
470 [[items]]
470 [[items]]
471 section = "devel"
471 section = "devel"
472 name = "copy-tracing.multi-thread"
472 name = "copy-tracing.multi-thread"
473 default = true
473 default = true
474
474
475 # Track copy information for all files, not just "added" ones (very slow)
475 # Track copy information for all files, not just "added" ones (very slow)
476 [[items]]
476 [[items]]
477 section = "devel"
477 section = "devel"
478 name = "copy-tracing.trace-all-files"
478 name = "copy-tracing.trace-all-files"
479 default = false
479 default = false
480
480
481 [[items]]
481 [[items]]
482 section = "devel"
482 section = "devel"
483 name = "debug.abort-update"
483 name = "debug.abort-update"
484 default = false
484 default = false
485 documentation = """If true, then any merge with the working copy, \
485 documentation = """If true, then any merge with the working copy, \
486 e.g. [hg update], will be aborted after figuring out what needs to be done, \
486 e.g. [hg update], will be aborted after figuring out what needs to be done, \
487 but before spawning the parallel worker."""
487 but before spawning the parallel worker."""
488
488
489 [[items]]
489 [[items]]
490 section = "devel"
490 section = "devel"
491 name = "debug.copies"
491 name = "debug.copies"
492 default = false
492 default = false
493
493
494 [[items]]
494 [[items]]
495 section = "devel"
495 section = "devel"
496 name = "debug.extensions"
496 name = "debug.extensions"
497 default = false
497 default = false
498
498
499 [[items]]
499 [[items]]
500 section = "devel"
500 section = "devel"
501 name = "debug.peer-request"
501 name = "debug.peer-request"
502 default = false
502 default = false
503
503
504 [[items]]
504 [[items]]
505 section = "devel"
505 section = "devel"
506 name = "debug.repo-filters"
506 name = "debug.repo-filters"
507 default = false
507 default = false
508
508
509 [[items]]
509 [[items]]
510 section = "devel"
510 section = "devel"
511 name = "default-date"
511 name = "default-date"
512
512
513 [[items]]
513 [[items]]
514 section = "devel"
514 section = "devel"
515 name = "deprec-warn"
515 name = "deprec-warn"
516 default = false
516 default = false
517
517
518 # possible values:
518 # possible values:
519 # - auto (the default)
519 # - auto (the default)
520 # - force-append
520 # - force-append
521 # - force-new
521 # - force-new
522 [[items]]
522 [[items]]
523 section = "devel"
523 section = "devel"
524 name = "dirstate.v2.data_update_mode"
524 name = "dirstate.v2.data_update_mode"
525 default = "auto"
525 default = "auto"
526
526
527 [[items]]
527 [[items]]
528 section = "devel"
528 section = "devel"
529 name = "disableloaddefaultcerts"
529 name = "disableloaddefaultcerts"
530 default = false
530 default = false
531
531
532 [[items]]
532 [[items]]
533 section = "devel"
533 section = "devel"
534 name = "discovery.exchange-heads"
534 name = "discovery.exchange-heads"
535 default = true
535 default = true
536 documentation = """If false, the discovery will not start with remote \
536 documentation = """If false, the discovery will not start with remote \
537 head fetching and local head querying."""
537 head fetching and local head querying."""
538
538
539 [[items]]
539 [[items]]
540 section = "devel"
540 section = "devel"
541 name = "discovery.grow-sample"
541 name = "discovery.grow-sample"
542 default = true
542 default = true
543 documentation = """If false, the sample size used in set discovery \
543 documentation = """If false, the sample size used in set discovery \
544 will not be increased through the process."""
544 will not be increased through the process."""
545
545
546 [[items]]
546 [[items]]
547 section = "devel"
547 section = "devel"
548 name = "discovery.grow-sample.dynamic"
548 name = "discovery.grow-sample.dynamic"
549 default = true
549 default = true
550 documentation = """If true, the default, the sample size is adapted to the shape \
550 documentation = """If true, the default, the sample size is adapted to the shape \
551 of the undecided set. It is set to the max of:
551 of the undecided set. It is set to the max of:
552 `<target-size>, len(roots(undecided)), len(heads(undecided))`"""
552 `<target-size>, len(roots(undecided)), len(heads(undecided))`"""
553
553
554 [[items]]
554 [[items]]
555 section = "devel"
555 section = "devel"
556 name = "discovery.grow-sample.rate"
556 name = "discovery.grow-sample.rate"
557 default = 1.05
557 default = 1.05
558 documentation = "Controls the rate at which the sample grows."
558 documentation = "Controls the rate at which the sample grows."
559
559
560 [[items]]
560 [[items]]
561 section = "devel"
561 section = "devel"
562 name = "discovery.randomize"
562 name = "discovery.randomize"
563 default = true
563 default = true
564 documentation = """If false, random samplings during discovery are deterministic. \
564 documentation = """If false, random samplings during discovery are deterministic. \
565 It is meant for integration tests."""
565 It is meant for integration tests."""
566
566
567 [[items]]
567 [[items]]
568 section = "devel"
568 section = "devel"
569 name = "discovery.sample-size"
569 name = "discovery.sample-size"
570 default = 200
570 default = 200
571 documentation = "Controls the initial size of the discovery sample."
571 documentation = "Controls the initial size of the discovery sample."
572
572
573 [[items]]
573 [[items]]
574 section = "devel"
574 section = "devel"
575 name = "discovery.sample-size.initial"
575 name = "discovery.sample-size.initial"
576 default = 100
576 default = 100
577 documentation = "Controls the initial size of the discovery for initial change."
577 documentation = "Controls the initial size of the discovery for initial change."
578
578
579 [[items]]
579 [[items]]
580 section = "devel"
580 section = "devel"
581 name = "legacy.exchange"
581 name = "legacy.exchange"
582 default-type = "list_type"
582 default-type = "list_type"
583
583
584 [[items]]
584 [[items]]
585 section = "devel"
585 section = "devel"
586 name = "lock-wait-sync-file"
587 default = ""
588
589 [[items]]
590 section = "devel"
586 name = "persistent-nodemap"
591 name = "persistent-nodemap"
587 default = false
592 default = false
588 documentation = """When true, revlogs use a special reference version of the \
593 documentation = """When true, revlogs use a special reference version of the \
589 nodemap, that is not performant but is "known" to behave properly."""
594 nodemap, that is not performant but is "known" to behave properly."""
590
595
591 [[items]]
596 [[items]]
592 section = "devel"
597 section = "devel"
593 name = "server-insecure-exact-protocol"
598 name = "server-insecure-exact-protocol"
594 default = ""
599 default = ""
595
600
596 [[items]]
601 [[items]]
597 section = "devel"
602 section = "devel"
598 name = "servercafile"
603 name = "servercafile"
599 default = ""
604 default = ""
600
605
601 [[items]]
606 [[items]]
602 section = "devel"
607 section = "devel"
603 name = "serverexactprotocol"
608 name = "serverexactprotocol"
604 default = ""
609 default = ""
605
610
606 [[items]]
611 [[items]]
607 section = "devel"
612 section = "devel"
608 name = "serverrequirecert"
613 name = "serverrequirecert"
609 default = false
614 default = false
610
615
611 [[items]]
616 [[items]]
612 section = "devel"
617 section = "devel"
613 name = "strip-obsmarkers"
618 name = "strip-obsmarkers"
614 default = true
619 default = true
615
620
616 [[items]]
621 [[items]]
617 section = 'devel'
622 section = 'devel'
618 name = 'sync.status.pre-dirstate-write-file'
623 name = 'sync.status.pre-dirstate-write-file'
619 documentation = """
624 documentation = """
620 Makes the status algorithm wait for the existence of this file \
625 Makes the status algorithm wait for the existence of this file \
621 (or until a timeout of `devel.sync.status.pre-dirstate-write-file-timeout` \
626 (or until a timeout of `devel.sync.status.pre-dirstate-write-file-timeout` \
622 seconds) before taking the lock and writing the dirstate. \
627 seconds) before taking the lock and writing the dirstate. \
623 Status signals that it's ready to wait by creating a file \
628 Status signals that it's ready to wait by creating a file \
624 with the same name + `.waiting`. \
629 with the same name + `.waiting`. \
625 Useful when testing race conditions."""
630 Useful when testing race conditions."""
626
631
627 [[items]]
632 [[items]]
628 section = 'devel'
633 section = 'devel'
629 name = 'sync.status.pre-dirstate-write-file-timeout'
634 name = 'sync.status.pre-dirstate-write-file-timeout'
630 default=2
635 default=2
631
636
632 [[items]]
637 [[items]]
633 section = 'devel'
638 section = 'devel'
634 name = 'sync.dirstate.post-docket-read-file'
639 name = 'sync.dirstate.post-docket-read-file'
635
640
636 [[items]]
641 [[items]]
637 section = 'devel'
642 section = 'devel'
638 name = 'sync.dirstate.post-docket-read-file-timeout'
643 name = 'sync.dirstate.post-docket-read-file-timeout'
639 default=2
644 default=2
640
645
641 [[items]]
646 [[items]]
642 section = 'devel'
647 section = 'devel'
643 name = 'sync.dirstate.pre-read-file'
648 name = 'sync.dirstate.pre-read-file'
644
649
645 [[items]]
650 [[items]]
646 section = 'devel'
651 section = 'devel'
647 name = 'sync.dirstate.pre-read-file-timeout'
652 name = 'sync.dirstate.pre-read-file-timeout'
648 default=2
653 default=2
649
654
650 [[items]]
655 [[items]]
651 section = "devel"
656 section = "devel"
652 name = "user.obsmarker"
657 name = "user.obsmarker"
653
658
654 [[items]]
659 [[items]]
655 section = "devel"
660 section = "devel"
656 name = "warn-config"
661 name = "warn-config"
657
662
658 [[items]]
663 [[items]]
659 section = "devel"
664 section = "devel"
660 name = "warn-config-default"
665 name = "warn-config-default"
661
666
662 [[items]]
667 [[items]]
663 section = "devel"
668 section = "devel"
664 name = "warn-config-unknown"
669 name = "warn-config-unknown"
665
670
666 [[items]]
671 [[items]]
667 section = "devel"
672 section = "devel"
668 name = "warn-empty-changegroup"
673 name = "warn-empty-changegroup"
669 default = false
674 default = false
670
675
671 [[items]]
676 [[items]]
672 section = "diff"
677 section = "diff"
673 name = "merge"
678 name = "merge"
674 default = false
679 default = false
675 experimental = true
680 experimental = true
676
681
677 [[items]]
682 [[items]]
678 section = "email"
683 section = "email"
679 name = "bcc"
684 name = "bcc"
680
685
681 [[items]]
686 [[items]]
682 section = "email"
687 section = "email"
683 name = "cc"
688 name = "cc"
684
689
685 [[items]]
690 [[items]]
686 section = "email"
691 section = "email"
687 name = "charsets"
692 name = "charsets"
688 default-type = "list_type"
693 default-type = "list_type"
689
694
690 [[items]]
695 [[items]]
691 section = "email"
696 section = "email"
692 name = "from"
697 name = "from"
693
698
694 [[items]]
699 [[items]]
695 section = "email"
700 section = "email"
696 name = "method"
701 name = "method"
697 default = "smtp"
702 default = "smtp"
698
703
699 [[items]]
704 [[items]]
700 section = "email"
705 section = "email"
701 name = "reply-to"
706 name = "reply-to"
702
707
703 [[items]]
708 [[items]]
704 section = "email"
709 section = "email"
705 name = "to"
710 name = "to"
706
711
707 [[items]]
712 [[items]]
708 section = "experimental"
713 section = "experimental"
709 name = "archivemetatemplate"
714 name = "archivemetatemplate"
710 default-type = "dynamic"
715 default-type = "dynamic"
711
716
712 [[items]]
717 [[items]]
713 section = "experimental"
718 section = "experimental"
714 name = "auto-publish"
719 name = "auto-publish"
715 default = "publish"
720 default = "publish"
716
721
717 [[items]]
722 [[items]]
718 section = "experimental"
723 section = "experimental"
719 name = "bundle-phases"
724 name = "bundle-phases"
720 default = false
725 default = false
721
726
722 [[items]]
727 [[items]]
723 section = "experimental"
728 section = "experimental"
724 name = "bundle2-advertise"
729 name = "bundle2-advertise"
725 default = true
730 default = true
726
731
727 [[items]]
732 [[items]]
728 section = "experimental"
733 section = "experimental"
729 name = "bundle2-output-capture"
734 name = "bundle2-output-capture"
730 default = false
735 default = false
731
736
732 [[items]]
737 [[items]]
733 section = "experimental"
738 section = "experimental"
734 name = "bundle2.pushback"
739 name = "bundle2.pushback"
735 default = false
740 default = false
736
741
737 [[items]]
742 [[items]]
738 section = "experimental"
743 section = "experimental"
739 name = "bundle2lazylocking"
744 name = "bundle2lazylocking"
740 default = false
745 default = false
741
746
742 [[items]]
747 [[items]]
743 section = "experimental"
748 section = "experimental"
744 name = "bundlecomplevel"
749 name = "bundlecomplevel"
745
750
746 [[items]]
751 [[items]]
747 section = "experimental"
752 section = "experimental"
748 name = "bundlecomplevel.bzip2"
753 name = "bundlecomplevel.bzip2"
749
754
750 [[items]]
755 [[items]]
751 section = "experimental"
756 section = "experimental"
752 name = "bundlecomplevel.gzip"
757 name = "bundlecomplevel.gzip"
753
758
754 [[items]]
759 [[items]]
755 section = "experimental"
760 section = "experimental"
756 name = "bundlecomplevel.none"
761 name = "bundlecomplevel.none"
757
762
758 [[items]]
763 [[items]]
759 section = "experimental"
764 section = "experimental"
760 name = "bundlecomplevel.zstd"
765 name = "bundlecomplevel.zstd"
761
766
762 [[items]]
767 [[items]]
763 section = "experimental"
768 section = "experimental"
764 name = "bundlecompthreads"
769 name = "bundlecompthreads"
765
770
766 [[items]]
771 [[items]]
767 section = "experimental"
772 section = "experimental"
768 name = "bundlecompthreads.bzip2"
773 name = "bundlecompthreads.bzip2"
769
774
770 [[items]]
775 [[items]]
771 section = "experimental"
776 section = "experimental"
772 name = "bundlecompthreads.gzip"
777 name = "bundlecompthreads.gzip"
773
778
774 [[items]]
779 [[items]]
775 section = "experimental"
780 section = "experimental"
776 name = "bundlecompthreads.none"
781 name = "bundlecompthreads.none"
777
782
778 [[items]]
783 [[items]]
779 section = "experimental"
784 section = "experimental"
780 name = "bundlecompthreads.zstd"
785 name = "bundlecompthreads.zstd"
781
786
782 [[items]]
787 [[items]]
783 section = "experimental"
788 section = "experimental"
784 name = "changegroup3"
789 name = "changegroup3"
785 default = true
790 default = true
786
791
787 [[items]]
792 [[items]]
788 section = "experimental"
793 section = "experimental"
789 name = "changegroup4"
794 name = "changegroup4"
790 default = false
795 default = false
791
796
792 # might remove rank configuration once the computation has no impact
797 # might remove rank configuration once the computation has no impact
793 [[items]]
798 [[items]]
794 section = "experimental"
799 section = "experimental"
795 name = "changelog-v2.compute-rank"
800 name = "changelog-v2.compute-rank"
796 default = true
801 default = true
797
802
798 [[items]]
803 [[items]]
799 section = "experimental"
804 section = "experimental"
800 name = "cleanup-as-archived"
805 name = "cleanup-as-archived"
801 default = false
806 default = false
802
807
803 [[items]]
808 [[items]]
804 section = "experimental"
809 section = "experimental"
805 name = "clientcompressionengines"
810 name = "clientcompressionengines"
806 default-type = "list_type"
811 default-type = "list_type"
807
812
808 [[items]]
813 [[items]]
809 section = "experimental"
814 section = "experimental"
810 name = "copies.read-from"
815 name = "copies.read-from"
811 default = "filelog-only"
816 default = "filelog-only"
812
817
813 [[items]]
818 [[items]]
814 section = "experimental"
819 section = "experimental"
815 name = "copies.write-to"
820 name = "copies.write-to"
816 default = "filelog-only"
821 default = "filelog-only"
817
822
818 [[items]]
823 [[items]]
819 section = "experimental"
824 section = "experimental"
820 name = "copytrace"
825 name = "copytrace"
821 default = "on"
826 default = "on"
822
827
823 [[items]]
828 [[items]]
824 section = "experimental"
829 section = "experimental"
825 name = "copytrace.movecandidateslimit"
830 name = "copytrace.movecandidateslimit"
826 default = 100
831 default = 100
827
832
828 [[items]]
833 [[items]]
829 section = "experimental"
834 section = "experimental"
830 name = "copytrace.sourcecommitlimit"
835 name = "copytrace.sourcecommitlimit"
831 default = 100
836 default = 100
832
837
833 [[items]]
838 [[items]]
834 section = "experimental"
839 section = "experimental"
835 name = "crecordtest"
840 name = "crecordtest"
836
841
837 [[items]]
842 [[items]]
838 section = "experimental"
843 section = "experimental"
839 name = "directaccess"
844 name = "directaccess"
840 default = false
845 default = false
841
846
842 [[items]]
847 [[items]]
843 section = "experimental"
848 section = "experimental"
844 name = "directaccess.revnums"
849 name = "directaccess.revnums"
845 default = false
850 default = false
846
851
847 [[items]]
852 [[items]]
848 section = "experimental"
853 section = "experimental"
849 name = "editortmpinhg"
854 name = "editortmpinhg"
850 default = false
855 default = false
851
856
852 [[items]]
857 [[items]]
853 section = "experimental"
858 section = "experimental"
854 name = "evolution"
859 name = "evolution"
855 default-type = "list_type"
860 default-type = "list_type"
856
861
857 [[items]]
862 [[items]]
858 section = "experimental"
863 section = "experimental"
859 name = "evolution.allowdivergence"
864 name = "evolution.allowdivergence"
860 default = false
865 default = false
861 alias = [["experimental", "allowdivergence"]]
866 alias = [["experimental", "allowdivergence"]]
862
867
863 [[items]]
868 [[items]]
864 section = "experimental"
869 section = "experimental"
865 name = "evolution.allowunstable"
870 name = "evolution.allowunstable"
866
871
867 [[items]]
872 [[items]]
868 section = "experimental"
873 section = "experimental"
869 name = "evolution.bundle-obsmarker"
874 name = "evolution.bundle-obsmarker"
870 default = false
875 default = false
871
876
872 [[items]]
877 [[items]]
873 section = "experimental"
878 section = "experimental"
874 name = "evolution.bundle-obsmarker:mandatory"
879 name = "evolution.bundle-obsmarker:mandatory"
875 default = true
880 default = true
876
881
877 [[items]]
882 [[items]]
878 section = "experimental"
883 section = "experimental"
879 name = "evolution.createmarkers"
884 name = "evolution.createmarkers"
880
885
881 [[items]]
886 [[items]]
882 section = "experimental"
887 section = "experimental"
883 name = "evolution.effect-flags"
888 name = "evolution.effect-flags"
884 default = true
889 default = true
885 alias = [["experimental", "effect-flags"]]
890 alias = [["experimental", "effect-flags"]]
886
891
887 [[items]]
892 [[items]]
888 section = "experimental"
893 section = "experimental"
889 name = "evolution.exchange"
894 name = "evolution.exchange"
890
895
891 [[items]]
896 [[items]]
892 section = "experimental"
897 section = "experimental"
893 name = "evolution.report-instabilities"
898 name = "evolution.report-instabilities"
894 default = true
899 default = true
895
900
896 [[items]]
901 [[items]]
897 section = "experimental"
902 section = "experimental"
898 name = "evolution.track-operation"
903 name = "evolution.track-operation"
899 default = true
904 default = true
900
905
901 [[items]]
906 [[items]]
902 section = "experimental"
907 section = "experimental"
903 name = "exportableenviron"
908 name = "exportableenviron"
904 default-type = "list_type"
909 default-type = "list_type"
905
910
906 [[items]]
911 [[items]]
907 section = "experimental"
912 section = "experimental"
908 name = "extendedheader.index"
913 name = "extendedheader.index"
909
914
910 [[items]]
915 [[items]]
911 section = "experimental"
916 section = "experimental"
912 name = "extendedheader.similarity"
917 name = "extendedheader.similarity"
913 default = false
918 default = false
914
919
915 [[items]]
920 [[items]]
916 section = "experimental"
921 section = "experimental"
917 name = "extra-filter-revs"
922 name = "extra-filter-revs"
918 documentation = """Repo-level config to prevent a revset from being visible.
923 documentation = """Repo-level config to prevent a revset from being visible.
919 The target use case is to use `share` to expose different subsets of the same \
924 The target use case is to use `share` to expose different subsets of the same \
920 repository, especially server side. See also `server.view`."""
925 repository, especially server side. See also `server.view`."""
921
926
922 [[items]]
927 [[items]]
923 section = "experimental"
928 section = "experimental"
924 name = "graphshorten"
929 name = "graphshorten"
925 default = false
930 default = false
926
931
927 [[items]]
932 [[items]]
928 section = "experimental"
933 section = "experimental"
929 name = "graphstyle.grandparent"
934 name = "graphstyle.grandparent"
930 default-type = "dynamic"
935 default-type = "dynamic"
931
936
932 [[items]]
937 [[items]]
933 section = "experimental"
938 section = "experimental"
934 name = "graphstyle.missing"
939 name = "graphstyle.missing"
935 default-type = "dynamic"
940 default-type = "dynamic"
936
941
937 [[items]]
942 [[items]]
938 section = "experimental"
943 section = "experimental"
939 name = "graphstyle.parent"
944 name = "graphstyle.parent"
940 default-type = "dynamic"
945 default-type = "dynamic"
941
946
942 [[items]]
947 [[items]]
943 section = "experimental"
948 section = "experimental"
944 name = "hook-track-tags"
949 name = "hook-track-tags"
945 default = false
950 default = false
946
951
947 [[items]]
952 [[items]]
948 section = "experimental"
953 section = "experimental"
949 name = "httppostargs"
954 name = "httppostargs"
950 default = false
955 default = false
951
956
952 [[items]]
957 [[items]]
953 section = "experimental"
958 section = "experimental"
954 name = "log.topo"
959 name = "log.topo"
955 default = false
960 default = false
956
961
957 [[items]]
962 [[items]]
958 section = "experimental"
963 section = "experimental"
959 name = "maxdeltachainspan"
964 name = "maxdeltachainspan"
960 default = -1
965 default = -1
961
966
962 [[items]]
967 [[items]]
963 section = "experimental"
968 section = "experimental"
964 name = "merge-track-salvaged"
969 name = "merge-track-salvaged"
965 default = false
970 default = false
966 documentation = """Tracks files which were undeleted (merge might delete them \
971 documentation = """Tracks files which were undeleted (merge might delete them \
967 but we explicitly kept/undeleted them) and creates new filenodes for them."""
972 but we explicitly kept/undeleted them) and creates new filenodes for them."""
968
973
969 [[items]]
974 [[items]]
970 section = "experimental"
975 section = "experimental"
971 name = "merge.checkpathconflicts"
976 name = "merge.checkpathconflicts"
972 default = false
977 default = false
973
978
974 [[items]]
979 [[items]]
975 section = "experimental"
980 section = "experimental"
976 name = "mmapindexthreshold"
981 name = "mmapindexthreshold"
977
982
978 [[items]]
983 [[items]]
979 section = "experimental"
984 section = "experimental"
980 name = "narrow"
985 name = "narrow"
981 default = false
986 default = false
982
987
983 [[items]]
988 [[items]]
984 section = "experimental"
989 section = "experimental"
985 name = "nointerrupt"
990 name = "nointerrupt"
986 default = false
991 default = false
987
992
988 [[items]]
993 [[items]]
989 section = "experimental"
994 section = "experimental"
990 name = "nointerrupt-interactiveonly"
995 name = "nointerrupt-interactiveonly"
991 default = true
996 default = true
992
997
993 [[items]]
998 [[items]]
994 section = "experimental"
999 section = "experimental"
995 name = "nonnormalparanoidcheck"
1000 name = "nonnormalparanoidcheck"
996 default = false
1001 default = false
997
1002
998 [[items]]
1003 [[items]]
999 section = "experimental"
1004 section = "experimental"
1000 name = "obsmarkers-exchange-debug"
1005 name = "obsmarkers-exchange-debug"
1001 default = false
1006 default = false
1002
1007
1003 [[items]]
1008 [[items]]
1004 section = "experimental"
1009 section = "experimental"
1005 name = "rebaseskipobsolete"
1010 name = "rebaseskipobsolete"
1006 default = true
1011 default = true
1007
1012
1008 [[items]]
1013 [[items]]
1009 section = "experimental"
1014 section = "experimental"
1010 name = "remotenames"
1015 name = "remotenames"
1011 default = false
1016 default = false
1012
1017
1013 [[items]]
1018 [[items]]
1014 section = "experimental"
1019 section = "experimental"
1015 name = "removeemptydirs"
1020 name = "removeemptydirs"
1016 default = true
1021 default = true
1017
1022
1018 [[items]]
1023 [[items]]
1019 section = "experimental"
1024 section = "experimental"
1020 name = "revert.interactive.select-to-keep"
1025 name = "revert.interactive.select-to-keep"
1021 default = false
1026 default = false
1022
1027
1023 [[items]]
1028 [[items]]
1024 section = "experimental"
1029 section = "experimental"
1025 name = "revisions.disambiguatewithin"
1030 name = "revisions.disambiguatewithin"
1026
1031
1027 [[items]]
1032 [[items]]
1028 section = "experimental"
1033 section = "experimental"
1029 name = "revisions.prefixhexnode"
1034 name = "revisions.prefixhexnode"
1030 default = false
1035 default = false
1031
1036
1032 # "out of experimental" todo list.
1037 # "out of experimental" todo list.
1033 #
1038 #
1034 # * include management of a persistent nodemap in the main docket
1039 # * include management of a persistent nodemap in the main docket
1035 # * enforce a "no-truncate" policy for mmap safety
1040 # * enforce a "no-truncate" policy for mmap safety
1036 # - for censoring operation
1041 # - for censoring operation
1037 # - for stripping operation
1042 # - for stripping operation
1038 # - for rollback operation
1043 # - for rollback operation
1039 # * proper streaming (race free) of the docket file
1044 # * proper streaming (race free) of the docket file
1040 # * track garbage data to eventually allow rewriting -existing- sidedata.
1045 # * track garbage data to eventually allow rewriting -existing- sidedata.
1041 # * Exchange-wise, we will also need to do something more efficient than
1046 # * Exchange-wise, we will also need to do something more efficient than
1042 # keeping references to the affected revlogs, especially memory-wise when
1047 # keeping references to the affected revlogs, especially memory-wise when
1043 # rewriting sidedata.
1048 # rewriting sidedata.
1044 # * introduce a proper solution to reduce the number of filelog related files.
1049 # * introduce a proper solution to reduce the number of filelog related files.
1045 # * use caching for reading sidedata (similar to what we do for data).
1050 # * use caching for reading sidedata (similar to what we do for data).
1046 # * no longer set offset=0 if sidedata_size=0 (simplify cutoff computation).
1051 # * no longer set offset=0 if sidedata_size=0 (simplify cutoff computation).
1047 # * Improvement to consider
1052 # * Improvement to consider
1048 # - avoid compression header in chunk using the default compression?
1053 # - avoid compression header in chunk using the default compression?
1049 # - forbid "inline" compression mode entirely?
1054 # - forbid "inline" compression mode entirely?
1050 # - split the data offset and flag field (the 2 bytes save are mostly trouble)
1055 # - split the data offset and flag field (the 2 bytes save are mostly trouble)
1051 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1056 # - keep track of uncompressed -chunk- size (to preallocate memory better)
1052 # - keep track of chain base or size (probably not that useful anymore)
1057 # - keep track of chain base or size (probably not that useful anymore)
1053 [[items]]
1058 [[items]]
1054 section = "experimental"
1059 section = "experimental"
1055 name = "revlogv2"
1060 name = "revlogv2"
1056
1061
1057 [[items]]
1062 [[items]]
1058 section = "experimental"
1063 section = "experimental"
1059 name = "rust.index"
1064 name = "rust.index"
1060 default = false
1065 default = false
1061
1066
1062 [[items]]
1067 [[items]]
1063 section = "experimental"
1068 section = "experimental"
1064 name = "server.allow-hidden-access"
1069 name = "server.allow-hidden-access"
1065 default-type = "list_type"
1070 default-type = "list_type"
1066
1071
1067 [[items]]
1072 [[items]]
1068 section = "experimental"
1073 section = "experimental"
1069 name = "server.filesdata.recommended-batch-size"
1074 name = "server.filesdata.recommended-batch-size"
1070 default = 50000
1075 default = 50000
1071
1076
1072 [[items]]
1077 [[items]]
1073 section = "experimental"
1078 section = "experimental"
1074 name = "server.manifestdata.recommended-batch-size"
1079 name = "server.manifestdata.recommended-batch-size"
1075 default = 100000
1080 default = 100000
1076
1081
1077 [[items]]
1082 [[items]]
1078 section = "experimental"
1083 section = "experimental"
1079 name = "server.stream-narrow-clones"
1084 name = "server.stream-narrow-clones"
1080 default = false
1085 default = false
1081
1086
1082 [[items]]
1087 [[items]]
1083 section = "experimental"
1088 section = "experimental"
1084 name = "single-head-per-branch"
1089 name = "single-head-per-branch"
1085 default = false
1090 default = false
1086
1091
1087 [[items]]
1092 [[items]]
1088 section = "experimental"
1093 section = "experimental"
1089 name = "single-head-per-branch:account-closed-heads"
1094 name = "single-head-per-branch:account-closed-heads"
1090 default = false
1095 default = false
1091
1096
1092 [[items]]
1097 [[items]]
1093 section = "experimental"
1098 section = "experimental"
1094 name = "single-head-per-branch:public-changes-only"
1099 name = "single-head-per-branch:public-changes-only"
1095 default = false
1100 default = false
1096
1101
1097 [[items]]
1102 [[items]]
1098 section = "experimental"
1103 section = "experimental"
1099 name = "sparse-read"
1104 name = "sparse-read"
1100 default = false
1105 default = false
1101
1106
1102 [[items]]
1107 [[items]]
1103 section = "experimental"
1108 section = "experimental"
1104 name = "sparse-read.density-threshold"
1109 name = "sparse-read.density-threshold"
1105 default = 0.5
1110 default = 0.5
1106
1111
1107 [[items]]
1112 [[items]]
1108 section = "experimental"
1113 section = "experimental"
1109 name = "sparse-read.min-gap-size"
1114 name = "sparse-read.min-gap-size"
1110 default = "65K"
1115 default = "65K"
1111
1116
1112 [[items]]
1117 [[items]]
1113 section = "experimental"
1118 section = "experimental"
1114 name = "stream-v3"
1119 name = "stream-v3"
1115 default = false
1120 default = false
1116
1121
1117 [[items]]
1122 [[items]]
1118 section = "experimental"
1123 section = "experimental"
1119 name = "treemanifest"
1124 name = "treemanifest"
1120 default = false
1125 default = false
1121
1126
1122 [[items]]
1127 [[items]]
1123 section = "experimental"
1128 section = "experimental"
1124 name = "update.atomic-file"
1129 name = "update.atomic-file"
1125 default = false
1130 default = false
1126
1131
1127 [[items]]
1132 [[items]]
1128 section = "experimental"
1133 section = "experimental"
1129 name = "web.full-garbage-collection-rate"
1134 name = "web.full-garbage-collection-rate"
1130 default = 1 # still forcing a full collection on each request
1135 default = 1 # still forcing a full collection on each request
1131
1136
1132 [[items]]
1137 [[items]]
1133 section = "experimental"
1138 section = "experimental"
1134 name = "worker.repository-upgrade"
1139 name = "worker.repository-upgrade"
1135 default = false
1140 default = false
1136
1141
1137 [[items]]
1142 [[items]]
1138 section = "experimental"
1143 section = "experimental"
1139 name = "worker.wdir-get-thread-safe"
1144 name = "worker.wdir-get-thread-safe"
1140 default = false
1145 default = false
1141
1146
1142 [[items]]
1147 [[items]]
1143 section = "experimental"
1148 section = "experimental"
1144 name = "xdiff"
1149 name = "xdiff"
1145 default = false
1150 default = false
1146
1151
1147 [[items]]
1152 [[items]]
1148 section = "extdata"
1153 section = "extdata"
1149 name = ".*"
1154 name = ".*"
1150 generic = true
1155 generic = true
1151
1156
1152 [[items]]
1157 [[items]]
1153 section = "extensions"
1158 section = "extensions"
1154 name = "[^:]*"
1159 name = "[^:]*"
1155 generic = true
1160 generic = true
1156
1161
1157 [[items]]
1162 [[items]]
1158 section = "extensions"
1163 section = "extensions"
1159 name = "[^:]*:required"
1164 name = "[^:]*:required"
1160 default = false
1165 default = false
1161 generic = true
1166 generic = true
1162
1167
1163
1168
1164 # The format section is dedicated to control of the repository on disk format
1169 # The format section is dedicated to control of the repository on disk format
1165 # and constraints.
1170 # and constraints.
1166 #
1171 #
1167 # A format change affects which data is expected to be stored in the repository
1172 # A format change affects which data is expected to be stored in the repository
1168 # and how. It impacts other clients whatever their versions are; a format change
1173 # and how. It impacts other clients whatever their versions are; a format change
1169 # often comes with an associated entry in the requirements.
1174 # often comes with an associated entry in the requirements.
1170 #
1175 #
1171 # The options are usually in the form `use-xxx-yyy` (with xxx-yyy the feature name).
1176 # The options are usually in the form `use-xxx-yyy` (with xxx-yyy the feature name).
1172 #
1177 #
1173 # To configure details of how the repository is accessed, without affecting the
1178 # To configure details of how the repository is accessed, without affecting the
1174 # repository formats, see the `storage section`.
1179 # repository formats, see the `storage section`.
1175
1180
1176 [[items]]
1181 [[items]]
1177 section = "format"
1182 section = "format"
1178 name = "bookmarks-in-store"
1183 name = "bookmarks-in-store"
1179 default = false
1184 default = false
1180
1185
1181 [[items]]
1186 [[items]]
1182 section = "format"
1187 section = "format"
1183 name = "chunkcachesize"
1188 name = "chunkcachesize"
1184 experimental = true
1189 experimental = true
1185
1190
1186 [[items]]
1191 [[items]]
1187 section = "format"
1192 section = "format"
1188 name = "dotencode"
1193 name = "dotencode"
1189 default = true
1194 default = true
1190
1195
1191 # The interaction between the archived phase and obsolescence markers needs to
1196 # The interaction between the archived phase and obsolescence markers needs to
1192 # be sorted out before wider usage of this is to be considered.
1197 # be sorted out before wider usage of this is to be considered.
1193 #
1198 #
1194 # At the time this message is written, behavior when archiving obsolete
1199 # At the time this message is written, behavior when archiving obsolete
1195 # changesets differs significantly from stripping. As part of stripping, we also
1200 # changesets differs significantly from stripping. As part of stripping, we also
1196 # remove the obsolescence marker associated to the stripped changesets,
1201 # remove the obsolescence marker associated to the stripped changesets,
1197 # revealing the predecessor changesets when applicable. When archiving, we
1202 # revealing the predecessor changesets when applicable. When archiving, we
1198 # don't touch the obsolescence markers, keeping everything hidden. This can
1203 # don't touch the obsolescence markers, keeping everything hidden. This can
1199 # result in a quite confusing situation for people combining exchanging draft
1204 # result in a quite confusing situation for people combining exchanging draft
1200 # with the archived phases. As some markers needed by others may be skipped
1205 # with the archived phases. As some markers needed by others may be skipped
1201 # during exchange.
1206 # during exchange.
1202 [[items]]
1207 [[items]]
1203 section = "format"
1208 section = "format"
1204 name = "exp-archived-phase"
1209 name = "exp-archived-phase"
1205 default = false
1210 default = false
1206 experimental = true
1211 experimental = true
1207
1212
1208 # Experimental TODOs:
1213 # Experimental TODOs:
1209 #
1214 #
1210 # * Same as for revlogv2 (but for the reduction of the number of files)
1215 # * Same as for revlogv2 (but for the reduction of the number of files)
1211 # * Actually computing the rank of changesets
1216 # * Actually computing the rank of changesets
1212 # * Improvement to investigate
1217 # * Improvement to investigate
1213 # - storing .hgtags fnode
1218 # - storing .hgtags fnode
1214 # - storing branch related identifier
1219 # - storing branch related identifier
1215 [[items]]
1220 [[items]]
1216 section = "format"
1221 section = "format"
1217 name = "exp-use-changelog-v2"
1222 name = "exp-use-changelog-v2"
1218 experimental = true
1223 experimental = true
1219
1224
1220 [[items]]
1225 [[items]]
1221 section = "format"
1226 section = "format"
1222 name = "exp-use-copies-side-data-changeset"
1227 name = "exp-use-copies-side-data-changeset"
1223 default = false
1228 default = false
1224 experimental = true
1229 experimental = true
1225
1230
1226 [[items]]
1231 [[items]]
1227 section = "format"
1232 section = "format"
1228 name = "generaldelta"
1233 name = "generaldelta"
1229 default = false
1234 default = false
1230 experimental = true
1235 experimental = true
1231
1236
1232 [[items]]
1237 [[items]]
1233 section = "format"
1238 section = "format"
1234 name = "manifestcachesize"
1239 name = "manifestcachesize"
1235 experimental = true
1240 experimental = true
1236
1241
1237 [[items]]
1242 [[items]]
1238 section = "format"
1243 section = "format"
1239 name = "maxchainlen"
1244 name = "maxchainlen"
1240 default-type = "dynamic"
1245 default-type = "dynamic"
1241 experimental = true
1246 experimental = true
1242
1247
1243 [[items]]
1248 [[items]]
1244 section = "format"
1249 section = "format"
1245 name = "obsstore-version"
1250 name = "obsstore-version"
1246
1251
1247 [[items]]
1252 [[items]]
1248 section = "format"
1253 section = "format"
1249 name = "revlog-compression"
1254 name = "revlog-compression"
1250 default-type = "lambda"
1255 default-type = "lambda"
1251 alias = [["experimental", "format.compression"]]
1256 alias = [["experimental", "format.compression"]]
1252 default = [ "zstd", "zlib",]
1257 default = [ "zstd", "zlib",]
1253
1258
1254 [[items]]
1259 [[items]]
1255 section = "format"
1260 section = "format"
1256 name = "sparse-revlog"
1261 name = "sparse-revlog"
1257 default = true
1262 default = true
1258
1263
1259 [[items]]
1264 [[items]]
1260 section = "format"
1265 section = "format"
1261 name = "use-dirstate-tracked-hint"
1266 name = "use-dirstate-tracked-hint"
1262 default = false
1267 default = false
1263 experimental = true
1268 experimental = true
1264
1269
1265 [[items]]
1270 [[items]]
1266 section = "format"
1271 section = "format"
1267 name = "use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories"
1272 name = "use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories"
1268 default = false
1273 default = false
1269 experimental = true
1274 experimental = true
1270
1275
1271 [[items]]
1276 [[items]]
1272 section = "format"
1277 section = "format"
1273 name = "use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories:quiet"
1278 name = "use-dirstate-tracked-hint.automatic-upgrade-of-mismatching-repositories:quiet"
1274 default = false
1279 default = false
1275 experimental = true
1280 experimental = true
1276
1281
1277 [[items]]
1282 [[items]]
1278 section = "format"
1283 section = "format"
1279 name = "use-dirstate-tracked-hint.version"
1284 name = "use-dirstate-tracked-hint.version"
1280 default = 1
1285 default = 1
1281 experimental = true
1286 experimental = true
1282
1287
1283 [[items]]
1288 [[items]]
1284 section = "format"
1289 section = "format"
1285 name = "use-dirstate-v2"
1290 name = "use-dirstate-v2"
1286 default = false
1291 default = false
1287 alias = [["format", "exp-rc-dirstate-v2"]]
1292 alias = [["format", "exp-rc-dirstate-v2"]]
1288 experimental = true
1293 experimental = true
1289 documentation = """Enables dirstate-v2 format *when creating a new repository*.
1294 documentation = """Enables dirstate-v2 format *when creating a new repository*.
1290 Which format to use for existing repos is controlled by `.hg/requires`."""
1295 Which format to use for existing repos is controlled by `.hg/requires`."""
1291
1296
1292 [[items]]
1297 [[items]]
1293 section = "format"
1298 section = "format"
1294 name = "use-dirstate-v2.automatic-upgrade-of-mismatching-repositories"
1299 name = "use-dirstate-v2.automatic-upgrade-of-mismatching-repositories"
1295 default = false
1300 default = false
1296 experimental = true
1301 experimental = true
1297
1302
1298 [[items]]
1303 [[items]]
1299 section = "format"
1304 section = "format"
1300 name = "use-dirstate-v2.automatic-upgrade-of-mismatching-repositories:quiet"
1305 name = "use-dirstate-v2.automatic-upgrade-of-mismatching-repositories:quiet"
1301 default = false
1306 default = false
1302 experimental = true
1307 experimental = true
1303
1308
1304 # Having this on by default means we are confident about the scaling of phases.
1309 # Having this on by default means we are confident about the scaling of phases.
1305 # This is not guaranteed to be the case at the time this message is written.
1310 # This is not guaranteed to be the case at the time this message is written.
1306 [[items]]
1311 [[items]]
1307 section = "format"
1312 section = "format"
1308 name = "use-internal-phase"
1313 name = "use-internal-phase"
1309 default = false
1314 default = false
1310 experimental = true
1315 experimental = true
1311
1316
1312 [[items]]
1317 [[items]]
1313 section = "format"
1318 section = "format"
1314 name = "use-persistent-nodemap"
1319 name = "use-persistent-nodemap"
1315 default-type = "dynamic"
1320 default-type = "dynamic"
1316
1321
1317 [[items]]
1322 [[items]]
1318 section = "format"
1323 section = "format"
1319 name = "use-share-safe"
1324 name = "use-share-safe"
1320 default = true
1325 default = true
1321
1326
1322 [[items]]
1327 [[items]]
1323 section = "format"
1328 section = "format"
1324 name = "use-share-safe.automatic-upgrade-of-mismatching-repositories"
1329 name = "use-share-safe.automatic-upgrade-of-mismatching-repositories"
1325 default = false
1330 default = false
1326 experimental = true
1331 experimental = true
1327
1332
1328 [[items]]
1333 [[items]]
1329 section = "format"
1334 section = "format"
1330 name = "use-share-safe.automatic-upgrade-of-mismatching-repositories:quiet"
1335 name = "use-share-safe.automatic-upgrade-of-mismatching-repositories:quiet"
1331 default = false
1336 default = false
1332 experimental = true
1337 experimental = true
1333
1338
1334 [[items]]
1339 [[items]]
1335 section = "format"
1340 section = "format"
1336 name = "usefncache"
1341 name = "usefncache"
1337 default = true
1342 default = true
1338
1343
1339 [[items]]
1344 [[items]]
1340 section = "format"
1345 section = "format"
1341 name = "usegeneraldelta"
1346 name = "usegeneraldelta"
1342 default = true
1347 default = true
1343
1348
1344 [[items]]
1349 [[items]]
1345 section = "format"
1350 section = "format"
1346 name = "usestore"
1351 name = "usestore"
1347 default = true
1352 default = true
1348
1353
1349 [[items]]
1354 [[items]]
1350 section = "fsmonitor"
1355 section = "fsmonitor"
1351 name = "warn_update_file_count"
1356 name = "warn_update_file_count"
1352 default = 50000
1357 default = 50000
1353
1358
1354 [[items]]
1359 [[items]]
1355 section = "fsmonitor"
1360 section = "fsmonitor"
1356 name = "warn_update_file_count_rust"
1361 name = "warn_update_file_count_rust"
1357 default = 400000
1362 default = 400000
1358
1363
1359 [[items]]
1364 [[items]]
1360 section = "fsmonitor"
1365 section = "fsmonitor"
1361 name = "warn_when_unused"
1366 name = "warn_when_unused"
1362 default = true
1367 default = true
1363
1368
1364 [[items]]
1369 [[items]]
1365 section = "help"
1370 section = "help"
1366 name = 'hidden-command\..*'
1371 name = 'hidden-command\..*'
1367 default = false
1372 default = false
1368 generic = true
1373 generic = true
1369
1374
1370 [[items]]
1375 [[items]]
1371 section = "help"
1376 section = "help"
1372 name = 'hidden-topic\..*'
1377 name = 'hidden-topic\..*'
1373 default = false
1378 default = false
1374 generic = true
1379 generic = true
1375
1380
1376 [[items]]
1381 [[items]]
1377 section = "hgweb-paths"
1382 section = "hgweb-paths"
1378 name = ".*"
1383 name = ".*"
1379 default-type = "list_type"
1384 default-type = "list_type"
1380 generic = true
1385 generic = true
1381
1386
1382 [[items]]
1387 [[items]]
1383 section = "hooks"
1388 section = "hooks"
1384 name = ".*:run-with-plain"
1389 name = ".*:run-with-plain"
1385 default = true
1390 default = true
1386 generic = true
1391 generic = true
1387
1392
1388 [[items]]
1393 [[items]]
1389 section = "hooks"
1394 section = "hooks"
1390 name = "[^:]*"
1395 name = "[^:]*"
1391 default-type = "dynamic"
1396 default-type = "dynamic"
1392 generic = true
1397 generic = true
1393
1398
1394 [[items]]
1399 [[items]]
1395 section = "hostfingerprints"
1400 section = "hostfingerprints"
1396 name = ".*"
1401 name = ".*"
1397 default-type = "list_type"
1402 default-type = "list_type"
1398 generic = true
1403 generic = true
1399
1404
1400 [[items]]
1405 [[items]]
1401 section = "hostsecurity"
1406 section = "hostsecurity"
1402 name = ".*:ciphers$"
1407 name = ".*:ciphers$"
1403 default-type = "dynamic"
1408 default-type = "dynamic"
1404 generic = true
1409 generic = true
1405
1410
1406 [[items]]
1411 [[items]]
1407 section = "hostsecurity"
1412 section = "hostsecurity"
1408 name = ".*:fingerprints$"
1413 name = ".*:fingerprints$"
1409 default-type = "list_type"
1414 default-type = "list_type"
1410 generic = true
1415 generic = true
1411
1416
1412 [[items]]
1417 [[items]]
1413 section = "hostsecurity"
1418 section = "hostsecurity"
1414 name = ".*:minimumprotocol$"
1419 name = ".*:minimumprotocol$"
1415 default-type = "dynamic"
1420 default-type = "dynamic"
1416 generic = true
1421 generic = true
1417
1422
1418 [[items]]
1423 [[items]]
1419 section = "hostsecurity"
1424 section = "hostsecurity"
1420 name = ".*:verifycertsfile$"
1425 name = ".*:verifycertsfile$"
1421 generic = true
1426 generic = true
1422
1427
1423 [[items]]
1428 [[items]]
1424 section = "hostsecurity"
1429 section = "hostsecurity"
1425 name = "ciphers"
1430 name = "ciphers"
1426
1431
1427 [[items]]
1432 [[items]]
1428 section = "hostsecurity"
1433 section = "hostsecurity"
1429 name = "minimumprotocol"
1434 name = "minimumprotocol"
1430 default-type = "dynamic"
1435 default-type = "dynamic"
1431
1436
1432 [[items]]
1437 [[items]]
1433 section = "http"
1438 section = "http"
1434 name = "timeout"
1439 name = "timeout"
1435
1440
1436 [[items]]
1441 [[items]]
1437 section = "http_proxy"
1442 section = "http_proxy"
1438 name = "always"
1443 name = "always"
1439 default = false
1444 default = false
1440
1445
1441 [[items]]
1446 [[items]]
1442 section = "http_proxy"
1447 section = "http_proxy"
1443 name = "host"
1448 name = "host"
1444
1449
1445 [[items]]
1450 [[items]]
1446 section = "http_proxy"
1451 section = "http_proxy"
1447 name = "no"
1452 name = "no"
1448 default-type = "list_type"
1453 default-type = "list_type"
1449
1454
1450 [[items]]
1455 [[items]]
1451 section = "http_proxy"
1456 section = "http_proxy"
1452 name = "passwd"
1457 name = "passwd"
1453
1458
1454 [[items]]
1459 [[items]]
1455 section = "http_proxy"
1460 section = "http_proxy"
1456 name = "user"
1461 name = "user"
1457
1462
1458 [[items]]
1463 [[items]]
1459 section = "logtoprocess"
1464 section = "logtoprocess"
1460 name = "command"
1465 name = "command"
1461
1466
1462 [[items]]
1467 [[items]]
1463 section = "logtoprocess"
1468 section = "logtoprocess"
1464 name = "commandexception"
1469 name = "commandexception"
1465
1470
1466 [[items]]
1471 [[items]]
1467 section = "logtoprocess"
1472 section = "logtoprocess"
1468 name = "commandfinish"
1473 name = "commandfinish"
1469
1474
1470 [[items]]
1475 [[items]]
1471 section = "logtoprocess"
1476 section = "logtoprocess"
1472 name = "develwarn"
1477 name = "develwarn"
1473
1478
1474 [[items]]
1479 [[items]]
1475 section = "logtoprocess"
1480 section = "logtoprocess"
1476 name = "uiblocked"
1481 name = "uiblocked"
1477
1482
1478 [[items]]
1483 [[items]]
1479 section = "merge"
1484 section = "merge"
1480 name = "checkignored"
1485 name = "checkignored"
1481 default = "abort"
1486 default = "abort"
1482
1487
1483 [[items]]
1488 [[items]]
1484 section = "merge"
1489 section = "merge"
1485 name = "checkunknown"
1490 name = "checkunknown"
1486 default = "abort"
1491 default = "abort"
1487
1492
1488 [[items]]
1493 [[items]]
1489 section = "merge"
1494 section = "merge"
1490 name = "disable-partial-tools"
1495 name = "disable-partial-tools"
1491 default = false
1496 default = false
1492 experimental = true
1497 experimental = true
1493
1498
1494 [[items]]
1499 [[items]]
1495 section = "merge"
1500 section = "merge"
1496 name = "followcopies"
1501 name = "followcopies"
1497 default = true
1502 default = true
1498
1503
1499 [[items]]
1504 [[items]]
1500 section = "merge"
1505 section = "merge"
1501 name = "on-failure"
1506 name = "on-failure"
1502 default = "continue"
1507 default = "continue"
1503
1508
1504 [[items]]
1509 [[items]]
1505 section = "merge"
1510 section = "merge"
1506 name = "preferancestor"
1511 name = "preferancestor"
1507 default-type = "lambda"
1512 default-type = "lambda"
1508 default = ["*"]
1513 default = ["*"]
1509 experimental = true
1514 experimental = true
1510
1515
1511 [[items]]
1516 [[items]]
1512 section = "merge"
1517 section = "merge"
1513 name = "strict-capability-check"
1518 name = "strict-capability-check"
1514 default = false
1519 default = false
1515
1520
1516 [[items]]
1521 [[items]]
1517 section = "merge-tools"
1522 section = "merge-tools"
1518 name = ".*"
1523 name = ".*"
1519 generic = true
1524 generic = true
1520
1525
1521 [[items]]
1526 [[items]]
1522 section = "merge-tools"
1527 section = "merge-tools"
1523 name = '.*\.args$'
1528 name = '.*\.args$'
1524 default = "$local $base $other"
1529 default = "$local $base $other"
1525 generic = true
1530 generic = true
1526 priority = -1
1531 priority = -1
1527
1532
1528 [[items]]
1533 [[items]]
1529 section = "merge-tools"
1534 section = "merge-tools"
1530 name = '.*\.binary$'
1535 name = '.*\.binary$'
1531 default = false
1536 default = false
1532 generic = true
1537 generic = true
1533 priority = -1
1538 priority = -1
1534
1539
1535 [[items]]
1540 [[items]]
1536 section = "merge-tools"
1541 section = "merge-tools"
1537 name = '.*\.check$'
1542 name = '.*\.check$'
1538 default-type = "list_type"
1543 default-type = "list_type"
1539 generic = true
1544 generic = true
1540 priority = -1
1545 priority = -1
1541
1546
1542 [[items]]
1547 [[items]]
1543 section = "merge-tools"
1548 section = "merge-tools"
1544 name = '.*\.checkchanged$'
1549 name = '.*\.checkchanged$'
1545 default = false
1550 default = false
1546 generic = true
1551 generic = true
1547 priority = -1
1552 priority = -1
1548
1553
1549 [[items]]
1554 [[items]]
1550 section = "merge-tools"
1555 section = "merge-tools"
1551 name = '.*\.executable$'
1556 name = '.*\.executable$'
1552 default-type = "dynamic"
1557 default-type = "dynamic"
1553 generic = true
1558 generic = true
1554 priority = -1
1559 priority = -1
1555
1560
1556 [[items]]
1561 [[items]]
1557 section = "merge-tools"
1562 section = "merge-tools"
1558 name = '.*\.fixeol$'
1563 name = '.*\.fixeol$'
1559 default = false
1564 default = false
1560 generic = true
1565 generic = true
1561 priority = -1
1566 priority = -1
1562
1567
1563 [[items]]
1568 [[items]]
1564 section = "merge-tools"
1569 section = "merge-tools"
1565 name = '.*\.gui$'
1570 name = '.*\.gui$'
1566 default = false
1571 default = false
1567 generic = true
1572 generic = true
1568 priority = -1
1573 priority = -1
1569
1574
1570 [[items]]
1575 [[items]]
1571 section = "merge-tools"
1576 section = "merge-tools"
1572 name = '.*\.mergemarkers$'
1577 name = '.*\.mergemarkers$'
1573 default = "basic"
1578 default = "basic"
1574 generic = true
1579 generic = true
1575 priority = -1
1580 priority = -1
1576
1581
1577 [[items]]
1582 [[items]]
1578 section = "merge-tools"
1583 section = "merge-tools"
1579 name = '.*\.mergemarkertemplate$' # take from command-templates.mergemarker
1584 name = '.*\.mergemarkertemplate$' # take from command-templates.mergemarker
1580 default-type = "dynamic"
1585 default-type = "dynamic"
1581 generic = true
1586 generic = true
1582 priority = -1
1587 priority = -1
1583
1588
1584 [[items]]
1589 [[items]]
1585 section = "merge-tools"
1590 section = "merge-tools"
1586 name = '.*\.premerge$'
1591 name = '.*\.premerge$'
1587 default-type = "dynamic"
1592 default-type = "dynamic"
1588 generic = true
1593 generic = true
1589 priority = -1
1594 priority = -1
1590
1595
1591 [[items]]
1596 [[items]]
1592 section = "merge-tools"
1597 section = "merge-tools"
1593 name = '.*\.priority$'
1598 name = '.*\.priority$'
1594 default = 0
1599 default = 0
1595 generic = true
1600 generic = true
1596 priority = -1
1601 priority = -1
1597
1602
1598 [[items]]
1603 [[items]]
1599 section = "merge-tools"
1604 section = "merge-tools"
1600 name = '.*\.regappend$'
1605 name = '.*\.regappend$'
1601 default = ""
1606 default = ""
1602 generic = true
1607 generic = true
1603 priority = -1
1608 priority = -1
1604
1609
1605 [[items]]
1610 [[items]]
1606 section = "merge-tools"
1611 section = "merge-tools"
1607 name = '.*\.symlink$'
1612 name = '.*\.symlink$'
1608 default = false
1613 default = false
1609 generic = true
1614 generic = true
1610 priority = -1
1615 priority = -1
1611
1616
1612 [[items]]
1617 [[items]]
1613 section = "pager"
1618 section = "pager"
1614 name = "attend-.*"
1619 name = "attend-.*"
1615 default-type = "dynamic"
1620 default-type = "dynamic"
1616 generic = true
1621 generic = true
1617
1622
1618 [[items]]
1623 [[items]]
1619 section = "pager"
1624 section = "pager"
1620 name = "ignore"
1625 name = "ignore"
1621 default-type = "list_type"
1626 default-type = "list_type"
1622
1627
1623 [[items]]
1628 [[items]]
1624 section = "pager"
1629 section = "pager"
1625 name = "pager"
1630 name = "pager"
1626 default-type = "dynamic"
1631 default-type = "dynamic"
1627
1632
1628 [[items]]
1633 [[items]]
1629 section = "partial-merge-tools"
1634 section = "partial-merge-tools"
1630 name = ".*"
1635 name = ".*"
1631 generic = true
1636 generic = true
1632 experimental = true
1637 experimental = true
1633
1638
1634 [[items]]
1639 [[items]]
1635 section = "partial-merge-tools"
1640 section = "partial-merge-tools"
1636 name = '.*\.args'
1641 name = '.*\.args'
1637 default = "$local $base $other"
1642 default = "$local $base $other"
1638 generic = true
1643 generic = true
1639 priority = -1
1644 priority = -1
1640 experimental = true
1645 experimental = true
1641
1646
1642 [[items]]
1647 [[items]]
1643 section = "partial-merge-tools"
1648 section = "partial-merge-tools"
1644 name = '.*\.disable'
1649 name = '.*\.disable'
1645 default = false
1650 default = false
1646 generic = true
1651 generic = true
1647 priority = -1
1652 priority = -1
1648 experimental = true
1653 experimental = true
1649
1654
1650 [[items]]
1655 [[items]]
1651 section = "partial-merge-tools"
1656 section = "partial-merge-tools"
1652 name = '.*\.executable$'
1657 name = '.*\.executable$'
1653 default-type = "dynamic"
1658 default-type = "dynamic"
1654 generic = true
1659 generic = true
1655 priority = -1
1660 priority = -1
1656 experimental = true
1661 experimental = true
1657
1662
1658 [[items]]
1663 [[items]]
1659 section = "partial-merge-tools"
1664 section = "partial-merge-tools"
1660 name = '.*\.order'
1665 name = '.*\.order'
1661 default = 0
1666 default = 0
1662 generic = true
1667 generic = true
1663 priority = -1
1668 priority = -1
1664 experimental = true
1669 experimental = true
1665
1670
1666 [[items]]
1671 [[items]]
1667 section = "partial-merge-tools"
1672 section = "partial-merge-tools"
1668 name = '.*\.patterns'
1673 name = '.*\.patterns'
1669 default-type = "dynamic"
1674 default-type = "dynamic"
1670 generic = true
1675 generic = true
1671 priority = -1
1676 priority = -1
1672 experimental = true
1677 experimental = true
1673
1678
1674 [[items]]
1679 [[items]]
1675 section = "patch"
1680 section = "patch"
1676 name = "eol"
1681 name = "eol"
1677 default = "strict"
1682 default = "strict"
1678
1683
1679 [[items]]
1684 [[items]]
1680 section = "patch"
1685 section = "patch"
1681 name = "fuzz"
1686 name = "fuzz"
1682 default = 2
1687 default = 2
1683
1688
1684 [[items]]
1689 [[items]]
1685 section = "paths"
1690 section = "paths"
1686 name = "[^:]*"
1691 name = "[^:]*"
1687 generic = true
1692 generic = true
1688
1693
1689 [[items]]
1694 [[items]]
1690 section = "paths"
1695 section = "paths"
1691 name = ".*:bookmarks.mode"
1696 name = ".*:bookmarks.mode"
1692 default = "default"
1697 default = "default"
1693 generic = true
1698 generic = true
1694
1699
1695 [[items]]
1700 [[items]]
1696 section = "paths"
1701 section = "paths"
1697 name = ".*:multi-urls"
1702 name = ".*:multi-urls"
1698 default = false
1703 default = false
1699 generic = true
1704 generic = true
1700
1705
1701 [[items]]
1706 [[items]]
1702 section = "paths"
1707 section = "paths"
1703 name = ".*:pulled-delta-reuse-policy"
1708 name = ".*:pulled-delta-reuse-policy"
1704 generic = true
1709 generic = true
1705
1710
1706 [[items]]
1711 [[items]]
1707 section = "paths"
1712 section = "paths"
1708 name = ".*:pushrev"
1713 name = ".*:pushrev"
1709 generic = true
1714 generic = true
1710
1715
1711 [[items]]
1716 [[items]]
1712 section = "paths"
1717 section = "paths"
1713 name = ".*:pushurl"
1718 name = ".*:pushurl"
1714 generic = true
1719 generic = true
1715
1720
1716 [[items]]
1721 [[items]]
1717 section = "paths"
1722 section = "paths"
1718 name = "default"
1723 name = "default"
1719
1724
1720 [[items]]
1725 [[items]]
1721 section = "paths"
1726 section = "paths"
1722 name = "default-push"
1727 name = "default-push"
1723
1728
1724 [[items]]
1729 [[items]]
1725 section = "phases"
1730 section = "phases"
1726 name = "checksubrepos"
1731 name = "checksubrepos"
1727 default = "follow"
1732 default = "follow"
1728
1733
1729 [[items]]
1734 [[items]]
1730 section = "phases"
1735 section = "phases"
1731 name = "new-commit"
1736 name = "new-commit"
1732 default = "draft"
1737 default = "draft"
1733
1738
1734 [[items]]
1739 [[items]]
1735 section = "phases"
1740 section = "phases"
1736 name = "publish"
1741 name = "publish"
1737 default = true
1742 default = true
1738
1743
1739 [[items]]
1744 [[items]]
1740 section = "profiling"
1745 section = "profiling"
1741 name = "enabled"
1746 name = "enabled"
1742 default = false
1747 default = false
1743
1748
1744 [[items]]
1749 [[items]]
1745 section = "profiling"
1750 section = "profiling"
1746 name = "format"
1751 name = "format"
1747 default = "text"
1752 default = "text"
1748
1753
1749 [[items]]
1754 [[items]]
1750 section = "profiling"
1755 section = "profiling"
1751 name = "freq"
1756 name = "freq"
1752 default = 1000
1757 default = 1000
1753
1758
1754 [[items]]
1759 [[items]]
1755 section = "profiling"
1760 section = "profiling"
1756 name = "limit"
1761 name = "limit"
1757 default = 30
1762 default = 30
1758
1763
1759 [[items]]
1764 [[items]]
1760 section = "profiling"
1765 section = "profiling"
1761 name = "nested"
1766 name = "nested"
1762 default = 0
1767 default = 0
1763
1768
1764 [[items]]
1769 [[items]]
1765 section = "profiling"
1770 section = "profiling"
1766 name = "output"
1771 name = "output"
1767
1772
1768 [[items]]
1773 [[items]]
1769 section = "profiling"
1774 section = "profiling"
1770 name = "showmax"
1775 name = "showmax"
1771 default = 0.999
1776 default = 0.999
1772
1777
1773 [[items]]
1778 [[items]]
1774 section = "profiling"
1779 section = "profiling"
1775 name = "showmin"
1780 name = "showmin"
1776 default-type = "dynamic"
1781 default-type = "dynamic"
1777
1782
1778 [[items]]
1783 [[items]]
1779 section = "profiling"
1784 section = "profiling"
1780 name = "showtime"
1785 name = "showtime"
1781 default = true
1786 default = true
1782
1787
1783 [[items]]
1788 [[items]]
1784 section = "profiling"
1789 section = "profiling"
1785 name = "sort"
1790 name = "sort"
1786 default = "inlinetime"
1791 default = "inlinetime"
1787
1792
1788 [[items]]
1793 [[items]]
1789 section = "profiling"
1794 section = "profiling"
1790 name = "statformat"
1795 name = "statformat"
1791 default = "hotpath"
1796 default = "hotpath"
1792
1797
1793 [[items]]
1798 [[items]]
1794 section = "profiling"
1799 section = "profiling"
1795 name = "time-track"
1800 name = "time-track"
1796 default-type = "dynamic"
1801 default-type = "dynamic"
1797
1802
1798 [[items]]
1803 [[items]]
1799 section = "profiling"
1804 section = "profiling"
1800 name = "type"
1805 name = "type"
1801 default = "stat"
1806 default = "stat"
1802
1807
1803 [[items]]
1808 [[items]]
1804 section = "progress"
1809 section = "progress"
1805 name = "assume-tty"
1810 name = "assume-tty"
1806 default = false
1811 default = false
1807
1812
1808 [[items]]
1813 [[items]]
1809 section = "progress"
1814 section = "progress"
1810 name = "changedelay"
1815 name = "changedelay"
1811 default = 1
1816 default = 1
1812
1817
1813 [[items]]
1818 [[items]]
1814 section = "progress"
1819 section = "progress"
1815 name = "clear-complete"
1820 name = "clear-complete"
1816 default = true
1821 default = true
1817
1822
1818 [[items]]
1823 [[items]]
1819 section = "progress"
1824 section = "progress"
1820 name = "debug"
1825 name = "debug"
1821 default = false
1826 default = false
1822
1827
1823 [[items]]
1828 [[items]]
1824 section = "progress"
1829 section = "progress"
1825 name = "delay"
1830 name = "delay"
1826 default = 3
1831 default = 3
1827
1832
1828 [[items]]
1833 [[items]]
1829 section = "progress"
1834 section = "progress"
1830 name = "disable"
1835 name = "disable"
1831 default = false
1836 default = false
1832
1837
1833 [[items]]
1838 [[items]]
1834 section = "progress"
1839 section = "progress"
1835 name = "estimateinterval"
1840 name = "estimateinterval"
1836 default = 60.0
1841 default = 60.0
1837
1842
1838 [[items]]
1843 [[items]]
1839 section = "progress"
1844 section = "progress"
1840 name = "format"
1845 name = "format"
1841 default-type = "lambda"
1846 default-type = "lambda"
1842 default = [ "topic", "bar", "number", "estimate",]
1847 default = [ "topic", "bar", "number", "estimate",]
1843
1848
1844 [[items]]
1849 [[items]]
1845 section = "progress"
1850 section = "progress"
1846 name = "refresh"
1851 name = "refresh"
1847 default = 0.1
1852 default = 0.1
1848
1853
1849 [[items]]
1854 [[items]]
1850 section = "progress"
1855 section = "progress"
1851 name = "width"
1856 name = "width"
1852 default-type = "dynamic"
1857 default-type = "dynamic"
1853
1858
1854 [[items]]
1859 [[items]]
1855 section = "pull"
1860 section = "pull"
1856 name = "confirm"
1861 name = "confirm"
1857 default = false
1862 default = false
1858
1863
1859 [[items]]
1864 [[items]]
1860 section = "push"
1865 section = "push"
1861 name = "pushvars.server"
1866 name = "pushvars.server"
1862 default = false
1867 default = false
1863
1868
1864 [[items]]
1869 [[items]]
1865 section = "rebase"
1870 section = "rebase"
1866 name = "experimental.inmemory"
1871 name = "experimental.inmemory"
1867 default = false
1872 default = false
1868
1873
1869 [[items]]
1874 [[items]]
1870 section = "rebase"
1875 section = "rebase"
1871 name = "singletransaction"
1876 name = "singletransaction"
1872 default = false
1877 default = false
1873
1878
1874 [[items]]
1879 [[items]]
1875 section = "rebase"
1880 section = "rebase"
1876 name = "store-source"
1881 name = "store-source"
1877 default = true
1882 default = true
1878 experimental = true
1883 experimental = true
1879 documentation = """Controls creation of a `rebase_source` extra field during rebase.
1884 documentation = """Controls creation of a `rebase_source` extra field during rebase.
1880 When false, no such field is created. This is useful e.g. for incrementally \
1885 When false, no such field is created. This is useful e.g. for incrementally \
1881 converting changesets and then rebasing them onto an existing repo.
1886 converting changesets and then rebasing them onto an existing repo.
1882 WARNING: this is an advanced setting reserved for people who know \
1887 WARNING: this is an advanced setting reserved for people who know \
1883 exactly what they are doing. Misuse of this setting can easily \
1888 exactly what they are doing. Misuse of this setting can easily \
1884 result in obsmarker cycles and a vivid headache."""
1889 result in obsmarker cycles and a vivid headache."""
1885
1890
1886 [[items]]
1891 [[items]]
1887 section = "rewrite"
1892 section = "rewrite"
1888 name = "backup-bundle"
1893 name = "backup-bundle"
1889 default = true
1894 default = true
1890 alias = [["ui", "history-editing-backup"]]
1895 alias = [["ui", "history-editing-backup"]]
1891
1896
1892 [[items]]
1897 [[items]]
1893 section = "rewrite"
1898 section = "rewrite"
1894 name = "empty-successor"
1899 name = "empty-successor"
1895 default = "skip"
1900 default = "skip"
1896 experimental = true
1901 experimental = true
1897
1902
1898 [[items]]
1903 [[items]]
1899 section = "rewrite"
1904 section = "rewrite"
1900 name = "update-timestamp"
1905 name = "update-timestamp"
1901 default = false
1906 default = false
1902
1907
1903 [[items]]
1908 [[items]]
1904 section = "rhg"
1909 section = "rhg"
1905 name = "cat"
1910 name = "cat"
1906 default = true
1911 default = true
1907 experimental = true
1912 experimental = true
1908 documentation = """rhg cat has some quirks that need to be ironed out. \
1913 documentation = """rhg cat has some quirks that need to be ironed out. \
1909 In particular, the `-r` argument accepts a partial hash, but does not \
1914 In particular, the `-r` argument accepts a partial hash, but does not \
1910 correctly resolve `abcdef` as a potential bookmark, tag or branch name."""
1915 correctly resolve `abcdef` as a potential bookmark, tag or branch name."""
1911
1916
1912 [[items]]
1917 [[items]]
1913 section = "rhg"
1918 section = "rhg"
1914 name = "fallback-exectutable"
1919 name = "fallback-exectutable"
1915 experimental = true
1920 experimental = true
1916
1921
1917 [[items]]
1922 [[items]]
1918 section = "rhg"
1923 section = "rhg"
1919 name = "fallback-immediately"
1924 name = "fallback-immediately"
1920 default = false
1925 default = false
1921 experimental = true
1926 experimental = true
1922
1927
1923 [[items]]
1928 [[items]]
1924 section = "rhg"
1929 section = "rhg"
1925 name = "ignored-extensions"
1930 name = "ignored-extensions"
1926 default-type = "list_type"
1931 default-type = "list_type"
1927 experimental = true
1932 experimental = true
1928
1933
1929 [[items]]
1934 [[items]]
1930 section = "rhg"
1935 section = "rhg"
1931 name = "on-unsupported"
1936 name = "on-unsupported"
1932 default = "abort"
1937 default = "abort"
1933 experimental = true
1938 experimental = true
1934
1939
1935 [[items]]
1940 [[items]]
1936 section = "server"
1941 section = "server"
1937 name = "bookmarks-pushkey-compat"
1942 name = "bookmarks-pushkey-compat"
1938 default = true
1943 default = true
1939
1944
1940 [[items]]
1945 [[items]]
1941 section = "server"
1946 section = "server"
1942 name = "bundle1"
1947 name = "bundle1"
1943 default = true
1948 default = true
1944
1949
1945 [[items]]
1950 [[items]]
1946 section = "server"
1951 section = "server"
1947 name = "bundle1.pull"
1952 name = "bundle1.pull"
1948
1953
1949 [[items]]
1954 [[items]]
1950 section = "server"
1955 section = "server"
1951 name = "bundle1.push"
1956 name = "bundle1.push"
1952
1957
1953 [[items]]
1958 [[items]]
1954 section = "server"
1959 section = "server"
1955 name = "bundle1gd"
1960 name = "bundle1gd"
1956
1961
1957 [[items]]
1962 [[items]]
1958 section = "server"
1963 section = "server"
1959 name = "bundle1gd.pull"
1964 name = "bundle1gd.pull"
1960
1965
1961 [[items]]
1966 [[items]]
1962 section = "server"
1967 section = "server"
1963 name = "bundle1gd.push"
1968 name = "bundle1gd.push"
1964
1969
1965 [[items]]
1970 [[items]]
1966 section = "server"
1971 section = "server"
1967 name = "bundle2.stream"
1972 name = "bundle2.stream"
1968 default = true
1973 default = true
1969 alias = [["experimental", "bundle2.stream"]]
1974 alias = [["experimental", "bundle2.stream"]]
1970
1975
1971 [[items]]
1976 [[items]]
1972 section = "server"
1977 section = "server"
1973 name = "compressionengines"
1978 name = "compressionengines"
1974 default-type = "list_type"
1979 default-type = "list_type"
1975
1980
1976 [[items]]
1981 [[items]]
1977 section = "server"
1982 section = "server"
1978 name = "concurrent-push-mode"
1983 name = "concurrent-push-mode"
1979 default = "check-related"
1984 default = "check-related"
1980
1985
1981 [[items]]
1986 [[items]]
1982 section = "server"
1987 section = "server"
1983 name = "disablefullbundle"
1988 name = "disablefullbundle"
1984 default = false
1989 default = false
1985
1990
1986 [[items]]
1991 [[items]]
1987 section = "server"
1992 section = "server"
1988 name = "maxhttpheaderlen"
1993 name = "maxhttpheaderlen"
1989 default = 1024
1994 default = 1024
1990
1995
1991 [[items]]
1996 [[items]]
1992 section = "server"
1997 section = "server"
1993 name = "preferuncompressed"
1998 name = "preferuncompressed"
1994 default = false
1999 default = false
1995
2000
1996 [[items]]
2001 [[items]]
1997 section = "server"
2002 section = "server"
1998 name = "pullbundle"
2003 name = "pullbundle"
1999 default = true
2004 default = true
2000
2005
2001 [[items]]
2006 [[items]]
2002 section = "server"
2007 section = "server"
2003 name = "streamunbundle"
2008 name = "streamunbundle"
2004 default = false
2009 default = false
2005
2010
2006 [[items]]
2011 [[items]]
2007 section = "server"
2012 section = "server"
2008 name = "uncompressed"
2013 name = "uncompressed"
2009 default = true
2014 default = true
2010
2015
2011 [[items]]
2016 [[items]]
2012 section = "server"
2017 section = "server"
2013 name = "uncompressedallowsecret"
2018 name = "uncompressedallowsecret"
2014 default = false
2019 default = false
2015
2020
2016 [[items]]
2021 [[items]]
2017 section = "server"
2022 section = "server"
2018 name = "validate"
2023 name = "validate"
2019 default = false
2024 default = false
2020
2025
2021 [[items]]
2026 [[items]]
2022 section = "server"
2027 section = "server"
2023 name = "view"
2028 name = "view"
2024 default = "served"
2029 default = "served"
2025
2030
2026 [[items]]
2031 [[items]]
2027 section = "server"
2032 section = "server"
2028 name = "zliblevel"
2033 name = "zliblevel"
2029 default = -1
2034 default = -1
2030
2035
2031 [[items]]
2036 [[items]]
2032 section = "server"
2037 section = "server"
2033 name = "zstdlevel"
2038 name = "zstdlevel"
2034 default = 3
2039 default = 3
2035
2040
2036 [[items]]
2041 [[items]]
2037 section = "share"
2042 section = "share"
2038 name = "pool"
2043 name = "pool"
2039
2044
2040 [[items]]
2045 [[items]]
2041 section = "share"
2046 section = "share"
2042 name = "poolnaming"
2047 name = "poolnaming"
2043 default = "identity"
2048 default = "identity"
2044
2049
2045 [[items]]
2050 [[items]]
2046 section = "share"
2051 section = "share"
2047 name = "safe-mismatch.source-not-safe"
2052 name = "safe-mismatch.source-not-safe"
2048 default = "abort"
2053 default = "abort"
2049
2054
2050 [[items]]
2055 [[items]]
2051 section = "share"
2056 section = "share"
2052 name = "safe-mismatch.source-not-safe.warn"
2057 name = "safe-mismatch.source-not-safe.warn"
2053 default = true
2058 default = true
2054
2059
2055 [[items]]
2060 [[items]]
2056 section = "share"
2061 section = "share"
2057 name = "safe-mismatch.source-not-safe:verbose-upgrade"
2062 name = "safe-mismatch.source-not-safe:verbose-upgrade"
2058 default = true
2063 default = true
2059
2064
2060 [[items]]
2065 [[items]]
2061 section = "share"
2066 section = "share"
2062 name = "safe-mismatch.source-safe"
2067 name = "safe-mismatch.source-safe"
2063 default = "abort"
2068 default = "abort"
2064
2069
2065 [[items]]
2070 [[items]]
2066 section = "share"
2071 section = "share"
2067 name = "safe-mismatch.source-safe.warn"
2072 name = "safe-mismatch.source-safe.warn"
2068 default = true
2073 default = true
2069
2074
2070 [[items]]
2075 [[items]]
2071 section = "share"
2076 section = "share"
2072 name = "safe-mismatch.source-safe:verbose-upgrade"
2077 name = "safe-mismatch.source-safe:verbose-upgrade"
2073 default = true
2078 default = true
2074
2079
2075 [[items]]
2080 [[items]]
2076 section = "shelve"
2081 section = "shelve"
2077 name = "maxbackups"
2082 name = "maxbackups"
2078 default = 10
2083 default = 10
2079
2084
2080 [[items]]
2085 [[items]]
2081 section = "shelve"
2086 section = "shelve"
2082 name = "store"
2087 name = "store"
2083 default = "internal"
2088 default = "internal"
2084 experimental = true
2089 experimental = true
2085
2090
2086 [[items]]
2091 [[items]]
2087 section = "smtp"
2092 section = "smtp"
2088 name = "host"
2093 name = "host"
2089
2094
2090 [[items]]
2095 [[items]]
2091 section = "smtp"
2096 section = "smtp"
2092 name = "local_hostname"
2097 name = "local_hostname"
2093
2098
2094 [[items]]
2099 [[items]]
2095 section = "smtp"
2100 section = "smtp"
2096 name = "password"
2101 name = "password"
2097
2102
2098 [[items]]
2103 [[items]]
2099 section = "smtp"
2104 section = "smtp"
2100 name = "port"
2105 name = "port"
2101 default-type = "dynamic"
2106 default-type = "dynamic"
2102
2107
2103 [[items]]
2108 [[items]]
2104 section = "smtp"
2109 section = "smtp"
2105 name = "tls"
2110 name = "tls"
2106 default = "none"
2111 default = "none"
2107
2112
2108 [[items]]
2113 [[items]]
2109 section = "smtp"
2114 section = "smtp"
2110 name = "username"
2115 name = "username"
2111
2116
2112 [[items]]
2117 [[items]]
2113 section = "sparse"
2118 section = "sparse"
2114 name = "missingwarning"
2119 name = "missingwarning"
2115 default = true
2120 default = true
2116 experimental = true
2121 experimental = true
2117
2122
2118
2123
2119 # The "storage" section house config options that change how the repository
2124 # The "storage" section house config options that change how the repository
2120 # data are accessed by the current process but does not affects the on disk
2125 # data are accessed by the current process but does not affects the on disk
2121 # format. They can also adjust how the storage is computed, but without affect
2126 # format. They can also adjust how the storage is computed, but without affect
2122 # compatibility wither other clients.
2127 # compatibility wither other clients.
2123 #
2128 #
2124 # For deeper format change, see the `format` section.
2129 # For deeper format change, see the `format` section.
2125
2130
2126
2131
2127 [[items]]
2132 [[items]]
2128 section = "storage"
2133 section = "storage"
2129 name = "dirstate-v2.slow-path"
2134 name = "dirstate-v2.slow-path"
2130 default = "abort"
2135 default = "abort"
2131 experimental = true # experimental as long as format.use-dirstate-v2 is.
2136 experimental = true # experimental as long as format.use-dirstate-v2 is.
2132
2137
2133 [[items]]
2138 [[items]]
2134 section = "storage"
2139 section = "storage"
2135 name = "revbranchcache.mmap"
2140 name = "revbranchcache.mmap"
2136 default = true
2141 default = true
2137
2142
2138 [[items]]
2143 [[items]]
2139 section = "storage"
2144 section = "storage"
2140 name = "new-repo-backend"
2145 name = "new-repo-backend"
2141 default = "revlogv1"
2146 default = "revlogv1"
2142 experimental = true
2147 experimental = true
2143
2148
2144 [[items]]
2149 [[items]]
2145 section = "storage"
2150 section = "storage"
2146 name = "revlog.delta-parent-search.candidate-group-chunk-size"
2151 name = "revlog.delta-parent-search.candidate-group-chunk-size"
2147 default = 20
2152 default = 20
2148
2153
2149 [[items]]
2154 [[items]]
2150 section = "storage"
2155 section = "storage"
2151 name = "revlog.issue6528.fix-incoming"
2156 name = "revlog.issue6528.fix-incoming"
2152 default = true
2157 default = true
2153
2158
2154 [[items]]
2159 [[items]]
2155 section = "storage"
2160 section = "storage"
2156 name = "revlog.optimize-delta-parent-choice"
2161 name = "revlog.optimize-delta-parent-choice"
2157 default = true
2162 default = true
2158 alias = [["format", "aggressivemergedeltas"]]
2163 alias = [["format", "aggressivemergedeltas"]]
2159
2164
2160 [[items]]
2165 [[items]]
2161 section = "storage"
2166 section = "storage"
2162 name = "revlog.persistent-nodemap.mmap"
2167 name = "revlog.persistent-nodemap.mmap"
2163 default = true
2168 default = true
2164
2169
2165 [[items]]
2170 [[items]]
2166 section = "storage"
2171 section = "storage"
2167 name = "revlog.persistent-nodemap.slow-path"
2172 name = "revlog.persistent-nodemap.slow-path"
2168 default = "abort"
2173 default = "abort"
2169
2174
2170 [[items]]
2175 [[items]]
2171 section = "storage"
2176 section = "storage"
2172 name = "revlog.reuse-external-delta"
2177 name = "revlog.reuse-external-delta"
2173 default = true
2178 default = true
2174
2179
2175 [[items]]
2180 [[items]]
2176 section = "storage"
2181 section = "storage"
2177 name = "revlog.reuse-external-delta-parent"
2182 name = "revlog.reuse-external-delta-parent"
2178 documentation = """This option is true unless `format.generaldelta` is set."""
2183 documentation = """This option is true unless `format.generaldelta` is set."""
2179
2184
2180 [[items]]
2185 [[items]]
2181 section = "storage"
2186 section = "storage"
2182 name = "revlog.zlib.level"
2187 name = "revlog.zlib.level"
2183
2188
2184 [[items]]
2189 [[items]]
2185 section = "storage"
2190 section = "storage"
2186 name = "revlog.zstd.level"
2191 name = "revlog.zstd.level"
2187
2192
2188 [[items]]
2193 [[items]]
2189 section = "subrepos"
2194 section = "subrepos"
2190 name = "allowed"
2195 name = "allowed"
2191 default-type = "dynamic" # to make backporting simpler
2196 default-type = "dynamic" # to make backporting simpler
2192
2197
2193 [[items]]
2198 [[items]]
2194 section = "subrepos"
2199 section = "subrepos"
2195 name = "git:allowed"
2200 name = "git:allowed"
2196 default-type = "dynamic"
2201 default-type = "dynamic"
2197
2202
2198 [[items]]
2203 [[items]]
2199 section = "subrepos"
2204 section = "subrepos"
2200 name = "hg:allowed"
2205 name = "hg:allowed"
2201 default-type = "dynamic"
2206 default-type = "dynamic"
2202
2207
2203 [[items]]
2208 [[items]]
2204 section = "subrepos"
2209 section = "subrepos"
2205 name = "svn:allowed"
2210 name = "svn:allowed"
2206 default-type = "dynamic"
2211 default-type = "dynamic"
2207
2212
2208 [[items]]
2213 [[items]]
2209 section = "templateconfig"
2214 section = "templateconfig"
2210 name = ".*"
2215 name = ".*"
2211 default-type = "dynamic"
2216 default-type = "dynamic"
2212 generic = true
2217 generic = true
2213
2218
2214 [[items]]
2219 [[items]]
2215 section = "templates"
2220 section = "templates"
2216 name = ".*"
2221 name = ".*"
2217 generic = true
2222 generic = true
2218
2223
2219 [[items]]
2224 [[items]]
2220 section = "trusted"
2225 section = "trusted"
2221 name = "groups"
2226 name = "groups"
2222 default-type = "list_type"
2227 default-type = "list_type"
2223
2228
2224 [[items]]
2229 [[items]]
2225 section = "trusted"
2230 section = "trusted"
2226 name = "users"
2231 name = "users"
2227 default-type = "list_type"
2232 default-type = "list_type"
2228
2233
2229 [[items]]
2234 [[items]]
2230 section = "ui"
2235 section = "ui"
2231 name = "_usedassubrepo"
2236 name = "_usedassubrepo"
2232 default = false
2237 default = false
2233
2238
2234 [[items]]
2239 [[items]]
2235 section = "ui"
2240 section = "ui"
2236 name = "allowemptycommit"
2241 name = "allowemptycommit"
2237 default = false
2242 default = false
2238
2243
2239 [[items]]
2244 [[items]]
2240 section = "ui"
2245 section = "ui"
2241 name = "archivemeta"
2246 name = "archivemeta"
2242 default = true
2247 default = true
2243
2248
2244 [[items]]
2249 [[items]]
2245 section = "ui"
2250 section = "ui"
2246 name = "askusername"
2251 name = "askusername"
2247 default = false
2252 default = false
2248
2253
2249 [[items]]
2254 [[items]]
2250 section = "ui"
2255 section = "ui"
2251 name = "available-memory"
2256 name = "available-memory"
2252
2257
2253 [[items]]
2258 [[items]]
2254 section = "ui"
2259 section = "ui"
2255 name = "clonebundlefallback"
2260 name = "clonebundlefallback"
2256 default = false
2261 default = false
2257
2262
2258 [[items]]
2263 [[items]]
2259 section = "ui"
2264 section = "ui"
2260 name = "clonebundleprefers"
2265 name = "clonebundleprefers"
2261 default-type = "list_type"
2266 default-type = "list_type"
2262
2267
2263 [[items]]
2268 [[items]]
2264 section = "ui"
2269 section = "ui"
2265 name = "clonebundles"
2270 name = "clonebundles"
2266 default = true
2271 default = true
2267
2272
2268 [[items]]
2273 [[items]]
2269 section = "ui"
2274 section = "ui"
2270 name = "color"
2275 name = "color"
2271 default = "auto"
2276 default = "auto"
2272
2277
2273 [[items]]
2278 [[items]]
2274 section = "ui"
2279 section = "ui"
2275 name = "commitsubrepos"
2280 name = "commitsubrepos"
2276 default = false
2281 default = false
2277
2282
2278 [[items]]
2283 [[items]]
2279 section = "ui"
2284 section = "ui"
2280 name = "debug"
2285 name = "debug"
2281 default = false
2286 default = false
2282
2287
2283 [[items]]
2288 [[items]]
2284 section = "ui"
2289 section = "ui"
2285 name = "debugger"
2290 name = "debugger"
2286
2291
2287 [[items]]
2292 [[items]]
2288 section = "ui"
2293 section = "ui"
2289 name = "detailed-exit-code"
2294 name = "detailed-exit-code"
2290 default = false
2295 default = false
2291 experimental = true
2296 experimental = true
2292
2297
2293 [[items]]
2298 [[items]]
2294 section = "ui"
2299 section = "ui"
2295 name = "editor"
2300 name = "editor"
2296 default-type = "dynamic"
2301 default-type = "dynamic"
2297
2302
2298 [[items]]
2303 [[items]]
2299 section = "ui"
2304 section = "ui"
2300 name = "fallbackencoding"
2305 name = "fallbackencoding"
2301
2306
2302 [[items]]
2307 [[items]]
2303 section = "ui"
2308 section = "ui"
2304 name = "forcecwd"
2309 name = "forcecwd"
2305
2310
2306 [[items]]
2311 [[items]]
2307 section = "ui"
2312 section = "ui"
2308 name = "forcemerge"
2313 name = "forcemerge"
2309
2314
2310 [[items]]
2315 [[items]]
2311 section = "ui"
2316 section = "ui"
2312 name = "formatdebug"
2317 name = "formatdebug"
2313 default = false
2318 default = false
2314
2319
2315 [[items]]
2320 [[items]]
2316 section = "ui"
2321 section = "ui"
2317 name = "formatjson"
2322 name = "formatjson"
2318 default = false
2323 default = false
2319
2324
2320 [[items]]
2325 [[items]]
2321 section = "ui"
2326 section = "ui"
2322 name = "formatted"
2327 name = "formatted"
2323
2328
2324 [[items]]
2329 [[items]]
2325 section = "ui"
2330 section = "ui"
2326 name = "interactive"
2331 name = "interactive"
2327
2332
2328 [[items]]
2333 [[items]]
2329 section = "ui"
2334 section = "ui"
2330 name = "interface"
2335 name = "interface"
2331
2336
2332 [[items]]
2337 [[items]]
2333 section = "ui"
2338 section = "ui"
2334 name = "interface.chunkselector"
2339 name = "interface.chunkselector"
2335
2340
2336 [[items]]
2341 [[items]]
2337 section = "ui"
2342 section = "ui"
2338 name = "large-file-limit"
2343 name = "large-file-limit"
2339 default = 10485760
2344 default = 10485760
2340
2345
2341 [[items]]
2346 [[items]]
2342 section = "ui"
2347 section = "ui"
2343 name = "logblockedtimes"
2348 name = "logblockedtimes"
2344 default = false
2349 default = false
2345
2350
2346 [[items]]
2351 [[items]]
2347 section = "ui"
2352 section = "ui"
2348 name = "merge"
2353 name = "merge"
2349
2354
2350 [[items]]
2355 [[items]]
2351 section = "ui"
2356 section = "ui"
2352 name = "mergemarkers"
2357 name = "mergemarkers"
2353 default = "basic"
2358 default = "basic"
2354
2359
2355 [[items]]
2360 [[items]]
2356 section = "ui"
2361 section = "ui"
2357 name = "message-output"
2362 name = "message-output"
2358 default = "stdio"
2363 default = "stdio"
2359
2364
2360 [[items]]
2365 [[items]]
2361 section = "ui"
2366 section = "ui"
2362 name = "nontty"
2367 name = "nontty"
2363 default = false
2368 default = false
2364
2369
2365 [[items]]
2370 [[items]]
2366 section = "ui"
2371 section = "ui"
2367 name = "origbackuppath"
2372 name = "origbackuppath"
2368
2373
2369 [[items]]
2374 [[items]]
2370 section = "ui"
2375 section = "ui"
2371 name = "paginate"
2376 name = "paginate"
2372 default = true
2377 default = true
2373
2378
2374 [[items]]
2379 [[items]]
2375 section = "ui"
2380 section = "ui"
2376 name = "patch"
2381 name = "patch"
2377
2382
2378 [[items]]
2383 [[items]]
2379 section = "ui"
2384 section = "ui"
2380 name = "portablefilenames"
2385 name = "portablefilenames"
2381 default = "warn"
2386 default = "warn"
2382
2387
2383 [[items]]
2388 [[items]]
2384 section = "ui"
2389 section = "ui"
2385 name = "promptecho"
2390 name = "promptecho"
2386 default = false
2391 default = false
2387
2392
2388 [[items]]
2393 [[items]]
2389 section = "ui"
2394 section = "ui"
2390 name = "quiet"
2395 name = "quiet"
2391 default = false
2396 default = false
2392
2397
2393 [[items]]
2398 [[items]]
2394 section = "ui"
2399 section = "ui"
2395 name = "quietbookmarkmove"
2400 name = "quietbookmarkmove"
2396 default = false
2401 default = false
2397
2402
2398 [[items]]
2403 [[items]]
2399 section = "ui"
2404 section = "ui"
2400 name = "relative-paths"
2405 name = "relative-paths"
2401 default = "legacy"
2406 default = "legacy"
2402
2407
2403 [[items]]
2408 [[items]]
2404 section = "ui"
2409 section = "ui"
2405 name = "remotecmd"
2410 name = "remotecmd"
2406 default = "hg"
2411 default = "hg"
2407
2412
2408 [[items]]
2413 [[items]]
2409 section = "ui"
2414 section = "ui"
2410 name = "report_untrusted"
2415 name = "report_untrusted"
2411 default = true
2416 default = true
2412
2417
2413 [[items]]
2418 [[items]]
2414 section = "ui"
2419 section = "ui"
2415 name = "rollback"
2420 name = "rollback"
2416 default = true
2421 default = true
2417
2422
2418 [[items]]
2423 [[items]]
2419 section = "ui"
2424 section = "ui"
2420 name = "signal-safe-lock"
2425 name = "signal-safe-lock"
2421 default = true
2426 default = true
2422
2427
2423 [[items]]
2428 [[items]]
2424 section = "ui"
2429 section = "ui"
2425 name = "slash"
2430 name = "slash"
2426 default = false
2431 default = false
2427
2432
2428 [[items]]
2433 [[items]]
2429 section = "ui"
2434 section = "ui"
2430 name = "ssh"
2435 name = "ssh"
2431 default = "ssh"
2436 default = "ssh"
2432
2437
2433 [[items]]
2438 [[items]]
2434 section = "ui"
2439 section = "ui"
2435 name = "ssherrorhint"
2440 name = "ssherrorhint"
2436
2441
2437 [[items]]
2442 [[items]]
2438 section = "ui"
2443 section = "ui"
2439 name = "statuscopies"
2444 name = "statuscopies"
2440 default = false
2445 default = false
2441
2446
2442 [[items]]
2447 [[items]]
2443 section = "ui"
2448 section = "ui"
2444 name = "strict"
2449 name = "strict"
2445 default = false
2450 default = false
2446
2451
2447 [[items]]
2452 [[items]]
2448 section = "ui"
2453 section = "ui"
2449 name = "style"
2454 name = "style"
2450 default = ""
2455 default = ""
2451
2456
2452 [[items]]
2457 [[items]]
2453 section = "ui"
2458 section = "ui"
2454 name = "supportcontact"
2459 name = "supportcontact"
2455
2460
2456 [[items]]
2461 [[items]]
2457 section = "ui"
2462 section = "ui"
2458 name = "textwidth"
2463 name = "textwidth"
2459 default = 78
2464 default = 78
2460
2465
2461 [[items]]
2466 [[items]]
2462 section = "ui"
2467 section = "ui"
2463 name = "timeout"
2468 name = "timeout"
2464 default = "600"
2469 default = "600"
2465
2470
2466 [[items]]
2471 [[items]]
2467 section = "ui"
2472 section = "ui"
2468 name = "timeout.warn"
2473 name = "timeout.warn"
2469 default = 0
2474 default = 0
2470
2475
2471 [[items]]
2476 [[items]]
2472 section = "ui"
2477 section = "ui"
2473 name = "timestamp-output"
2478 name = "timestamp-output"
2474 default = false
2479 default = false
2475
2480
2476 [[items]]
2481 [[items]]
2477 section = "ui"
2482 section = "ui"
2478 name = "traceback"
2483 name = "traceback"
2479 default = false
2484 default = false
2480
2485
2481 [[items]]
2486 [[items]]
2482 section = "ui"
2487 section = "ui"
2483 name = "tweakdefaults"
2488 name = "tweakdefaults"
2484 default = false
2489 default = false
2485
2490
2486 [[items]]
2491 [[items]]
2487 section = "ui"
2492 section = "ui"
2488 name = "username"
2493 name = "username"
2489 alias = [["ui", "user"]]
2494 alias = [["ui", "user"]]
2490
2495
2491 [[items]]
2496 [[items]]
2492 section = "ui"
2497 section = "ui"
2493 name = "verbose"
2498 name = "verbose"
2494 default = false
2499 default = false
2495
2500
2496 [[items]]
2501 [[items]]
2497 section = "usage"
2502 section = "usage"
2498 name = "repository-role"
2503 name = "repository-role"
2499 default = "default"
2504 default = "default"
2500 documentation = """What this repository is used for.
2505 documentation = """What this repository is used for.
2501
2506
2502 This is used to adjust behavior and performance to best fit the repository purpose.
2507 This is used to adjust behavior and performance to best fit the repository purpose.
2503
2508
2504 Currently recognised values are:
2509 Currently recognised values are:
2505 - default: an all purpose repository
2510 - default: an all purpose repository
2506 """
2511 """
2507
2512
2508 [[items]]
2513 [[items]]
2509 section = "usage"
2514 section = "usage"
2510 name = "resources"
2515 name = "resources"
2511 default = "default"
2516 default = "default"
2512 documentation = """How aggressive Mercurial can be with resource usage:
2517 documentation = """How aggressive Mercurial can be with resource usage:
2513
2518
2514 Currently recognised values are:
2519 Currently recognised values are:
2515 - default: the default value currently is equivalent to medium,
2520 - default: the default value currently is equivalent to medium,
2516 - high: allows for higher cpu, memory and disk-space usage to improve the performance of some operations.
2521 - high: allows for higher cpu, memory and disk-space usage to improve the performance of some operations.
2517 - medium: aims at a moderate resource usage,
2522 - medium: aims at a moderate resource usage,
2518 - low: reduces resources usage when possible, decreasing overall performance.
2523 - low: reduces resources usage when possible, decreasing overall performance.
2519
2524
2520 For finer configuration, see also `usage.resources.cpu`,
2525 For finer configuration, see also `usage.resources.cpu`,
2521 `usage.resources.disk` and `usage.resources.memory`.
2526 `usage.resources.disk` and `usage.resources.memory`.
2522 """
2527 """
2523
2528
2524 [[items]]
2529 [[items]]
2525 section = "usage"
2530 section = "usage"
2526 name = "resources.cpu"
2531 name = "resources.cpu"
2527 default = "default"
2532 default = "default"
2528 documentation = """How aggressive Mercurial can be in terms of cpu usage:
2533 documentation = """How aggressive Mercurial can be in terms of cpu usage:
2529
2534
2530 Currently recognised values are:
2535 Currently recognised values are:
2531 - default: the default value, inherits the value from `usage.resources`,
2536 - default: the default value, inherits the value from `usage.resources`,
2532 - high: allows for more aggressive cpu usage, improving storage quality and
2537 - high: allows for more aggressive cpu usage, improving storage quality and
2533 the performance of some operations at the expense of machine load
2538 the performance of some operations at the expense of machine load
2534 - medium: aims at a moderate cpu usage,
2539 - medium: aims at a moderate cpu usage,
2535 - low: reduces cpu usage when possible, potentially at the expense of
2540 - low: reduces cpu usage when possible, potentially at the expense of
2536 slower operations, increased storage and exchange payload.
2541 slower operations, increased storage and exchange payload.
2537
2542
2538 """
2543 """
2539
2544
2540 [[items]]
2545 [[items]]
2541 section = "usage"
2546 section = "usage"
2542 name = "resources.disk"
2547 name = "resources.disk"
2543 default = "default"
2548 default = "default"
2544 documentation = """How aggressive Mercurial can be in terms of disk usage:
2549 documentation = """How aggressive Mercurial can be in terms of disk usage:
2545
2550
2546 Currently recognised values are:
2551 Currently recognised values are:
2547 - default: the default value, inherits the value from `usage.resources`,
2552 - default: the default value, inherits the value from `usage.resources`,
2548 - high: allows for more disk space usage where it can improve the performance,
2553 - high: allows for more disk space usage where it can improve the performance,
2549 - medium: aims at a moderate disk usage,
2554 - medium: aims at a moderate disk usage,
2550 - low: reduces disk usage when possible, decreasing performance in some occasion.
2555 - low: reduces disk usage when possible, decreasing performance in some occasion.
2551 """
2556 """
2552
2557
2553 [[items]]
2558 [[items]]
2554 section = "usage"
2559 section = "usage"
2555 name = "resources.memory"
2560 name = "resources.memory"
2556 default = "default"
2561 default = "default"
2557 documentation = """How aggressive Mercurial can be in terms of memory usage:
2562 documentation = """How aggressive Mercurial can be in terms of memory usage:
2558
2563
2559 Currently recognised values are:
2564 Currently recognised values are:
2560 - default: the default value, inherits the value from `usage.resources`,
2565 - default: the default value, inherits the value from `usage.resources`,
2561 - high: allows for more aggressive memory usage to improve overall performance,
2566 - high: allows for more aggressive memory usage to improve overall performance,
2562 - medium: aims at a moderate memory usage,
2567 - medium: aims at a moderate memory usage,
2563 - low: reduces memory usage when possible at the cost of overall performance.
2568 - low: reduces memory usage when possible at the cost of overall performance.
2564 """
2569 """
2565
2570
2566 [[items]]
2571 [[items]]
2567 section = "verify"
2572 section = "verify"
2568 name = "skipflags"
2573 name = "skipflags"
2569 default = 0
2574 default = 0
2570
2575
2571 [[items]]
2576 [[items]]
2572 section = "web"
2577 section = "web"
2573 name = "accesslog"
2578 name = "accesslog"
2574 default = "-"
2579 default = "-"
2575
2580
2576 [[items]]
2581 [[items]]
2577 section = "web"
2582 section = "web"
2578 name = "address"
2583 name = "address"
2579 default = ""
2584 default = ""
2580
2585
2581 [[items]]
2586 [[items]]
2582 section = "web"
2587 section = "web"
2583 name = "allow-archive"
2588 name = "allow-archive"
2584 default-type = "list_type"
2589 default-type = "list_type"
2585 alias = [["web", "allow_archive"]]
2590 alias = [["web", "allow_archive"]]
2586
2591
2587 [[items]]
2592 [[items]]
2588 section = "web"
2593 section = "web"
2589 name = "allow-pull"
2594 name = "allow-pull"
2590 default = true
2595 default = true
2591 alias = [["web", "allowpull"]]
2596 alias = [["web", "allowpull"]]
2592
2597
2593 [[items]]
2598 [[items]]
2594 section = "web"
2599 section = "web"
2595 name = "allow-push"
2600 name = "allow-push"
2596 default-type = "list_type"
2601 default-type = "list_type"
2597 alias = [["web", "allow_push"]]
2602 alias = [["web", "allow_push"]]
2598
2603
2599 [[items]]
2604 [[items]]
2600 section = "web"
2605 section = "web"
2601 name = "allow_read"
2606 name = "allow_read"
2602 default-type = "list_type"
2607 default-type = "list_type"
2603
2608
2604 [[items]]
2609 [[items]]
2605 section = "web"
2610 section = "web"
2606 name = "allowbz2"
2611 name = "allowbz2"
2607 default = false
2612 default = false
2608
2613
2609 [[items]]
2614 [[items]]
2610 section = "web"
2615 section = "web"
2611 name = "allowgz"
2616 name = "allowgz"
2612 default = false
2617 default = false
2613
2618
2614 [[items]]
2619 [[items]]
2615 section = "web"
2620 section = "web"
2616 name = "allowzip"
2621 name = "allowzip"
2617 default = false
2622 default = false
2618
2623
2619 [[items]]
2624 [[items]]
2620 section = "web"
2625 section = "web"
2621 name = "archivesubrepos"
2626 name = "archivesubrepos"
2622 default = false
2627 default = false
2623
2628
2624 [[items]]
2629 [[items]]
2625 section = "web"
2630 section = "web"
2626 name = "baseurl"
2631 name = "baseurl"
2627
2632
2628 [[items]]
2633 [[items]]
2629 section = "web"
2634 section = "web"
2630 name = "cacerts"
2635 name = "cacerts"
2631
2636
2632 [[items]]
2637 [[items]]
2633 section = "web"
2638 section = "web"
2634 name = "cache"
2639 name = "cache"
2635 default = true
2640 default = true
2636
2641
2637 [[items]]
2642 [[items]]
2638 section = "web"
2643 section = "web"
2639 name = "certificate"
2644 name = "certificate"
2640
2645
2641 [[items]]
2646 [[items]]
2642 section = "web"
2647 section = "web"
2643 name = "collapse"
2648 name = "collapse"
2644 default = false
2649 default = false
2645
2650
2646 [[items]]
2651 [[items]]
2647 section = "web"
2652 section = "web"
2648 name = "comparisoncontext"
2653 name = "comparisoncontext"
2649 default = 5
2654 default = 5
2650
2655
2651 [[items]]
2656 [[items]]
2652 section = "web"
2657 section = "web"
2653 name = "contact"
2658 name = "contact"
2654
2659
2655 [[items]]
2660 [[items]]
2656 section = "web"
2661 section = "web"
2657 name = "csp"
2662 name = "csp"
2658
2663
2659 [[items]]
2664 [[items]]
2660 section = "web"
2665 section = "web"
2661 name = "deny_push"
2666 name = "deny_push"
2662 default-type = "list_type"
2667 default-type = "list_type"
2663
2668
2664 [[items]]
2669 [[items]]
2665 section = "web"
2670 section = "web"
2666 name = "deny_read"
2671 name = "deny_read"
2667 default-type = "list_type"
2672 default-type = "list_type"
2668
2673
2669 [[items]]
2674 [[items]]
2670 section = "web"
2675 section = "web"
2671 name = "descend"
2676 name = "descend"
2672 default = true
2677 default = true
2673
2678
2674 [[items]]
2679 [[items]]
2675 section = "web"
2680 section = "web"
2676 name = "description"
2681 name = "description"
2677 default = ""
2682 default = ""
2678
2683
2679 [[items]]
2684 [[items]]
2680 section = "web"
2685 section = "web"
2681 name = "encoding"
2686 name = "encoding"
2682 default-type = "lazy_module"
2687 default-type = "lazy_module"
2683 default = "encoding.encoding"
2688 default = "encoding.encoding"
2684
2689
2685 [[items]]
2690 [[items]]
2686 section = "web"
2691 section = "web"
2687 name = "errorlog"
2692 name = "errorlog"
2688 default = "-"
2693 default = "-"
2689
2694
2690 [[items]]
2695 [[items]]
2691 section = "web"
2696 section = "web"
2692 name = "guessmime"
2697 name = "guessmime"
2693 default = false
2698 default = false
2694
2699
2695 [[items]]
2700 [[items]]
2696 section = "web"
2701 section = "web"
2697 name = "hidden"
2702 name = "hidden"
2698 default = false
2703 default = false
2699
2704
2700 [[items]]
2705 [[items]]
2701 section = "web"
2706 section = "web"
2702 name = "ipv6"
2707 name = "ipv6"
2703 default = false
2708 default = false
2704
2709
2705 [[items]]
2710 [[items]]
2706 section = "web"
2711 section = "web"
2707 name = "labels"
2712 name = "labels"
2708 default-type = "list_type"
2713 default-type = "list_type"
2709
2714
2710 [[items]]
2715 [[items]]
2711 section = "web"
2716 section = "web"
2712 name = "logoimg"
2717 name = "logoimg"
2713 default = "hglogo.png"
2718 default = "hglogo.png"
2714
2719
2715 [[items]]
2720 [[items]]
2716 section = "web"
2721 section = "web"
2717 name = "logourl"
2722 name = "logourl"
2718 default = "https://mercurial-scm.org/"
2723 default = "https://mercurial-scm.org/"
2719
2724
2720 [[items]]
2725 [[items]]
2721 section = "web"
2726 section = "web"
2722 name = "maxchanges"
2727 name = "maxchanges"
2723 default = 10
2728 default = 10
2724
2729
2725 [[items]]
2730 [[items]]
2726 section = "web"
2731 section = "web"
2727 name = "maxfiles"
2732 name = "maxfiles"
2728 default = 10
2733 default = 10
2729
2734
2730 [[items]]
2735 [[items]]
2731 section = "web"
2736 section = "web"
2732 name = "maxshortchanges"
2737 name = "maxshortchanges"
2733 default = 60
2738 default = 60
2734
2739
2735 [[items]]
2740 [[items]]
2736 section = "web"
2741 section = "web"
2737 name = "motd"
2742 name = "motd"
2738 default = ""
2743 default = ""
2739
2744
2740 [[items]]
2745 [[items]]
2741 section = "web"
2746 section = "web"
2742 name = "name"
2747 name = "name"
2743 default-type = "dynamic"
2748 default-type = "dynamic"
2744
2749
2745 [[items]]
2750 [[items]]
2746 section = "web"
2751 section = "web"
2747 name = "port"
2752 name = "port"
2748 default = 8000
2753 default = 8000
2749
2754
2750 [[items]]
2755 [[items]]
2751 section = "web"
2756 section = "web"
2752 name = "prefix"
2757 name = "prefix"
2753 default = ""
2758 default = ""
2754
2759
2755 [[items]]
2760 [[items]]
2756 section = "web"
2761 section = "web"
2757 name = "push_ssl"
2762 name = "push_ssl"
2758 default = true
2763 default = true
2759
2764
2760 [[items]]
2765 [[items]]
2761 section = "web"
2766 section = "web"
2762 name = "refreshinterval"
2767 name = "refreshinterval"
2763 default = 20
2768 default = 20
2764
2769
2765 [[items]]
2770 [[items]]
2766 section = "web"
2771 section = "web"
2767 name = "server-header"
2772 name = "server-header"
2768
2773
2769 [[items]]
2774 [[items]]
2770 section = "web"
2775 section = "web"
2771 name = "static"
2776 name = "static"
2772
2777
2773 [[items]]
2778 [[items]]
2774 section = "web"
2779 section = "web"
2775 name = "staticurl"
2780 name = "staticurl"
2776
2781
2777 [[items]]
2782 [[items]]
2778 section = "web"
2783 section = "web"
2779 name = "stripes"
2784 name = "stripes"
2780 default = 1
2785 default = 1
2781
2786
2782 [[items]]
2787 [[items]]
2783 section = "web"
2788 section = "web"
2784 name = "style"
2789 name = "style"
2785 default = "paper"
2790 default = "paper"
2786
2791
2787 [[items]]
2792 [[items]]
2788 section = "web"
2793 section = "web"
2789 name = "templates"
2794 name = "templates"
2790
2795
2791 [[items]]
2796 [[items]]
2792 section = "web"
2797 section = "web"
2793 name = "view"
2798 name = "view"
2794 default = "served"
2799 default = "served"
2795 experimental = true
2800 experimental = true
2796
2801
2797 [[items]]
2802 [[items]]
2798 section = "worker"
2803 section = "worker"
2799 name = "backgroundclose"
2804 name = "backgroundclose"
2800 default-type = "dynamic"
2805 default-type = "dynamic"
2801
2806
2802 [[items]]
2807 [[items]]
2803 section = "worker"
2808 section = "worker"
2804 name = "backgroundclosemaxqueue"
2809 name = "backgroundclosemaxqueue"
2805 # Windows defaults to a limit of 512 open files. A buffer of 128
2810 # Windows defaults to a limit of 512 open files. A buffer of 128
2806 # should give us enough headway.
2811 # should give us enough headway.
2807 default = 384
2812 default = 384
2808
2813
2809 [[items]]
2814 [[items]]
2810 section = "worker"
2815 section = "worker"
2811 name = "backgroundcloseminfilecount"
2816 name = "backgroundcloseminfilecount"
2812 default = 2048
2817 default = 2048
2813
2818
2814 [[items]]
2819 [[items]]
2815 section = "worker"
2820 section = "worker"
2816 name = "backgroundclosethreadcount"
2821 name = "backgroundclosethreadcount"
2817 default = 4
2822 default = 4
2818
2823
2819 [[items]]
2824 [[items]]
2820 section = "worker"
2825 section = "worker"
2821 name = "enabled"
2826 name = "enabled"
2822 default = true
2827 default = true
2823
2828
2824 [[items]]
2829 [[items]]
2825 section = "worker"
2830 section = "worker"
2826 name = "numcpus"
2831 name = "numcpus"
2827
2832
2828 # Templates and template applications
2833 # Templates and template applications
2829
2834
2830 [[template-applications]]
2835 [[template-applications]]
2831 template = "diff-options"
2836 template = "diff-options"
2832 section = "annotate"
2837 section = "annotate"
2833
2838
2834 [[template-applications]]
2839 [[template-applications]]
2835 template = "diff-options"
2840 template = "diff-options"
2836 section = "commands"
2841 section = "commands"
2837 prefix = "commit.interactive"
2842 prefix = "commit.interactive"
2838
2843
2839 [[template-applications]]
2844 [[template-applications]]
2840 template = "diff-options"
2845 template = "diff-options"
2841 section = "commands"
2846 section = "commands"
2842 prefix = "revert.interactive"
2847 prefix = "revert.interactive"
2843
2848
2844 [[template-applications]]
2849 [[template-applications]]
2845 template = "diff-options"
2850 template = "diff-options"
2846 section = "diff"
2851 section = "diff"
2847
2852
2848 [templates]
2853 [templates]
2849 [[templates.diff-options]]
2854 [[templates.diff-options]]
2850 suffix = "nodates"
2855 suffix = "nodates"
2851 default = false
2856 default = false
2852
2857
2853 [[templates.diff-options]]
2858 [[templates.diff-options]]
2854 suffix = "showfunc"
2859 suffix = "showfunc"
2855 default = false
2860 default = false
2856
2861
2857 [[templates.diff-options]]
2862 [[templates.diff-options]]
2858 suffix = "unified"
2863 suffix = "unified"
2859
2864
2860 [[templates.diff-options]]
2865 [[templates.diff-options]]
2861 suffix = "git"
2866 suffix = "git"
2862 default = false
2867 default = false
2863
2868
2864 [[templates.diff-options]]
2869 [[templates.diff-options]]
2865 suffix = "ignorews"
2870 suffix = "ignorews"
2866 default = false
2871 default = false
2867
2872
2868 [[templates.diff-options]]
2873 [[templates.diff-options]]
2869 suffix = "ignorewsamount"
2874 suffix = "ignorewsamount"
2870 default = false
2875 default = false
2871
2876
2872 [[templates.diff-options]]
2877 [[templates.diff-options]]
2873 suffix = "ignoreblanklines"
2878 suffix = "ignoreblanklines"
2874 default = false
2879 default = false
2875
2880
2876 [[templates.diff-options]]
2881 [[templates.diff-options]]
2877 suffix = "ignorewseol"
2882 suffix = "ignorewseol"
2878 default = false
2883 default = false
2879
2884
2880 [[templates.diff-options]]
2885 [[templates.diff-options]]
2881 suffix = "nobinary"
2886 suffix = "nobinary"
2882 default = false
2887 default = false
2883
2888
2884 [[templates.diff-options]]
2889 [[templates.diff-options]]
2885 suffix = "noprefix"
2890 suffix = "noprefix"
2886 default = false
2891 default = false
2887
2892
2888 [[templates.diff-options]]
2893 [[templates.diff-options]]
2889 suffix = "word-diff"
2894 suffix = "word-diff"
2890 default = false
2895 default = false
2891
2896
2892 # In-core extensions
2897 # In-core extensions
2893
2898
2894 [[items]]
2899 [[items]]
2895 section = "blackbox"
2900 section = "blackbox"
2896 name = "debug.to-stderr"
2901 name = "debug.to-stderr"
2897 default = false
2902 default = false
2898 in_core_extension = "blackbox"
2903 in_core_extension = "blackbox"
2899
2904
2900 [[items]]
2905 [[items]]
2901 section = "blackbox"
2906 section = "blackbox"
2902 name = "dirty"
2907 name = "dirty"
2903 default = false
2908 default = false
2904 in_core_extension = "blackbox"
2909 in_core_extension = "blackbox"
2905
2910
2906 [[items]]
2911 [[items]]
2907 section = "blackbox"
2912 section = "blackbox"
2908 name = "maxsize"
2913 name = "maxsize"
2909 default = "1 MB"
2914 default = "1 MB"
2910 in_core_extension = "blackbox"
2915 in_core_extension = "blackbox"
2911
2916
2912 [[items]]
2917 [[items]]
2913 section = "blackbox"
2918 section = "blackbox"
2914 name = "logsource"
2919 name = "logsource"
2915 default = false
2920 default = false
2916 in_core_extension = "blackbox"
2921 in_core_extension = "blackbox"
2917
2922
2918 [[items]]
2923 [[items]]
2919 section = "blackbox"
2924 section = "blackbox"
2920 name = "maxfiles"
2925 name = "maxfiles"
2921 default = 7
2926 default = 7
2922 in_core_extension = "blackbox"
2927 in_core_extension = "blackbox"
2923
2928
2924 [[items]]
2929 [[items]]
2925 section = "blackbox"
2930 section = "blackbox"
2926 name = "track"
2931 name = "track"
2927 default-type = "lambda"
2932 default-type = "lambda"
2928 default = ["*"]
2933 default = ["*"]
2929 in_core_extension = "blackbox"
2934 in_core_extension = "blackbox"
2930
2935
2931 [[items]]
2936 [[items]]
2932 section = "blackbox"
2937 section = "blackbox"
2933 name = "ignore"
2938 name = "ignore"
2934 default-type = "lambda"
2939 default-type = "lambda"
2935 default = ["chgserver", "cmdserver", "extension"]
2940 default = ["chgserver", "cmdserver", "extension"]
2936 in_core_extension = "blackbox"
2941 in_core_extension = "blackbox"
2937
2942
2938 [[items]]
2943 [[items]]
2939 section = "blackbox"
2944 section = "blackbox"
2940 name = "date-format"
2945 name = "date-format"
2941 default = ""
2946 default = ""
2942 in_core_extension = "blackbox"
2947 in_core_extension = "blackbox"
@@ -1,4034 +1,4038 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 # coding: utf-8
2 # coding: utf-8
3 #
3 #
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9
9
10 import functools
10 import functools
11 import os
11 import os
12 import random
12 import random
13 import re
13 import re
14 import sys
14 import sys
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from concurrent import futures
18 from concurrent import futures
19 from typing import (
19 from typing import (
20 Optional,
20 Optional,
21 )
21 )
22
22
23 from .i18n import _
23 from .i18n import _
24 from .node import (
24 from .node import (
25 bin,
25 bin,
26 hex,
26 hex,
27 nullrev,
27 nullrev,
28 sha1nodeconstants,
28 sha1nodeconstants,
29 short,
29 short,
30 )
30 )
31 from . import (
31 from . import (
32 bookmarks,
32 bookmarks,
33 branchmap,
33 branchmap,
34 bundle2,
34 bundle2,
35 bundlecaches,
35 bundlecaches,
36 changegroup,
36 changegroup,
37 color,
37 color,
38 commit,
38 commit,
39 context,
39 context,
40 dirstate,
40 dirstate,
41 discovery,
41 discovery,
42 encoding,
42 encoding,
43 error,
43 error,
44 exchange,
44 exchange,
45 extensions,
45 extensions,
46 filelog,
46 filelog,
47 hook,
47 hook,
48 lock as lockmod,
48 lock as lockmod,
49 match as matchmod,
49 match as matchmod,
50 mergestate as mergestatemod,
50 mergestate as mergestatemod,
51 mergeutil,
51 mergeutil,
52 namespaces,
52 namespaces,
53 narrowspec,
53 narrowspec,
54 obsolete,
54 obsolete,
55 pathutil,
55 pathutil,
56 phases,
56 phases,
57 policy,
57 policy,
58 pushkey,
58 pushkey,
59 pycompat,
59 pycompat,
60 rcutil,
60 rcutil,
61 repoview,
61 repoview,
62 requirements as requirementsmod,
62 requirements as requirementsmod,
63 revlog,
63 revlog,
64 revset,
64 revset,
65 revsetlang,
65 revsetlang,
66 scmutil,
66 scmutil,
67 sparse,
67 sparse,
68 store as storemod,
68 store as storemod,
69 subrepoutil,
69 subrepoutil,
70 tags as tagsmod,
70 tags as tagsmod,
71 transaction,
71 transaction,
72 txnutil,
72 txnutil,
73 util,
73 util,
74 vfs as vfsmod,
74 vfs as vfsmod,
75 wireprototypes,
75 wireprototypes,
76 )
76 )
77
77
78 from .interfaces import (
78 from .interfaces import (
79 repository,
79 repository,
80 util as interfaceutil,
80 util as interfaceutil,
81 )
81 )
82
82
83 from .utils import (
83 from .utils import (
84 hashutil,
84 hashutil,
85 procutil,
85 procutil,
86 stringutil,
86 stringutil,
87 urlutil,
87 urlutil,
88 )
88 )
89
89
90 from .revlogutils import (
90 from .revlogutils import (
91 concurrency_checker as revlogchecker,
91 concurrency_checker as revlogchecker,
92 constants as revlogconst,
92 constants as revlogconst,
93 sidedata as sidedatamod,
93 sidedata as sidedatamod,
94 )
94 )
95
95
96 release = lockmod.release
96 release = lockmod.release
97 urlerr = util.urlerr
97 urlerr = util.urlerr
98 urlreq = util.urlreq
98 urlreq = util.urlreq
99
99
100 RE_SKIP_DIRSTATE_ROLLBACK = re.compile(
100 RE_SKIP_DIRSTATE_ROLLBACK = re.compile(
101 b"^((dirstate|narrowspec.dirstate).*|branch$)"
101 b"^((dirstate|narrowspec.dirstate).*|branch$)"
102 )
102 )
103
103
104 # set of (path, vfs-location) tuples. vfs-location is:
104 # set of (path, vfs-location) tuples. vfs-location is:
105 # - 'plain for vfs relative paths
105 # - 'plain for vfs relative paths
106 # - '' for svfs relative paths
106 # - '' for svfs relative paths
107 _cachedfiles = set()
107 _cachedfiles = set()
108
108
109
109
110 class _basefilecache(scmutil.filecache):
110 class _basefilecache(scmutil.filecache):
111 """All filecache usage on repo are done for logic that should be unfiltered"""
111 """All filecache usage on repo are done for logic that should be unfiltered"""
112
112
113 def __get__(self, repo, type=None):
113 def __get__(self, repo, type=None):
114 if repo is None:
114 if repo is None:
115 return self
115 return self
116 # proxy to unfiltered __dict__ since filtered repo has no entry
116 # proxy to unfiltered __dict__ since filtered repo has no entry
117 unfi = repo.unfiltered()
117 unfi = repo.unfiltered()
118 try:
118 try:
119 return unfi.__dict__[self.sname]
119 return unfi.__dict__[self.sname]
120 except KeyError:
120 except KeyError:
121 pass
121 pass
122 return super(_basefilecache, self).__get__(unfi, type)
122 return super(_basefilecache, self).__get__(unfi, type)
123
123
124 def set(self, repo, value):
124 def set(self, repo, value):
125 return super(_basefilecache, self).set(repo.unfiltered(), value)
125 return super(_basefilecache, self).set(repo.unfiltered(), value)
126
126
127
127
128 class repofilecache(_basefilecache):
128 class repofilecache(_basefilecache):
129 """filecache for files in .hg but outside of .hg/store"""
129 """filecache for files in .hg but outside of .hg/store"""
130
130
131 def __init__(self, *paths):
131 def __init__(self, *paths):
132 super(repofilecache, self).__init__(*paths)
132 super(repofilecache, self).__init__(*paths)
133 for path in paths:
133 for path in paths:
134 _cachedfiles.add((path, b'plain'))
134 _cachedfiles.add((path, b'plain'))
135
135
136 def join(self, obj, fname):
136 def join(self, obj, fname):
137 return obj.vfs.join(fname)
137 return obj.vfs.join(fname)
138
138
139
139
140 class storecache(_basefilecache):
140 class storecache(_basefilecache):
141 """filecache for files in the store"""
141 """filecache for files in the store"""
142
142
143 def __init__(self, *paths):
143 def __init__(self, *paths):
144 super(storecache, self).__init__(*paths)
144 super(storecache, self).__init__(*paths)
145 for path in paths:
145 for path in paths:
146 _cachedfiles.add((path, b''))
146 _cachedfiles.add((path, b''))
147
147
148 def join(self, obj, fname):
148 def join(self, obj, fname):
149 return obj.sjoin(fname)
149 return obj.sjoin(fname)
150
150
151
151
152 class changelogcache(storecache):
152 class changelogcache(storecache):
153 """filecache for the changelog"""
153 """filecache for the changelog"""
154
154
155 def __init__(self):
155 def __init__(self):
156 super(changelogcache, self).__init__()
156 super(changelogcache, self).__init__()
157 _cachedfiles.add((b'00changelog.i', b''))
157 _cachedfiles.add((b'00changelog.i', b''))
158 _cachedfiles.add((b'00changelog.n', b''))
158 _cachedfiles.add((b'00changelog.n', b''))
159
159
160 def tracked_paths(self, obj):
160 def tracked_paths(self, obj):
161 paths = [self.join(obj, b'00changelog.i')]
161 paths = [self.join(obj, b'00changelog.i')]
162 if obj.store.opener.options.get(b'persistent-nodemap', False):
162 if obj.store.opener.options.get(b'persistent-nodemap', False):
163 paths.append(self.join(obj, b'00changelog.n'))
163 paths.append(self.join(obj, b'00changelog.n'))
164 return paths
164 return paths
165
165
166
166
167 class manifestlogcache(storecache):
167 class manifestlogcache(storecache):
168 """filecache for the manifestlog"""
168 """filecache for the manifestlog"""
169
169
170 def __init__(self):
170 def __init__(self):
171 super(manifestlogcache, self).__init__()
171 super(manifestlogcache, self).__init__()
172 _cachedfiles.add((b'00manifest.i', b''))
172 _cachedfiles.add((b'00manifest.i', b''))
173 _cachedfiles.add((b'00manifest.n', b''))
173 _cachedfiles.add((b'00manifest.n', b''))
174
174
175 def tracked_paths(self, obj):
175 def tracked_paths(self, obj):
176 paths = [self.join(obj, b'00manifest.i')]
176 paths = [self.join(obj, b'00manifest.i')]
177 if obj.store.opener.options.get(b'persistent-nodemap', False):
177 if obj.store.opener.options.get(b'persistent-nodemap', False):
178 paths.append(self.join(obj, b'00manifest.n'))
178 paths.append(self.join(obj, b'00manifest.n'))
179 return paths
179 return paths
180
180
181
181
182 class mixedrepostorecache(_basefilecache):
182 class mixedrepostorecache(_basefilecache):
183 """filecache for a mix files in .hg/store and outside"""
183 """filecache for a mix files in .hg/store and outside"""
184
184
185 def __init__(self, *pathsandlocations):
185 def __init__(self, *pathsandlocations):
186 # scmutil.filecache only uses the path for passing back into our
186 # scmutil.filecache only uses the path for passing back into our
187 # join(), so we can safely pass a list of paths and locations
187 # join(), so we can safely pass a list of paths and locations
188 super(mixedrepostorecache, self).__init__(*pathsandlocations)
188 super(mixedrepostorecache, self).__init__(*pathsandlocations)
189 _cachedfiles.update(pathsandlocations)
189 _cachedfiles.update(pathsandlocations)
190
190
191 def join(self, obj, fnameandlocation):
191 def join(self, obj, fnameandlocation):
192 fname, location = fnameandlocation
192 fname, location = fnameandlocation
193 if location == b'plain':
193 if location == b'plain':
194 return obj.vfs.join(fname)
194 return obj.vfs.join(fname)
195 else:
195 else:
196 if location != b'':
196 if location != b'':
197 raise error.ProgrammingError(
197 raise error.ProgrammingError(
198 b'unexpected location: %s' % location
198 b'unexpected location: %s' % location
199 )
199 )
200 return obj.sjoin(fname)
200 return obj.sjoin(fname)
201
201
202
202
203 def isfilecached(repo, name):
203 def isfilecached(repo, name):
204 """check if a repo has already cached "name" filecache-ed property
204 """check if a repo has already cached "name" filecache-ed property
205
205
206 This returns (cachedobj-or-None, iscached) tuple.
206 This returns (cachedobj-or-None, iscached) tuple.
207 """
207 """
208 cacheentry = repo.unfiltered()._filecache.get(name, None)
208 cacheentry = repo.unfiltered()._filecache.get(name, None)
209 if not cacheentry:
209 if not cacheentry:
210 return None, False
210 return None, False
211 return cacheentry.obj, True
211 return cacheentry.obj, True
212
212
213
213
214 class unfilteredpropertycache(util.propertycache):
214 class unfilteredpropertycache(util.propertycache):
215 """propertycache that apply to unfiltered repo only"""
215 """propertycache that apply to unfiltered repo only"""
216
216
217 def __get__(self, repo, type=None):
217 def __get__(self, repo, type=None):
218 unfi = repo.unfiltered()
218 unfi = repo.unfiltered()
219 if unfi is repo:
219 if unfi is repo:
220 return super(unfilteredpropertycache, self).__get__(unfi)
220 return super(unfilteredpropertycache, self).__get__(unfi)
221 return getattr(unfi, self.name)
221 return getattr(unfi, self.name)
222
222
223
223
224 class filteredpropertycache(util.propertycache):
224 class filteredpropertycache(util.propertycache):
225 """propertycache that must take filtering in account"""
225 """propertycache that must take filtering in account"""
226
226
227 def cachevalue(self, obj, value):
227 def cachevalue(self, obj, value):
228 object.__setattr__(obj, self.name, value)
228 object.__setattr__(obj, self.name, value)
229
229
230
230
231 def hasunfilteredcache(repo, name):
231 def hasunfilteredcache(repo, name):
232 """check if a repo has an unfilteredpropertycache value for <name>"""
232 """check if a repo has an unfilteredpropertycache value for <name>"""
233 return name in vars(repo.unfiltered())
233 return name in vars(repo.unfiltered())
234
234
235
235
236 def unfilteredmethod(orig):
236 def unfilteredmethod(orig):
237 """decorate method that always need to be run on unfiltered version"""
237 """decorate method that always need to be run on unfiltered version"""
238
238
239 @functools.wraps(orig)
239 @functools.wraps(orig)
240 def wrapper(repo, *args, **kwargs):
240 def wrapper(repo, *args, **kwargs):
241 return orig(repo.unfiltered(), *args, **kwargs)
241 return orig(repo.unfiltered(), *args, **kwargs)
242
242
243 return wrapper
243 return wrapper
244
244
245
245
246 moderncaps = {
246 moderncaps = {
247 b'lookup',
247 b'lookup',
248 b'branchmap',
248 b'branchmap',
249 b'pushkey',
249 b'pushkey',
250 b'known',
250 b'known',
251 b'getbundle',
251 b'getbundle',
252 b'unbundle',
252 b'unbundle',
253 }
253 }
254 legacycaps = moderncaps.union({b'changegroupsubset'})
254 legacycaps = moderncaps.union({b'changegroupsubset'})
255
255
256
256
257 @interfaceutil.implementer(repository.ipeercommandexecutor)
257 @interfaceutil.implementer(repository.ipeercommandexecutor)
258 class localcommandexecutor:
258 class localcommandexecutor:
259 def __init__(self, peer):
259 def __init__(self, peer):
260 self._peer = peer
260 self._peer = peer
261 self._sent = False
261 self._sent = False
262 self._closed = False
262 self._closed = False
263
263
264 def __enter__(self):
264 def __enter__(self):
265 return self
265 return self
266
266
267 def __exit__(self, exctype, excvalue, exctb):
267 def __exit__(self, exctype, excvalue, exctb):
268 self.close()
268 self.close()
269
269
270 def callcommand(self, command, args):
270 def callcommand(self, command, args):
271 if self._sent:
271 if self._sent:
272 raise error.ProgrammingError(
272 raise error.ProgrammingError(
273 b'callcommand() cannot be used after sendcommands()'
273 b'callcommand() cannot be used after sendcommands()'
274 )
274 )
275
275
276 if self._closed:
276 if self._closed:
277 raise error.ProgrammingError(
277 raise error.ProgrammingError(
278 b'callcommand() cannot be used after close()'
278 b'callcommand() cannot be used after close()'
279 )
279 )
280
280
281 # We don't need to support anything fancy. Just call the named
281 # We don't need to support anything fancy. Just call the named
282 # method on the peer and return a resolved future.
282 # method on the peer and return a resolved future.
283 fn = getattr(self._peer, pycompat.sysstr(command))
283 fn = getattr(self._peer, pycompat.sysstr(command))
284
284
285 f = futures.Future()
285 f = futures.Future()
286
286
287 try:
287 try:
288 result = fn(**pycompat.strkwargs(args))
288 result = fn(**pycompat.strkwargs(args))
289 except Exception:
289 except Exception:
290 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
290 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
291 else:
291 else:
292 f.set_result(result)
292 f.set_result(result)
293
293
294 return f
294 return f
295
295
296 def sendcommands(self):
296 def sendcommands(self):
297 self._sent = True
297 self._sent = True
298
298
299 def close(self):
299 def close(self):
300 self._closed = True
300 self._closed = True
301
301
302
302
303 @interfaceutil.implementer(repository.ipeercommands)
303 @interfaceutil.implementer(repository.ipeercommands)
304 class localpeer(repository.peer):
304 class localpeer(repository.peer):
305 '''peer for a local repo; reflects only the most recent API'''
305 '''peer for a local repo; reflects only the most recent API'''
306
306
307 def __init__(self, repo, caps=None, path=None, remotehidden=False):
307 def __init__(self, repo, caps=None, path=None, remotehidden=False):
308 super(localpeer, self).__init__(
308 super(localpeer, self).__init__(
309 repo.ui, path=path, remotehidden=remotehidden
309 repo.ui, path=path, remotehidden=remotehidden
310 )
310 )
311
311
312 if caps is None:
312 if caps is None:
313 caps = moderncaps.copy()
313 caps = moderncaps.copy()
314 if remotehidden:
314 if remotehidden:
315 self._repo = repo.filtered(b'served.hidden')
315 self._repo = repo.filtered(b'served.hidden')
316 else:
316 else:
317 self._repo = repo.filtered(b'served')
317 self._repo = repo.filtered(b'served')
318 if repo._wanted_sidedata:
318 if repo._wanted_sidedata:
319 formatted = bundle2.format_remote_wanted_sidedata(repo)
319 formatted = bundle2.format_remote_wanted_sidedata(repo)
320 caps.add(b'exp-wanted-sidedata=' + formatted)
320 caps.add(b'exp-wanted-sidedata=' + formatted)
321
321
322 self._caps = repo._restrictcapabilities(caps)
322 self._caps = repo._restrictcapabilities(caps)
323
323
324 # Begin of _basepeer interface.
324 # Begin of _basepeer interface.
325
325
326 def url(self):
326 def url(self):
327 return self._repo.url()
327 return self._repo.url()
328
328
329 def local(self):
329 def local(self):
330 return self._repo
330 return self._repo
331
331
332 def canpush(self):
332 def canpush(self):
333 return True
333 return True
334
334
335 def close(self):
335 def close(self):
336 self._repo.close()
336 self._repo.close()
337
337
338 # End of _basepeer interface.
338 # End of _basepeer interface.
339
339
340 # Begin of _basewirecommands interface.
340 # Begin of _basewirecommands interface.
341
341
342 def branchmap(self):
342 def branchmap(self):
343 return self._repo.branchmap()
343 return self._repo.branchmap()
344
344
345 def capabilities(self):
345 def capabilities(self):
346 return self._caps
346 return self._caps
347
347
348 def get_cached_bundle_inline(self, path):
348 def get_cached_bundle_inline(self, path):
349 # not needed with local peer
349 # not needed with local peer
350 raise NotImplementedError
350 raise NotImplementedError
351
351
352 def clonebundles(self):
352 def clonebundles(self):
353 return bundlecaches.get_manifest(self._repo)
353 return bundlecaches.get_manifest(self._repo)
354
354
355 def debugwireargs(self, one, two, three=None, four=None, five=None):
355 def debugwireargs(self, one, two, three=None, four=None, five=None):
356 """Used to test argument passing over the wire"""
356 """Used to test argument passing over the wire"""
357 return b"%s %s %s %s %s" % (
357 return b"%s %s %s %s %s" % (
358 one,
358 one,
359 two,
359 two,
360 pycompat.bytestr(three),
360 pycompat.bytestr(three),
361 pycompat.bytestr(four),
361 pycompat.bytestr(four),
362 pycompat.bytestr(five),
362 pycompat.bytestr(five),
363 )
363 )
364
364
365 def getbundle(
365 def getbundle(
366 self,
366 self,
367 source,
367 source,
368 heads=None,
368 heads=None,
369 common=None,
369 common=None,
370 bundlecaps=None,
370 bundlecaps=None,
371 remote_sidedata=None,
371 remote_sidedata=None,
372 **kwargs,
372 **kwargs,
373 ):
373 ):
374 chunks = exchange.getbundlechunks(
374 chunks = exchange.getbundlechunks(
375 self._repo,
375 self._repo,
376 source,
376 source,
377 heads=heads,
377 heads=heads,
378 common=common,
378 common=common,
379 bundlecaps=bundlecaps,
379 bundlecaps=bundlecaps,
380 remote_sidedata=remote_sidedata,
380 remote_sidedata=remote_sidedata,
381 **kwargs,
381 **kwargs,
382 )[1]
382 )[1]
383 cb = util.chunkbuffer(chunks)
383 cb = util.chunkbuffer(chunks)
384
384
385 if exchange.bundle2requested(bundlecaps):
385 if exchange.bundle2requested(bundlecaps):
386 # When requesting a bundle2, getbundle returns a stream to make the
386 # When requesting a bundle2, getbundle returns a stream to make the
387 # wire level function happier. We need to build a proper object
387 # wire level function happier. We need to build a proper object
388 # from it in local peer.
388 # from it in local peer.
389 return bundle2.getunbundler(self.ui, cb)
389 return bundle2.getunbundler(self.ui, cb)
390 else:
390 else:
391 return changegroup.getunbundler(b'01', cb, None)
391 return changegroup.getunbundler(b'01', cb, None)
392
392
393 def heads(self):
393 def heads(self):
394 return self._repo.heads()
394 return self._repo.heads()
395
395
396 def known(self, nodes):
396 def known(self, nodes):
397 return self._repo.known(nodes)
397 return self._repo.known(nodes)
398
398
399 def listkeys(self, namespace):
399 def listkeys(self, namespace):
400 return self._repo.listkeys(namespace)
400 return self._repo.listkeys(namespace)
401
401
402 def lookup(self, key):
402 def lookup(self, key):
403 return self._repo.lookup(key)
403 return self._repo.lookup(key)
404
404
405 def pushkey(self, namespace, key, old, new):
405 def pushkey(self, namespace, key, old, new):
406 return self._repo.pushkey(namespace, key, old, new)
406 return self._repo.pushkey(namespace, key, old, new)
407
407
408 def stream_out(self):
408 def stream_out(self):
409 raise error.Abort(_(b'cannot perform stream clone against local peer'))
409 raise error.Abort(_(b'cannot perform stream clone against local peer'))
410
410
411 def unbundle(self, bundle, heads, url):
411 def unbundle(self, bundle, heads, url):
412 """apply a bundle on a repo
412 """apply a bundle on a repo
413
413
414 This function handles the repo locking itself."""
414 This function handles the repo locking itself."""
415 try:
415 try:
416 try:
416 try:
417 bundle = exchange.readbundle(self.ui, bundle, None)
417 bundle = exchange.readbundle(self.ui, bundle, None)
418 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
418 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
419 if hasattr(ret, 'getchunks'):
419 if hasattr(ret, 'getchunks'):
420 # This is a bundle20 object, turn it into an unbundler.
420 # This is a bundle20 object, turn it into an unbundler.
421 # This little dance should be dropped eventually when the
421 # This little dance should be dropped eventually when the
422 # API is finally improved.
422 # API is finally improved.
423 stream = util.chunkbuffer(ret.getchunks())
423 stream = util.chunkbuffer(ret.getchunks())
424 ret = bundle2.getunbundler(self.ui, stream)
424 ret = bundle2.getunbundler(self.ui, stream)
425 return ret
425 return ret
426 except Exception as exc:
426 except Exception as exc:
427 # If the exception contains output salvaged from a bundle2
427 # If the exception contains output salvaged from a bundle2
428 # reply, we need to make sure it is printed before continuing
428 # reply, we need to make sure it is printed before continuing
429 # to fail. So we build a bundle2 with such output and consume
429 # to fail. So we build a bundle2 with such output and consume
430 # it directly.
430 # it directly.
431 #
431 #
432 # This is not very elegant but allows a "simple" solution for
432 # This is not very elegant but allows a "simple" solution for
433 # issue4594
433 # issue4594
434 output = getattr(exc, '_bundle2salvagedoutput', ())
434 output = getattr(exc, '_bundle2salvagedoutput', ())
435 if output:
435 if output:
436 bundler = bundle2.bundle20(self._repo.ui)
436 bundler = bundle2.bundle20(self._repo.ui)
437 for out in output:
437 for out in output:
438 bundler.addpart(out)
438 bundler.addpart(out)
439 stream = util.chunkbuffer(bundler.getchunks())
439 stream = util.chunkbuffer(bundler.getchunks())
440 b = bundle2.getunbundler(self.ui, stream)
440 b = bundle2.getunbundler(self.ui, stream)
441 bundle2.processbundle(self._repo, b)
441 bundle2.processbundle(self._repo, b)
442 raise
442 raise
443 except error.PushRaced as exc:
443 except error.PushRaced as exc:
444 raise error.ResponseError(
444 raise error.ResponseError(
445 _(b'push failed:'), stringutil.forcebytestr(exc)
445 _(b'push failed:'), stringutil.forcebytestr(exc)
446 )
446 )
447
447
448 # End of _basewirecommands interface.
448 # End of _basewirecommands interface.
449
449
450 # Begin of peer interface.
450 # Begin of peer interface.
451
451
452 def commandexecutor(self):
452 def commandexecutor(self):
453 return localcommandexecutor(self)
453 return localcommandexecutor(self)
454
454
455 # End of peer interface.
455 # End of peer interface.
456
456
457
457
458 @interfaceutil.implementer(repository.ipeerlegacycommands)
458 @interfaceutil.implementer(repository.ipeerlegacycommands)
459 class locallegacypeer(localpeer):
459 class locallegacypeer(localpeer):
460 """peer extension which implements legacy methods too; used for tests with
460 """peer extension which implements legacy methods too; used for tests with
461 restricted capabilities"""
461 restricted capabilities"""
462
462
463 def __init__(self, repo, path=None, remotehidden=False):
463 def __init__(self, repo, path=None, remotehidden=False):
464 super(locallegacypeer, self).__init__(
464 super(locallegacypeer, self).__init__(
465 repo, caps=legacycaps, path=path, remotehidden=remotehidden
465 repo, caps=legacycaps, path=path, remotehidden=remotehidden
466 )
466 )
467
467
468 # Begin of baselegacywirecommands interface.
468 # Begin of baselegacywirecommands interface.
469
469
470 def between(self, pairs):
470 def between(self, pairs):
471 return self._repo.between(pairs)
471 return self._repo.between(pairs)
472
472
473 def branches(self, nodes):
473 def branches(self, nodes):
474 return self._repo.branches(nodes)
474 return self._repo.branches(nodes)
475
475
476 def changegroup(self, nodes, source):
476 def changegroup(self, nodes, source):
477 outgoing = discovery.outgoing(
477 outgoing = discovery.outgoing(
478 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
478 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
479 )
479 )
480 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
480 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
481
481
482 def changegroupsubset(self, bases, heads, source):
482 def changegroupsubset(self, bases, heads, source):
483 outgoing = discovery.outgoing(
483 outgoing = discovery.outgoing(
484 self._repo, missingroots=bases, ancestorsof=heads
484 self._repo, missingroots=bases, ancestorsof=heads
485 )
485 )
486 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
486 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
487
487
488 # End of baselegacywirecommands interface.
488 # End of baselegacywirecommands interface.
489
489
490
490
491 # Functions receiving (ui, features) that extensions can register to impact
491 # Functions receiving (ui, features) that extensions can register to impact
492 # the ability to load repositories with custom requirements. Only
492 # the ability to load repositories with custom requirements. Only
493 # functions defined in loaded extensions are called.
493 # functions defined in loaded extensions are called.
494 #
494 #
495 # The function receives a set of requirement strings that the repository
495 # The function receives a set of requirement strings that the repository
496 # is capable of opening. Functions will typically add elements to the
496 # is capable of opening. Functions will typically add elements to the
497 # set to reflect that the extension knows how to handle that requirements.
497 # set to reflect that the extension knows how to handle that requirements.
498 featuresetupfuncs = set()
498 featuresetupfuncs = set()
499
499
500
500
501 def _getsharedvfs(hgvfs, requirements):
501 def _getsharedvfs(hgvfs, requirements):
502 """returns the vfs object pointing to root of shared source
502 """returns the vfs object pointing to root of shared source
503 repo for a shared repository
503 repo for a shared repository
504
504
505 hgvfs is vfs pointing at .hg/ of current repo (shared one)
505 hgvfs is vfs pointing at .hg/ of current repo (shared one)
506 requirements is a set of requirements of current repo (shared one)
506 requirements is a set of requirements of current repo (shared one)
507 """
507 """
508 # The ``shared`` or ``relshared`` requirements indicate the
508 # The ``shared`` or ``relshared`` requirements indicate the
509 # store lives in the path contained in the ``.hg/sharedpath`` file.
509 # store lives in the path contained in the ``.hg/sharedpath`` file.
510 # This is an absolute path for ``shared`` and relative to
510 # This is an absolute path for ``shared`` and relative to
511 # ``.hg/`` for ``relshared``.
511 # ``.hg/`` for ``relshared``.
512 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
512 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
513 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
513 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
514 sharedpath = util.normpath(hgvfs.join(sharedpath))
514 sharedpath = util.normpath(hgvfs.join(sharedpath))
515
515
516 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
516 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
517
517
518 if not sharedvfs.exists():
518 if not sharedvfs.exists():
519 raise error.RepoError(
519 raise error.RepoError(
520 _(b'.hg/sharedpath points to nonexistent directory %s')
520 _(b'.hg/sharedpath points to nonexistent directory %s')
521 % sharedvfs.base
521 % sharedvfs.base
522 )
522 )
523 return sharedvfs
523 return sharedvfs
524
524
525
525
526 def _readrequires(vfs, allowmissing):
526 def _readrequires(vfs, allowmissing):
527 """reads the require file present at root of this vfs
527 """reads the require file present at root of this vfs
528 and return a set of requirements
528 and return a set of requirements
529
529
530 If allowmissing is True, we suppress FileNotFoundError if raised"""
530 If allowmissing is True, we suppress FileNotFoundError if raised"""
531 # requires file contains a newline-delimited list of
531 # requires file contains a newline-delimited list of
532 # features/capabilities the opener (us) must have in order to use
532 # features/capabilities the opener (us) must have in order to use
533 # the repository. This file was introduced in Mercurial 0.9.2,
533 # the repository. This file was introduced in Mercurial 0.9.2,
534 # which means very old repositories may not have one. We assume
534 # which means very old repositories may not have one. We assume
535 # a missing file translates to no requirements.
535 # a missing file translates to no requirements.
536 read = vfs.tryread if allowmissing else vfs.read
536 read = vfs.tryread if allowmissing else vfs.read
537 return set(read(b'requires').splitlines())
537 return set(read(b'requires').splitlines())
538
538
539
539
540 def makelocalrepository(baseui, path: bytes, intents=None):
540 def makelocalrepository(baseui, path: bytes, intents=None):
541 """Create a local repository object.
541 """Create a local repository object.
542
542
543 Given arguments needed to construct a local repository, this function
543 Given arguments needed to construct a local repository, this function
544 performs various early repository loading functionality (such as
544 performs various early repository loading functionality (such as
545 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
545 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
546 the repository can be opened, derives a type suitable for representing
546 the repository can be opened, derives a type suitable for representing
547 that repository, and returns an instance of it.
547 that repository, and returns an instance of it.
548
548
549 The returned object conforms to the ``repository.completelocalrepository``
549 The returned object conforms to the ``repository.completelocalrepository``
550 interface.
550 interface.
551
551
552 The repository type is derived by calling a series of factory functions
552 The repository type is derived by calling a series of factory functions
553 for each aspect/interface of the final repository. These are defined by
553 for each aspect/interface of the final repository. These are defined by
554 ``REPO_INTERFACES``.
554 ``REPO_INTERFACES``.
555
555
556 Each factory function is called to produce a type implementing a specific
556 Each factory function is called to produce a type implementing a specific
557 interface. The cumulative list of returned types will be combined into a
557 interface. The cumulative list of returned types will be combined into a
558 new type and that type will be instantiated to represent the local
558 new type and that type will be instantiated to represent the local
559 repository.
559 repository.
560
560
561 The factory functions each receive various state that may be consulted
561 The factory functions each receive various state that may be consulted
562 as part of deriving a type.
562 as part of deriving a type.
563
563
564 Extensions should wrap these factory functions to customize repository type
564 Extensions should wrap these factory functions to customize repository type
565 creation. Note that an extension's wrapped function may be called even if
565 creation. Note that an extension's wrapped function may be called even if
566 that extension is not loaded for the repo being constructed. Extensions
566 that extension is not loaded for the repo being constructed. Extensions
567 should check if their ``__name__`` appears in the
567 should check if their ``__name__`` appears in the
568 ``extensionmodulenames`` set passed to the factory function and no-op if
568 ``extensionmodulenames`` set passed to the factory function and no-op if
569 not.
569 not.
570 """
570 """
571 ui = baseui.copy()
571 ui = baseui.copy()
572 # Prevent copying repo configuration.
572 # Prevent copying repo configuration.
573 ui.copy = baseui.copy
573 ui.copy = baseui.copy
574
574
575 # Working directory VFS rooted at repository root.
575 # Working directory VFS rooted at repository root.
576 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
576 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
577
577
578 # Main VFS for .hg/ directory.
578 # Main VFS for .hg/ directory.
579 hgpath = wdirvfs.join(b'.hg')
579 hgpath = wdirvfs.join(b'.hg')
580 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
580 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
581 # Whether this repository is shared one or not
581 # Whether this repository is shared one or not
582 shared = False
582 shared = False
583 # If this repository is shared, vfs pointing to shared repo
583 # If this repository is shared, vfs pointing to shared repo
584 sharedvfs = None
584 sharedvfs = None
585
585
586 # The .hg/ path should exist and should be a directory. All other
586 # The .hg/ path should exist and should be a directory. All other
587 # cases are errors.
587 # cases are errors.
588 if not hgvfs.isdir():
588 if not hgvfs.isdir():
589 try:
589 try:
590 hgvfs.stat()
590 hgvfs.stat()
591 except FileNotFoundError:
591 except FileNotFoundError:
592 pass
592 pass
593 except ValueError as e:
593 except ValueError as e:
594 # Can be raised on Python 3.8 when path is invalid.
594 # Can be raised on Python 3.8 when path is invalid.
595 raise error.Abort(
595 raise error.Abort(
596 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
596 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
597 )
597 )
598
598
599 raise error.RepoError(_(b'repository %s not found') % path)
599 raise error.RepoError(_(b'repository %s not found') % path)
600
600
601 requirements = _readrequires(hgvfs, True)
601 requirements = _readrequires(hgvfs, True)
602 shared = (
602 shared = (
603 requirementsmod.SHARED_REQUIREMENT in requirements
603 requirementsmod.SHARED_REQUIREMENT in requirements
604 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
604 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
605 )
605 )
606 storevfs = None
606 storevfs = None
607 if shared:
607 if shared:
608 # This is a shared repo
608 # This is a shared repo
609 sharedvfs = _getsharedvfs(hgvfs, requirements)
609 sharedvfs = _getsharedvfs(hgvfs, requirements)
610 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
610 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
611 else:
611 else:
612 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
612 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
613
613
614 # if .hg/requires contains the sharesafe requirement, it means
614 # if .hg/requires contains the sharesafe requirement, it means
615 # there exists a `.hg/store/requires` too and we should read it
615 # there exists a `.hg/store/requires` too and we should read it
616 # NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
616 # NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
617 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
617 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
618 # is not present, refer checkrequirementscompat() for that
618 # is not present, refer checkrequirementscompat() for that
619 #
619 #
620 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
620 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
621 # repository was shared the old way. We check the share source .hg/requires
621 # repository was shared the old way. We check the share source .hg/requires
622 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
622 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
623 # to be reshared
623 # to be reshared
624 hint = _(b"see `hg help config.format.use-share-safe` for more information")
624 hint = _(b"see `hg help config.format.use-share-safe` for more information")
625 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
625 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
626 if (
626 if (
627 shared
627 shared
628 and requirementsmod.SHARESAFE_REQUIREMENT
628 and requirementsmod.SHARESAFE_REQUIREMENT
629 not in _readrequires(sharedvfs, True)
629 not in _readrequires(sharedvfs, True)
630 ):
630 ):
631 mismatch_warn = ui.configbool(
631 mismatch_warn = ui.configbool(
632 b'share', b'safe-mismatch.source-not-safe.warn'
632 b'share', b'safe-mismatch.source-not-safe.warn'
633 )
633 )
634 mismatch_config = ui.config(
634 mismatch_config = ui.config(
635 b'share', b'safe-mismatch.source-not-safe'
635 b'share', b'safe-mismatch.source-not-safe'
636 )
636 )
637 mismatch_verbose_upgrade = ui.configbool(
637 mismatch_verbose_upgrade = ui.configbool(
638 b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
638 b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
639 )
639 )
640 if mismatch_config in (
640 if mismatch_config in (
641 b'downgrade-allow',
641 b'downgrade-allow',
642 b'allow',
642 b'allow',
643 b'downgrade-abort',
643 b'downgrade-abort',
644 ):
644 ):
645 # prevent cyclic import localrepo -> upgrade -> localrepo
645 # prevent cyclic import localrepo -> upgrade -> localrepo
646 from . import upgrade
646 from . import upgrade
647
647
648 upgrade.downgrade_share_to_non_safe(
648 upgrade.downgrade_share_to_non_safe(
649 ui,
649 ui,
650 hgvfs,
650 hgvfs,
651 sharedvfs,
651 sharedvfs,
652 requirements,
652 requirements,
653 mismatch_config,
653 mismatch_config,
654 mismatch_warn,
654 mismatch_warn,
655 mismatch_verbose_upgrade,
655 mismatch_verbose_upgrade,
656 )
656 )
657 elif mismatch_config == b'abort':
657 elif mismatch_config == b'abort':
658 raise error.Abort(
658 raise error.Abort(
659 _(b"share source does not support share-safe requirement"),
659 _(b"share source does not support share-safe requirement"),
660 hint=hint,
660 hint=hint,
661 )
661 )
662 else:
662 else:
663 raise error.Abort(
663 raise error.Abort(
664 _(
664 _(
665 b"share-safe mismatch with source.\nUnrecognized"
665 b"share-safe mismatch with source.\nUnrecognized"
666 b" value '%s' of `share.safe-mismatch.source-not-safe`"
666 b" value '%s' of `share.safe-mismatch.source-not-safe`"
667 b" set."
667 b" set."
668 )
668 )
669 % mismatch_config,
669 % mismatch_config,
670 hint=hint,
670 hint=hint,
671 )
671 )
672 else:
672 else:
673 requirements |= _readrequires(storevfs, False)
673 requirements |= _readrequires(storevfs, False)
674 elif shared:
674 elif shared:
675 sourcerequires = _readrequires(sharedvfs, False)
675 sourcerequires = _readrequires(sharedvfs, False)
676 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
676 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
677 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
677 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
678 mismatch_warn = ui.configbool(
678 mismatch_warn = ui.configbool(
679 b'share', b'safe-mismatch.source-safe.warn'
679 b'share', b'safe-mismatch.source-safe.warn'
680 )
680 )
681 mismatch_verbose_upgrade = ui.configbool(
681 mismatch_verbose_upgrade = ui.configbool(
682 b'share', b'safe-mismatch.source-safe:verbose-upgrade'
682 b'share', b'safe-mismatch.source-safe:verbose-upgrade'
683 )
683 )
684 if mismatch_config in (
684 if mismatch_config in (
685 b'upgrade-allow',
685 b'upgrade-allow',
686 b'allow',
686 b'allow',
687 b'upgrade-abort',
687 b'upgrade-abort',
688 ):
688 ):
689 # prevent cyclic import localrepo -> upgrade -> localrepo
689 # prevent cyclic import localrepo -> upgrade -> localrepo
690 from . import upgrade
690 from . import upgrade
691
691
692 upgrade.upgrade_share_to_safe(
692 upgrade.upgrade_share_to_safe(
693 ui,
693 ui,
694 hgvfs,
694 hgvfs,
695 storevfs,
695 storevfs,
696 requirements,
696 requirements,
697 mismatch_config,
697 mismatch_config,
698 mismatch_warn,
698 mismatch_warn,
699 mismatch_verbose_upgrade,
699 mismatch_verbose_upgrade,
700 )
700 )
701 elif mismatch_config == b'abort':
701 elif mismatch_config == b'abort':
702 raise error.Abort(
702 raise error.Abort(
703 _(
703 _(
704 b'version mismatch: source uses share-safe'
704 b'version mismatch: source uses share-safe'
705 b' functionality while the current share does not'
705 b' functionality while the current share does not'
706 ),
706 ),
707 hint=hint,
707 hint=hint,
708 )
708 )
709 else:
709 else:
710 raise error.Abort(
710 raise error.Abort(
711 _(
711 _(
712 b"share-safe mismatch with source.\nUnrecognized"
712 b"share-safe mismatch with source.\nUnrecognized"
713 b" value '%s' of `share.safe-mismatch.source-safe` set."
713 b" value '%s' of `share.safe-mismatch.source-safe` set."
714 )
714 )
715 % mismatch_config,
715 % mismatch_config,
716 hint=hint,
716 hint=hint,
717 )
717 )
718
718
719 # The .hg/hgrc file may load extensions or contain config options
719 # The .hg/hgrc file may load extensions or contain config options
720 # that influence repository construction. Attempt to load it and
720 # that influence repository construction. Attempt to load it and
721 # process any new extensions that it may have pulled in.
721 # process any new extensions that it may have pulled in.
722 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
722 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
723 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
723 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
724 extensions.loadall(ui)
724 extensions.loadall(ui)
725 extensions.populateui(ui)
725 extensions.populateui(ui)
726
726
727 # Set of module names of extensions loaded for this repository.
727 # Set of module names of extensions loaded for this repository.
728 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
728 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
729
729
730 supportedrequirements = gathersupportedrequirements(ui)
730 supportedrequirements = gathersupportedrequirements(ui)
731
731
732 # We first validate the requirements are known.
732 # We first validate the requirements are known.
733 ensurerequirementsrecognized(requirements, supportedrequirements)
733 ensurerequirementsrecognized(requirements, supportedrequirements)
734
734
735 # Then we validate that the known set is reasonable to use together.
735 # Then we validate that the known set is reasonable to use together.
736 ensurerequirementscompatible(ui, requirements)
736 ensurerequirementscompatible(ui, requirements)
737
737
738 # TODO there are unhandled edge cases related to opening repositories with
738 # TODO there are unhandled edge cases related to opening repositories with
739 # shared storage. If storage is shared, we should also test for requirements
739 # shared storage. If storage is shared, we should also test for requirements
740 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
740 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
741 # that repo, as that repo may load extensions needed to open it. This is a
741 # that repo, as that repo may load extensions needed to open it. This is a
742 # bit complicated because we don't want the other hgrc to overwrite settings
742 # bit complicated because we don't want the other hgrc to overwrite settings
743 # in this hgrc.
743 # in this hgrc.
744 #
744 #
745 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
745 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
746 # file when sharing repos. But if a requirement is added after the share is
746 # file when sharing repos. But if a requirement is added after the share is
747 # performed, thereby introducing a new requirement for the opener, we may
747 # performed, thereby introducing a new requirement for the opener, we may
748 # will not see that and could encounter a run-time error interacting with
748 # will not see that and could encounter a run-time error interacting with
749 # that shared store since it has an unknown-to-us requirement.
749 # that shared store since it has an unknown-to-us requirement.
750
750
751 # At this point, we know we should be capable of opening the repository.
751 # At this point, we know we should be capable of opening the repository.
752 # Now get on with doing that.
752 # Now get on with doing that.
753
753
754 features = set()
754 features = set()
755
755
756 # The "store" part of the repository holds versioned data. How it is
756 # The "store" part of the repository holds versioned data. How it is
757 # accessed is determined by various requirements. If `shared` or
757 # accessed is determined by various requirements. If `shared` or
758 # `relshared` requirements are present, this indicates current repository
758 # `relshared` requirements are present, this indicates current repository
759 # is a share and store exists in path mentioned in `.hg/sharedpath`
759 # is a share and store exists in path mentioned in `.hg/sharedpath`
760 if shared:
760 if shared:
761 storebasepath = sharedvfs.base
761 storebasepath = sharedvfs.base
762 cachepath = sharedvfs.join(b'cache')
762 cachepath = sharedvfs.join(b'cache')
763 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
763 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
764 else:
764 else:
765 storebasepath = hgvfs.base
765 storebasepath = hgvfs.base
766 cachepath = hgvfs.join(b'cache')
766 cachepath = hgvfs.join(b'cache')
767 wcachepath = hgvfs.join(b'wcache')
767 wcachepath = hgvfs.join(b'wcache')
768
768
769 # The store has changed over time and the exact layout is dictated by
769 # The store has changed over time and the exact layout is dictated by
770 # requirements. The store interface abstracts differences across all
770 # requirements. The store interface abstracts differences across all
771 # of them.
771 # of them.
772 store = makestore(
772 store = makestore(
773 requirements,
773 requirements,
774 storebasepath,
774 storebasepath,
775 lambda base: vfsmod.vfs(base, cacheaudited=True),
775 lambda base: vfsmod.vfs(base, cacheaudited=True),
776 )
776 )
777 hgvfs.createmode = store.createmode
777 hgvfs.createmode = store.createmode
778
778
779 storevfs = store.vfs
779 storevfs = store.vfs
780 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
780 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
781
781
782 if (
782 if (
783 requirementsmod.REVLOGV2_REQUIREMENT in requirements
783 requirementsmod.REVLOGV2_REQUIREMENT in requirements
784 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
784 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
785 ):
785 ):
786 features.add(repository.REPO_FEATURE_SIDE_DATA)
786 features.add(repository.REPO_FEATURE_SIDE_DATA)
787 # the revlogv2 docket introduced race condition that we need to fix
787 # the revlogv2 docket introduced race condition that we need to fix
788 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
788 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
789
789
790 # The cache vfs is used to manage cache files.
790 # The cache vfs is used to manage cache files.
791 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
791 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
792 cachevfs.createmode = store.createmode
792 cachevfs.createmode = store.createmode
793 # The cache vfs is used to manage cache files related to the working copy
793 # The cache vfs is used to manage cache files related to the working copy
794 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
794 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
795 wcachevfs.createmode = store.createmode
795 wcachevfs.createmode = store.createmode
796
796
797 # Now resolve the type for the repository object. We do this by repeatedly
797 # Now resolve the type for the repository object. We do this by repeatedly
798 # calling a factory function to produces types for specific aspects of the
798 # calling a factory function to produces types for specific aspects of the
799 # repo's operation. The aggregate returned types are used as base classes
799 # repo's operation. The aggregate returned types are used as base classes
800 # for a dynamically-derived type, which will represent our new repository.
800 # for a dynamically-derived type, which will represent our new repository.
801
801
802 bases = []
802 bases = []
803 extrastate = {}
803 extrastate = {}
804
804
805 for iface, fn in REPO_INTERFACES:
805 for iface, fn in REPO_INTERFACES:
806 # We pass all potentially useful state to give extensions tons of
806 # We pass all potentially useful state to give extensions tons of
807 # flexibility.
807 # flexibility.
808 typ = fn()(
808 typ = fn()(
809 ui=ui,
809 ui=ui,
810 intents=intents,
810 intents=intents,
811 requirements=requirements,
811 requirements=requirements,
812 features=features,
812 features=features,
813 wdirvfs=wdirvfs,
813 wdirvfs=wdirvfs,
814 hgvfs=hgvfs,
814 hgvfs=hgvfs,
815 store=store,
815 store=store,
816 storevfs=storevfs,
816 storevfs=storevfs,
817 storeoptions=storevfs.options,
817 storeoptions=storevfs.options,
818 cachevfs=cachevfs,
818 cachevfs=cachevfs,
819 wcachevfs=wcachevfs,
819 wcachevfs=wcachevfs,
820 extensionmodulenames=extensionmodulenames,
820 extensionmodulenames=extensionmodulenames,
821 extrastate=extrastate,
821 extrastate=extrastate,
822 baseclasses=bases,
822 baseclasses=bases,
823 )
823 )
824
824
825 if not isinstance(typ, type):
825 if not isinstance(typ, type):
826 raise error.ProgrammingError(
826 raise error.ProgrammingError(
827 b'unable to construct type for %s' % iface
827 b'unable to construct type for %s' % iface
828 )
828 )
829
829
830 bases.append(typ)
830 bases.append(typ)
831
831
832 # type() allows you to use characters in type names that wouldn't be
832 # type() allows you to use characters in type names that wouldn't be
833 # recognized as Python symbols in source code. We abuse that to add
833 # recognized as Python symbols in source code. We abuse that to add
834 # rich information about our constructed repo.
834 # rich information about our constructed repo.
835 name = pycompat.sysstr(
835 name = pycompat.sysstr(
836 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
836 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
837 )
837 )
838
838
839 cls = type(name, tuple(bases), {})
839 cls = type(name, tuple(bases), {})
840
840
841 return cls(
841 return cls(
842 baseui=baseui,
842 baseui=baseui,
843 ui=ui,
843 ui=ui,
844 origroot=path,
844 origroot=path,
845 wdirvfs=wdirvfs,
845 wdirvfs=wdirvfs,
846 hgvfs=hgvfs,
846 hgvfs=hgvfs,
847 requirements=requirements,
847 requirements=requirements,
848 supportedrequirements=supportedrequirements,
848 supportedrequirements=supportedrequirements,
849 sharedpath=storebasepath,
849 sharedpath=storebasepath,
850 store=store,
850 store=store,
851 cachevfs=cachevfs,
851 cachevfs=cachevfs,
852 wcachevfs=wcachevfs,
852 wcachevfs=wcachevfs,
853 features=features,
853 features=features,
854 intents=intents,
854 intents=intents,
855 )
855 )
856
856
857
857
def loadhgrc(
    ui,
    wdirvfs: vfsmod.vfs,
    hgvfs: vfsmod.vfs,
    requirements,
    sharedvfs: Optional[vfsmod.vfs] = None,
):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.

    ``sharedvfs`` is a vfs object pointing at the share source when the
    repository being opened is a share.
    """
    # Per-repo configuration may be globally disabled (e.g. HGRCSKIPREPO).
    if not rcutil.use_repo_hgrc():
        return False

    loaded = False

    # For share-safe shares, the share source's hgrc is read first so the
    # local files below can override anything it sets.
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
        try:
            ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
            loaded = True
        except IOError:
            # Missing or unreadable hgrc files are not an error.
            pass

    # Local configuration files, read in order. ``hgrc-not-shared`` is
    # read after ``hgrc`` so it can override it; judging by its name it
    # holds settings not propagated through shares — confirm with callers.
    for filename in (b'hgrc', b'hgrc-not-shared'):
        try:
            ui.readconfig(hgvfs.join(filename), root=wdirvfs.base)
            loaded = True
        except IOError:
            pass

    return loaded
904
904
905
905
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Requirements that trigger automatic loading of one or more
    # extensions when present in the repository.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    # Iterate in sorted order so extension auto-loading is deterministic.
    for requirement, extnames in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for extname in extnames:
            # Respect any explicit user configuration for this extension;
            # only auto-enable when nothing is configured.
            if not ui.hasconfig(b'extensions', extname):
                ui.setconfig(b'extensions', extname, b'', source=b'autoload')
931
931
932
932
def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Everything this module supports out of the box.
    supported = set(localrepository._basesupported)

    # Run ``featuresetupfuncs`` entries registered by extensions, but only
    # those belonging to extensions enabled on this ui instance.
    enabledmodules = {mod.__name__ for name, mod in extensions.extensions(ui)}

    for setupfunc in featuresetupfuncs:
        if setupfunc.__module__ in enabledmodules:
            setupfunc(ui, supported)

    # Requirements derived from registered compression engines that are
    # usable for revlog compression.
    for enginename in util.compengines:
        engine = util.compengines[enginename]
        if not (engine.available() and engine.revlogheader()):
            continue
        supported.add(b'exp-compression-%s' % enginename)
        if engine.name() == b'zstd':
            supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)

    return supported
955
955
956
956
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RequirementError``
    if there exists any requirement in that set that currently loaded
    code doesn't recognize, or if the requirements data itself looks
    corrupt (an empty entry or one not starting with an alphanumeric
    character).

    Returns ``None`` on success; the function communicates solely through
    the exceptions it raises.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        # An empty entry, or one whose first byte is not alphanumeric,
        # cannot be a legitimate requirement name: treat the requires
        # file as corrupt rather than merely unsupported.
        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )
986
986
987
987
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    wants_sparse = requirementsmod.SPARSE_REQUIREMENT in requirements

    # A sparse repo cannot be opened unless the sparse extension is loaded.
    if wants_sparse and not sparse.enabled:
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extensions to access'
            )
        )
1012
1012
1013
1013
def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    # Without the 'store' requirement, fall back to the basic layout.
    if requirementsmod.STORE_REQUIREMENT not in requirements:
        return storemod.basicstore(path, vfstype)

    # With fncache, filename encoding is tracked via the fncache file;
    # dotencode further tweaks the path encoding when present.
    if requirementsmod.FNCACHE_REQUIREMENT in requirements:
        dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
        return storemod.fncachestore(path, vfstype, dotencode)

    return storemod.encodedstore(path, vfstype)
1024
1024
1025
1025
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    cachesize = ui.configint(b'format', b'manifestcachesize')
    if cachesize is not None:
        options[b'manifestcachesize'] = cachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # That format is so old that no opener options apply to it; just flag
    # it explicitly instead of parsing revlog options.
    has_modern_revlog = (
        requirementsmod.REVLOGV1_REQUIREMENT in requirements
        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
    )
    if has_modern_revlog:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:
        options[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        # experimental copy tracing stored in changeset extras.
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        if writecopiesto in (b'changeset-only', b'compatibility'):
            options[b'copies-storage'] = b'extra'

    return options
1063
1063
1064
1064
1065 def resolverevlogstorevfsoptions(ui, requirements, features):
1065 def resolverevlogstorevfsoptions(ui, requirements, features):
1066 """Resolve opener options specific to revlogs."""
1066 """Resolve opener options specific to revlogs."""
1067
1067
1068 options = {}
1068 options = {}
1069 options[b'flagprocessors'] = {}
1069 options[b'flagprocessors'] = {}
1070
1070
1071 feature_config = options[b'feature-config'] = revlog.FeatureConfig()
1071 feature_config = options[b'feature-config'] = revlog.FeatureConfig()
1072 data_config = options[b'data-config'] = revlog.DataConfig()
1072 data_config = options[b'data-config'] = revlog.DataConfig()
1073 delta_config = options[b'delta-config'] = revlog.DeltaConfig()
1073 delta_config = options[b'delta-config'] = revlog.DeltaConfig()
1074
1074
1075 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1075 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1076 options[b'revlogv1'] = True
1076 options[b'revlogv1'] = True
1077 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1077 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1078 options[b'revlogv2'] = True
1078 options[b'revlogv2'] = True
1079 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1079 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1080 options[b'changelogv2'] = True
1080 options[b'changelogv2'] = True
1081 cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
1081 cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
1082 options[b'changelogv2.compute-rank'] = cmp_rank
1082 options[b'changelogv2.compute-rank'] = cmp_rank
1083
1083
1084 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1084 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1085 options[b'generaldelta'] = True
1085 options[b'generaldelta'] = True
1086
1086
1087 # experimental config: format.chunkcachesize
1087 # experimental config: format.chunkcachesize
1088 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1088 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1089 if chunkcachesize is not None:
1089 if chunkcachesize is not None:
1090 data_config.chunk_cache_size = chunkcachesize
1090 data_config.chunk_cache_size = chunkcachesize
1091
1091
1092 memory_profile = scmutil.get_resource_profile(ui, b'memory')
1092 memory_profile = scmutil.get_resource_profile(ui, b'memory')
1093 if memory_profile >= scmutil.RESOURCE_MEDIUM:
1093 if memory_profile >= scmutil.RESOURCE_MEDIUM:
1094 data_config.uncompressed_cache_count = 10_000
1094 data_config.uncompressed_cache_count = 10_000
1095 data_config.uncompressed_cache_factor = 4
1095 data_config.uncompressed_cache_factor = 4
1096 if memory_profile >= scmutil.RESOURCE_HIGH:
1096 if memory_profile >= scmutil.RESOURCE_HIGH:
1097 data_config.uncompressed_cache_factor = 10
1097 data_config.uncompressed_cache_factor = 10
1098
1098
1099 delta_config.delta_both_parents = ui.configbool(
1099 delta_config.delta_both_parents = ui.configbool(
1100 b'storage', b'revlog.optimize-delta-parent-choice'
1100 b'storage', b'revlog.optimize-delta-parent-choice'
1101 )
1101 )
1102 delta_config.candidate_group_chunk_size = ui.configint(
1102 delta_config.candidate_group_chunk_size = ui.configint(
1103 b'storage',
1103 b'storage',
1104 b'revlog.delta-parent-search.candidate-group-chunk-size',
1104 b'revlog.delta-parent-search.candidate-group-chunk-size',
1105 )
1105 )
1106 delta_config.debug_delta = ui.configbool(b'debug', b'revlog.debug-delta')
1106 delta_config.debug_delta = ui.configbool(b'debug', b'revlog.debug-delta')
1107
1107
1108 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1108 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1109 options[b'issue6528.fix-incoming'] = issue6528
1109 options[b'issue6528.fix-incoming'] = issue6528
1110
1110
1111 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1111 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1112 lazydeltabase = False
1112 lazydeltabase = False
1113 if lazydelta:
1113 if lazydelta:
1114 lazydeltabase = ui.configbool(
1114 lazydeltabase = ui.configbool(
1115 b'storage', b'revlog.reuse-external-delta-parent'
1115 b'storage', b'revlog.reuse-external-delta-parent'
1116 )
1116 )
1117 if lazydeltabase is None:
1117 if lazydeltabase is None:
1118 lazydeltabase = not scmutil.gddeltaconfig(ui)
1118 lazydeltabase = not scmutil.gddeltaconfig(ui)
1119 delta_config.lazy_delta = lazydelta
1119 delta_config.lazy_delta = lazydelta
1120 delta_config.lazy_delta_base = lazydeltabase
1120 delta_config.lazy_delta_base = lazydeltabase
1121
1121
1122 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1122 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1123 if 0 <= chainspan:
1123 if 0 <= chainspan:
1124 delta_config.max_deltachain_span = chainspan
1124 delta_config.max_deltachain_span = chainspan
1125
1125
1126 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1126 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1127 if mmapindexthreshold is not None:
1127 if mmapindexthreshold is not None:
1128 data_config.mmap_index_threshold = mmapindexthreshold
1128 data_config.mmap_index_threshold = mmapindexthreshold
1129
1129
1130 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1130 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1131 srdensitythres = float(
1131 srdensitythres = float(
1132 ui.config(b'experimental', b'sparse-read.density-threshold')
1132 ui.config(b'experimental', b'sparse-read.density-threshold')
1133 )
1133 )
1134 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1134 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1135 data_config.with_sparse_read = withsparseread
1135 data_config.with_sparse_read = withsparseread
1136 data_config.sr_density_threshold = srdensitythres
1136 data_config.sr_density_threshold = srdensitythres
1137 data_config.sr_min_gap_size = srmingapsize
1137 data_config.sr_min_gap_size = srmingapsize
1138
1138
1139 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1139 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1140 delta_config.sparse_revlog = sparserevlog
1140 delta_config.sparse_revlog = sparserevlog
1141 if sparserevlog:
1141 if sparserevlog:
1142 options[b'generaldelta'] = True
1142 options[b'generaldelta'] = True
1143 data_config.with_sparse_read = True
1143 data_config.with_sparse_read = True
1144
1144
1145 maxchainlen = None
1145 maxchainlen = None
1146 if sparserevlog:
1146 if sparserevlog:
1147 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1147 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1148 # experimental config: format.maxchainlen
1148 # experimental config: format.maxchainlen
1149 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1149 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1150 if maxchainlen is not None:
1150 if maxchainlen is not None:
1151 delta_config.max_chain_len = maxchainlen
1151 delta_config.max_chain_len = maxchainlen
1152
1152
1153 for r in requirements:
1153 for r in requirements:
1154 # we allow multiple compression engine requirement to co-exist because
1154 # we allow multiple compression engine requirement to co-exist because
1155 # strickly speaking, revlog seems to support mixed compression style.
1155 # strickly speaking, revlog seems to support mixed compression style.
1156 #
1156 #
1157 # The compression used for new entries will be "the last one"
1157 # The compression used for new entries will be "the last one"
1158 prefix = r.startswith
1158 prefix = r.startswith
1159 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1159 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1160 feature_config.compression_engine = r.split(b'-', 2)[2]
1160 feature_config.compression_engine = r.split(b'-', 2)[2]
1161
1161
1162 zlib_level = ui.configint(b'storage', b'revlog.zlib.level')
1162 zlib_level = ui.configint(b'storage', b'revlog.zlib.level')
1163 if zlib_level is not None:
1163 if zlib_level is not None:
1164 if not (0 <= zlib_level <= 9):
1164 if not (0 <= zlib_level <= 9):
1165 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1165 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1166 raise error.Abort(msg % zlib_level)
1166 raise error.Abort(msg % zlib_level)
1167 feature_config.compression_engine_options[b'zlib.level'] = zlib_level
1167 feature_config.compression_engine_options[b'zlib.level'] = zlib_level
1168 zstd_level = ui.configint(b'storage', b'revlog.zstd.level')
1168 zstd_level = ui.configint(b'storage', b'revlog.zstd.level')
1169 if zstd_level is not None:
1169 if zstd_level is not None:
1170 if not (0 <= zstd_level <= 22):
1170 if not (0 <= zstd_level <= 22):
1171 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1171 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1172 raise error.Abort(msg % zstd_level)
1172 raise error.Abort(msg % zstd_level)
1173 feature_config.compression_engine_options[b'zstd.level'] = zstd_level
1173 feature_config.compression_engine_options[b'zstd.level'] = zstd_level
1174
1174
1175 if requirementsmod.NARROW_REQUIREMENT in requirements:
1175 if requirementsmod.NARROW_REQUIREMENT in requirements:
1176 feature_config.enable_ellipsis = True
1176 feature_config.enable_ellipsis = True
1177
1177
1178 if ui.configbool(b'experimental', b'rust.index'):
1178 if ui.configbool(b'experimental', b'rust.index'):
1179 options[b'rust.index'] = True
1179 options[b'rust.index'] = True
1180 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1180 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1181 slow_path = ui.config(
1181 slow_path = ui.config(
1182 b'storage', b'revlog.persistent-nodemap.slow-path'
1182 b'storage', b'revlog.persistent-nodemap.slow-path'
1183 )
1183 )
1184 if slow_path not in (b'allow', b'warn', b'abort'):
1184 if slow_path not in (b'allow', b'warn', b'abort'):
1185 default = ui.config_default(
1185 default = ui.config_default(
1186 b'storage', b'revlog.persistent-nodemap.slow-path'
1186 b'storage', b'revlog.persistent-nodemap.slow-path'
1187 )
1187 )
1188 msg = _(
1188 msg = _(
1189 b'unknown value for config '
1189 b'unknown value for config '
1190 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1190 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1191 )
1191 )
1192 ui.warn(msg % slow_path)
1192 ui.warn(msg % slow_path)
1193 if not ui.quiet:
1193 if not ui.quiet:
1194 ui.warn(_(b'falling back to default value: %s\n') % default)
1194 ui.warn(_(b'falling back to default value: %s\n') % default)
1195 slow_path = default
1195 slow_path = default
1196
1196
1197 msg = _(
1197 msg = _(
1198 b"accessing `persistent-nodemap` repository without associated "
1198 b"accessing `persistent-nodemap` repository without associated "
1199 b"fast implementation."
1199 b"fast implementation."
1200 )
1200 )
1201 hint = _(
1201 hint = _(
1202 b"check `hg help config.format.use-persistent-nodemap` "
1202 b"check `hg help config.format.use-persistent-nodemap` "
1203 b"for details"
1203 b"for details"
1204 )
1204 )
1205 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1205 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1206 if slow_path == b'warn':
1206 if slow_path == b'warn':
1207 msg = b"warning: " + msg + b'\n'
1207 msg = b"warning: " + msg + b'\n'
1208 ui.warn(msg)
1208 ui.warn(msg)
1209 if not ui.quiet:
1209 if not ui.quiet:
1210 hint = b'(' + hint + b')\n'
1210 hint = b'(' + hint + b')\n'
1211 ui.warn(hint)
1211 ui.warn(hint)
1212 if slow_path == b'abort':
1212 if slow_path == b'abort':
1213 raise error.Abort(msg, hint=hint)
1213 raise error.Abort(msg, hint=hint)
1214 options[b'persistent-nodemap'] = True
1214 options[b'persistent-nodemap'] = True
1215 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1215 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1216 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1216 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1217 if slow_path not in (b'allow', b'warn', b'abort'):
1217 if slow_path not in (b'allow', b'warn', b'abort'):
1218 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1218 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1219 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1219 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1220 ui.warn(msg % slow_path)
1220 ui.warn(msg % slow_path)
1221 if not ui.quiet:
1221 if not ui.quiet:
1222 ui.warn(_(b'falling back to default value: %s\n') % default)
1222 ui.warn(_(b'falling back to default value: %s\n') % default)
1223 slow_path = default
1223 slow_path = default
1224
1224
1225 msg = _(
1225 msg = _(
1226 b"accessing `dirstate-v2` repository without associated "
1226 b"accessing `dirstate-v2` repository without associated "
1227 b"fast implementation."
1227 b"fast implementation."
1228 )
1228 )
1229 hint = _(
1229 hint = _(
1230 b"check `hg help config.format.use-dirstate-v2` " b"for details"
1230 b"check `hg help config.format.use-dirstate-v2` " b"for details"
1231 )
1231 )
1232 if not dirstate.HAS_FAST_DIRSTATE_V2:
1232 if not dirstate.HAS_FAST_DIRSTATE_V2:
1233 if slow_path == b'warn':
1233 if slow_path == b'warn':
1234 msg = b"warning: " + msg + b'\n'
1234 msg = b"warning: " + msg + b'\n'
1235 ui.warn(msg)
1235 ui.warn(msg)
1236 if not ui.quiet:
1236 if not ui.quiet:
1237 hint = b'(' + hint + b')\n'
1237 hint = b'(' + hint + b')\n'
1238 ui.warn(hint)
1238 ui.warn(hint)
1239 if slow_path == b'abort':
1239 if slow_path == b'abort':
1240 raise error.Abort(msg, hint=hint)
1240 raise error.Abort(msg, hint=hint)
1241 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1241 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1242 options[b'persistent-nodemap.mmap'] = True
1242 options[b'persistent-nodemap.mmap'] = True
1243 if ui.configbool(b'devel', b'persistent-nodemap'):
1243 if ui.configbool(b'devel', b'persistent-nodemap'):
1244 options[b'devel-force-nodemap'] = True
1244 options[b'devel-force-nodemap'] = True
1245
1245
1246 return options
1246 return options
1247
1247
1248
1248
1249 def makemain(**kwargs):
1249 def makemain(**kwargs):
1250 """Produce a type conforming to ``ilocalrepositorymain``."""
1250 """Produce a type conforming to ``ilocalrepositorymain``."""
1251 return localrepository
1251 return localrepository
1252
1252
1253
1253
1254 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1254 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1255 class revlogfilestorage:
1255 class revlogfilestorage:
1256 """File storage when using revlogs."""
1256 """File storage when using revlogs."""
1257
1257
1258 def file(self, path):
1258 def file(self, path):
1259 if path.startswith(b'/'):
1259 if path.startswith(b'/'):
1260 path = path[1:]
1260 path = path[1:]
1261
1261
1262 try_split = (
1262 try_split = (
1263 self.currenttransaction() is not None
1263 self.currenttransaction() is not None
1264 or txnutil.mayhavepending(self.root)
1264 or txnutil.mayhavepending(self.root)
1265 )
1265 )
1266
1266
1267 return filelog.filelog(self.svfs, path, try_split=try_split)
1267 return filelog.filelog(self.svfs, path, try_split=try_split)
1268
1268
1269
1269
1270 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1270 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1271 class revlognarrowfilestorage:
1271 class revlognarrowfilestorage:
1272 """File storage when using revlogs and narrow files."""
1272 """File storage when using revlogs and narrow files."""
1273
1273
1274 def file(self, path):
1274 def file(self, path):
1275 if path.startswith(b'/'):
1275 if path.startswith(b'/'):
1276 path = path[1:]
1276 path = path[1:]
1277
1277
1278 try_split = (
1278 try_split = (
1279 self.currenttransaction() is not None
1279 self.currenttransaction() is not None
1280 or txnutil.mayhavepending(self.root)
1280 or txnutil.mayhavepending(self.root)
1281 )
1281 )
1282 return filelog.narrowfilelog(
1282 return filelog.narrowfilelog(
1283 self.svfs, path, self._storenarrowmatch, try_split=try_split
1283 self.svfs, path, self._storenarrowmatch, try_split=try_split
1284 )
1284 )
1285
1285
1286
1286
1287 def makefilestorage(requirements, features, **kwargs):
1287 def makefilestorage(requirements, features, **kwargs):
1288 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1288 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1289 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1289 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1290 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1290 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1291
1291
1292 if requirementsmod.NARROW_REQUIREMENT in requirements:
1292 if requirementsmod.NARROW_REQUIREMENT in requirements:
1293 return revlognarrowfilestorage
1293 return revlognarrowfilestorage
1294 else:
1294 else:
1295 return revlogfilestorage
1295 return revlogfilestorage
1296
1296
1297
1297
1298 # List of repository interfaces and factory functions for them. Each
1298 # List of repository interfaces and factory functions for them. Each
1299 # will be called in order during ``makelocalrepository()`` to iteratively
1299 # will be called in order during ``makelocalrepository()`` to iteratively
1300 # derive the final type for a local repository instance. We capture the
1300 # derive the final type for a local repository instance. We capture the
1301 # function as a lambda so we don't hold a reference and the module-level
1301 # function as a lambda so we don't hold a reference and the module-level
1302 # functions can be wrapped.
1302 # functions can be wrapped.
1303 REPO_INTERFACES = [
1303 REPO_INTERFACES = [
1304 (repository.ilocalrepositorymain, lambda: makemain),
1304 (repository.ilocalrepositorymain, lambda: makemain),
1305 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1305 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1306 ]
1306 ]
1307
1307
1308
1308
1309 @interfaceutil.implementer(repository.ilocalrepositorymain)
1309 @interfaceutil.implementer(repository.ilocalrepositorymain)
1310 class localrepository:
1310 class localrepository:
1311 """Main class for representing local repositories.
1311 """Main class for representing local repositories.
1312
1312
1313 All local repositories are instances of this class.
1313 All local repositories are instances of this class.
1314
1314
1315 Constructed on its own, instances of this class are not usable as
1315 Constructed on its own, instances of this class are not usable as
1316 repository objects. To obtain a usable repository object, call
1316 repository objects. To obtain a usable repository object, call
1317 ``hg.repository()``, ``localrepo.instance()``, or
1317 ``hg.repository()``, ``localrepo.instance()``, or
1318 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1318 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1319 ``instance()`` adds support for creating new repositories.
1319 ``instance()`` adds support for creating new repositories.
1320 ``hg.repository()`` adds more extension integration, including calling
1320 ``hg.repository()`` adds more extension integration, including calling
1321 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1321 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1322 used.
1322 used.
1323 """
1323 """
1324
1324
1325 _basesupported = {
1325 _basesupported = {
1326 requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
1326 requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
1327 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1327 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1328 requirementsmod.CHANGELOGV2_REQUIREMENT,
1328 requirementsmod.CHANGELOGV2_REQUIREMENT,
1329 requirementsmod.COPIESSDC_REQUIREMENT,
1329 requirementsmod.COPIESSDC_REQUIREMENT,
1330 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1330 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1331 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1331 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1332 requirementsmod.DOTENCODE_REQUIREMENT,
1332 requirementsmod.DOTENCODE_REQUIREMENT,
1333 requirementsmod.FNCACHE_REQUIREMENT,
1333 requirementsmod.FNCACHE_REQUIREMENT,
1334 requirementsmod.GENERALDELTA_REQUIREMENT,
1334 requirementsmod.GENERALDELTA_REQUIREMENT,
1335 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1335 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1336 requirementsmod.NODEMAP_REQUIREMENT,
1336 requirementsmod.NODEMAP_REQUIREMENT,
1337 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1337 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1338 requirementsmod.REVLOGV1_REQUIREMENT,
1338 requirementsmod.REVLOGV1_REQUIREMENT,
1339 requirementsmod.REVLOGV2_REQUIREMENT,
1339 requirementsmod.REVLOGV2_REQUIREMENT,
1340 requirementsmod.SHARED_REQUIREMENT,
1340 requirementsmod.SHARED_REQUIREMENT,
1341 requirementsmod.SHARESAFE_REQUIREMENT,
1341 requirementsmod.SHARESAFE_REQUIREMENT,
1342 requirementsmod.SPARSE_REQUIREMENT,
1342 requirementsmod.SPARSE_REQUIREMENT,
1343 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1343 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1344 requirementsmod.STORE_REQUIREMENT,
1344 requirementsmod.STORE_REQUIREMENT,
1345 requirementsmod.TREEMANIFEST_REQUIREMENT,
1345 requirementsmod.TREEMANIFEST_REQUIREMENT,
1346 }
1346 }
1347
1347
1348 # list of prefix for file which can be written without 'wlock'
1348 # list of prefix for file which can be written without 'wlock'
1349 # Extensions should extend this list when needed
1349 # Extensions should extend this list when needed
1350 _wlockfreeprefix = {
1350 _wlockfreeprefix = {
1351 # We migh consider requiring 'wlock' for the next
1351 # We migh consider requiring 'wlock' for the next
1352 # two, but pretty much all the existing code assume
1352 # two, but pretty much all the existing code assume
1353 # wlock is not needed so we keep them excluded for
1353 # wlock is not needed so we keep them excluded for
1354 # now.
1354 # now.
1355 b'hgrc',
1355 b'hgrc',
1356 b'requires',
1356 b'requires',
1357 # XXX cache is a complicatged business someone
1357 # XXX cache is a complicatged business someone
1358 # should investigate this in depth at some point
1358 # should investigate this in depth at some point
1359 b'cache/',
1359 b'cache/',
1360 # XXX bisect was still a bit too messy at the time
1360 # XXX bisect was still a bit too messy at the time
1361 # this changeset was introduced. Someone should fix
1361 # this changeset was introduced. Someone should fix
1362 # the remainig bit and drop this line
1362 # the remainig bit and drop this line
1363 b'bisect.state',
1363 b'bisect.state',
1364 }
1364 }
1365
1365
1366 def __init__(
1366 def __init__(
1367 self,
1367 self,
1368 baseui,
1368 baseui,
1369 ui,
1369 ui,
1370 origroot: bytes,
1370 origroot: bytes,
1371 wdirvfs: vfsmod.vfs,
1371 wdirvfs: vfsmod.vfs,
1372 hgvfs: vfsmod.vfs,
1372 hgvfs: vfsmod.vfs,
1373 requirements,
1373 requirements,
1374 supportedrequirements,
1374 supportedrequirements,
1375 sharedpath: bytes,
1375 sharedpath: bytes,
1376 store,
1376 store,
1377 cachevfs: vfsmod.vfs,
1377 cachevfs: vfsmod.vfs,
1378 wcachevfs: vfsmod.vfs,
1378 wcachevfs: vfsmod.vfs,
1379 features,
1379 features,
1380 intents=None,
1380 intents=None,
1381 ):
1381 ):
1382 """Create a new local repository instance.
1382 """Create a new local repository instance.
1383
1383
1384 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1384 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1385 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1385 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1386 object.
1386 object.
1387
1387
1388 Arguments:
1388 Arguments:
1389
1389
1390 baseui
1390 baseui
1391 ``ui.ui`` instance that ``ui`` argument was based off of.
1391 ``ui.ui`` instance that ``ui`` argument was based off of.
1392
1392
1393 ui
1393 ui
1394 ``ui.ui`` instance for use by the repository.
1394 ``ui.ui`` instance for use by the repository.
1395
1395
1396 origroot
1396 origroot
1397 ``bytes`` path to working directory root of this repository.
1397 ``bytes`` path to working directory root of this repository.
1398
1398
1399 wdirvfs
1399 wdirvfs
1400 ``vfs.vfs`` rooted at the working directory.
1400 ``vfs.vfs`` rooted at the working directory.
1401
1401
1402 hgvfs
1402 hgvfs
1403 ``vfs.vfs`` rooted at .hg/
1403 ``vfs.vfs`` rooted at .hg/
1404
1404
1405 requirements
1405 requirements
1406 ``set`` of bytestrings representing repository opening requirements.
1406 ``set`` of bytestrings representing repository opening requirements.
1407
1407
1408 supportedrequirements
1408 supportedrequirements
1409 ``set`` of bytestrings representing repository requirements that we
1409 ``set`` of bytestrings representing repository requirements that we
1410 know how to open. May be a supetset of ``requirements``.
1410 know how to open. May be a supetset of ``requirements``.
1411
1411
1412 sharedpath
1412 sharedpath
1413 ``bytes`` Defining path to storage base directory. Points to a
1413 ``bytes`` Defining path to storage base directory. Points to a
1414 ``.hg/`` directory somewhere.
1414 ``.hg/`` directory somewhere.
1415
1415
1416 store
1416 store
1417 ``store.basicstore`` (or derived) instance providing access to
1417 ``store.basicstore`` (or derived) instance providing access to
1418 versioned storage.
1418 versioned storage.
1419
1419
1420 cachevfs
1420 cachevfs
1421 ``vfs.vfs`` used for cache files.
1421 ``vfs.vfs`` used for cache files.
1422
1422
1423 wcachevfs
1423 wcachevfs
1424 ``vfs.vfs`` used for cache files related to the working copy.
1424 ``vfs.vfs`` used for cache files related to the working copy.
1425
1425
1426 features
1426 features
1427 ``set`` of bytestrings defining features/capabilities of this
1427 ``set`` of bytestrings defining features/capabilities of this
1428 instance.
1428 instance.
1429
1429
1430 intents
1430 intents
1431 ``set`` of system strings indicating what this repo will be used
1431 ``set`` of system strings indicating what this repo will be used
1432 for.
1432 for.
1433 """
1433 """
1434 self.baseui = baseui
1434 self.baseui = baseui
1435 self.ui = ui
1435 self.ui = ui
1436 self.origroot = origroot
1436 self.origroot = origroot
1437 # vfs rooted at working directory.
1437 # vfs rooted at working directory.
1438 self.wvfs = wdirvfs
1438 self.wvfs = wdirvfs
1439 self.root = wdirvfs.base
1439 self.root = wdirvfs.base
1440 # vfs rooted at .hg/. Used to access most non-store paths.
1440 # vfs rooted at .hg/. Used to access most non-store paths.
1441 self.vfs = hgvfs
1441 self.vfs = hgvfs
1442 self.path = hgvfs.base
1442 self.path = hgvfs.base
1443 self.requirements = requirements
1443 self.requirements = requirements
1444 self.nodeconstants = sha1nodeconstants
1444 self.nodeconstants = sha1nodeconstants
1445 self.nullid = self.nodeconstants.nullid
1445 self.nullid = self.nodeconstants.nullid
1446 self.supported = supportedrequirements
1446 self.supported = supportedrequirements
1447 self.sharedpath = sharedpath
1447 self.sharedpath = sharedpath
1448 self.store = store
1448 self.store = store
1449 self.cachevfs = cachevfs
1449 self.cachevfs = cachevfs
1450 self.wcachevfs = wcachevfs
1450 self.wcachevfs = wcachevfs
1451 self.features = features
1451 self.features = features
1452
1452
1453 self.filtername = None
1453 self.filtername = None
1454
1454
1455 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1455 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1456 b'devel', b'check-locks'
1456 b'devel', b'check-locks'
1457 ):
1457 ):
1458 self.vfs.audit = self._getvfsward(self.vfs.audit)
1458 self.vfs.audit = self._getvfsward(self.vfs.audit)
1459 # A list of callback to shape the phase if no data were found.
1459 # A list of callback to shape the phase if no data were found.
1460 # Callback are in the form: func(repo, roots) --> processed root.
1460 # Callback are in the form: func(repo, roots) --> processed root.
1461 # This list it to be filled by extension during repo setup
1461 # This list it to be filled by extension during repo setup
1462 self._phasedefaults = []
1462 self._phasedefaults = []
1463
1463
1464 color.setup(self.ui)
1464 color.setup(self.ui)
1465
1465
1466 self.spath = self.store.path
1466 self.spath = self.store.path
1467 self.svfs = self.store.vfs
1467 self.svfs = self.store.vfs
1468 self.sjoin = self.store.join
1468 self.sjoin = self.store.join
1469 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1469 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1470 b'devel', b'check-locks'
1470 b'devel', b'check-locks'
1471 ):
1471 ):
1472 if hasattr(self.svfs, 'vfs'): # this is filtervfs
1472 if hasattr(self.svfs, 'vfs'): # this is filtervfs
1473 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1473 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1474 else: # standard vfs
1474 else: # standard vfs
1475 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1475 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1476
1476
1477 self._dirstatevalidatewarned = False
1477 self._dirstatevalidatewarned = False
1478
1478
1479 self._branchcaches = branchmap.BranchMapCache()
1479 self._branchcaches = branchmap.BranchMapCache()
1480 self._revbranchcache = None
1480 self._revbranchcache = None
1481 self._filterpats = {}
1481 self._filterpats = {}
1482 self._datafilters = {}
1482 self._datafilters = {}
1483 self._transref = self._lockref = self._wlockref = None
1483 self._transref = self._lockref = self._wlockref = None
1484
1484
1485 # A cache for various files under .hg/ that tracks file changes,
1485 # A cache for various files under .hg/ that tracks file changes,
1486 # (used by the filecache decorator)
1486 # (used by the filecache decorator)
1487 #
1487 #
1488 # Maps a property name to its util.filecacheentry
1488 # Maps a property name to its util.filecacheentry
1489 self._filecache = {}
1489 self._filecache = {}
1490
1490
1491 # hold sets of revision to be filtered
1491 # hold sets of revision to be filtered
1492 # should be cleared when something might have changed the filter value:
1492 # should be cleared when something might have changed the filter value:
1493 # - new changesets,
1493 # - new changesets,
1494 # - phase change,
1494 # - phase change,
1495 # - new obsolescence marker,
1495 # - new obsolescence marker,
1496 # - working directory parent change,
1496 # - working directory parent change,
1497 # - bookmark changes
1497 # - bookmark changes
1498 self.filteredrevcache = {}
1498 self.filteredrevcache = {}
1499
1499
1500 self._dirstate = None
1500 self._dirstate = None
1501 # post-dirstate-status hooks
1501 # post-dirstate-status hooks
1502 self._postdsstatus = []
1502 self._postdsstatus = []
1503
1503
1504 self._pending_narrow_pats = None
1504 self._pending_narrow_pats = None
1505 self._pending_narrow_pats_dirstate = None
1505 self._pending_narrow_pats_dirstate = None
1506
1506
1507 # generic mapping between names and nodes
1507 # generic mapping between names and nodes
1508 self.names = namespaces.namespaces()
1508 self.names = namespaces.namespaces()
1509
1509
1510 # Key to signature value.
1510 # Key to signature value.
1511 self._sparsesignaturecache = {}
1511 self._sparsesignaturecache = {}
1512 # Signature to cached matcher instance.
1512 # Signature to cached matcher instance.
1513 self._sparsematchercache = {}
1513 self._sparsematchercache = {}
1514
1514
1515 self._extrafilterid = repoview.extrafilter(ui)
1515 self._extrafilterid = repoview.extrafilter(ui)
1516
1516
1517 self.filecopiesmode = None
1517 self.filecopiesmode = None
1518 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1518 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1519 self.filecopiesmode = b'changeset-sidedata'
1519 self.filecopiesmode = b'changeset-sidedata'
1520
1520
1521 self._wanted_sidedata = set()
1521 self._wanted_sidedata = set()
1522 self._sidedata_computers = {}
1522 self._sidedata_computers = {}
1523 sidedatamod.set_sidedata_spec_for_repo(self)
1523 sidedatamod.set_sidedata_spec_for_repo(self)
1524
1524
1525 def _getvfsward(self, origfunc):
1525 def _getvfsward(self, origfunc):
1526 """build a ward for self.vfs"""
1526 """build a ward for self.vfs"""
1527 rref = weakref.ref(self)
1527 rref = weakref.ref(self)
1528
1528
1529 def checkvfs(path, mode=None):
1529 def checkvfs(path, mode=None):
1530 ret = origfunc(path, mode=mode)
1530 ret = origfunc(path, mode=mode)
1531 repo = rref()
1531 repo = rref()
1532 if (
1532 if (
1533 repo is None
1533 repo is None
1534 or not hasattr(repo, '_wlockref')
1534 or not hasattr(repo, '_wlockref')
1535 or not hasattr(repo, '_lockref')
1535 or not hasattr(repo, '_lockref')
1536 ):
1536 ):
1537 return
1537 return
1538 if mode in (None, b'r', b'rb'):
1538 if mode in (None, b'r', b'rb'):
1539 return
1539 return
1540 if path.startswith(repo.path):
1540 if path.startswith(repo.path):
1541 # truncate name relative to the repository (.hg)
1541 # truncate name relative to the repository (.hg)
1542 path = path[len(repo.path) + 1 :]
1542 path = path[len(repo.path) + 1 :]
1543 if path.startswith(b'cache/'):
1543 if path.startswith(b'cache/'):
1544 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1544 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1545 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1545 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1546 # path prefixes covered by 'lock'
1546 # path prefixes covered by 'lock'
1547 vfs_path_prefixes = (
1547 vfs_path_prefixes = (
1548 b'journal.',
1548 b'journal.',
1549 b'undo.',
1549 b'undo.',
1550 b'strip-backup/',
1550 b'strip-backup/',
1551 b'cache/',
1551 b'cache/',
1552 )
1552 )
1553 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1553 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1554 if repo._currentlock(repo._lockref) is None:
1554 if repo._currentlock(repo._lockref) is None:
1555 repo.ui.develwarn(
1555 repo.ui.develwarn(
1556 b'write with no lock: "%s"' % path,
1556 b'write with no lock: "%s"' % path,
1557 stacklevel=3,
1557 stacklevel=3,
1558 config=b'check-locks',
1558 config=b'check-locks',
1559 )
1559 )
1560 elif repo._currentlock(repo._wlockref) is None:
1560 elif repo._currentlock(repo._wlockref) is None:
1561 # rest of vfs files are covered by 'wlock'
1561 # rest of vfs files are covered by 'wlock'
1562 #
1562 #
1563 # exclude special files
1563 # exclude special files
1564 for prefix in self._wlockfreeprefix:
1564 for prefix in self._wlockfreeprefix:
1565 if path.startswith(prefix):
1565 if path.startswith(prefix):
1566 return
1566 return
1567 repo.ui.develwarn(
1567 repo.ui.develwarn(
1568 b'write with no wlock: "%s"' % path,
1568 b'write with no wlock: "%s"' % path,
1569 stacklevel=3,
1569 stacklevel=3,
1570 config=b'check-locks',
1570 config=b'check-locks',
1571 )
1571 )
1572 return ret
1572 return ret
1573
1573
1574 return checkvfs
1574 return checkvfs
1575
1575
1576 def _getsvfsward(self, origfunc):
1576 def _getsvfsward(self, origfunc):
1577 """build a ward for self.svfs"""
1577 """build a ward for self.svfs"""
1578 rref = weakref.ref(self)
1578 rref = weakref.ref(self)
1579
1579
1580 def checksvfs(path, mode=None):
1580 def checksvfs(path, mode=None):
1581 ret = origfunc(path, mode=mode)
1581 ret = origfunc(path, mode=mode)
1582 repo = rref()
1582 repo = rref()
1583 if repo is None or not hasattr(repo, '_lockref'):
1583 if repo is None or not hasattr(repo, '_lockref'):
1584 return
1584 return
1585 if mode in (None, b'r', b'rb'):
1585 if mode in (None, b'r', b'rb'):
1586 return
1586 return
1587 if path.startswith(repo.sharedpath):
1587 if path.startswith(repo.sharedpath):
1588 # truncate name relative to the repository (.hg)
1588 # truncate name relative to the repository (.hg)
1589 path = path[len(repo.sharedpath) + 1 :]
1589 path = path[len(repo.sharedpath) + 1 :]
1590 if repo._currentlock(repo._lockref) is None:
1590 if repo._currentlock(repo._lockref) is None:
1591 repo.ui.develwarn(
1591 repo.ui.develwarn(
1592 b'write with no lock: "%s"' % path, stacklevel=4
1592 b'write with no lock: "%s"' % path, stacklevel=4
1593 )
1593 )
1594 return ret
1594 return ret
1595
1595
1596 return checksvfs
1596 return checksvfs
1597
1597
1598 @property
1598 @property
1599 def vfs_map(self):
1599 def vfs_map(self):
1600 return {
1600 return {
1601 b'': self.svfs,
1601 b'': self.svfs,
1602 b'plain': self.vfs,
1602 b'plain': self.vfs,
1603 b'store': self.svfs,
1603 b'store': self.svfs,
1604 }
1604 }
1605
1605
1606 def close(self):
1606 def close(self):
1607 self._writecaches()
1607 self._writecaches()
1608
1608
1609 def _writecaches(self):
1609 def _writecaches(self):
1610 if self._revbranchcache:
1610 if self._revbranchcache:
1611 self._revbranchcache.write()
1611 self._revbranchcache.write()
1612
1612
1613 def _restrictcapabilities(self, caps):
1613 def _restrictcapabilities(self, caps):
1614 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1614 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1615 caps = set(caps)
1615 caps = set(caps)
1616 capsblob = bundle2.encodecaps(
1616 capsblob = bundle2.encodecaps(
1617 bundle2.getrepocaps(self, role=b'client')
1617 bundle2.getrepocaps(self, role=b'client')
1618 )
1618 )
1619 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1619 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1620 if self.ui.configbool(b'experimental', b'narrow'):
1620 if self.ui.configbool(b'experimental', b'narrow'):
1621 caps.add(wireprototypes.NARROWCAP)
1621 caps.add(wireprototypes.NARROWCAP)
1622 return caps
1622 return caps
1623
1623
1624 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1624 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1625 # self -> auditor -> self._checknested -> self
1625 # self -> auditor -> self._checknested -> self
1626
1626
1627 @property
1627 @property
1628 def auditor(self):
1628 def auditor(self):
1629 # This is only used by context.workingctx.match in order to
1629 # This is only used by context.workingctx.match in order to
1630 # detect files in subrepos.
1630 # detect files in subrepos.
1631 return pathutil.pathauditor(self.root, callback=self._checknested)
1631 return pathutil.pathauditor(self.root, callback=self._checknested)
1632
1632
1633 @property
1633 @property
1634 def nofsauditor(self):
1634 def nofsauditor(self):
1635 # This is only used by context.basectx.match in order to detect
1635 # This is only used by context.basectx.match in order to detect
1636 # files in subrepos.
1636 # files in subrepos.
1637 return pathutil.pathauditor(
1637 return pathutil.pathauditor(
1638 self.root, callback=self._checknested, realfs=False, cached=True
1638 self.root, callback=self._checknested, realfs=False, cached=True
1639 )
1639 )
1640
1640
1641 def _checknested(self, path):
1641 def _checknested(self, path):
1642 """Determine if path is a legal nested repository."""
1642 """Determine if path is a legal nested repository."""
1643 if not path.startswith(self.root):
1643 if not path.startswith(self.root):
1644 return False
1644 return False
1645 subpath = path[len(self.root) + 1 :]
1645 subpath = path[len(self.root) + 1 :]
1646 normsubpath = util.pconvert(subpath)
1646 normsubpath = util.pconvert(subpath)
1647
1647
1648 # XXX: Checking against the current working copy is wrong in
1648 # XXX: Checking against the current working copy is wrong in
1649 # the sense that it can reject things like
1649 # the sense that it can reject things like
1650 #
1650 #
1651 # $ hg cat -r 10 sub/x.txt
1651 # $ hg cat -r 10 sub/x.txt
1652 #
1652 #
1653 # if sub/ is no longer a subrepository in the working copy
1653 # if sub/ is no longer a subrepository in the working copy
1654 # parent revision.
1654 # parent revision.
1655 #
1655 #
1656 # However, it can of course also allow things that would have
1656 # However, it can of course also allow things that would have
1657 # been rejected before, such as the above cat command if sub/
1657 # been rejected before, such as the above cat command if sub/
1658 # is a subrepository now, but was a normal directory before.
1658 # is a subrepository now, but was a normal directory before.
1659 # The old path auditor would have rejected by mistake since it
1659 # The old path auditor would have rejected by mistake since it
1660 # panics when it sees sub/.hg/.
1660 # panics when it sees sub/.hg/.
1661 #
1661 #
1662 # All in all, checking against the working copy seems sensible
1662 # All in all, checking against the working copy seems sensible
1663 # since we want to prevent access to nested repositories on
1663 # since we want to prevent access to nested repositories on
1664 # the filesystem *now*.
1664 # the filesystem *now*.
1665 ctx = self[None]
1665 ctx = self[None]
1666 parts = util.splitpath(subpath)
1666 parts = util.splitpath(subpath)
1667 while parts:
1667 while parts:
1668 prefix = b'/'.join(parts)
1668 prefix = b'/'.join(parts)
1669 if prefix in ctx.substate:
1669 if prefix in ctx.substate:
1670 if prefix == normsubpath:
1670 if prefix == normsubpath:
1671 return True
1671 return True
1672 else:
1672 else:
1673 sub = ctx.sub(prefix)
1673 sub = ctx.sub(prefix)
1674 return sub.checknested(subpath[len(prefix) + 1 :])
1674 return sub.checknested(subpath[len(prefix) + 1 :])
1675 else:
1675 else:
1676 parts.pop()
1676 parts.pop()
1677 return False
1677 return False
1678
1678
1679 def peer(self, path=None, remotehidden=False):
1679 def peer(self, path=None, remotehidden=False):
1680 return localpeer(
1680 return localpeer(
1681 self, path=path, remotehidden=remotehidden
1681 self, path=path, remotehidden=remotehidden
1682 ) # not cached to avoid reference cycle
1682 ) # not cached to avoid reference cycle
1683
1683
1684 def unfiltered(self):
1684 def unfiltered(self):
1685 """Return unfiltered version of the repository
1685 """Return unfiltered version of the repository
1686
1686
1687 Intended to be overwritten by filtered repo."""
1687 Intended to be overwritten by filtered repo."""
1688 return self
1688 return self
1689
1689
1690 def filtered(self, name, visibilityexceptions=None):
1690 def filtered(self, name, visibilityexceptions=None):
1691 """Return a filtered version of a repository
1691 """Return a filtered version of a repository
1692
1692
1693 The `name` parameter is the identifier of the requested view. This
1693 The `name` parameter is the identifier of the requested view. This
1694 will return a repoview object set "exactly" to the specified view.
1694 will return a repoview object set "exactly" to the specified view.
1695
1695
1696 This function does not apply recursive filtering to a repository. For
1696 This function does not apply recursive filtering to a repository. For
1697 example calling `repo.filtered("served")` will return a repoview using
1697 example calling `repo.filtered("served")` will return a repoview using
1698 the "served" view, regardless of the initial view used by `repo`.
1698 the "served" view, regardless of the initial view used by `repo`.
1699
1699
1700 In other word, there is always only one level of `repoview` "filtering".
1700 In other word, there is always only one level of `repoview` "filtering".
1701 """
1701 """
1702 if self._extrafilterid is not None and b'%' not in name:
1702 if self._extrafilterid is not None and b'%' not in name:
1703 name = name + b'%' + self._extrafilterid
1703 name = name + b'%' + self._extrafilterid
1704
1704
1705 cls = repoview.newtype(self.unfiltered().__class__)
1705 cls = repoview.newtype(self.unfiltered().__class__)
1706 return cls(self, name, visibilityexceptions)
1706 return cls(self, name, visibilityexceptions)
1707
1707
1708 @mixedrepostorecache(
1708 @mixedrepostorecache(
1709 (b'bookmarks', b'plain'),
1709 (b'bookmarks', b'plain'),
1710 (b'bookmarks.current', b'plain'),
1710 (b'bookmarks.current', b'plain'),
1711 (b'bookmarks', b''),
1711 (b'bookmarks', b''),
1712 (b'00changelog.i', b''),
1712 (b'00changelog.i', b''),
1713 )
1713 )
1714 def _bookmarks(self):
1714 def _bookmarks(self):
1715 # Since the multiple files involved in the transaction cannot be
1715 # Since the multiple files involved in the transaction cannot be
1716 # written atomically (with current repository format), there is a race
1716 # written atomically (with current repository format), there is a race
1717 # condition here.
1717 # condition here.
1718 #
1718 #
1719 # 1) changelog content A is read
1719 # 1) changelog content A is read
1720 # 2) outside transaction update changelog to content B
1720 # 2) outside transaction update changelog to content B
1721 # 3) outside transaction update bookmark file referring to content B
1721 # 3) outside transaction update bookmark file referring to content B
1722 # 4) bookmarks file content is read and filtered against changelog-A
1722 # 4) bookmarks file content is read and filtered against changelog-A
1723 #
1723 #
1724 # When this happens, bookmarks against nodes missing from A are dropped.
1724 # When this happens, bookmarks against nodes missing from A are dropped.
1725 #
1725 #
1726 # Having this happening during read is not great, but it become worse
1726 # Having this happening during read is not great, but it become worse
1727 # when this happen during write because the bookmarks to the "unknown"
1727 # when this happen during write because the bookmarks to the "unknown"
1728 # nodes will be dropped for good. However, writes happen within locks.
1728 # nodes will be dropped for good. However, writes happen within locks.
1729 # This locking makes it possible to have a race free consistent read.
1729 # This locking makes it possible to have a race free consistent read.
1730 # For this purpose data read from disc before locking are
1730 # For this purpose data read from disc before locking are
1731 # "invalidated" right after the locks are taken. This invalidations are
1731 # "invalidated" right after the locks are taken. This invalidations are
1732 # "light", the `filecache` mechanism keep the data in memory and will
1732 # "light", the `filecache` mechanism keep the data in memory and will
1733 # reuse them if the underlying files did not changed. Not parsing the
1733 # reuse them if the underlying files did not changed. Not parsing the
1734 # same data multiple times helps performances.
1734 # same data multiple times helps performances.
1735 #
1735 #
1736 # Unfortunately in the case describe above, the files tracked by the
1736 # Unfortunately in the case describe above, the files tracked by the
1737 # bookmarks file cache might not have changed, but the in-memory
1737 # bookmarks file cache might not have changed, but the in-memory
1738 # content is still "wrong" because we used an older changelog content
1738 # content is still "wrong" because we used an older changelog content
1739 # to process the on-disk data. So after locking, the changelog would be
1739 # to process the on-disk data. So after locking, the changelog would be
1740 # refreshed but `_bookmarks` would be preserved.
1740 # refreshed but `_bookmarks` would be preserved.
1741 # Adding `00changelog.i` to the list of tracked file is not
1741 # Adding `00changelog.i` to the list of tracked file is not
1742 # enough, because at the time we build the content for `_bookmarks` in
1742 # enough, because at the time we build the content for `_bookmarks` in
1743 # (4), the changelog file has already diverged from the content used
1743 # (4), the changelog file has already diverged from the content used
1744 # for loading `changelog` in (1)
1744 # for loading `changelog` in (1)
1745 #
1745 #
1746 # To prevent the issue, we force the changelog to be explicitly
1746 # To prevent the issue, we force the changelog to be explicitly
1747 # reloaded while computing `_bookmarks`. The data race can still happen
1747 # reloaded while computing `_bookmarks`. The data race can still happen
1748 # without the lock (with a narrower window), but it would no longer go
1748 # without the lock (with a narrower window), but it would no longer go
1749 # undetected during the lock time refresh.
1749 # undetected during the lock time refresh.
1750 #
1750 #
1751 # The new schedule is as follow
1751 # The new schedule is as follow
1752 #
1752 #
1753 # 1) filecache logic detect that `_bookmarks` needs to be computed
1753 # 1) filecache logic detect that `_bookmarks` needs to be computed
1754 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1754 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1755 # 3) We force `changelog` filecache to be tested
1755 # 3) We force `changelog` filecache to be tested
1756 # 4) cachestat for `changelog` are captured (for changelog)
1756 # 4) cachestat for `changelog` are captured (for changelog)
1757 # 5) `_bookmarks` is computed and cached
1757 # 5) `_bookmarks` is computed and cached
1758 #
1758 #
1759 # The step in (3) ensure we have a changelog at least as recent as the
1759 # The step in (3) ensure we have a changelog at least as recent as the
1760 # cache stat computed in (1). As a result at locking time:
1760 # cache stat computed in (1). As a result at locking time:
1761 # * if the changelog did not changed since (1) -> we can reuse the data
1761 # * if the changelog did not changed since (1) -> we can reuse the data
1762 # * otherwise -> the bookmarks get refreshed.
1762 # * otherwise -> the bookmarks get refreshed.
1763 self._refreshchangelog()
1763 self._refreshchangelog()
1764 return bookmarks.bmstore(self)
1764 return bookmarks.bmstore(self)
1765
1765
1766 def _refreshchangelog(self):
1766 def _refreshchangelog(self):
1767 """make sure the in memory changelog match the on-disk one"""
1767 """make sure the in memory changelog match the on-disk one"""
1768 if 'changelog' in vars(self) and self.currenttransaction() is None:
1768 if 'changelog' in vars(self) and self.currenttransaction() is None:
1769 del self.changelog
1769 del self.changelog
1770
1770
1771 @property
1771 @property
1772 def _activebookmark(self):
1772 def _activebookmark(self):
1773 return self._bookmarks.active
1773 return self._bookmarks.active
1774
1774
1775 # _phasesets depend on changelog. what we need is to call
1775 # _phasesets depend on changelog. what we need is to call
1776 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1776 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1777 # can't be easily expressed in filecache mechanism.
1777 # can't be easily expressed in filecache mechanism.
1778 @storecache(b'phaseroots', b'00changelog.i')
1778 @storecache(b'phaseroots', b'00changelog.i')
1779 def _phasecache(self):
1779 def _phasecache(self):
1780 return phases.phasecache(self, self._phasedefaults)
1780 return phases.phasecache(self, self._phasedefaults)
1781
1781
1782 @storecache(b'obsstore')
1782 @storecache(b'obsstore')
1783 def obsstore(self):
1783 def obsstore(self):
1784 return obsolete.makestore(self.ui, self)
1784 return obsolete.makestore(self.ui, self)
1785
1785
1786 @changelogcache()
1786 @changelogcache()
1787 def changelog(repo):
1787 def changelog(repo):
1788 # load dirstate before changelog to avoid race see issue6303
1788 # load dirstate before changelog to avoid race see issue6303
1789 repo.dirstate.prefetch_parents()
1789 repo.dirstate.prefetch_parents()
1790 return repo.store.changelog(
1790 return repo.store.changelog(
1791 txnutil.mayhavepending(repo.root),
1791 txnutil.mayhavepending(repo.root),
1792 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1792 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1793 )
1793 )
1794
1794
1795 @manifestlogcache()
1795 @manifestlogcache()
1796 def manifestlog(self):
1796 def manifestlog(self):
1797 return self.store.manifestlog(self, self._storenarrowmatch)
1797 return self.store.manifestlog(self, self._storenarrowmatch)
1798
1798
1799 @unfilteredpropertycache
1799 @unfilteredpropertycache
1800 def dirstate(self):
1800 def dirstate(self):
1801 if self._dirstate is None:
1801 if self._dirstate is None:
1802 self._dirstate = self._makedirstate()
1802 self._dirstate = self._makedirstate()
1803 else:
1803 else:
1804 self._dirstate.refresh()
1804 self._dirstate.refresh()
1805 return self._dirstate
1805 return self._dirstate
1806
1806
1807 def _makedirstate(self):
1807 def _makedirstate(self):
1808 """Extension point for wrapping the dirstate per-repo."""
1808 """Extension point for wrapping the dirstate per-repo."""
1809 sparsematchfn = None
1809 sparsematchfn = None
1810 if sparse.use_sparse(self):
1810 if sparse.use_sparse(self):
1811 sparsematchfn = lambda: sparse.matcher(self)
1811 sparsematchfn = lambda: sparse.matcher(self)
1812 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1812 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1813 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1813 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1814 use_dirstate_v2 = v2_req in self.requirements
1814 use_dirstate_v2 = v2_req in self.requirements
1815 use_tracked_hint = th in self.requirements
1815 use_tracked_hint = th in self.requirements
1816
1816
1817 return dirstate.dirstate(
1817 return dirstate.dirstate(
1818 self.vfs,
1818 self.vfs,
1819 self.ui,
1819 self.ui,
1820 self.root,
1820 self.root,
1821 self._dirstatevalidate,
1821 self._dirstatevalidate,
1822 sparsematchfn,
1822 sparsematchfn,
1823 self.nodeconstants,
1823 self.nodeconstants,
1824 use_dirstate_v2,
1824 use_dirstate_v2,
1825 use_tracked_hint=use_tracked_hint,
1825 use_tracked_hint=use_tracked_hint,
1826 )
1826 )
1827
1827
1828 def _dirstatevalidate(self, node):
1828 def _dirstatevalidate(self, node):
1829 okay = True
1829 okay = True
1830 try:
1830 try:
1831 self.changelog.rev(node)
1831 self.changelog.rev(node)
1832 except error.LookupError:
1832 except error.LookupError:
1833 # If the parent are unknown it might just be because the changelog
1833 # If the parent are unknown it might just be because the changelog
1834 # in memory is lagging behind the dirstate in memory. So try to
1834 # in memory is lagging behind the dirstate in memory. So try to
1835 # refresh the changelog first.
1835 # refresh the changelog first.
1836 #
1836 #
1837 # We only do so if we don't hold the lock, if we do hold the lock
1837 # We only do so if we don't hold the lock, if we do hold the lock
1838 # the invalidation at that time should have taken care of this and
1838 # the invalidation at that time should have taken care of this and
1839 # something is very fishy.
1839 # something is very fishy.
1840 if self.currentlock() is None:
1840 if self.currentlock() is None:
1841 self.invalidate()
1841 self.invalidate()
1842 try:
1842 try:
1843 self.changelog.rev(node)
1843 self.changelog.rev(node)
1844 except error.LookupError:
1844 except error.LookupError:
1845 okay = False
1845 okay = False
1846 else:
1846 else:
1847 # XXX we should consider raising an error here.
1847 # XXX we should consider raising an error here.
1848 okay = False
1848 okay = False
1849 if okay:
1849 if okay:
1850 return node
1850 return node
1851 else:
1851 else:
1852 if not self._dirstatevalidatewarned:
1852 if not self._dirstatevalidatewarned:
1853 self._dirstatevalidatewarned = True
1853 self._dirstatevalidatewarned = True
1854 self.ui.warn(
1854 self.ui.warn(
1855 _(b"warning: ignoring unknown working parent %s!\n")
1855 _(b"warning: ignoring unknown working parent %s!\n")
1856 % short(node)
1856 % short(node)
1857 )
1857 )
1858 return self.nullid
1858 return self.nullid
1859
1859
1860 @storecache(narrowspec.FILENAME)
1860 @storecache(narrowspec.FILENAME)
1861 def narrowpats(self):
1861 def narrowpats(self):
1862 """matcher patterns for this repository's narrowspec
1862 """matcher patterns for this repository's narrowspec
1863
1863
1864 A tuple of (includes, excludes).
1864 A tuple of (includes, excludes).
1865 """
1865 """
1866 # the narrow management should probably move into its own object
1866 # the narrow management should probably move into its own object
1867 val = self._pending_narrow_pats
1867 val = self._pending_narrow_pats
1868 if val is None:
1868 if val is None:
1869 val = narrowspec.load(self)
1869 val = narrowspec.load(self)
1870 return val
1870 return val
1871
1871
1872 @storecache(narrowspec.FILENAME)
1872 @storecache(narrowspec.FILENAME)
1873 def _storenarrowmatch(self):
1873 def _storenarrowmatch(self):
1874 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1874 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1875 return matchmod.always()
1875 return matchmod.always()
1876 include, exclude = self.narrowpats
1876 include, exclude = self.narrowpats
1877 return narrowspec.match(self.root, include=include, exclude=exclude)
1877 return narrowspec.match(self.root, include=include, exclude=exclude)
1878
1878
1879 @storecache(narrowspec.FILENAME)
1879 @storecache(narrowspec.FILENAME)
1880 def _narrowmatch(self):
1880 def _narrowmatch(self):
1881 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1881 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1882 return matchmod.always()
1882 return matchmod.always()
1883 narrowspec.checkworkingcopynarrowspec(self)
1883 narrowspec.checkworkingcopynarrowspec(self)
1884 include, exclude = self.narrowpats
1884 include, exclude = self.narrowpats
1885 return narrowspec.match(self.root, include=include, exclude=exclude)
1885 return narrowspec.match(self.root, include=include, exclude=exclude)
1886
1886
1887 def narrowmatch(self, match=None, includeexact=False):
1887 def narrowmatch(self, match=None, includeexact=False):
1888 """matcher corresponding the the repo's narrowspec
1888 """matcher corresponding the the repo's narrowspec
1889
1889
1890 If `match` is given, then that will be intersected with the narrow
1890 If `match` is given, then that will be intersected with the narrow
1891 matcher.
1891 matcher.
1892
1892
1893 If `includeexact` is True, then any exact matches from `match` will
1893 If `includeexact` is True, then any exact matches from `match` will
1894 be included even if they're outside the narrowspec.
1894 be included even if they're outside the narrowspec.
1895 """
1895 """
1896 if match:
1896 if match:
1897 if includeexact and not self._narrowmatch.always():
1897 if includeexact and not self._narrowmatch.always():
1898 # do not exclude explicitly-specified paths so that they can
1898 # do not exclude explicitly-specified paths so that they can
1899 # be warned later on
1899 # be warned later on
1900 em = matchmod.exact(match.files())
1900 em = matchmod.exact(match.files())
1901 nm = matchmod.unionmatcher([self._narrowmatch, em])
1901 nm = matchmod.unionmatcher([self._narrowmatch, em])
1902 return matchmod.intersectmatchers(match, nm)
1902 return matchmod.intersectmatchers(match, nm)
1903 return matchmod.intersectmatchers(match, self._narrowmatch)
1903 return matchmod.intersectmatchers(match, self._narrowmatch)
1904 return self._narrowmatch
1904 return self._narrowmatch
1905
1905
1906 def setnarrowpats(self, newincludes, newexcludes):
1906 def setnarrowpats(self, newincludes, newexcludes):
1907 narrowspec.save(self, newincludes, newexcludes)
1907 narrowspec.save(self, newincludes, newexcludes)
1908 self.invalidate(clearfilecache=True)
1908 self.invalidate(clearfilecache=True)
1909
1909
1910 @unfilteredpropertycache
1910 @unfilteredpropertycache
1911 def _quick_access_changeid_null(self):
1911 def _quick_access_changeid_null(self):
1912 return {
1912 return {
1913 b'null': (nullrev, self.nodeconstants.nullid),
1913 b'null': (nullrev, self.nodeconstants.nullid),
1914 nullrev: (nullrev, self.nodeconstants.nullid),
1914 nullrev: (nullrev, self.nodeconstants.nullid),
1915 self.nullid: (nullrev, self.nullid),
1915 self.nullid: (nullrev, self.nullid),
1916 }
1916 }
1917
1917
1918 @unfilteredpropertycache
1918 @unfilteredpropertycache
1919 def _quick_access_changeid_wc(self):
1919 def _quick_access_changeid_wc(self):
1920 # also fast path access to the working copy parents
1920 # also fast path access to the working copy parents
1921 # however, only do it for filter that ensure wc is visible.
1921 # however, only do it for filter that ensure wc is visible.
1922 quick = self._quick_access_changeid_null.copy()
1922 quick = self._quick_access_changeid_null.copy()
1923 cl = self.unfiltered().changelog
1923 cl = self.unfiltered().changelog
1924 for node in self.dirstate.parents():
1924 for node in self.dirstate.parents():
1925 if node == self.nullid:
1925 if node == self.nullid:
1926 continue
1926 continue
1927 rev = cl.index.get_rev(node)
1927 rev = cl.index.get_rev(node)
1928 if rev is None:
1928 if rev is None:
1929 # unknown working copy parent case:
1929 # unknown working copy parent case:
1930 #
1930 #
1931 # skip the fast path and let higher code deal with it
1931 # skip the fast path and let higher code deal with it
1932 continue
1932 continue
1933 pair = (rev, node)
1933 pair = (rev, node)
1934 quick[rev] = pair
1934 quick[rev] = pair
1935 quick[node] = pair
1935 quick[node] = pair
1936 # also add the parents of the parents
1936 # also add the parents of the parents
1937 for r in cl.parentrevs(rev):
1937 for r in cl.parentrevs(rev):
1938 if r == nullrev:
1938 if r == nullrev:
1939 continue
1939 continue
1940 n = cl.node(r)
1940 n = cl.node(r)
1941 pair = (r, n)
1941 pair = (r, n)
1942 quick[r] = pair
1942 quick[r] = pair
1943 quick[n] = pair
1943 quick[n] = pair
1944 p1node = self.dirstate.p1()
1944 p1node = self.dirstate.p1()
1945 if p1node != self.nullid:
1945 if p1node != self.nullid:
1946 quick[b'.'] = quick[p1node]
1946 quick[b'.'] = quick[p1node]
1947 return quick
1947 return quick
1948
1948
1949 @unfilteredmethod
1949 @unfilteredmethod
1950 def _quick_access_changeid_invalidate(self):
1950 def _quick_access_changeid_invalidate(self):
1951 if '_quick_access_changeid_wc' in vars(self):
1951 if '_quick_access_changeid_wc' in vars(self):
1952 del self.__dict__['_quick_access_changeid_wc']
1952 del self.__dict__['_quick_access_changeid_wc']
1953
1953
1954 @property
1954 @property
1955 def _quick_access_changeid(self):
1955 def _quick_access_changeid(self):
1956 """an helper dictionnary for __getitem__ calls
1956 """an helper dictionnary for __getitem__ calls
1957
1957
1958 This contains a list of symbol we can recognise right away without
1958 This contains a list of symbol we can recognise right away without
1959 further processing.
1959 further processing.
1960 """
1960 """
1961 if self.filtername in repoview.filter_has_wc:
1961 if self.filtername in repoview.filter_has_wc:
1962 return self._quick_access_changeid_wc
1962 return self._quick_access_changeid_wc
1963 return self._quick_access_changeid_null
1963 return self._quick_access_changeid_null
1964
1964
1965 def __getitem__(self, changeid):
1965 def __getitem__(self, changeid):
1966 # dealing with special cases
1966 # dealing with special cases
1967 if changeid is None:
1967 if changeid is None:
1968 return context.workingctx(self)
1968 return context.workingctx(self)
1969 if isinstance(changeid, context.basectx):
1969 if isinstance(changeid, context.basectx):
1970 return changeid
1970 return changeid
1971
1971
1972 # dealing with multiple revisions
1972 # dealing with multiple revisions
1973 if isinstance(changeid, slice):
1973 if isinstance(changeid, slice):
1974 # wdirrev isn't contiguous so the slice shouldn't include it
1974 # wdirrev isn't contiguous so the slice shouldn't include it
1975 return [
1975 return [
1976 self[i]
1976 self[i]
1977 for i in range(*changeid.indices(len(self)))
1977 for i in range(*changeid.indices(len(self)))
1978 if i not in self.changelog.filteredrevs
1978 if i not in self.changelog.filteredrevs
1979 ]
1979 ]
1980
1980
1981 # dealing with some special values
1981 # dealing with some special values
1982 quick_access = self._quick_access_changeid.get(changeid)
1982 quick_access = self._quick_access_changeid.get(changeid)
1983 if quick_access is not None:
1983 if quick_access is not None:
1984 rev, node = quick_access
1984 rev, node = quick_access
1985 return context.changectx(self, rev, node, maybe_filtered=False)
1985 return context.changectx(self, rev, node, maybe_filtered=False)
1986 if changeid == b'tip':
1986 if changeid == b'tip':
1987 node = self.changelog.tip()
1987 node = self.changelog.tip()
1988 rev = self.changelog.rev(node)
1988 rev = self.changelog.rev(node)
1989 return context.changectx(self, rev, node)
1989 return context.changectx(self, rev, node)
1990
1990
1991 # dealing with arbitrary values
1991 # dealing with arbitrary values
1992 try:
1992 try:
1993 if isinstance(changeid, int):
1993 if isinstance(changeid, int):
1994 node = self.changelog.node(changeid)
1994 node = self.changelog.node(changeid)
1995 rev = changeid
1995 rev = changeid
1996 elif changeid == b'.':
1996 elif changeid == b'.':
1997 # this is a hack to delay/avoid loading obsmarkers
1997 # this is a hack to delay/avoid loading obsmarkers
1998 # when we know that '.' won't be hidden
1998 # when we know that '.' won't be hidden
1999 node = self.dirstate.p1()
1999 node = self.dirstate.p1()
2000 rev = self.unfiltered().changelog.rev(node)
2000 rev = self.unfiltered().changelog.rev(node)
2001 elif len(changeid) == self.nodeconstants.nodelen:
2001 elif len(changeid) == self.nodeconstants.nodelen:
2002 try:
2002 try:
2003 node = changeid
2003 node = changeid
2004 rev = self.changelog.rev(changeid)
2004 rev = self.changelog.rev(changeid)
2005 except error.FilteredLookupError:
2005 except error.FilteredLookupError:
2006 changeid = hex(changeid) # for the error message
2006 changeid = hex(changeid) # for the error message
2007 raise
2007 raise
2008 except LookupError:
2008 except LookupError:
2009 # check if it might have come from damaged dirstate
2009 # check if it might have come from damaged dirstate
2010 #
2010 #
2011 # XXX we could avoid the unfiltered if we had a recognizable
2011 # XXX we could avoid the unfiltered if we had a recognizable
2012 # exception for filtered changeset access
2012 # exception for filtered changeset access
2013 if (
2013 if (
2014 self.local()
2014 self.local()
2015 and changeid in self.unfiltered().dirstate.parents()
2015 and changeid in self.unfiltered().dirstate.parents()
2016 ):
2016 ):
2017 msg = _(b"working directory has unknown parent '%s'!")
2017 msg = _(b"working directory has unknown parent '%s'!")
2018 raise error.Abort(msg % short(changeid))
2018 raise error.Abort(msg % short(changeid))
2019 changeid = hex(changeid) # for the error message
2019 changeid = hex(changeid) # for the error message
2020 raise
2020 raise
2021
2021
2022 elif len(changeid) == 2 * self.nodeconstants.nodelen:
2022 elif len(changeid) == 2 * self.nodeconstants.nodelen:
2023 node = bin(changeid)
2023 node = bin(changeid)
2024 rev = self.changelog.rev(node)
2024 rev = self.changelog.rev(node)
2025 else:
2025 else:
2026 raise error.ProgrammingError(
2026 raise error.ProgrammingError(
2027 b"unsupported changeid '%s' of type %s"
2027 b"unsupported changeid '%s' of type %s"
2028 % (changeid, pycompat.bytestr(type(changeid)))
2028 % (changeid, pycompat.bytestr(type(changeid)))
2029 )
2029 )
2030
2030
2031 return context.changectx(self, rev, node)
2031 return context.changectx(self, rev, node)
2032
2032
2033 except (error.FilteredIndexError, error.FilteredLookupError):
2033 except (error.FilteredIndexError, error.FilteredLookupError):
2034 raise error.FilteredRepoLookupError(
2034 raise error.FilteredRepoLookupError(
2035 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
2035 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
2036 )
2036 )
2037 except (IndexError, LookupError):
2037 except (IndexError, LookupError):
2038 raise error.RepoLookupError(
2038 raise error.RepoLookupError(
2039 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
2039 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
2040 )
2040 )
2041 except error.WdirUnsupported:
2041 except error.WdirUnsupported:
2042 return context.workingctx(self)
2042 return context.workingctx(self)
2043
2043
2044 def __contains__(self, changeid):
2044 def __contains__(self, changeid):
2045 """True if the given changeid exists"""
2045 """True if the given changeid exists"""
2046 try:
2046 try:
2047 self[changeid]
2047 self[changeid]
2048 return True
2048 return True
2049 except error.RepoLookupError:
2049 except error.RepoLookupError:
2050 return False
2050 return False
2051
2051
2052 def __nonzero__(self):
2052 def __nonzero__(self):
2053 return True
2053 return True
2054
2054
2055 __bool__ = __nonzero__
2055 __bool__ = __nonzero__
2056
2056
2057 def __len__(self):
2057 def __len__(self):
2058 # no need to pay the cost of repoview.changelog
2058 # no need to pay the cost of repoview.changelog
2059 unfi = self.unfiltered()
2059 unfi = self.unfiltered()
2060 return len(unfi.changelog)
2060 return len(unfi.changelog)
2061
2061
2062 def __iter__(self):
2062 def __iter__(self):
2063 return iter(self.changelog)
2063 return iter(self.changelog)
2064
2064
2065 def revs(self, expr: bytes, *args):
2065 def revs(self, expr: bytes, *args):
2066 """Find revisions matching a revset.
2066 """Find revisions matching a revset.
2067
2067
2068 The revset is specified as a string ``expr`` that may contain
2068 The revset is specified as a string ``expr`` that may contain
2069 %-formatting to escape certain types. See ``revsetlang.formatspec``.
2069 %-formatting to escape certain types. See ``revsetlang.formatspec``.
2070
2070
2071 Revset aliases from the configuration are not expanded. To expand
2071 Revset aliases from the configuration are not expanded. To expand
2072 user aliases, consider calling ``scmutil.revrange()`` or
2072 user aliases, consider calling ``scmutil.revrange()`` or
2073 ``repo.anyrevs([expr], user=True)``.
2073 ``repo.anyrevs([expr], user=True)``.
2074
2074
2075 Returns a smartset.abstractsmartset, which is a list-like interface
2075 Returns a smartset.abstractsmartset, which is a list-like interface
2076 that contains integer revisions.
2076 that contains integer revisions.
2077 """
2077 """
2078 tree = revsetlang.spectree(expr, *args)
2078 tree = revsetlang.spectree(expr, *args)
2079 return revset.makematcher(tree)(self)
2079 return revset.makematcher(tree)(self)
2080
2080
2081 def set(self, expr: bytes, *args):
2081 def set(self, expr: bytes, *args):
2082 """Find revisions matching a revset and emit changectx instances.
2082 """Find revisions matching a revset and emit changectx instances.
2083
2083
2084 This is a convenience wrapper around ``revs()`` that iterates the
2084 This is a convenience wrapper around ``revs()`` that iterates the
2085 result and is a generator of changectx instances.
2085 result and is a generator of changectx instances.
2086
2086
2087 Revset aliases from the configuration are not expanded. To expand
2087 Revset aliases from the configuration are not expanded. To expand
2088 user aliases, consider calling ``scmutil.revrange()``.
2088 user aliases, consider calling ``scmutil.revrange()``.
2089 """
2089 """
2090 for r in self.revs(expr, *args):
2090 for r in self.revs(expr, *args):
2091 yield self[r]
2091 yield self[r]
2092
2092
2093 def anyrevs(self, specs: bytes, user=False, localalias=None):
2093 def anyrevs(self, specs: bytes, user=False, localalias=None):
2094 """Find revisions matching one of the given revsets.
2094 """Find revisions matching one of the given revsets.
2095
2095
2096 Revset aliases from the configuration are not expanded by default. To
2096 Revset aliases from the configuration are not expanded by default. To
2097 expand user aliases, specify ``user=True``. To provide some local
2097 expand user aliases, specify ``user=True``. To provide some local
2098 definitions overriding user aliases, set ``localalias`` to
2098 definitions overriding user aliases, set ``localalias`` to
2099 ``{name: definitionstring}``.
2099 ``{name: definitionstring}``.
2100 """
2100 """
2101 if specs == [b'null']:
2101 if specs == [b'null']:
2102 return revset.baseset([nullrev])
2102 return revset.baseset([nullrev])
2103 if specs == [b'.']:
2103 if specs == [b'.']:
2104 quick_data = self._quick_access_changeid.get(b'.')
2104 quick_data = self._quick_access_changeid.get(b'.')
2105 if quick_data is not None:
2105 if quick_data is not None:
2106 return revset.baseset([quick_data[0]])
2106 return revset.baseset([quick_data[0]])
2107 if user:
2107 if user:
2108 m = revset.matchany(
2108 m = revset.matchany(
2109 self.ui,
2109 self.ui,
2110 specs,
2110 specs,
2111 lookup=revset.lookupfn(self),
2111 lookup=revset.lookupfn(self),
2112 localalias=localalias,
2112 localalias=localalias,
2113 )
2113 )
2114 else:
2114 else:
2115 m = revset.matchany(None, specs, localalias=localalias)
2115 m = revset.matchany(None, specs, localalias=localalias)
2116 return m(self)
2116 return m(self)
2117
2117
2118 def url(self) -> bytes:
2118 def url(self) -> bytes:
2119 return b'file:' + self.root
2119 return b'file:' + self.root
2120
2120
2121 def hook(self, name, throw=False, **args):
2121 def hook(self, name, throw=False, **args):
2122 """Call a hook, passing this repo instance.
2122 """Call a hook, passing this repo instance.
2123
2123
2124 This a convenience method to aid invoking hooks. Extensions likely
2124 This a convenience method to aid invoking hooks. Extensions likely
2125 won't call this unless they have registered a custom hook or are
2125 won't call this unless they have registered a custom hook or are
2126 replacing code that is expected to call a hook.
2126 replacing code that is expected to call a hook.
2127 """
2127 """
2128 return hook.hook(self.ui, self, name, throw, **args)
2128 return hook.hook(self.ui, self, name, throw, **args)
2129
2129
2130 @filteredpropertycache
2130 @filteredpropertycache
2131 def _tagscache(self):
2131 def _tagscache(self):
2132 """Returns a tagscache object that contains various tags related
2132 """Returns a tagscache object that contains various tags related
2133 caches."""
2133 caches."""
2134
2134
2135 # This simplifies its cache management by having one decorated
2135 # This simplifies its cache management by having one decorated
2136 # function (this one) and the rest simply fetch things from it.
2136 # function (this one) and the rest simply fetch things from it.
2137 class tagscache:
2137 class tagscache:
2138 def __init__(self):
2138 def __init__(self):
2139 # These two define the set of tags for this repository. tags
2139 # These two define the set of tags for this repository. tags
2140 # maps tag name to node; tagtypes maps tag name to 'global' or
2140 # maps tag name to node; tagtypes maps tag name to 'global' or
2141 # 'local'. (Global tags are defined by .hgtags across all
2141 # 'local'. (Global tags are defined by .hgtags across all
2142 # heads, and local tags are defined in .hg/localtags.)
2142 # heads, and local tags are defined in .hg/localtags.)
2143 # They constitute the in-memory cache of tags.
2143 # They constitute the in-memory cache of tags.
2144 self.tags = self.tagtypes = None
2144 self.tags = self.tagtypes = None
2145
2145
2146 self.nodetagscache = self.tagslist = None
2146 self.nodetagscache = self.tagslist = None
2147
2147
2148 cache = tagscache()
2148 cache = tagscache()
2149 cache.tags, cache.tagtypes = self._findtags()
2149 cache.tags, cache.tagtypes = self._findtags()
2150
2150
2151 return cache
2151 return cache
2152
2152
2153 def tags(self):
2153 def tags(self):
2154 '''return a mapping of tag to node'''
2154 '''return a mapping of tag to node'''
2155 t = {}
2155 t = {}
2156 if self.changelog.filteredrevs:
2156 if self.changelog.filteredrevs:
2157 tags, tt = self._findtags()
2157 tags, tt = self._findtags()
2158 else:
2158 else:
2159 tags = self._tagscache.tags
2159 tags = self._tagscache.tags
2160 rev = self.changelog.rev
2160 rev = self.changelog.rev
2161 for k, v in tags.items():
2161 for k, v in tags.items():
2162 try:
2162 try:
2163 # ignore tags to unknown nodes
2163 # ignore tags to unknown nodes
2164 rev(v)
2164 rev(v)
2165 t[k] = v
2165 t[k] = v
2166 except (error.LookupError, ValueError):
2166 except (error.LookupError, ValueError):
2167 pass
2167 pass
2168 return t
2168 return t
2169
2169
2170 def _findtags(self):
2170 def _findtags(self):
2171 """Do the hard work of finding tags. Return a pair of dicts
2171 """Do the hard work of finding tags. Return a pair of dicts
2172 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2172 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2173 maps tag name to a string like \'global\' or \'local\'.
2173 maps tag name to a string like \'global\' or \'local\'.
2174 Subclasses or extensions are free to add their own tags, but
2174 Subclasses or extensions are free to add their own tags, but
2175 should be aware that the returned dicts will be retained for the
2175 should be aware that the returned dicts will be retained for the
2176 duration of the localrepo object."""
2176 duration of the localrepo object."""
2177
2177
2178 # XXX what tagtype should subclasses/extensions use? Currently
2178 # XXX what tagtype should subclasses/extensions use? Currently
2179 # mq and bookmarks add tags, but do not set the tagtype at all.
2179 # mq and bookmarks add tags, but do not set the tagtype at all.
2180 # Should each extension invent its own tag type? Should there
2180 # Should each extension invent its own tag type? Should there
2181 # be one tagtype for all such "virtual" tags? Or is the status
2181 # be one tagtype for all such "virtual" tags? Or is the status
2182 # quo fine?
2182 # quo fine?
2183
2183
2184 # map tag name to (node, hist)
2184 # map tag name to (node, hist)
2185 alltags = tagsmod.findglobaltags(self.ui, self)
2185 alltags = tagsmod.findglobaltags(self.ui, self)
2186 # map tag name to tag type
2186 # map tag name to tag type
2187 tagtypes = {tag: b'global' for tag in alltags}
2187 tagtypes = {tag: b'global' for tag in alltags}
2188
2188
2189 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2189 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2190
2190
2191 # Build the return dicts. Have to re-encode tag names because
2191 # Build the return dicts. Have to re-encode tag names because
2192 # the tags module always uses UTF-8 (in order not to lose info
2192 # the tags module always uses UTF-8 (in order not to lose info
2193 # writing to the cache), but the rest of Mercurial wants them in
2193 # writing to the cache), but the rest of Mercurial wants them in
2194 # local encoding.
2194 # local encoding.
2195 tags = {}
2195 tags = {}
2196 for name, (node, hist) in alltags.items():
2196 for name, (node, hist) in alltags.items():
2197 if node != self.nullid:
2197 if node != self.nullid:
2198 tags[encoding.tolocal(name)] = node
2198 tags[encoding.tolocal(name)] = node
2199 tags[b'tip'] = self.changelog.tip()
2199 tags[b'tip'] = self.changelog.tip()
2200 tagtypes = {
2200 tagtypes = {
2201 encoding.tolocal(name): value for (name, value) in tagtypes.items()
2201 encoding.tolocal(name): value for (name, value) in tagtypes.items()
2202 }
2202 }
2203 return (tags, tagtypes)
2203 return (tags, tagtypes)
2204
2204
2205 def tagtype(self, tagname):
2205 def tagtype(self, tagname):
2206 """
2206 """
2207 return the type of the given tag. result can be:
2207 return the type of the given tag. result can be:
2208
2208
2209 'local' : a local tag
2209 'local' : a local tag
2210 'global' : a global tag
2210 'global' : a global tag
2211 None : tag does not exist
2211 None : tag does not exist
2212 """
2212 """
2213
2213
2214 return self._tagscache.tagtypes.get(tagname)
2214 return self._tagscache.tagtypes.get(tagname)
2215
2215
2216 def tagslist(self):
2216 def tagslist(self):
2217 '''return a list of tags ordered by revision'''
2217 '''return a list of tags ordered by revision'''
2218 if not self._tagscache.tagslist:
2218 if not self._tagscache.tagslist:
2219 l = []
2219 l = []
2220 for t, n in self.tags().items():
2220 for t, n in self.tags().items():
2221 l.append((self.changelog.rev(n), t, n))
2221 l.append((self.changelog.rev(n), t, n))
2222 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2222 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2223
2223
2224 return self._tagscache.tagslist
2224 return self._tagscache.tagslist
2225
2225
2226 def nodetags(self, node):
2226 def nodetags(self, node):
2227 '''return the tags associated with a node'''
2227 '''return the tags associated with a node'''
2228 if not self._tagscache.nodetagscache:
2228 if not self._tagscache.nodetagscache:
2229 nodetagscache = {}
2229 nodetagscache = {}
2230 for t, n in self._tagscache.tags.items():
2230 for t, n in self._tagscache.tags.items():
2231 nodetagscache.setdefault(n, []).append(t)
2231 nodetagscache.setdefault(n, []).append(t)
2232 for tags in nodetagscache.values():
2232 for tags in nodetagscache.values():
2233 tags.sort()
2233 tags.sort()
2234 self._tagscache.nodetagscache = nodetagscache
2234 self._tagscache.nodetagscache = nodetagscache
2235 return self._tagscache.nodetagscache.get(node, [])
2235 return self._tagscache.nodetagscache.get(node, [])
2236
2236
2237 def nodebookmarks(self, node):
2237 def nodebookmarks(self, node):
2238 """return the list of bookmarks pointing to the specified node"""
2238 """return the list of bookmarks pointing to the specified node"""
2239 return self._bookmarks.names(node)
2239 return self._bookmarks.names(node)
2240
2240
2241 def branchmap(self):
2241 def branchmap(self):
2242 """returns a dictionary {branch: [branchheads]} with branchheads
2242 """returns a dictionary {branch: [branchheads]} with branchheads
2243 ordered by increasing revision number"""
2243 ordered by increasing revision number"""
2244 return self._branchcaches[self]
2244 return self._branchcaches[self]
2245
2245
2246 @unfilteredmethod
2246 @unfilteredmethod
2247 def revbranchcache(self):
2247 def revbranchcache(self):
2248 if not self._revbranchcache:
2248 if not self._revbranchcache:
2249 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2249 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2250 return self._revbranchcache
2250 return self._revbranchcache
2251
2251
2252 def register_changeset(self, rev, changelogrevision):
2252 def register_changeset(self, rev, changelogrevision):
2253 self.revbranchcache().setdata(rev, changelogrevision)
2253 self.revbranchcache().setdata(rev, changelogrevision)
2254
2254
2255 def branchtip(self, branch, ignoremissing=False):
2255 def branchtip(self, branch, ignoremissing=False):
2256 """return the tip node for a given branch
2256 """return the tip node for a given branch
2257
2257
2258 If ignoremissing is True, then this method will not raise an error.
2258 If ignoremissing is True, then this method will not raise an error.
2259 This is helpful for callers that only expect None for a missing branch
2259 This is helpful for callers that only expect None for a missing branch
2260 (e.g. namespace).
2260 (e.g. namespace).
2261
2261
2262 """
2262 """
2263 try:
2263 try:
2264 return self.branchmap().branchtip(branch)
2264 return self.branchmap().branchtip(branch)
2265 except KeyError:
2265 except KeyError:
2266 if not ignoremissing:
2266 if not ignoremissing:
2267 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2267 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2268 else:
2268 else:
2269 pass
2269 pass
2270
2270
2271 def lookup(self, key):
2271 def lookup(self, key):
2272 node = scmutil.revsymbol(self, key).node()
2272 node = scmutil.revsymbol(self, key).node()
2273 if node is None:
2273 if node is None:
2274 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2274 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2275 return node
2275 return node
2276
2276
2277 def lookupbranch(self, key):
2277 def lookupbranch(self, key):
2278 if self.branchmap().hasbranch(key):
2278 if self.branchmap().hasbranch(key):
2279 return key
2279 return key
2280
2280
2281 return scmutil.revsymbol(self, key).branch()
2281 return scmutil.revsymbol(self, key).branch()
2282
2282
2283 def known(self, nodes):
2283 def known(self, nodes):
2284 cl = self.changelog
2284 cl = self.changelog
2285 get_rev = cl.index.get_rev
2285 get_rev = cl.index.get_rev
2286 filtered = cl.filteredrevs
2286 filtered = cl.filteredrevs
2287 result = []
2287 result = []
2288 for n in nodes:
2288 for n in nodes:
2289 r = get_rev(n)
2289 r = get_rev(n)
2290 resp = not (r is None or r in filtered)
2290 resp = not (r is None or r in filtered)
2291 result.append(resp)
2291 result.append(resp)
2292 return result
2292 return result
2293
2293
2294 def local(self):
2294 def local(self):
2295 return self
2295 return self
2296
2296
2297 def publishing(self):
2297 def publishing(self):
2298 # it's safe (and desirable) to trust the publish flag unconditionally
2298 # it's safe (and desirable) to trust the publish flag unconditionally
2299 # so that we don't finalize changes shared between users via ssh or nfs
2299 # so that we don't finalize changes shared between users via ssh or nfs
2300 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2300 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2301
2301
2302 def cancopy(self):
2302 def cancopy(self):
2303 # so statichttprepo's override of local() works
2303 # so statichttprepo's override of local() works
2304 if not self.local():
2304 if not self.local():
2305 return False
2305 return False
2306 if not self.publishing():
2306 if not self.publishing():
2307 return True
2307 return True
2308 # if publishing we can't copy if there is filtered content
2308 # if publishing we can't copy if there is filtered content
2309 return not self.filtered(b'visible').changelog.filteredrevs
2309 return not self.filtered(b'visible').changelog.filteredrevs
2310
2310
2311 def shared(self):
2311 def shared(self):
2312 '''the type of shared repository (None if not shared)'''
2312 '''the type of shared repository (None if not shared)'''
2313 if self.sharedpath != self.path:
2313 if self.sharedpath != self.path:
2314 return b'store'
2314 return b'store'
2315 return None
2315 return None
2316
2316
2317 def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
2317 def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
2318 return self.vfs.reljoin(self.root, f, *insidef)
2318 return self.vfs.reljoin(self.root, f, *insidef)
2319
2319
2320 def setparents(self, p1, p2=None):
2320 def setparents(self, p1, p2=None):
2321 if p2 is None:
2321 if p2 is None:
2322 p2 = self.nullid
2322 p2 = self.nullid
2323 self[None].setparents(p1, p2)
2323 self[None].setparents(p1, p2)
2324 self._quick_access_changeid_invalidate()
2324 self._quick_access_changeid_invalidate()
2325
2325
2326 def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
2326 def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
2327 """changeid must be a changeset revision, if specified.
2327 """changeid must be a changeset revision, if specified.
2328 fileid can be a file revision or node."""
2328 fileid can be a file revision or node."""
2329 return context.filectx(
2329 return context.filectx(
2330 self, path, changeid, fileid, changectx=changectx
2330 self, path, changeid, fileid, changectx=changectx
2331 )
2331 )
2332
2332
2333 def getcwd(self) -> bytes:
2333 def getcwd(self) -> bytes:
2334 return self.dirstate.getcwd()
2334 return self.dirstate.getcwd()
2335
2335
2336 def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
2336 def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
2337 return self.dirstate.pathto(f, cwd)
2337 return self.dirstate.pathto(f, cwd)
2338
2338
2339 def _loadfilter(self, filter):
2339 def _loadfilter(self, filter):
2340 if filter not in self._filterpats:
2340 if filter not in self._filterpats:
2341 l = []
2341 l = []
2342 for pat, cmd in self.ui.configitems(filter):
2342 for pat, cmd in self.ui.configitems(filter):
2343 if cmd == b'!':
2343 if cmd == b'!':
2344 continue
2344 continue
2345 mf = matchmod.match(self.root, b'', [pat])
2345 mf = matchmod.match(self.root, b'', [pat])
2346 fn = None
2346 fn = None
2347 params = cmd
2347 params = cmd
2348 for name, filterfn in self._datafilters.items():
2348 for name, filterfn in self._datafilters.items():
2349 if cmd.startswith(name):
2349 if cmd.startswith(name):
2350 fn = filterfn
2350 fn = filterfn
2351 params = cmd[len(name) :].lstrip()
2351 params = cmd[len(name) :].lstrip()
2352 break
2352 break
2353 if not fn:
2353 if not fn:
2354 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2354 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2355 fn.__name__ = 'commandfilter'
2355 fn.__name__ = 'commandfilter'
2356 # Wrap old filters not supporting keyword arguments
2356 # Wrap old filters not supporting keyword arguments
2357 if not pycompat.getargspec(fn)[2]:
2357 if not pycompat.getargspec(fn)[2]:
2358 oldfn = fn
2358 oldfn = fn
2359 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2359 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2360 fn.__name__ = 'compat-' + oldfn.__name__
2360 fn.__name__ = 'compat-' + oldfn.__name__
2361 l.append((mf, fn, params))
2361 l.append((mf, fn, params))
2362 self._filterpats[filter] = l
2362 self._filterpats[filter] = l
2363 return self._filterpats[filter]
2363 return self._filterpats[filter]
2364
2364
2365 def _filter(self, filterpats, filename, data):
2365 def _filter(self, filterpats, filename, data):
2366 for mf, fn, cmd in filterpats:
2366 for mf, fn, cmd in filterpats:
2367 if mf(filename):
2367 if mf(filename):
2368 self.ui.debug(
2368 self.ui.debug(
2369 b"filtering %s through %s\n"
2369 b"filtering %s through %s\n"
2370 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2370 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2371 )
2371 )
2372 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2372 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2373 break
2373 break
2374
2374
2375 return data
2375 return data
2376
2376
2377 @unfilteredpropertycache
2377 @unfilteredpropertycache
2378 def _encodefilterpats(self):
2378 def _encodefilterpats(self):
2379 return self._loadfilter(b'encode')
2379 return self._loadfilter(b'encode')
2380
2380
2381 @unfilteredpropertycache
2381 @unfilteredpropertycache
2382 def _decodefilterpats(self):
2382 def _decodefilterpats(self):
2383 return self._loadfilter(b'decode')
2383 return self._loadfilter(b'decode')
2384
2384
2385 def adddatafilter(self, name, filter):
2385 def adddatafilter(self, name, filter):
2386 self._datafilters[name] = filter
2386 self._datafilters[name] = filter
2387
2387
2388 def wread(self, filename: bytes) -> bytes:
2388 def wread(self, filename: bytes) -> bytes:
2389 if self.wvfs.islink(filename):
2389 if self.wvfs.islink(filename):
2390 data = self.wvfs.readlink(filename)
2390 data = self.wvfs.readlink(filename)
2391 else:
2391 else:
2392 data = self.wvfs.read(filename)
2392 data = self.wvfs.read(filename)
2393 return self._filter(self._encodefilterpats, filename, data)
2393 return self._filter(self._encodefilterpats, filename, data)
2394
2394
2395 def wwrite(
2395 def wwrite(
2396 self,
2396 self,
2397 filename: bytes,
2397 filename: bytes,
2398 data: bytes,
2398 data: bytes,
2399 flags: bytes,
2399 flags: bytes,
2400 backgroundclose=False,
2400 backgroundclose=False,
2401 **kwargs,
2401 **kwargs,
2402 ) -> int:
2402 ) -> int:
2403 """write ``data`` into ``filename`` in the working directory
2403 """write ``data`` into ``filename`` in the working directory
2404
2404
2405 This returns length of written (maybe decoded) data.
2405 This returns length of written (maybe decoded) data.
2406 """
2406 """
2407 data = self._filter(self._decodefilterpats, filename, data)
2407 data = self._filter(self._decodefilterpats, filename, data)
2408 if b'l' in flags:
2408 if b'l' in flags:
2409 self.wvfs.symlink(data, filename)
2409 self.wvfs.symlink(data, filename)
2410 else:
2410 else:
2411 self.wvfs.write(
2411 self.wvfs.write(
2412 filename, data, backgroundclose=backgroundclose, **kwargs
2412 filename, data, backgroundclose=backgroundclose, **kwargs
2413 )
2413 )
2414 if b'x' in flags:
2414 if b'x' in flags:
2415 self.wvfs.setflags(filename, False, True)
2415 self.wvfs.setflags(filename, False, True)
2416 else:
2416 else:
2417 self.wvfs.setflags(filename, False, False)
2417 self.wvfs.setflags(filename, False, False)
2418 return len(data)
2418 return len(data)
2419
2419
2420 def wwritedata(self, filename: bytes, data: bytes) -> bytes:
2420 def wwritedata(self, filename: bytes, data: bytes) -> bytes:
2421 return self._filter(self._decodefilterpats, filename, data)
2421 return self._filter(self._decodefilterpats, filename, data)
2422
2422
2423 def currenttransaction(self):
2423 def currenttransaction(self):
2424 """return the current transaction or None if non exists"""
2424 """return the current transaction or None if non exists"""
2425 if self._transref:
2425 if self._transref:
2426 tr = self._transref()
2426 tr = self._transref()
2427 else:
2427 else:
2428 tr = None
2428 tr = None
2429
2429
2430 if tr and tr.running():
2430 if tr and tr.running():
2431 return tr
2431 return tr
2432 return None
2432 return None
2433
2433
    def transaction(self, desc, report=None):
        """Open a new transaction and return it (or nest into a running one).

        ``desc`` is a bytes label recorded in the journal and exposed to
        hooks; ``report`` is an optional callable used to report transaction
        messages (defaults to ``self.ui.warn``).

        Raises ``error.ProgrammingError`` when devel lock-checking is enabled
        and no lock is held, or when the dirstate is dirty; raises
        ``error.RepoError`` when an abandoned ``journal`` file already exists.
        """
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        # re-use (nest into) an already-running transaction when there is one
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        # At that point your dirstate should be clean:
        #
        # - If you don't have the wlock, why would you still have a dirty
        #   dirstate ?
        #
        # - If you hold the wlock, you should not be opening a transaction in
        #   the middle of a `distate.changing_*` block. The transaction needs
        #   to be open before that and wrap the change-context.
        #
        # - If you are not within a `dirstate.changing_*` context, why is our
        #   dirstate dirty?
        if self.dirstate._dirty:
            m = "cannot open a transaction with a dirty dirstate"
            raise error.ProgrammingError(m)

        # build a unique transaction id from randomness and the current time
        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = self.vfs_map
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movement from a code perspective. So we fallback to a
        # tracking at the repository level. One could envision to track changes
        # to the '.hgtags' file through changegroup apply but that fails to
        # cope with case where transaction expose new heads without changegroup
        # being involved (eg: phase movement).
        #
        # For now, We gate the feature behind a flag since this likely comes
        # with performance impacts. The current code run more often than needed
        # and do not use caches as much as it could. The current focus is on
        # the behavior of the feature so we disable it by default. The flag
        # will be removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tag were touched
        # in this one. Changes are recorded in a line base format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follow:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                # compare tag filenode state before/after the transaction and
                # record the differences for hooks
                repo = reporef()
                assert repo is not None  # help pytype
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once buiding set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we needs it to still exist on the transaction
                        # is close (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hooks since the 'tr.hookargs'
            # dict is copied before these run. In addition we needs the data
            # available to in memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            assert repo is not None  # help pytype

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                if singleheadsub.get(b"public-changes-only", False):
                    filtername = b"immutable"
                else:
                    filtername = b"visible"
                scmutil.enforcesinglehead(
                    repo, tr2, desc, accountclosed, filtername
                )
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args),
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args),
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            # called when the transaction is released: flush the dirstate on
            # success, drop all cached/in-memory state on failure
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            lambda: None,
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        for vfs_id, path in self._journalfiles():
            tr.add_journal(vfs_id, path)
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize mean that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happen during the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run"""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                assert repo is not None  # help pytype

                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args),
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args),
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            repo = reporef()
            assert repo is not None  # help pytype
            repo._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted"""
            repo = reporef()
            assert repo is not None  # help pytype
            repo.hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        # This only exist to deal with the need of rollback to have viable
        # parents at the end of the operation. So backup viable parents at the
        # time of this operation.
        #
        # We only do it when the `wlock` is taken, otherwise other might be
        # altering the dirstate under us.
        #
        # This is really not a great way to do this (first, because we cannot
        # always do it). There are more viable alternative that exists
        #
        # - backing only the working copy parent in a dedicated files and doing
        #   a clean "keep-update" to them on `hg rollback`.
        #
        # - slightly changing the behavior an applying a logic similar to "hg
        #   strip" to pick a working copy destination on `hg rollback`
        if self.currentwlock() is not None:
            ds = self.dirstate
            if not self.vfs.exists(b'branch'):
                # force a file to be written if None exist
                ds.setbranch(b'default', None)

            def backup_dirstate(tr):
                for f in ds.all_file_names():
                    # hardlink backup is okay because `dirstate` is always
                    # atomically written and possible data file are append only
                    # and resistant to trailing data.
                    tr.addbackup(f, hardlink=True, location=b'plain')

            tr.addvalidator(b'dirstate-backup', backup_dirstate)
        return tr
2749
2749
2750 def _journalfiles(self):
2750 def _journalfiles(self):
2751 return (
2751 return (
2752 (self.svfs, b'journal'),
2752 (self.svfs, b'journal'),
2753 (self.vfs, b'journal.desc'),
2753 (self.vfs, b'journal.desc'),
2754 )
2754 )
2755
2755
2756 def undofiles(self):
2756 def undofiles(self):
2757 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2757 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2758
2758
2759 @unfilteredmethod
2759 @unfilteredmethod
2760 def _writejournal(self, desc):
2760 def _writejournal(self, desc):
2761 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2761 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2762
2762
2763 def recover(self):
2763 def recover(self):
2764 with self.lock():
2764 with self.lock():
2765 if self.svfs.exists(b"journal"):
2765 if self.svfs.exists(b"journal"):
2766 self.ui.status(_(b"rolling back interrupted transaction\n"))
2766 self.ui.status(_(b"rolling back interrupted transaction\n"))
2767 vfsmap = self.vfs_map
2767 vfsmap = self.vfs_map
2768 transaction.rollback(
2768 transaction.rollback(
2769 self.svfs,
2769 self.svfs,
2770 vfsmap,
2770 vfsmap,
2771 b"journal",
2771 b"journal",
2772 self.ui.warn,
2772 self.ui.warn,
2773 checkambigfiles=_cachedfiles,
2773 checkambigfiles=_cachedfiles,
2774 )
2774 )
2775 self.invalidate()
2775 self.invalidate()
2776 return True
2776 return True
2777 else:
2777 else:
2778 self.ui.warn(_(b"no interrupted transaction available\n"))
2778 self.ui.warn(_(b"no interrupted transaction available\n"))
2779 return False
2779 return False
2780
2780
2781 def rollback(self, dryrun=False, force=False):
2781 def rollback(self, dryrun=False, force=False):
2782 wlock = lock = None
2782 wlock = lock = None
2783 try:
2783 try:
2784 wlock = self.wlock()
2784 wlock = self.wlock()
2785 lock = self.lock()
2785 lock = self.lock()
2786 if self.svfs.exists(b"undo"):
2786 if self.svfs.exists(b"undo"):
2787 return self._rollback(dryrun, force)
2787 return self._rollback(dryrun, force)
2788 else:
2788 else:
2789 self.ui.warn(_(b"no rollback information available\n"))
2789 self.ui.warn(_(b"no rollback information available\n"))
2790 return 1
2790 return 1
2791 finally:
2791 finally:
2792 release(lock, wlock)
2792 release(lock, wlock)
2793
2793
    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        """Implementation of ``rollback()``; the caller holds wlock and lock.

        Reads ``undo.desc`` to describe what is being rolled back, refuses to
        roll back a commit while the working directory is not checked out at
        tip (unless ``force``), then replays the ``undo`` journal. Returns 0,
        also in ``dryrun`` mode (after only printing the message).
        """
        ui = self.ui

        parents = self.dirstate.parents()
        try:
            # journal.desc layout: "<old repo length>\n<description>[\n<detail>]"
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
            # True when a working-directory parent will no longer exist after
            # the rollback
            parentgone = any(self[p].rev() > oldtip for p in parents)
        except IOError:
            # undo.desc is missing/unreadable: be conservative
            msg = _(b'rolling back unknown transaction\n')
            desc = None
            parentgone = True

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        self.destroying()
        vfsmap = self.vfs_map
        skip_journal_pattern = None
        if not parentgone:
            # parents survive the rollback: keep the current dirstate files
            skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
        transaction.rollback(
            self.svfs,
            vfsmap,
            b'undo',
            ui.warn,
            checkambigfiles=_cachedfiles,
            skip_journal_pattern=skip_journal_pattern,
        )
        self.invalidate()
        self.dirstate.invalidate()

        if parentgone:
            # replace this with some explicit parent update in the future.
            has_node = self.changelog.index.has_node
            if not all(has_node(p) for p in self.dirstate._pl):
                # There was no dirstate to backup initially, we need to drop
                # the existing one.
                with self.dirstate.changing_parents(self):
                    self.dirstate.setparents(self.nullid)
                    self.dirstate.clear()

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
        mergestatemod.mergestate.clean(self)

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
2880
2880
2881 def _buildcacheupdater(self, newtransaction):
2881 def _buildcacheupdater(self, newtransaction):
2882 """called during transaction to build the callback updating cache
2882 """called during transaction to build the callback updating cache
2883
2883
2884 Lives on the repository to help extension who might want to augment
2884 Lives on the repository to help extension who might want to augment
2885 this logic. For this purpose, the created transaction is passed to the
2885 this logic. For this purpose, the created transaction is passed to the
2886 method.
2886 method.
2887 """
2887 """
2888 # we must avoid cyclic reference between repo and transaction.
2888 # we must avoid cyclic reference between repo and transaction.
2889 reporef = weakref.ref(self)
2889 reporef = weakref.ref(self)
2890
2890
2891 def updater(tr):
2891 def updater(tr):
2892 repo = reporef()
2892 repo = reporef()
2893 assert repo is not None # help pytype
2893 assert repo is not None # help pytype
2894 repo.updatecaches(tr)
2894 repo.updatecaches(tr)
2895
2895
2896 return updater
2896 return updater
2897
2897
2898 @unfilteredmethod
2898 @unfilteredmethod
2899 def updatecaches(self, tr=None, full=False, caches=None):
2899 def updatecaches(self, tr=None, full=False, caches=None):
2900 """warm appropriate caches
2900 """warm appropriate caches
2901
2901
2902 If this function is called after a transaction closed. The transaction
2902 If this function is called after a transaction closed. The transaction
2903 will be available in the 'tr' argument. This can be used to selectively
2903 will be available in the 'tr' argument. This can be used to selectively
2904 update caches relevant to the changes in that transaction.
2904 update caches relevant to the changes in that transaction.
2905
2905
2906 If 'full' is set, make sure all caches the function knows about have
2906 If 'full' is set, make sure all caches the function knows about have
2907 up-to-date data. Even the ones usually loaded more lazily.
2907 up-to-date data. Even the ones usually loaded more lazily.
2908
2908
2909 The `full` argument can take a special "post-clone" value. In this case
2909 The `full` argument can take a special "post-clone" value. In this case
2910 the cache warming is made after a clone and of the slower cache might
2910 the cache warming is made after a clone and of the slower cache might
2911 be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2911 be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2912 as we plan for a cleaner way to deal with this for 5.9.
2912 as we plan for a cleaner way to deal with this for 5.9.
2913 """
2913 """
2914 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2914 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2915 # During strip, many caches are invalid but
2915 # During strip, many caches are invalid but
2916 # later call to `destroyed` will refresh them.
2916 # later call to `destroyed` will refresh them.
2917 return
2917 return
2918
2918
2919 unfi = self.unfiltered()
2919 unfi = self.unfiltered()
2920
2920
2921 if caches is None:
2921 if caches is None:
2922 caches = repository.CACHES_DEFAULT
2922 caches = repository.CACHES_DEFAULT
2923
2923
2924 if repository.CACHE_BRANCHMAP_SERVED in caches:
2924 if repository.CACHE_BRANCHMAP_SERVED in caches:
2925 if tr is None or tr.changes[b'origrepolen'] < len(self):
2925 if tr is None or tr.changes[b'origrepolen'] < len(self):
2926 # accessing the 'served' branchmap should refresh all the others,
2926 # accessing the 'served' branchmap should refresh all the others,
2927 self.ui.debug(b'updating the branch cache\n')
2927 self.ui.debug(b'updating the branch cache\n')
2928 self.filtered(b'served').branchmap()
2928 self.filtered(b'served').branchmap()
2929 self.filtered(b'served.hidden').branchmap()
2929 self.filtered(b'served.hidden').branchmap()
2930 # flush all possibly delayed write.
2930 # flush all possibly delayed write.
2931 self._branchcaches.write_delayed(self)
2931 self._branchcaches.write_delayed(self)
2932
2932
2933 if repository.CACHE_CHANGELOG_CACHE in caches:
2933 if repository.CACHE_CHANGELOG_CACHE in caches:
2934 self.changelog.update_caches(transaction=tr)
2934 self.changelog.update_caches(transaction=tr)
2935
2935
2936 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2936 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2937 self.manifestlog.update_caches(transaction=tr)
2937 self.manifestlog.update_caches(transaction=tr)
2938 for entry in self.store.walk():
2938 for entry in self.store.walk():
2939 if not entry.is_revlog:
2939 if not entry.is_revlog:
2940 continue
2940 continue
2941 if not entry.is_manifestlog:
2941 if not entry.is_manifestlog:
2942 continue
2942 continue
2943 manifestrevlog = entry.get_revlog_instance(self).get_revlog()
2943 manifestrevlog = entry.get_revlog_instance(self).get_revlog()
2944 if manifestrevlog is not None:
2944 if manifestrevlog is not None:
2945 manifestrevlog.update_caches(transaction=tr)
2945 manifestrevlog.update_caches(transaction=tr)
2946
2946
2947 if repository.CACHE_REV_BRANCH in caches:
2947 if repository.CACHE_REV_BRANCH in caches:
2948 rbc = unfi.revbranchcache()
2948 rbc = unfi.revbranchcache()
2949 for r in unfi.changelog:
2949 for r in unfi.changelog:
2950 rbc.branchinfo(r)
2950 rbc.branchinfo(r)
2951 rbc.write()
2951 rbc.write()
2952
2952
2953 if repository.CACHE_FULL_MANIFEST in caches:
2953 if repository.CACHE_FULL_MANIFEST in caches:
2954 # ensure the working copy parents are in the manifestfulltextcache
2954 # ensure the working copy parents are in the manifestfulltextcache
2955 for ctx in self[b'.'].parents():
2955 for ctx in self[b'.'].parents():
2956 ctx.manifest() # accessing the manifest is enough
2956 ctx.manifest() # accessing the manifest is enough
2957
2957
2958 if repository.CACHE_FILE_NODE_TAGS in caches:
2958 if repository.CACHE_FILE_NODE_TAGS in caches:
2959 # accessing fnode cache warms the cache
2959 # accessing fnode cache warms the cache
2960 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2960 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2961
2961
2962 if repository.CACHE_TAGS_DEFAULT in caches:
2962 if repository.CACHE_TAGS_DEFAULT in caches:
2963 # accessing tags warm the cache
2963 # accessing tags warm the cache
2964 self.tags()
2964 self.tags()
2965 if repository.CACHE_TAGS_SERVED in caches:
2965 if repository.CACHE_TAGS_SERVED in caches:
2966 self.filtered(b'served').tags()
2966 self.filtered(b'served').tags()
2967
2967
2968 if repository.CACHE_BRANCHMAP_ALL in caches:
2968 if repository.CACHE_BRANCHMAP_ALL in caches:
2969 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2969 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2970 # so we're forcing a write to cause these caches to be warmed up
2970 # so we're forcing a write to cause these caches to be warmed up
2971 # even if they haven't explicitly been requested yet (if they've
2971 # even if they haven't explicitly been requested yet (if they've
2972 # never been used by hg, they won't ever have been written, even if
2972 # never been used by hg, they won't ever have been written, even if
2973 # they're a subset of another kind of cache that *has* been used).
2973 # they're a subset of another kind of cache that *has* been used).
2974 for filt in repoview.filtertable.keys():
2974 for filt in repoview.filtertable.keys():
2975 filtered = self.filtered(filt)
2975 filtered = self.filtered(filt)
2976 filtered.branchmap().write(filtered)
2976 filtered.branchmap().write(filtered)
2977
2977
2978 def invalidatecaches(self):
2978 def invalidatecaches(self):
2979 if '_tagscache' in vars(self):
2979 if '_tagscache' in vars(self):
2980 # can't use delattr on proxy
2980 # can't use delattr on proxy
2981 del self.__dict__['_tagscache']
2981 del self.__dict__['_tagscache']
2982
2982
2983 self._branchcaches.clear()
2983 self._branchcaches.clear()
2984 self.invalidatevolatilesets()
2984 self.invalidatevolatilesets()
2985 self._sparsesignaturecache.clear()
2985 self._sparsesignaturecache.clear()
2986
2986
2987 def invalidatevolatilesets(self):
2987 def invalidatevolatilesets(self):
2988 self.filteredrevcache.clear()
2988 self.filteredrevcache.clear()
2989 obsolete.clearobscaches(self)
2989 obsolete.clearobscaches(self)
2990 self._quick_access_changeid_invalidate()
2990 self._quick_access_changeid_invalidate()
2991
2991
2992 def invalidatedirstate(self):
2992 def invalidatedirstate(self):
2993 """Invalidates the dirstate, causing the next call to dirstate
2993 """Invalidates the dirstate, causing the next call to dirstate
2994 to check if it was modified since the last time it was read,
2994 to check if it was modified since the last time it was read,
2995 rereading it if it has.
2995 rereading it if it has.
2996
2996
2997 This is different to dirstate.invalidate() that it doesn't always
2997 This is different to dirstate.invalidate() that it doesn't always
2998 rereads the dirstate. Use dirstate.invalidate() if you want to
2998 rereads the dirstate. Use dirstate.invalidate() if you want to
2999 explicitly read the dirstate again (i.e. restoring it to a previous
2999 explicitly read the dirstate again (i.e. restoring it to a previous
3000 known good state)."""
3000 known good state)."""
3001 unfi = self.unfiltered()
3001 unfi = self.unfiltered()
3002 if 'dirstate' in unfi.__dict__:
3002 if 'dirstate' in unfi.__dict__:
3003 assert not self.dirstate.is_changing_any
3003 assert not self.dirstate.is_changing_any
3004 del unfi.__dict__['dirstate']
3004 del unfi.__dict__['dirstate']
3005
3005
3006 def invalidate(self, clearfilecache=False):
3006 def invalidate(self, clearfilecache=False):
3007 """Invalidates both store and non-store parts other than dirstate
3007 """Invalidates both store and non-store parts other than dirstate
3008
3008
3009 If a transaction is running, invalidation of store is omitted,
3009 If a transaction is running, invalidation of store is omitted,
3010 because discarding in-memory changes might cause inconsistency
3010 because discarding in-memory changes might cause inconsistency
3011 (e.g. incomplete fncache causes unintentional failure, but
3011 (e.g. incomplete fncache causes unintentional failure, but
3012 redundant one doesn't).
3012 redundant one doesn't).
3013 """
3013 """
3014 unfiltered = self.unfiltered() # all file caches are stored unfiltered
3014 unfiltered = self.unfiltered() # all file caches are stored unfiltered
3015 for k in list(self._filecache.keys()):
3015 for k in list(self._filecache.keys()):
3016 if (
3016 if (
3017 k == b'changelog'
3017 k == b'changelog'
3018 and self.currenttransaction()
3018 and self.currenttransaction()
3019 and self.changelog.is_delaying
3019 and self.changelog.is_delaying
3020 ):
3020 ):
3021 # The changelog object may store unwritten revisions. We don't
3021 # The changelog object may store unwritten revisions. We don't
3022 # want to lose them.
3022 # want to lose them.
3023 # TODO: Solve the problem instead of working around it.
3023 # TODO: Solve the problem instead of working around it.
3024 continue
3024 continue
3025
3025
3026 if clearfilecache:
3026 if clearfilecache:
3027 del self._filecache[k]
3027 del self._filecache[k]
3028 try:
3028 try:
3029 # XXX ideally, the key would be a unicode string to match the
3029 # XXX ideally, the key would be a unicode string to match the
3030 # fact it refers to an attribut name. However changing this was
3030 # fact it refers to an attribut name. However changing this was
3031 # a bit a scope creep compared to the series cleaning up
3031 # a bit a scope creep compared to the series cleaning up
3032 # del/set/getattr so we kept thing simple here.
3032 # del/set/getattr so we kept thing simple here.
3033 delattr(unfiltered, pycompat.sysstr(k))
3033 delattr(unfiltered, pycompat.sysstr(k))
3034 except AttributeError:
3034 except AttributeError:
3035 pass
3035 pass
3036 self.invalidatecaches()
3036 self.invalidatecaches()
3037 if not self.currenttransaction():
3037 if not self.currenttransaction():
3038 # TODO: Changing contents of store outside transaction
3038 # TODO: Changing contents of store outside transaction
3039 # causes inconsistency. We should make in-memory store
3039 # causes inconsistency. We should make in-memory store
3040 # changes detectable, and abort if changed.
3040 # changes detectable, and abort if changed.
3041 self.store.invalidatecaches()
3041 self.store.invalidatecaches()
3042
3042
3043 def invalidateall(self):
3043 def invalidateall(self):
3044 """Fully invalidates both store and non-store parts, causing the
3044 """Fully invalidates both store and non-store parts, causing the
3045 subsequent operation to reread any outside changes."""
3045 subsequent operation to reread any outside changes."""
3046 # extension should hook this to invalidate its caches
3046 # extension should hook this to invalidate its caches
3047 self.invalidate()
3047 self.invalidate()
3048 self.invalidatedirstate()
3048 self.invalidatedirstate()
3049
3049
3050 @unfilteredmethod
3050 @unfilteredmethod
3051 def _refreshfilecachestats(self, tr):
3051 def _refreshfilecachestats(self, tr):
3052 """Reload stats of cached files so that they are flagged as valid"""
3052 """Reload stats of cached files so that they are flagged as valid"""
3053 for k, ce in self._filecache.items():
3053 for k, ce in self._filecache.items():
3054 k = pycompat.sysstr(k)
3054 k = pycompat.sysstr(k)
3055 if k == 'dirstate' or k not in self.__dict__:
3055 if k == 'dirstate' or k not in self.__dict__:
3056 continue
3056 continue
3057 ce.refresh()
3057 ce.refresh()
3058
3058
3059 def _lock(
3059 def _lock(
3060 self,
3060 self,
3061 vfs,
3061 vfs,
3062 lockname,
3062 lockname,
3063 wait,
3063 wait,
3064 releasefn,
3064 releasefn,
3065 acquirefn,
3065 acquirefn,
3066 desc,
3066 desc,
3067 ):
3067 ):
3068 timeout = 0
3068 timeout = 0
3069 warntimeout = 0
3069 warntimeout = 0
3070 if wait:
3070 if wait:
3071 timeout = self.ui.configint(b"ui", b"timeout")
3071 timeout = self.ui.configint(b"ui", b"timeout")
3072 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
3072 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
3073 # internal config: ui.signal-safe-lock
3073 # internal config: ui.signal-safe-lock
3074 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
3074 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
3075 sync_file = self.ui.config(b'devel', b'lock-wait-sync-file')
3076 if not sync_file:
3077 sync_file = None
3075
3078
3076 l = lockmod.trylock(
3079 l = lockmod.trylock(
3077 self.ui,
3080 self.ui,
3078 vfs,
3081 vfs,
3079 lockname,
3082 lockname,
3080 timeout,
3083 timeout,
3081 warntimeout,
3084 warntimeout,
3082 releasefn=releasefn,
3085 releasefn=releasefn,
3083 acquirefn=acquirefn,
3086 acquirefn=acquirefn,
3084 desc=desc,
3087 desc=desc,
3085 signalsafe=signalsafe,
3088 signalsafe=signalsafe,
3089 devel_wait_sync_file=sync_file,
3086 )
3090 )
3087 return l
3091 return l
3088
3092
3089 def _afterlock(self, callback):
3093 def _afterlock(self, callback):
3090 """add a callback to be run when the repository is fully unlocked
3094 """add a callback to be run when the repository is fully unlocked
3091
3095
3092 The callback will be executed when the outermost lock is released
3096 The callback will be executed when the outermost lock is released
3093 (with wlock being higher level than 'lock')."""
3097 (with wlock being higher level than 'lock')."""
3094 for ref in (self._wlockref, self._lockref):
3098 for ref in (self._wlockref, self._lockref):
3095 l = ref and ref()
3099 l = ref and ref()
3096 if l and l.held:
3100 if l and l.held:
3097 l.postrelease.append(callback)
3101 l.postrelease.append(callback)
3098 break
3102 break
3099 else: # no lock have been found.
3103 else: # no lock have been found.
3100 callback(True)
3104 callback(True)
3101
3105
3102 def lock(self, wait=True):
3106 def lock(self, wait=True):
3103 """Lock the repository store (.hg/store) and return a weak reference
3107 """Lock the repository store (.hg/store) and return a weak reference
3104 to the lock. Use this before modifying the store (e.g. committing or
3108 to the lock. Use this before modifying the store (e.g. committing or
3105 stripping). If you are opening a transaction, get a lock as well.)
3109 stripping). If you are opening a transaction, get a lock as well.)
3106
3110
3107 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
3111 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
3108 'wlock' first to avoid a dead-lock hazard."""
3112 'wlock' first to avoid a dead-lock hazard."""
3109 l = self._currentlock(self._lockref)
3113 l = self._currentlock(self._lockref)
3110 if l is not None:
3114 if l is not None:
3111 l.lock()
3115 l.lock()
3112 return l
3116 return l
3113
3117
3114 l = self._lock(
3118 l = self._lock(
3115 vfs=self.svfs,
3119 vfs=self.svfs,
3116 lockname=b"lock",
3120 lockname=b"lock",
3117 wait=wait,
3121 wait=wait,
3118 releasefn=None,
3122 releasefn=None,
3119 acquirefn=self.invalidate,
3123 acquirefn=self.invalidate,
3120 desc=_(b'repository %s') % self.origroot,
3124 desc=_(b'repository %s') % self.origroot,
3121 )
3125 )
3122 self._lockref = weakref.ref(l)
3126 self._lockref = weakref.ref(l)
3123 return l
3127 return l
3124
3128
3125 def wlock(self, wait=True):
3129 def wlock(self, wait=True):
3126 """Lock the non-store parts of the repository (everything under
3130 """Lock the non-store parts of the repository (everything under
3127 .hg except .hg/store) and return a weak reference to the lock.
3131 .hg except .hg/store) and return a weak reference to the lock.
3128
3132
3129 Use this before modifying files in .hg.
3133 Use this before modifying files in .hg.
3130
3134
3131 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
3135 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
3132 'wlock' first to avoid a dead-lock hazard."""
3136 'wlock' first to avoid a dead-lock hazard."""
3133 l = self._wlockref() if self._wlockref else None
3137 l = self._wlockref() if self._wlockref else None
3134 if l is not None and l.held:
3138 if l is not None and l.held:
3135 l.lock()
3139 l.lock()
3136 return l
3140 return l
3137
3141
3138 # We do not need to check for non-waiting lock acquisition. Such
3142 # We do not need to check for non-waiting lock acquisition. Such
3139 # acquisition would not cause dead-lock as they would just fail.
3143 # acquisition would not cause dead-lock as they would just fail.
3140 if wait and (
3144 if wait and (
3141 self.ui.configbool(b'devel', b'all-warnings')
3145 self.ui.configbool(b'devel', b'all-warnings')
3142 or self.ui.configbool(b'devel', b'check-locks')
3146 or self.ui.configbool(b'devel', b'check-locks')
3143 ):
3147 ):
3144 if self._currentlock(self._lockref) is not None:
3148 if self._currentlock(self._lockref) is not None:
3145 self.ui.develwarn(b'"wlock" acquired after "lock"')
3149 self.ui.develwarn(b'"wlock" acquired after "lock"')
3146
3150
3147 def unlock():
3151 def unlock():
3148 if self.dirstate.is_changing_any:
3152 if self.dirstate.is_changing_any:
3149 msg = b"wlock release in the middle of a changing parents"
3153 msg = b"wlock release in the middle of a changing parents"
3150 self.ui.develwarn(msg)
3154 self.ui.develwarn(msg)
3151 self.dirstate.invalidate()
3155 self.dirstate.invalidate()
3152 else:
3156 else:
3153 if self.dirstate._dirty:
3157 if self.dirstate._dirty:
3154 msg = b"dirty dirstate on wlock release"
3158 msg = b"dirty dirstate on wlock release"
3155 self.ui.develwarn(msg)
3159 self.ui.develwarn(msg)
3156 self.dirstate.write(None)
3160 self.dirstate.write(None)
3157
3161
3158 unfi = self.unfiltered()
3162 unfi = self.unfiltered()
3159 if 'dirstate' in unfi.__dict__:
3163 if 'dirstate' in unfi.__dict__:
3160 del unfi.__dict__['dirstate']
3164 del unfi.__dict__['dirstate']
3161
3165
3162 l = self._lock(
3166 l = self._lock(
3163 self.vfs,
3167 self.vfs,
3164 b"wlock",
3168 b"wlock",
3165 wait,
3169 wait,
3166 unlock,
3170 unlock,
3167 self.invalidatedirstate,
3171 self.invalidatedirstate,
3168 _(b'working directory of %s') % self.origroot,
3172 _(b'working directory of %s') % self.origroot,
3169 )
3173 )
3170 self._wlockref = weakref.ref(l)
3174 self._wlockref = weakref.ref(l)
3171 return l
3175 return l
3172
3176
3173 def _currentlock(self, lockref):
3177 def _currentlock(self, lockref):
3174 """Returns the lock if it's held, or None if it's not."""
3178 """Returns the lock if it's held, or None if it's not."""
3175 if lockref is None:
3179 if lockref is None:
3176 return None
3180 return None
3177 l = lockref()
3181 l = lockref()
3178 if l is None or not l.held:
3182 if l is None or not l.held:
3179 return None
3183 return None
3180 return l
3184 return l
3181
3185
3182 def currentwlock(self):
3186 def currentwlock(self):
3183 """Returns the wlock if it's held, or None if it's not."""
3187 """Returns the wlock if it's held, or None if it's not."""
3184 return self._currentlock(self._wlockref)
3188 return self._currentlock(self._wlockref)
3185
3189
3186 def currentlock(self):
3190 def currentlock(self):
3187 """Returns the lock if it's held, or None if it's not."""
3191 """Returns the lock if it's held, or None if it's not."""
3188 return self._currentlock(self._lockref)
3192 return self._currentlock(self._lockref)
3189
3193
3190 def checkcommitpatterns(self, wctx, match, status, fail):
3194 def checkcommitpatterns(self, wctx, match, status, fail):
3191 """check for commit arguments that aren't committable"""
3195 """check for commit arguments that aren't committable"""
3192 if match.isexact() or match.prefix():
3196 if match.isexact() or match.prefix():
3193 matched = set(status.modified + status.added + status.removed)
3197 matched = set(status.modified + status.added + status.removed)
3194
3198
3195 for f in match.files():
3199 for f in match.files():
3196 f = self.dirstate.normalize(f)
3200 f = self.dirstate.normalize(f)
3197 if f == b'.' or f in matched or f in wctx.substate:
3201 if f == b'.' or f in matched or f in wctx.substate:
3198 continue
3202 continue
3199 if f in status.deleted:
3203 if f in status.deleted:
3200 fail(f, _(b'file not found!'))
3204 fail(f, _(b'file not found!'))
3201 # Is it a directory that exists or used to exist?
3205 # Is it a directory that exists or used to exist?
3202 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3206 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3203 d = f + b'/'
3207 d = f + b'/'
3204 for mf in matched:
3208 for mf in matched:
3205 if mf.startswith(d):
3209 if mf.startswith(d):
3206 break
3210 break
3207 else:
3211 else:
3208 fail(f, _(b"no match under directory!"))
3212 fail(f, _(b"no match under directory!"))
3209 elif f not in self.dirstate:
3213 elif f not in self.dirstate:
3210 fail(f, _(b"file not tracked!"))
3214 fail(f, _(b"file not tracked!"))
3211
3215
3212 @unfilteredmethod
3216 @unfilteredmethod
3213 def commit(
3217 def commit(
3214 self,
3218 self,
3215 text=b"",
3219 text=b"",
3216 user=None,
3220 user=None,
3217 date=None,
3221 date=None,
3218 match=None,
3222 match=None,
3219 force=False,
3223 force=False,
3220 editor=None,
3224 editor=None,
3221 extra=None,
3225 extra=None,
3222 ):
3226 ):
3223 """Add a new revision to current repository.
3227 """Add a new revision to current repository.
3224
3228
3225 Revision information is gathered from the working directory,
3229 Revision information is gathered from the working directory,
3226 match can be used to filter the committed files. If editor is
3230 match can be used to filter the committed files. If editor is
3227 supplied, it is called to get a commit message.
3231 supplied, it is called to get a commit message.
3228 """
3232 """
3229 if extra is None:
3233 if extra is None:
3230 extra = {}
3234 extra = {}
3231
3235
3232 def fail(f, msg):
3236 def fail(f, msg):
3233 raise error.InputError(b'%s: %s' % (f, msg))
3237 raise error.InputError(b'%s: %s' % (f, msg))
3234
3238
3235 if not match:
3239 if not match:
3236 match = matchmod.always()
3240 match = matchmod.always()
3237
3241
3238 if not force:
3242 if not force:
3239 match.bad = fail
3243 match.bad = fail
3240
3244
3241 # lock() for recent changelog (see issue4368)
3245 # lock() for recent changelog (see issue4368)
3242 with self.wlock(), self.lock():
3246 with self.wlock(), self.lock():
3243 wctx = self[None]
3247 wctx = self[None]
3244 merge = len(wctx.parents()) > 1
3248 merge = len(wctx.parents()) > 1
3245
3249
3246 if not force and merge and not match.always():
3250 if not force and merge and not match.always():
3247 raise error.Abort(
3251 raise error.Abort(
3248 _(
3252 _(
3249 b'cannot partially commit a merge '
3253 b'cannot partially commit a merge '
3250 b'(do not specify files or patterns)'
3254 b'(do not specify files or patterns)'
3251 )
3255 )
3252 )
3256 )
3253
3257
3254 status = self.status(match=match, clean=force)
3258 status = self.status(match=match, clean=force)
3255 if force:
3259 if force:
3256 status.modified.extend(
3260 status.modified.extend(
3257 status.clean
3261 status.clean
3258 ) # mq may commit clean files
3262 ) # mq may commit clean files
3259
3263
3260 # check subrepos
3264 # check subrepos
3261 subs, commitsubs, newstate = subrepoutil.precommit(
3265 subs, commitsubs, newstate = subrepoutil.precommit(
3262 self.ui, wctx, status, match, force=force
3266 self.ui, wctx, status, match, force=force
3263 )
3267 )
3264
3268
3265 # make sure all explicit patterns are matched
3269 # make sure all explicit patterns are matched
3266 if not force:
3270 if not force:
3267 self.checkcommitpatterns(wctx, match, status, fail)
3271 self.checkcommitpatterns(wctx, match, status, fail)
3268
3272
3269 cctx = context.workingcommitctx(
3273 cctx = context.workingcommitctx(
3270 self, status, text, user, date, extra
3274 self, status, text, user, date, extra
3271 )
3275 )
3272
3276
3273 ms = mergestatemod.mergestate.read(self)
3277 ms = mergestatemod.mergestate.read(self)
3274 mergeutil.checkunresolved(ms)
3278 mergeutil.checkunresolved(ms)
3275
3279
3276 # internal config: ui.allowemptycommit
3280 # internal config: ui.allowemptycommit
3277 if cctx.isempty() and not self.ui.configbool(
3281 if cctx.isempty() and not self.ui.configbool(
3278 b'ui', b'allowemptycommit'
3282 b'ui', b'allowemptycommit'
3279 ):
3283 ):
3280 self.ui.debug(b'nothing to commit, clearing merge state\n')
3284 self.ui.debug(b'nothing to commit, clearing merge state\n')
3281 ms.reset()
3285 ms.reset()
3282 return None
3286 return None
3283
3287
3284 if merge and cctx.deleted():
3288 if merge and cctx.deleted():
3285 raise error.Abort(_(b"cannot commit merge with missing files"))
3289 raise error.Abort(_(b"cannot commit merge with missing files"))
3286
3290
3287 if editor:
3291 if editor:
3288 cctx._text = editor(self, cctx, subs)
3292 cctx._text = editor(self, cctx, subs)
3289 edited = text != cctx._text
3293 edited = text != cctx._text
3290
3294
3291 # Save commit message in case this transaction gets rolled back
3295 # Save commit message in case this transaction gets rolled back
3292 # (e.g. by a pretxncommit hook). Leave the content alone on
3296 # (e.g. by a pretxncommit hook). Leave the content alone on
3293 # the assumption that the user will use the same editor again.
3297 # the assumption that the user will use the same editor again.
3294 msg_path = self.savecommitmessage(cctx._text)
3298 msg_path = self.savecommitmessage(cctx._text)
3295
3299
3296 # commit subs and write new state
3300 # commit subs and write new state
3297 if subs:
3301 if subs:
3298 uipathfn = scmutil.getuipathfn(self)
3302 uipathfn = scmutil.getuipathfn(self)
3299 for s in sorted(commitsubs):
3303 for s in sorted(commitsubs):
3300 sub = wctx.sub(s)
3304 sub = wctx.sub(s)
3301 self.ui.status(
3305 self.ui.status(
3302 _(b'committing subrepository %s\n')
3306 _(b'committing subrepository %s\n')
3303 % uipathfn(subrepoutil.subrelpath(sub))
3307 % uipathfn(subrepoutil.subrelpath(sub))
3304 )
3308 )
3305 sr = sub.commit(cctx._text, user, date)
3309 sr = sub.commit(cctx._text, user, date)
3306 newstate[s] = (newstate[s][0], sr)
3310 newstate[s] = (newstate[s][0], sr)
3307 subrepoutil.writestate(self, newstate)
3311 subrepoutil.writestate(self, newstate)
3308
3312
3309 p1, p2 = self.dirstate.parents()
3313 p1, p2 = self.dirstate.parents()
3310 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3314 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3311 try:
3315 try:
3312 self.hook(
3316 self.hook(
3313 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3317 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3314 )
3318 )
3315 with self.transaction(b'commit'):
3319 with self.transaction(b'commit'):
3316 ret = self.commitctx(cctx, True)
3320 ret = self.commitctx(cctx, True)
3317 # update bookmarks, dirstate and mergestate
3321 # update bookmarks, dirstate and mergestate
3318 bookmarks.update(self, [p1, p2], ret)
3322 bookmarks.update(self, [p1, p2], ret)
3319 cctx.markcommitted(ret)
3323 cctx.markcommitted(ret)
3320 ms.reset()
3324 ms.reset()
3321 except: # re-raises
3325 except: # re-raises
3322 if edited:
3326 if edited:
3323 self.ui.write(
3327 self.ui.write(
3324 _(b'note: commit message saved in %s\n') % msg_path
3328 _(b'note: commit message saved in %s\n') % msg_path
3325 )
3329 )
3326 self.ui.write(
3330 self.ui.write(
3327 _(
3331 _(
3328 b"note: use 'hg commit --logfile "
3332 b"note: use 'hg commit --logfile "
3329 b"%s --edit' to reuse it\n"
3333 b"%s --edit' to reuse it\n"
3330 )
3334 )
3331 % msg_path
3335 % msg_path
3332 )
3336 )
3333 raise
3337 raise
3334
3338
3335 def commithook(unused_success):
3339 def commithook(unused_success):
3336 # hack for command that use a temporary commit (eg: histedit)
3340 # hack for command that use a temporary commit (eg: histedit)
3337 # temporary commit got stripped before hook release
3341 # temporary commit got stripped before hook release
3338 if self.changelog.hasnode(ret):
3342 if self.changelog.hasnode(ret):
3339 self.hook(
3343 self.hook(
3340 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3344 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3341 )
3345 )
3342
3346
3343 self._afterlock(commithook)
3347 self._afterlock(commithook)
3344 return ret
3348 return ret
3345
3349
3346 @unfilteredmethod
3350 @unfilteredmethod
3347 def commitctx(self, ctx, error=False, origctx=None):
3351 def commitctx(self, ctx, error=False, origctx=None):
3348 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3352 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3349
3353
3350 @unfilteredmethod
3354 @unfilteredmethod
3351 def destroying(self):
3355 def destroying(self):
3352 """Inform the repository that nodes are about to be destroyed.
3356 """Inform the repository that nodes are about to be destroyed.
3353 Intended for use by strip and rollback, so there's a common
3357 Intended for use by strip and rollback, so there's a common
3354 place for anything that has to be done before destroying history.
3358 place for anything that has to be done before destroying history.
3355
3359
3356 This is mostly useful for saving state that is in memory and waiting
3360 This is mostly useful for saving state that is in memory and waiting
3357 to be flushed when the current lock is released. Because a call to
3361 to be flushed when the current lock is released. Because a call to
3358 destroyed is imminent, the repo will be invalidated causing those
3362 destroyed is imminent, the repo will be invalidated causing those
3359 changes to stay in memory (waiting for the next unlock), or vanish
3363 changes to stay in memory (waiting for the next unlock), or vanish
3360 completely.
3364 completely.
3361 """
3365 """
3362 # When using the same lock to commit and strip, the phasecache is left
3366 # When using the same lock to commit and strip, the phasecache is left
3363 # dirty after committing. Then when we strip, the repo is invalidated,
3367 # dirty after committing. Then when we strip, the repo is invalidated,
3364 # causing those changes to disappear.
3368 # causing those changes to disappear.
3365 if '_phasecache' in vars(self):
3369 if '_phasecache' in vars(self):
3366 self._phasecache.write(self)
3370 self._phasecache.write(self)
3367
3371
3368 @unfilteredmethod
3372 @unfilteredmethod
3369 def destroyed(self):
3373 def destroyed(self):
3370 """Inform the repository that nodes have been destroyed.
3374 """Inform the repository that nodes have been destroyed.
3371 Intended for use by strip and rollback, so there's a common
3375 Intended for use by strip and rollback, so there's a common
3372 place for anything that has to be done after destroying history.
3376 place for anything that has to be done after destroying history.
3373 """
3377 """
3374 # refresh all repository caches
3378 # refresh all repository caches
3375 self.updatecaches()
3379 self.updatecaches()
3376
3380
3377 # Ensure the persistent tag cache is updated. Doing it now
3381 # Ensure the persistent tag cache is updated. Doing it now
3378 # means that the tag cache only has to worry about destroyed
3382 # means that the tag cache only has to worry about destroyed
3379 # heads immediately after a strip/rollback. That in turn
3383 # heads immediately after a strip/rollback. That in turn
3380 # guarantees that "cachetip == currenttip" (comparing both rev
3384 # guarantees that "cachetip == currenttip" (comparing both rev
3381 # and node) always means no nodes have been added or destroyed.
3385 # and node) always means no nodes have been added or destroyed.
3382
3386
3383 # XXX this is suboptimal when qrefresh'ing: we strip the current
3387 # XXX this is suboptimal when qrefresh'ing: we strip the current
3384 # head, refresh the tag cache, then immediately add a new head.
3388 # head, refresh the tag cache, then immediately add a new head.
3385 # But I think doing it this way is necessary for the "instant
3389 # But I think doing it this way is necessary for the "instant
3386 # tag cache retrieval" case to work.
3390 # tag cache retrieval" case to work.
3387 self.invalidate()
3391 self.invalidate()
3388
3392
3389 def status(
3393 def status(
3390 self,
3394 self,
3391 node1=b'.',
3395 node1=b'.',
3392 node2=None,
3396 node2=None,
3393 match=None,
3397 match=None,
3394 ignored=False,
3398 ignored=False,
3395 clean=False,
3399 clean=False,
3396 unknown=False,
3400 unknown=False,
3397 listsubrepos=False,
3401 listsubrepos=False,
3398 ):
3402 ):
3399 '''a convenience method that calls node1.status(node2)'''
3403 '''a convenience method that calls node1.status(node2)'''
3400 return self[node1].status(
3404 return self[node1].status(
3401 node2, match, ignored, clean, unknown, listsubrepos
3405 node2, match, ignored, clean, unknown, listsubrepos
3402 )
3406 )
3403
3407
3404 def addpostdsstatus(self, ps):
3408 def addpostdsstatus(self, ps):
3405 """Add a callback to run within the wlock, at the point at which status
3409 """Add a callback to run within the wlock, at the point at which status
3406 fixups happen.
3410 fixups happen.
3407
3411
3408 On status completion, callback(wctx, status) will be called with the
3412 On status completion, callback(wctx, status) will be called with the
3409 wlock held, unless the dirstate has changed from underneath or the wlock
3413 wlock held, unless the dirstate has changed from underneath or the wlock
3410 couldn't be grabbed.
3414 couldn't be grabbed.
3411
3415
3412 Callbacks should not capture and use a cached copy of the dirstate --
3416 Callbacks should not capture and use a cached copy of the dirstate --
3413 it might change in the meanwhile. Instead, they should access the
3417 it might change in the meanwhile. Instead, they should access the
3414 dirstate via wctx.repo().dirstate.
3418 dirstate via wctx.repo().dirstate.
3415
3419
3416 This list is emptied out after each status run -- extensions should
3420 This list is emptied out after each status run -- extensions should
3417 make sure it adds to this list each time dirstate.status is called.
3421 make sure it adds to this list each time dirstate.status is called.
3418 Extensions should also make sure they don't call this for statuses
3422 Extensions should also make sure they don't call this for statuses
3419 that don't involve the dirstate.
3423 that don't involve the dirstate.
3420 """
3424 """
3421
3425
3422 # The list is located here for uniqueness reasons -- it is actually
3426 # The list is located here for uniqueness reasons -- it is actually
3423 # managed by the workingctx, but that isn't unique per-repo.
3427 # managed by the workingctx, but that isn't unique per-repo.
3424 self._postdsstatus.append(ps)
3428 self._postdsstatus.append(ps)
3425
3429
3426 def postdsstatus(self):
3430 def postdsstatus(self):
3427 """Used by workingctx to get the list of post-dirstate-status hooks."""
3431 """Used by workingctx to get the list of post-dirstate-status hooks."""
3428 return self._postdsstatus
3432 return self._postdsstatus
3429
3433
3430 def clearpostdsstatus(self):
3434 def clearpostdsstatus(self):
3431 """Used by workingctx to clear post-dirstate-status hooks."""
3435 """Used by workingctx to clear post-dirstate-status hooks."""
3432 del self._postdsstatus[:]
3436 del self._postdsstatus[:]
3433
3437
3434 def heads(self, start=None):
3438 def heads(self, start=None):
3435 if start is None:
3439 if start is None:
3436 cl = self.changelog
3440 cl = self.changelog
3437 headrevs = reversed(cl.headrevs())
3441 headrevs = reversed(cl.headrevs())
3438 return [cl.node(rev) for rev in headrevs]
3442 return [cl.node(rev) for rev in headrevs]
3439
3443
3440 heads = self.changelog.heads(start)
3444 heads = self.changelog.heads(start)
3441 # sort the output in rev descending order
3445 # sort the output in rev descending order
3442 return sorted(heads, key=self.changelog.rev, reverse=True)
3446 return sorted(heads, key=self.changelog.rev, reverse=True)
3443
3447
3444 def branchheads(self, branch=None, start=None, closed=False):
3448 def branchheads(self, branch=None, start=None, closed=False):
3445 """return a (possibly filtered) list of heads for the given branch
3449 """return a (possibly filtered) list of heads for the given branch
3446
3450
3447 Heads are returned in topological order, from newest to oldest.
3451 Heads are returned in topological order, from newest to oldest.
3448 If branch is None, use the dirstate branch.
3452 If branch is None, use the dirstate branch.
3449 If start is not None, return only heads reachable from start.
3453 If start is not None, return only heads reachable from start.
3450 If closed is True, return heads that are marked as closed as well.
3454 If closed is True, return heads that are marked as closed as well.
3451 """
3455 """
3452 if branch is None:
3456 if branch is None:
3453 branch = self[None].branch()
3457 branch = self[None].branch()
3454 branches = self.branchmap()
3458 branches = self.branchmap()
3455 if not branches.hasbranch(branch):
3459 if not branches.hasbranch(branch):
3456 return []
3460 return []
3457 # the cache returns heads ordered lowest to highest
3461 # the cache returns heads ordered lowest to highest
3458 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3462 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3459 if start is not None:
3463 if start is not None:
3460 # filter out the heads that cannot be reached from startrev
3464 # filter out the heads that cannot be reached from startrev
3461 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3465 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3462 bheads = [h for h in bheads if h in fbheads]
3466 bheads = [h for h in bheads if h in fbheads]
3463 return bheads
3467 return bheads
3464
3468
3465 def branches(self, nodes):
3469 def branches(self, nodes):
3466 if not nodes:
3470 if not nodes:
3467 nodes = [self.changelog.tip()]
3471 nodes = [self.changelog.tip()]
3468 b = []
3472 b = []
3469 for n in nodes:
3473 for n in nodes:
3470 t = n
3474 t = n
3471 while True:
3475 while True:
3472 p = self.changelog.parents(n)
3476 p = self.changelog.parents(n)
3473 if p[1] != self.nullid or p[0] == self.nullid:
3477 if p[1] != self.nullid or p[0] == self.nullid:
3474 b.append((t, n, p[0], p[1]))
3478 b.append((t, n, p[0], p[1]))
3475 break
3479 break
3476 n = p[0]
3480 n = p[0]
3477 return b
3481 return b
3478
3482
3479 def between(self, pairs):
3483 def between(self, pairs):
3480 r = []
3484 r = []
3481
3485
3482 for top, bottom in pairs:
3486 for top, bottom in pairs:
3483 n, l, i = top, [], 0
3487 n, l, i = top, [], 0
3484 f = 1
3488 f = 1
3485
3489
3486 while n != bottom and n != self.nullid:
3490 while n != bottom and n != self.nullid:
3487 p = self.changelog.parents(n)[0]
3491 p = self.changelog.parents(n)[0]
3488 if i == f:
3492 if i == f:
3489 l.append(n)
3493 l.append(n)
3490 f = f * 2
3494 f = f * 2
3491 n = p
3495 n = p
3492 i += 1
3496 i += 1
3493
3497
3494 r.append(l)
3498 r.append(l)
3495
3499
3496 return r
3500 return r
3497
3501
3498 def checkpush(self, pushop):
3502 def checkpush(self, pushop):
3499 """Extensions can override this function if additional checks have
3503 """Extensions can override this function if additional checks have
3500 to be performed before pushing, or call it if they override push
3504 to be performed before pushing, or call it if they override push
3501 command.
3505 command.
3502 """
3506 """
3503
3507
3504 @unfilteredpropertycache
3508 @unfilteredpropertycache
3505 def prepushoutgoinghooks(self):
3509 def prepushoutgoinghooks(self):
3506 """Return util.hooks consists of a pushop with repo, remote, outgoing
3510 """Return util.hooks consists of a pushop with repo, remote, outgoing
3507 methods, which are called before pushing changesets.
3511 methods, which are called before pushing changesets.
3508 """
3512 """
3509 return util.hooks()
3513 return util.hooks()
3510
3514
3511 def pushkey(self, namespace, key, old, new):
3515 def pushkey(self, namespace, key, old, new):
3512 try:
3516 try:
3513 tr = self.currenttransaction()
3517 tr = self.currenttransaction()
3514 hookargs = {}
3518 hookargs = {}
3515 if tr is not None:
3519 if tr is not None:
3516 hookargs.update(tr.hookargs)
3520 hookargs.update(tr.hookargs)
3517 hookargs = pycompat.strkwargs(hookargs)
3521 hookargs = pycompat.strkwargs(hookargs)
3518 hookargs['namespace'] = namespace
3522 hookargs['namespace'] = namespace
3519 hookargs['key'] = key
3523 hookargs['key'] = key
3520 hookargs['old'] = old
3524 hookargs['old'] = old
3521 hookargs['new'] = new
3525 hookargs['new'] = new
3522 self.hook(b'prepushkey', throw=True, **hookargs)
3526 self.hook(b'prepushkey', throw=True, **hookargs)
3523 except error.HookAbort as exc:
3527 except error.HookAbort as exc:
3524 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3528 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3525 if exc.hint:
3529 if exc.hint:
3526 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3530 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3527 return False
3531 return False
3528 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3532 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3529 ret = pushkey.push(self, namespace, key, old, new)
3533 ret = pushkey.push(self, namespace, key, old, new)
3530
3534
3531 def runhook(unused_success):
3535 def runhook(unused_success):
3532 self.hook(
3536 self.hook(
3533 b'pushkey',
3537 b'pushkey',
3534 namespace=namespace,
3538 namespace=namespace,
3535 key=key,
3539 key=key,
3536 old=old,
3540 old=old,
3537 new=new,
3541 new=new,
3538 ret=ret,
3542 ret=ret,
3539 )
3543 )
3540
3544
3541 self._afterlock(runhook)
3545 self._afterlock(runhook)
3542 return ret
3546 return ret
3543
3547
3544 def listkeys(self, namespace):
3548 def listkeys(self, namespace):
3545 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3549 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3546 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3550 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3547 values = pushkey.list(self, namespace)
3551 values = pushkey.list(self, namespace)
3548 self.hook(b'listkeys', namespace=namespace, values=values)
3552 self.hook(b'listkeys', namespace=namespace, values=values)
3549 return values
3553 return values
3550
3554
3551 def debugwireargs(self, one, two, three=None, four=None, five=None):
3555 def debugwireargs(self, one, two, three=None, four=None, five=None):
3552 '''used to test argument passing over the wire'''
3556 '''used to test argument passing over the wire'''
3553 return b"%s %s %s %s %s" % (
3557 return b"%s %s %s %s %s" % (
3554 one,
3558 one,
3555 two,
3559 two,
3556 pycompat.bytestr(three),
3560 pycompat.bytestr(three),
3557 pycompat.bytestr(four),
3561 pycompat.bytestr(four),
3558 pycompat.bytestr(five),
3562 pycompat.bytestr(five),
3559 )
3563 )
3560
3564
3561 def savecommitmessage(self, text):
3565 def savecommitmessage(self, text):
3562 fp = self.vfs(b'last-message.txt', b'wb')
3566 fp = self.vfs(b'last-message.txt', b'wb')
3563 try:
3567 try:
3564 fp.write(text)
3568 fp.write(text)
3565 finally:
3569 finally:
3566 fp.close()
3570 fp.close()
3567 return self.pathto(fp.name[len(self.root) + 1 :])
3571 return self.pathto(fp.name[len(self.root) + 1 :])
3568
3572
3569 def register_wanted_sidedata(self, category):
3573 def register_wanted_sidedata(self, category):
3570 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3574 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3571 # Only revlogv2 repos can want sidedata.
3575 # Only revlogv2 repos can want sidedata.
3572 return
3576 return
3573 self._wanted_sidedata.add(pycompat.bytestr(category))
3577 self._wanted_sidedata.add(pycompat.bytestr(category))
3574
3578
3575 def register_sidedata_computer(
3579 def register_sidedata_computer(
3576 self, kind, category, keys, computer, flags, replace=False
3580 self, kind, category, keys, computer, flags, replace=False
3577 ):
3581 ):
3578 if kind not in revlogconst.ALL_KINDS:
3582 if kind not in revlogconst.ALL_KINDS:
3579 msg = _(b"unexpected revlog kind '%s'.")
3583 msg = _(b"unexpected revlog kind '%s'.")
3580 raise error.ProgrammingError(msg % kind)
3584 raise error.ProgrammingError(msg % kind)
3581 category = pycompat.bytestr(category)
3585 category = pycompat.bytestr(category)
3582 already_registered = category in self._sidedata_computers.get(kind, [])
3586 already_registered = category in self._sidedata_computers.get(kind, [])
3583 if already_registered and not replace:
3587 if already_registered and not replace:
3584 msg = _(
3588 msg = _(
3585 b"cannot register a sidedata computer twice for category '%s'."
3589 b"cannot register a sidedata computer twice for category '%s'."
3586 )
3590 )
3587 raise error.ProgrammingError(msg % category)
3591 raise error.ProgrammingError(msg % category)
3588 if replace and not already_registered:
3592 if replace and not already_registered:
3589 msg = _(
3593 msg = _(
3590 b"cannot replace a sidedata computer that isn't registered "
3594 b"cannot replace a sidedata computer that isn't registered "
3591 b"for category '%s'."
3595 b"for category '%s'."
3592 )
3596 )
3593 raise error.ProgrammingError(msg % category)
3597 raise error.ProgrammingError(msg % category)
3594 self._sidedata_computers.setdefault(kind, {})
3598 self._sidedata_computers.setdefault(kind, {})
3595 self._sidedata_computers[kind][category] = (keys, computer, flags)
3599 self._sidedata_computers[kind][category] = (keys, computer, flags)
3596
3600
3597
3601
3598 def undoname(fn: bytes) -> bytes:
3602 def undoname(fn: bytes) -> bytes:
3599 base, name = os.path.split(fn)
3603 base, name = os.path.split(fn)
3600 assert name.startswith(b'journal')
3604 assert name.startswith(b'journal')
3601 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3605 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3602
3606
3603
3607
3604 def instance(ui, path: bytes, create, intents=None, createopts=None):
3608 def instance(ui, path: bytes, create, intents=None, createopts=None):
3605 # prevent cyclic import localrepo -> upgrade -> localrepo
3609 # prevent cyclic import localrepo -> upgrade -> localrepo
3606 from . import upgrade
3610 from . import upgrade
3607
3611
3608 localpath = urlutil.urllocalpath(path)
3612 localpath = urlutil.urllocalpath(path)
3609 if create:
3613 if create:
3610 createrepository(ui, localpath, createopts=createopts)
3614 createrepository(ui, localpath, createopts=createopts)
3611
3615
3612 def repo_maker():
3616 def repo_maker():
3613 return makelocalrepository(ui, localpath, intents=intents)
3617 return makelocalrepository(ui, localpath, intents=intents)
3614
3618
3615 repo = repo_maker()
3619 repo = repo_maker()
3616 repo = upgrade.may_auto_upgrade(repo, repo_maker)
3620 repo = upgrade.may_auto_upgrade(repo, repo_maker)
3617 return repo
3621 return repo
3618
3622
3619
3623
3620 def islocal(path: bytes) -> bool:
3624 def islocal(path: bytes) -> bool:
3621 return True
3625 return True
3622
3626
3623
3627
3624 def defaultcreateopts(ui, createopts=None):
3628 def defaultcreateopts(ui, createopts=None):
3625 """Populate the default creation options for a repository.
3629 """Populate the default creation options for a repository.
3626
3630
3627 A dictionary of explicitly requested creation options can be passed
3631 A dictionary of explicitly requested creation options can be passed
3628 in. Missing keys will be populated.
3632 in. Missing keys will be populated.
3629 """
3633 """
3630 createopts = dict(createopts or {})
3634 createopts = dict(createopts or {})
3631
3635
3632 if b'backend' not in createopts:
3636 if b'backend' not in createopts:
3633 # experimental config: storage.new-repo-backend
3637 # experimental config: storage.new-repo-backend
3634 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3638 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3635
3639
3636 return createopts
3640 return createopts
3637
3641
3638
3642
3639 def clone_requirements(ui, createopts, srcrepo):
3643 def clone_requirements(ui, createopts, srcrepo):
3640 """clone the requirements of a local repo for a local clone
3644 """clone the requirements of a local repo for a local clone
3641
3645
3642 The store requirements are unchanged while the working copy requirements
3646 The store requirements are unchanged while the working copy requirements
3643 depends on the configuration
3647 depends on the configuration
3644 """
3648 """
3645 target_requirements = set()
3649 target_requirements = set()
3646 if not srcrepo.requirements:
3650 if not srcrepo.requirements:
3647 # this is a legacy revlog "v0" repository, we cannot do anything fancy
3651 # this is a legacy revlog "v0" repository, we cannot do anything fancy
3648 # with it.
3652 # with it.
3649 return target_requirements
3653 return target_requirements
3650 createopts = defaultcreateopts(ui, createopts=createopts)
3654 createopts = defaultcreateopts(ui, createopts=createopts)
3651 for r in newreporequirements(ui, createopts):
3655 for r in newreporequirements(ui, createopts):
3652 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
3656 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
3653 target_requirements.add(r)
3657 target_requirements.add(r)
3654
3658
3655 for r in srcrepo.requirements:
3659 for r in srcrepo.requirements:
3656 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
3660 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
3657 target_requirements.add(r)
3661 target_requirements.add(r)
3658 return target_requirements
3662 return target_requirements
3659
3663
3660
3664
3661 def newreporequirements(ui, createopts):
3665 def newreporequirements(ui, createopts):
3662 """Determine the set of requirements for a new local repository.
3666 """Determine the set of requirements for a new local repository.
3663
3667
3664 Extensions can wrap this function to specify custom requirements for
3668 Extensions can wrap this function to specify custom requirements for
3665 new repositories.
3669 new repositories.
3666 """
3670 """
3667
3671
3668 if b'backend' not in createopts:
3672 if b'backend' not in createopts:
3669 raise error.ProgrammingError(
3673 raise error.ProgrammingError(
3670 b'backend key not present in createopts; '
3674 b'backend key not present in createopts; '
3671 b'was defaultcreateopts() called?'
3675 b'was defaultcreateopts() called?'
3672 )
3676 )
3673
3677
3674 if createopts[b'backend'] != b'revlogv1':
3678 if createopts[b'backend'] != b'revlogv1':
3675 raise error.Abort(
3679 raise error.Abort(
3676 _(
3680 _(
3677 b'unable to determine repository requirements for '
3681 b'unable to determine repository requirements for '
3678 b'storage backend: %s'
3682 b'storage backend: %s'
3679 )
3683 )
3680 % createopts[b'backend']
3684 % createopts[b'backend']
3681 )
3685 )
3682
3686
3683 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3687 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3684 if ui.configbool(b'format', b'usestore'):
3688 if ui.configbool(b'format', b'usestore'):
3685 requirements.add(requirementsmod.STORE_REQUIREMENT)
3689 requirements.add(requirementsmod.STORE_REQUIREMENT)
3686 if ui.configbool(b'format', b'usefncache'):
3690 if ui.configbool(b'format', b'usefncache'):
3687 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3691 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3688 if ui.configbool(b'format', b'dotencode'):
3692 if ui.configbool(b'format', b'dotencode'):
3689 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3693 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3690
3694
3691 compengines = ui.configlist(b'format', b'revlog-compression')
3695 compengines = ui.configlist(b'format', b'revlog-compression')
3692 for compengine in compengines:
3696 for compengine in compengines:
3693 if compengine in util.compengines:
3697 if compengine in util.compengines:
3694 engine = util.compengines[compengine]
3698 engine = util.compengines[compengine]
3695 if engine.available() and engine.revlogheader():
3699 if engine.available() and engine.revlogheader():
3696 break
3700 break
3697 else:
3701 else:
3698 raise error.Abort(
3702 raise error.Abort(
3699 _(
3703 _(
3700 b'compression engines %s defined by '
3704 b'compression engines %s defined by '
3701 b'format.revlog-compression not available'
3705 b'format.revlog-compression not available'
3702 )
3706 )
3703 % b', '.join(b'"%s"' % e for e in compengines),
3707 % b', '.join(b'"%s"' % e for e in compengines),
3704 hint=_(
3708 hint=_(
3705 b'run "hg debuginstall" to list available '
3709 b'run "hg debuginstall" to list available '
3706 b'compression engines'
3710 b'compression engines'
3707 ),
3711 ),
3708 )
3712 )
3709
3713
3710 # zlib is the historical default and doesn't need an explicit requirement.
3714 # zlib is the historical default and doesn't need an explicit requirement.
3711 if compengine == b'zstd':
3715 if compengine == b'zstd':
3712 requirements.add(b'revlog-compression-zstd')
3716 requirements.add(b'revlog-compression-zstd')
3713 elif compengine != b'zlib':
3717 elif compengine != b'zlib':
3714 requirements.add(b'exp-compression-%s' % compengine)
3718 requirements.add(b'exp-compression-%s' % compengine)
3715
3719
3716 if scmutil.gdinitconfig(ui):
3720 if scmutil.gdinitconfig(ui):
3717 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3721 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3718 if ui.configbool(b'format', b'sparse-revlog'):
3722 if ui.configbool(b'format', b'sparse-revlog'):
3719 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3723 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3720
3724
3721 # experimental config: format.use-dirstate-v2
3725 # experimental config: format.use-dirstate-v2
3722 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3726 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3723 if ui.configbool(b'format', b'use-dirstate-v2'):
3727 if ui.configbool(b'format', b'use-dirstate-v2'):
3724 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3728 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3725
3729
3726 # experimental config: format.exp-use-copies-side-data-changeset
3730 # experimental config: format.exp-use-copies-side-data-changeset
3727 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3731 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3728 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3732 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3729 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3733 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3730 if ui.configbool(b'experimental', b'treemanifest'):
3734 if ui.configbool(b'experimental', b'treemanifest'):
3731 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3735 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3732
3736
3733 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3737 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3734 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3738 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3735 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3739 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3736
3740
3737 revlogv2 = ui.config(b'experimental', b'revlogv2')
3741 revlogv2 = ui.config(b'experimental', b'revlogv2')
3738 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3742 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3739 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3743 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3740 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3744 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3741 # experimental config: format.internal-phase
3745 # experimental config: format.internal-phase
3742 if ui.configbool(b'format', b'use-internal-phase'):
3746 if ui.configbool(b'format', b'use-internal-phase'):
3743 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3747 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3744
3748
3745 # experimental config: format.exp-archived-phase
3749 # experimental config: format.exp-archived-phase
3746 if ui.configbool(b'format', b'exp-archived-phase'):
3750 if ui.configbool(b'format', b'exp-archived-phase'):
3747 requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)
3751 requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)
3748
3752
3749 if createopts.get(b'narrowfiles'):
3753 if createopts.get(b'narrowfiles'):
3750 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3754 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3751
3755
3752 if createopts.get(b'lfs'):
3756 if createopts.get(b'lfs'):
3753 requirements.add(b'lfs')
3757 requirements.add(b'lfs')
3754
3758
3755 if ui.configbool(b'format', b'bookmarks-in-store'):
3759 if ui.configbool(b'format', b'bookmarks-in-store'):
3756 requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3760 requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3757
3761
3758 # The feature is disabled unless a fast implementation is available.
3762 # The feature is disabled unless a fast implementation is available.
3759 persistent_nodemap_default = policy.importrust('revlog') is not None
3763 persistent_nodemap_default = policy.importrust('revlog') is not None
3760 if ui.configbool(
3764 if ui.configbool(
3761 b'format', b'use-persistent-nodemap', persistent_nodemap_default
3765 b'format', b'use-persistent-nodemap', persistent_nodemap_default
3762 ):
3766 ):
3763 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3767 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3764
3768
3765 # if share-safe is enabled, let's create the new repository with the new
3769 # if share-safe is enabled, let's create the new repository with the new
3766 # requirement
3770 # requirement
3767 if ui.configbool(b'format', b'use-share-safe'):
3771 if ui.configbool(b'format', b'use-share-safe'):
3768 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3772 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3769
3773
3770 # if we are creating a share-repoΒΉ we have to handle requirement
3774 # if we are creating a share-repoΒΉ we have to handle requirement
3771 # differently.
3775 # differently.
3772 #
3776 #
3773 # [1] (i.e. reusing the store from another repository, just having a
3777 # [1] (i.e. reusing the store from another repository, just having a
3774 # working copy)
3778 # working copy)
3775 if b'sharedrepo' in createopts:
3779 if b'sharedrepo' in createopts:
3776 source_requirements = set(createopts[b'sharedrepo'].requirements)
3780 source_requirements = set(createopts[b'sharedrepo'].requirements)
3777
3781
3778 if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
3782 if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
3779 # share to an old school repository, we have to copy the
3783 # share to an old school repository, we have to copy the
3780 # requirements and hope for the best.
3784 # requirements and hope for the best.
3781 requirements = source_requirements
3785 requirements = source_requirements
3782 else:
3786 else:
3783 # We have control on the working copy only, so "copy" the non
3787 # We have control on the working copy only, so "copy" the non
3784 # working copy part over, ignoring previous logic.
3788 # working copy part over, ignoring previous logic.
3785 to_drop = set()
3789 to_drop = set()
3786 for req in requirements:
3790 for req in requirements:
3787 if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
3791 if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
3788 continue
3792 continue
3789 if req in source_requirements:
3793 if req in source_requirements:
3790 continue
3794 continue
3791 to_drop.add(req)
3795 to_drop.add(req)
3792 requirements -= to_drop
3796 requirements -= to_drop
3793 requirements |= source_requirements
3797 requirements |= source_requirements
3794
3798
3795 if createopts.get(b'sharedrelative'):
3799 if createopts.get(b'sharedrelative'):
3796 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3800 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3797 else:
3801 else:
3798 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3802 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3799
3803
3800 if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
3804 if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
3801 version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
3805 version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
3802 msg = _(b"ignoring unknown tracked key version: %d\n")
3806 msg = _(b"ignoring unknown tracked key version: %d\n")
3803 hint = _(
3807 hint = _(
3804 b"see `hg help config.format.use-dirstate-tracked-hint-version"
3808 b"see `hg help config.format.use-dirstate-tracked-hint-version"
3805 )
3809 )
3806 if version != 1:
3810 if version != 1:
3807 ui.warn(msg % version, hint=hint)
3811 ui.warn(msg % version, hint=hint)
3808 else:
3812 else:
3809 requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)
3813 requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)
3810
3814
3811 return requirements
3815 return requirements
3812
3816
3813
3817
3814 def checkrequirementscompat(ui, requirements):
3818 def checkrequirementscompat(ui, requirements):
3815 """Checks compatibility of repository requirements enabled and disabled.
3819 """Checks compatibility of repository requirements enabled and disabled.
3816
3820
3817 Returns a set of requirements which needs to be dropped because dependend
3821 Returns a set of requirements which needs to be dropped because dependend
3818 requirements are not enabled. Also warns users about it"""
3822 requirements are not enabled. Also warns users about it"""
3819
3823
3820 dropped = set()
3824 dropped = set()
3821
3825
3822 if requirementsmod.STORE_REQUIREMENT not in requirements:
3826 if requirementsmod.STORE_REQUIREMENT not in requirements:
3823 if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3827 if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3824 ui.warn(
3828 ui.warn(
3825 _(
3829 _(
3826 b'ignoring enabled \'format.bookmarks-in-store\' config '
3830 b'ignoring enabled \'format.bookmarks-in-store\' config '
3827 b'beacuse it is incompatible with disabled '
3831 b'beacuse it is incompatible with disabled '
3828 b'\'format.usestore\' config\n'
3832 b'\'format.usestore\' config\n'
3829 )
3833 )
3830 )
3834 )
3831 dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3835 dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3832
3836
3833 if (
3837 if (
3834 requirementsmod.SHARED_REQUIREMENT in requirements
3838 requirementsmod.SHARED_REQUIREMENT in requirements
3835 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3839 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3836 ):
3840 ):
3837 raise error.Abort(
3841 raise error.Abort(
3838 _(
3842 _(
3839 b"cannot create shared repository as source was created"
3843 b"cannot create shared repository as source was created"
3840 b" with 'format.usestore' config disabled"
3844 b" with 'format.usestore' config disabled"
3841 )
3845 )
3842 )
3846 )
3843
3847
3844 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3848 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3845 if ui.hasconfig(b'format', b'use-share-safe'):
3849 if ui.hasconfig(b'format', b'use-share-safe'):
3846 msg = _(
3850 msg = _(
3847 b"ignoring enabled 'format.use-share-safe' config because "
3851 b"ignoring enabled 'format.use-share-safe' config because "
3848 b"it is incompatible with disabled 'format.usestore'"
3852 b"it is incompatible with disabled 'format.usestore'"
3849 b" config\n"
3853 b" config\n"
3850 )
3854 )
3851 ui.warn(msg)
3855 ui.warn(msg)
3852 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3856 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3853
3857
3854 return dropped
3858 return dropped
3855
3859
3856
3860
3857 def filterknowncreateopts(ui, createopts):
3861 def filterknowncreateopts(ui, createopts):
3858 """Filters a dict of repo creation options against options that are known.
3862 """Filters a dict of repo creation options against options that are known.
3859
3863
3860 Receives a dict of repo creation options and returns a dict of those
3864 Receives a dict of repo creation options and returns a dict of those
3861 options that we don't know how to handle.
3865 options that we don't know how to handle.
3862
3866
3863 This function is called as part of repository creation. If the
3867 This function is called as part of repository creation. If the
3864 returned dict contains any items, repository creation will not
3868 returned dict contains any items, repository creation will not
3865 be allowed, as it means there was a request to create a repository
3869 be allowed, as it means there was a request to create a repository
3866 with options not recognized by loaded code.
3870 with options not recognized by loaded code.
3867
3871
3868 Extensions can wrap this function to filter out creation options
3872 Extensions can wrap this function to filter out creation options
3869 they know how to handle.
3873 they know how to handle.
3870 """
3874 """
3871 known = {
3875 known = {
3872 b'backend',
3876 b'backend',
3873 b'lfs',
3877 b'lfs',
3874 b'narrowfiles',
3878 b'narrowfiles',
3875 b'sharedrepo',
3879 b'sharedrepo',
3876 b'sharedrelative',
3880 b'sharedrelative',
3877 b'shareditems',
3881 b'shareditems',
3878 b'shallowfilestore',
3882 b'shallowfilestore',
3879 }
3883 }
3880
3884
3881 return {k: v for k, v in createopts.items() if k not in known}
3885 return {k: v for k, v in createopts.items() if k not in known}
3882
3886
3883
3887
3884 def createrepository(ui, path: bytes, createopts=None, requirements=None):
3888 def createrepository(ui, path: bytes, createopts=None, requirements=None):
3885 """Create a new repository in a vfs.
3889 """Create a new repository in a vfs.
3886
3890
3887 ``path`` path to the new repo's working directory.
3891 ``path`` path to the new repo's working directory.
3888 ``createopts`` options for the new repository.
3892 ``createopts`` options for the new repository.
3889 ``requirement`` predefined set of requirements.
3893 ``requirement`` predefined set of requirements.
3890 (incompatible with ``createopts``)
3894 (incompatible with ``createopts``)
3891
3895
3892 The following keys for ``createopts`` are recognized:
3896 The following keys for ``createopts`` are recognized:
3893
3897
3894 backend
3898 backend
3895 The storage backend to use.
3899 The storage backend to use.
3896 lfs
3900 lfs
3897 Repository will be created with ``lfs`` requirement. The lfs extension
3901 Repository will be created with ``lfs`` requirement. The lfs extension
3898 will automatically be loaded when the repository is accessed.
3902 will automatically be loaded when the repository is accessed.
3899 narrowfiles
3903 narrowfiles
3900 Set up repository to support narrow file storage.
3904 Set up repository to support narrow file storage.
3901 sharedrepo
3905 sharedrepo
3902 Repository object from which storage should be shared.
3906 Repository object from which storage should be shared.
3903 sharedrelative
3907 sharedrelative
3904 Boolean indicating if the path to the shared repo should be
3908 Boolean indicating if the path to the shared repo should be
3905 stored as relative. By default, the pointer to the "parent" repo
3909 stored as relative. By default, the pointer to the "parent" repo
3906 is stored as an absolute path.
3910 is stored as an absolute path.
3907 shareditems
3911 shareditems
3908 Set of items to share to the new repository (in addition to storage).
3912 Set of items to share to the new repository (in addition to storage).
3909 shallowfilestore
3913 shallowfilestore
3910 Indicates that storage for files should be shallow (not all ancestor
3914 Indicates that storage for files should be shallow (not all ancestor
3911 revisions are known).
3915 revisions are known).
3912 """
3916 """
3913
3917
3914 if requirements is not None:
3918 if requirements is not None:
3915 if createopts is not None:
3919 if createopts is not None:
3916 msg = b'cannot specify both createopts and requirements'
3920 msg = b'cannot specify both createopts and requirements'
3917 raise error.ProgrammingError(msg)
3921 raise error.ProgrammingError(msg)
3918 createopts = {}
3922 createopts = {}
3919 else:
3923 else:
3920 createopts = defaultcreateopts(ui, createopts=createopts)
3924 createopts = defaultcreateopts(ui, createopts=createopts)
3921
3925
3922 unknownopts = filterknowncreateopts(ui, createopts)
3926 unknownopts = filterknowncreateopts(ui, createopts)
3923
3927
3924 if not isinstance(unknownopts, dict):
3928 if not isinstance(unknownopts, dict):
3925 raise error.ProgrammingError(
3929 raise error.ProgrammingError(
3926 b'filterknowncreateopts() did not return a dict'
3930 b'filterknowncreateopts() did not return a dict'
3927 )
3931 )
3928
3932
3929 if unknownopts:
3933 if unknownopts:
3930 raise error.Abort(
3934 raise error.Abort(
3931 _(
3935 _(
3932 b'unable to create repository because of unknown '
3936 b'unable to create repository because of unknown '
3933 b'creation option: %s'
3937 b'creation option: %s'
3934 )
3938 )
3935 % b', '.join(sorted(unknownopts)),
3939 % b', '.join(sorted(unknownopts)),
3936 hint=_(b'is a required extension not loaded?'),
3940 hint=_(b'is a required extension not loaded?'),
3937 )
3941 )
3938
3942
3939 requirements = newreporequirements(ui, createopts=createopts)
3943 requirements = newreporequirements(ui, createopts=createopts)
3940 requirements -= checkrequirementscompat(ui, requirements)
3944 requirements -= checkrequirementscompat(ui, requirements)
3941
3945
3942 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3946 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3943
3947
3944 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3948 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3945 if hgvfs.exists():
3949 if hgvfs.exists():
3946 raise error.RepoError(_(b'repository %s already exists') % path)
3950 raise error.RepoError(_(b'repository %s already exists') % path)
3947
3951
3948 if b'sharedrepo' in createopts:
3952 if b'sharedrepo' in createopts:
3949 sharedpath = createopts[b'sharedrepo'].sharedpath
3953 sharedpath = createopts[b'sharedrepo'].sharedpath
3950
3954
3951 if createopts.get(b'sharedrelative'):
3955 if createopts.get(b'sharedrelative'):
3952 try:
3956 try:
3953 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3957 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3954 sharedpath = util.pconvert(sharedpath)
3958 sharedpath = util.pconvert(sharedpath)
3955 except (IOError, ValueError) as e:
3959 except (IOError, ValueError) as e:
3956 # ValueError is raised on Windows if the drive letters differ
3960 # ValueError is raised on Windows if the drive letters differ
3957 # on each path.
3961 # on each path.
3958 raise error.Abort(
3962 raise error.Abort(
3959 _(b'cannot calculate relative path'),
3963 _(b'cannot calculate relative path'),
3960 hint=stringutil.forcebytestr(e),
3964 hint=stringutil.forcebytestr(e),
3961 )
3965 )
3962
3966
3963 if not wdirvfs.exists():
3967 if not wdirvfs.exists():
3964 wdirvfs.makedirs()
3968 wdirvfs.makedirs()
3965
3969
3966 hgvfs.makedir(notindexed=True)
3970 hgvfs.makedir(notindexed=True)
3967 if b'sharedrepo' not in createopts:
3971 if b'sharedrepo' not in createopts:
3968 hgvfs.mkdir(b'cache')
3972 hgvfs.mkdir(b'cache')
3969 hgvfs.mkdir(b'wcache')
3973 hgvfs.mkdir(b'wcache')
3970
3974
3971 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3975 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3972 if has_store and b'sharedrepo' not in createopts:
3976 if has_store and b'sharedrepo' not in createopts:
3973 hgvfs.mkdir(b'store')
3977 hgvfs.mkdir(b'store')
3974
3978
3975 # We create an invalid changelog outside the store so very old
3979 # We create an invalid changelog outside the store so very old
3976 # Mercurial versions (which didn't know about the requirements
3980 # Mercurial versions (which didn't know about the requirements
3977 # file) encounter an error on reading the changelog. This
3981 # file) encounter an error on reading the changelog. This
3978 # effectively locks out old clients and prevents them from
3982 # effectively locks out old clients and prevents them from
3979 # mucking with a repo in an unknown format.
3983 # mucking with a repo in an unknown format.
3980 #
3984 #
3981 # The revlog header has version 65535, which won't be recognized by
3985 # The revlog header has version 65535, which won't be recognized by
3982 # such old clients.
3986 # such old clients.
3983 hgvfs.append(
3987 hgvfs.append(
3984 b'00changelog.i',
3988 b'00changelog.i',
3985 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3989 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3986 b'layout',
3990 b'layout',
3987 )
3991 )
3988
3992
3989 # Filter the requirements into working copy and store ones
3993 # Filter the requirements into working copy and store ones
3990 wcreq, storereq = scmutil.filterrequirements(requirements)
3994 wcreq, storereq = scmutil.filterrequirements(requirements)
3991 # write working copy ones
3995 # write working copy ones
3992 scmutil.writerequires(hgvfs, wcreq)
3996 scmutil.writerequires(hgvfs, wcreq)
3993 # If there are store requirements and the current repository
3997 # If there are store requirements and the current repository
3994 # is not a shared one, write stored requirements
3998 # is not a shared one, write stored requirements
3995 # For new shared repository, we don't need to write the store
3999 # For new shared repository, we don't need to write the store
3996 # requirements as they are already present in store requires
4000 # requirements as they are already present in store requires
3997 if storereq and b'sharedrepo' not in createopts:
4001 if storereq and b'sharedrepo' not in createopts:
3998 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
4002 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3999 scmutil.writerequires(storevfs, storereq)
4003 scmutil.writerequires(storevfs, storereq)
4000
4004
4001 # Write out file telling readers where to find the shared store.
4005 # Write out file telling readers where to find the shared store.
4002 if b'sharedrepo' in createopts:
4006 if b'sharedrepo' in createopts:
4003 hgvfs.write(b'sharedpath', sharedpath)
4007 hgvfs.write(b'sharedpath', sharedpath)
4004
4008
4005 if createopts.get(b'shareditems'):
4009 if createopts.get(b'shareditems'):
4006 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
4010 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
4007 hgvfs.write(b'shared', shared)
4011 hgvfs.write(b'shared', shared)
4008
4012
4009
4013
4010 def poisonrepository(repo):
4014 def poisonrepository(repo):
4011 """Poison a repository instance so it can no longer be used."""
4015 """Poison a repository instance so it can no longer be used."""
4012 # Perform any cleanup on the instance.
4016 # Perform any cleanup on the instance.
4013 repo.close()
4017 repo.close()
4014
4018
4015 # Our strategy is to replace the type of the object with one that
4019 # Our strategy is to replace the type of the object with one that
4016 # has all attribute lookups result in error.
4020 # has all attribute lookups result in error.
4017 #
4021 #
4018 # But we have to allow the close() method because some constructors
4022 # But we have to allow the close() method because some constructors
4019 # of repos call close() on repo references.
4023 # of repos call close() on repo references.
4020 class poisonedrepository:
4024 class poisonedrepository:
4021 def __getattribute__(self, item):
4025 def __getattribute__(self, item):
4022 if item == 'close':
4026 if item == 'close':
4023 return object.__getattribute__(self, item)
4027 return object.__getattribute__(self, item)
4024
4028
4025 raise error.ProgrammingError(
4029 raise error.ProgrammingError(
4026 b'repo instances should not be used after unshare'
4030 b'repo instances should not be used after unshare'
4027 )
4031 )
4028
4032
4029 def close(self):
4033 def close(self):
4030 pass
4034 pass
4031
4035
4032 # We may have a repoview, which intercepts __setattr__. So be sure
4036 # We may have a repoview, which intercepts __setattr__. So be sure
4033 # we operate at the lowest level possible.
4037 # we operate at the lowest level possible.
4034 object.__setattr__(repo, '__class__', poisonedrepository)
4038 object.__setattr__(repo, '__class__', poisonedrepository)
@@ -1,402 +1,408 b''
1 # lock.py - simple advisory locking scheme for mercurial
1 # lock.py - simple advisory locking scheme for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import contextlib
9 import contextlib
10 import errno
10 import errno
11 import os
11 import os
12 import signal
12 import signal
13 import socket
13 import socket
14 import time
14 import time
15 import typing
15 import typing
16 import warnings
16 import warnings
17
17
18 from .i18n import _
18 from .i18n import _
19
19
20 from . import (
20 from . import (
21 encoding,
21 encoding,
22 error,
22 error,
23 pycompat,
23 pycompat,
24 util,
24 util,
25 )
25 )
26
26
27 from .utils import procutil
27 from .utils import procutil
28
28
29
29
30 def _getlockprefix():
30 def _getlockprefix():
31 """Return a string which is used to differentiate pid namespaces
31 """Return a string which is used to differentiate pid namespaces
32
32
33 It's useful to detect "dead" processes and remove stale locks with
33 It's useful to detect "dead" processes and remove stale locks with
34 confidence. Typically it's just hostname. On modern linux, we include an
34 confidence. Typically it's just hostname. On modern linux, we include an
35 extra Linux-specific pid namespace identifier.
35 extra Linux-specific pid namespace identifier.
36 """
36 """
37 result = encoding.strtolocal(socket.gethostname())
37 result = encoding.strtolocal(socket.gethostname())
38 if pycompat.sysplatform.startswith(b'linux'):
38 if pycompat.sysplatform.startswith(b'linux'):
39 try:
39 try:
40 result += b'/%x' % os.stat(b'/proc/self/ns/pid').st_ino
40 result += b'/%x' % os.stat(b'/proc/self/ns/pid').st_ino
41 except (FileNotFoundError, PermissionError, NotADirectoryError):
41 except (FileNotFoundError, PermissionError, NotADirectoryError):
42 pass
42 pass
43 return result
43 return result
44
44
45
45
46 @contextlib.contextmanager
46 @contextlib.contextmanager
47 def _delayedinterrupt():
47 def _delayedinterrupt():
48 """Block signal interrupt while doing something critical
48 """Block signal interrupt while doing something critical
49
49
50 This makes sure that the code block wrapped by this context manager won't
50 This makes sure that the code block wrapped by this context manager won't
51 be interrupted.
51 be interrupted.
52
52
53 For Windows developers: It appears not possible to guard time.sleep()
53 For Windows developers: It appears not possible to guard time.sleep()
54 from CTRL_C_EVENT, so please don't use time.sleep() to test if this is
54 from CTRL_C_EVENT, so please don't use time.sleep() to test if this is
55 working.
55 working.
56 """
56 """
57 assertedsigs = []
57 assertedsigs = []
58 blocked = False
58 blocked = False
59 orighandlers = {}
59 orighandlers = {}
60
60
61 def raiseinterrupt(num):
61 def raiseinterrupt(num):
62 if num == getattr(signal, 'SIGINT', None) or num == getattr(
62 if num == getattr(signal, 'SIGINT', None) or num == getattr(
63 signal, 'CTRL_C_EVENT', None
63 signal, 'CTRL_C_EVENT', None
64 ):
64 ):
65 raise KeyboardInterrupt
65 raise KeyboardInterrupt
66 else:
66 else:
67 raise error.SignalInterrupt
67 raise error.SignalInterrupt
68
68
69 def catchterm(num, frame):
69 def catchterm(num, frame):
70 if blocked:
70 if blocked:
71 assertedsigs.append(num)
71 assertedsigs.append(num)
72 else:
72 else:
73 raiseinterrupt(num)
73 raiseinterrupt(num)
74
74
75 try:
75 try:
76 # save handlers first so they can be restored even if a setup is
76 # save handlers first so they can be restored even if a setup is
77 # interrupted between signal.signal() and orighandlers[] =.
77 # interrupted between signal.signal() and orighandlers[] =.
78 for name in [
78 for name in [
79 'CTRL_C_EVENT',
79 'CTRL_C_EVENT',
80 'SIGINT',
80 'SIGINT',
81 'SIGBREAK',
81 'SIGBREAK',
82 'SIGHUP',
82 'SIGHUP',
83 'SIGTERM',
83 'SIGTERM',
84 ]:
84 ]:
85 num = getattr(signal, name, None)
85 num = getattr(signal, name, None)
86 if num and num not in orighandlers:
86 if num and num not in orighandlers:
87 orighandlers[num] = signal.getsignal(num)
87 orighandlers[num] = signal.getsignal(num)
88 try:
88 try:
89 for num in orighandlers:
89 for num in orighandlers:
90 signal.signal(num, catchterm)
90 signal.signal(num, catchterm)
91 except ValueError:
91 except ValueError:
92 pass # in a thread? no luck
92 pass # in a thread? no luck
93
93
94 blocked = True
94 blocked = True
95 yield
95 yield
96 finally:
96 finally:
97 # no simple way to reliably restore all signal handlers because
97 # no simple way to reliably restore all signal handlers because
98 # any loops, recursive function calls, except blocks, etc. can be
98 # any loops, recursive function calls, except blocks, etc. can be
99 # interrupted. so instead, make catchterm() raise interrupt.
99 # interrupted. so instead, make catchterm() raise interrupt.
100 blocked = False
100 blocked = False
101 try:
101 try:
102 for num, handler in orighandlers.items():
102 for num, handler in orighandlers.items():
103 signal.signal(num, handler)
103 signal.signal(num, handler)
104 except ValueError:
104 except ValueError:
105 pass # in a thread?
105 pass # in a thread?
106
106
107 # re-raise interrupt exception if any, which may be shadowed by a new
107 # re-raise interrupt exception if any, which may be shadowed by a new
108 # interrupt occurred while re-raising the first one
108 # interrupt occurred while re-raising the first one
109 if assertedsigs:
109 if assertedsigs:
110 raiseinterrupt(assertedsigs[0])
110 raiseinterrupt(assertedsigs[0])
111
111
112
112
113 def trylock(ui, vfs, lockname, timeout, warntimeout, *args, **kwargs):
113 def trylock(ui, vfs, lockname, timeout, warntimeout, *args, **kwargs):
114 """return an acquired lock or raise an a LockHeld exception
114 """return an acquired lock or raise an a LockHeld exception
115
115
116 This function is responsible to issue warnings and or debug messages about
116 This function is responsible to issue warnings and or debug messages about
117 the held lock while trying to acquires it."""
117 the held lock while trying to acquires it."""
118 devel_wait_file = kwargs.pop("devel_wait_sync_file", None)
118
119
119 def printwarning(printer, locker):
120 def printwarning(printer, locker):
120 """issue the usual "waiting on lock" message through any channel"""
121 """issue the usual "waiting on lock" message through any channel"""
121 # show more details for new-style locks
122 # show more details for new-style locks
122 if b':' in locker:
123 if b':' in locker:
123 host, pid = locker.split(b":", 1)
124 host, pid = locker.split(b":", 1)
124 msg = _(
125 msg = _(
125 b"waiting for lock on %s held by process %r on host %r\n"
126 b"waiting for lock on %s held by process %r on host %r\n"
126 ) % (
127 ) % (
127 pycompat.bytestr(l.desc),
128 pycompat.bytestr(l.desc),
128 pycompat.bytestr(pid),
129 pycompat.bytestr(pid),
129 pycompat.bytestr(host),
130 pycompat.bytestr(host),
130 )
131 )
131 else:
132 else:
132 msg = _(b"waiting for lock on %s held by %r\n") % (
133 msg = _(b"waiting for lock on %s held by %r\n") % (
133 l.desc,
134 l.desc,
134 pycompat.bytestr(locker),
135 pycompat.bytestr(locker),
135 )
136 )
136 printer(msg)
137 printer(msg)
137
138
138 l = lock(vfs, lockname, 0, *args, dolock=False, **kwargs)
139 l = lock(vfs, lockname, 0, *args, dolock=False, **kwargs)
139
140
140 debugidx = 0 if (warntimeout and timeout) else -1
141 debugidx = 0 if (warntimeout and timeout) else -1
141 warningidx = 0
142 warningidx = 0
142 if not timeout:
143 if not timeout:
143 warningidx = -1
144 warningidx = -1
144 elif warntimeout:
145 elif warntimeout:
145 warningidx = warntimeout
146 warningidx = warntimeout
146
147
147 delay = 0
148 delay = 0
148 while True:
149 while True:
149 try:
150 try:
150 l._trylock()
151 l._trylock()
151 break
152 break
152 except error.LockHeld as inst:
153 except error.LockHeld as inst:
154 if devel_wait_file is not None:
155 # create the file to signal we are waiting
156 with open(devel_wait_file, 'w'):
157 pass
158
153 if delay == debugidx:
159 if delay == debugidx:
154 printwarning(ui.debug, inst.locker)
160 printwarning(ui.debug, inst.locker)
155 if delay == warningidx:
161 if delay == warningidx:
156 printwarning(ui.warn, inst.locker)
162 printwarning(ui.warn, inst.locker)
157 if timeout <= delay:
163 if timeout <= delay:
158 assert isinstance(inst.filename, bytes)
164 assert isinstance(inst.filename, bytes)
159 raise error.LockHeld(
165 raise error.LockHeld(
160 errno.ETIMEDOUT,
166 errno.ETIMEDOUT,
161 typing.cast(bytes, inst.filename),
167 typing.cast(bytes, inst.filename),
162 l.desc,
168 l.desc,
163 inst.locker,
169 inst.locker,
164 )
170 )
165 time.sleep(1)
171 time.sleep(1)
166 delay += 1
172 delay += 1
167
173
168 l.delay = delay
174 l.delay = delay
169 if l.delay:
175 if l.delay:
170 if 0 <= warningidx <= l.delay:
176 if 0 <= warningidx <= l.delay:
171 ui.warn(_(b"got lock after %d seconds\n") % l.delay)
177 ui.warn(_(b"got lock after %d seconds\n") % l.delay)
172 else:
178 else:
173 ui.debug(b"got lock after %d seconds\n" % l.delay)
179 ui.debug(b"got lock after %d seconds\n" % l.delay)
174 if l.acquirefn:
180 if l.acquirefn:
175 l.acquirefn()
181 l.acquirefn()
176 return l
182 return l
177
183
178
184
179 class lock:
185 class lock:
180 """An advisory lock held by one process to control access to a set
186 """An advisory lock held by one process to control access to a set
181 of files. Non-cooperating processes or incorrectly written scripts
187 of files. Non-cooperating processes or incorrectly written scripts
182 can ignore Mercurial's locking scheme and stomp all over the
188 can ignore Mercurial's locking scheme and stomp all over the
183 repository, so don't do that.
189 repository, so don't do that.
184
190
185 Typically used via localrepository.lock() to lock the repository
191 Typically used via localrepository.lock() to lock the repository
186 store (.hg/store/) or localrepository.wlock() to lock everything
192 store (.hg/store/) or localrepository.wlock() to lock everything
187 else under .hg/."""
193 else under .hg/."""
188
194
189 # lock is symlink on platforms that support it, file on others.
195 # lock is symlink on platforms that support it, file on others.
190
196
191 # symlink is used because create of directory entry and contents
197 # symlink is used because create of directory entry and contents
192 # are atomic even over nfs.
198 # are atomic even over nfs.
193
199
194 # old-style lock: symlink to pid
200 # old-style lock: symlink to pid
195 # new-style lock: symlink to hostname:pid
201 # new-style lock: symlink to hostname:pid
196
202
197 _host = None
203 _host = None
198
204
199 def __init__(
205 def __init__(
200 self,
206 self,
201 vfs,
207 vfs,
202 fname,
208 fname,
203 timeout=-1,
209 timeout=-1,
204 releasefn=None,
210 releasefn=None,
205 acquirefn=None,
211 acquirefn=None,
206 desc=None,
212 desc=None,
207 signalsafe=True,
213 signalsafe=True,
208 dolock=True,
214 dolock=True,
209 ):
215 ):
210 self.vfs = vfs
216 self.vfs = vfs
211 self.f = fname
217 self.f = fname
212 self.held = 0
218 self.held = 0
213 self.timeout = timeout
219 self.timeout = timeout
214 self.releasefn = releasefn
220 self.releasefn = releasefn
215 self.acquirefn = acquirefn
221 self.acquirefn = acquirefn
216 self.desc = desc
222 self.desc = desc
217 if signalsafe:
223 if signalsafe:
218 self._maybedelayedinterrupt = _delayedinterrupt
224 self._maybedelayedinterrupt = _delayedinterrupt
219 else:
225 else:
220 self._maybedelayedinterrupt = util.nullcontextmanager
226 self._maybedelayedinterrupt = util.nullcontextmanager
221 self.postrelease = []
227 self.postrelease = []
222 self.pid = self._getpid()
228 self.pid = self._getpid()
223 if dolock:
229 if dolock:
224 self.delay = self.lock()
230 self.delay = self.lock()
225 if self.acquirefn:
231 if self.acquirefn:
226 self.acquirefn()
232 self.acquirefn()
227
233
228 def __enter__(self):
234 def __enter__(self):
229 return self
235 return self
230
236
231 def __exit__(self, exc_type, exc_value, exc_tb):
237 def __exit__(self, exc_type, exc_value, exc_tb):
232 success = all(a is None for a in (exc_type, exc_value, exc_tb))
238 success = all(a is None for a in (exc_type, exc_value, exc_tb))
233 self.release(success=success)
239 self.release(success=success)
234
240
235 def __del__(self):
241 def __del__(self):
236 if self.held:
242 if self.held:
237 warnings.warn(
243 warnings.warn(
238 "use lock.release instead of del lock",
244 "use lock.release instead of del lock",
239 category=DeprecationWarning,
245 category=DeprecationWarning,
240 stacklevel=2,
246 stacklevel=2,
241 )
247 )
242
248
243 # ensure the lock will be removed
249 # ensure the lock will be removed
244 # even if recursive locking did occur
250 # even if recursive locking did occur
245 self.held = 1
251 self.held = 1
246
252
247 self.release()
253 self.release()
248
254
249 def _getpid(self):
255 def _getpid(self):
250 # wrapper around procutil.getpid() to make testing easier
256 # wrapper around procutil.getpid() to make testing easier
251 return procutil.getpid()
257 return procutil.getpid()
252
258
253 def lock(self):
259 def lock(self):
254 timeout = self.timeout
260 timeout = self.timeout
255 while True:
261 while True:
256 try:
262 try:
257 self._trylock()
263 self._trylock()
258 return self.timeout - timeout
264 return self.timeout - timeout
259 except error.LockHeld as inst:
265 except error.LockHeld as inst:
260 if timeout != 0:
266 if timeout != 0:
261 time.sleep(1)
267 time.sleep(1)
262 if timeout > 0:
268 if timeout > 0:
263 timeout -= 1
269 timeout -= 1
264 continue
270 continue
265 raise error.LockHeld(
271 raise error.LockHeld(
266 errno.ETIMEDOUT, inst.filename, self.desc, inst.locker
272 errno.ETIMEDOUT, inst.filename, self.desc, inst.locker
267 )
273 )
268
274
269 def _trylock(self):
275 def _trylock(self):
270 if self.held:
276 if self.held:
271 self.held += 1
277 self.held += 1
272 return
278 return
273 if lock._host is None:
279 if lock._host is None:
274 lock._host = _getlockprefix()
280 lock._host = _getlockprefix()
275 lockname = b'%s:%d' % (lock._host, self.pid)
281 lockname = b'%s:%d' % (lock._host, self.pid)
276 retry = 5
282 retry = 5
277 while not self.held and retry:
283 while not self.held and retry:
278 retry -= 1
284 retry -= 1
279 try:
285 try:
280 with self._maybedelayedinterrupt():
286 with self._maybedelayedinterrupt():
281 self.vfs.makelock(lockname, self.f)
287 self.vfs.makelock(lockname, self.f)
282 self.held = 1
288 self.held = 1
283 except (OSError, IOError) as why:
289 except (OSError, IOError) as why:
284 if why.errno == errno.EEXIST:
290 if why.errno == errno.EEXIST:
285 locker = self._readlock()
291 locker = self._readlock()
286 if locker is None:
292 if locker is None:
287 continue
293 continue
288
294
289 locker = self._testlock(locker)
295 locker = self._testlock(locker)
290 if locker is not None:
296 if locker is not None:
291 raise error.LockHeld(
297 raise error.LockHeld(
292 errno.EAGAIN,
298 errno.EAGAIN,
293 self.vfs.join(self.f),
299 self.vfs.join(self.f),
294 self.desc,
300 self.desc,
295 locker,
301 locker,
296 )
302 )
297 else:
303 else:
298 assert isinstance(why.filename, bytes)
304 assert isinstance(why.filename, bytes)
299 assert isinstance(why.strerror, str)
305 assert isinstance(why.strerror, str)
300 raise error.LockUnavailable(
306 raise error.LockUnavailable(
301 why.errno,
307 why.errno,
302 why.strerror,
308 why.strerror,
303 typing.cast(bytes, why.filename),
309 typing.cast(bytes, why.filename),
304 self.desc,
310 self.desc,
305 )
311 )
306
312
307 if not self.held:
313 if not self.held:
308 # use empty locker to mean "busy for frequent lock/unlock
314 # use empty locker to mean "busy for frequent lock/unlock
309 # by many processes"
315 # by many processes"
310 raise error.LockHeld(
316 raise error.LockHeld(
311 errno.EAGAIN, self.vfs.join(self.f), self.desc, b""
317 errno.EAGAIN, self.vfs.join(self.f), self.desc, b""
312 )
318 )
313
319
314 def _readlock(self):
320 def _readlock(self):
315 """read lock and return its value
321 """read lock and return its value
316
322
317 Returns None if no lock exists, pid for old-style locks, and host:pid
323 Returns None if no lock exists, pid for old-style locks, and host:pid
318 for new-style locks.
324 for new-style locks.
319 """
325 """
320 try:
326 try:
321 return self.vfs.readlock(self.f)
327 return self.vfs.readlock(self.f)
322 except FileNotFoundError:
328 except FileNotFoundError:
323 return None
329 return None
324
330
325 def _lockshouldbebroken(self, locker):
331 def _lockshouldbebroken(self, locker):
326 if locker is None:
332 if locker is None:
327 return False
333 return False
328 try:
334 try:
329 host, pid = locker.split(b":", 1)
335 host, pid = locker.split(b":", 1)
330 except ValueError:
336 except ValueError:
331 return False
337 return False
332 if host != lock._host:
338 if host != lock._host:
333 return False
339 return False
334 try:
340 try:
335 pid = int(pid)
341 pid = int(pid)
336 except ValueError:
342 except ValueError:
337 return False
343 return False
338 if procutil.testpid(pid):
344 if procutil.testpid(pid):
339 return False
345 return False
340 return True
346 return True
341
347
342 def _testlock(self, locker):
348 def _testlock(self, locker):
343 if not self._lockshouldbebroken(locker):
349 if not self._lockshouldbebroken(locker):
344 return locker
350 return locker
345
351
346 # if locker dead, break lock. must do this with another lock
352 # if locker dead, break lock. must do this with another lock
347 # held, or can race and break valid lock.
353 # held, or can race and break valid lock.
348 try:
354 try:
349 with lock(self.vfs, self.f + b'.break', timeout=0):
355 with lock(self.vfs, self.f + b'.break', timeout=0):
350 locker = self._readlock()
356 locker = self._readlock()
351 if not self._lockshouldbebroken(locker):
357 if not self._lockshouldbebroken(locker):
352 return locker
358 return locker
353 self.vfs.unlink(self.f)
359 self.vfs.unlink(self.f)
354 except error.LockError:
360 except error.LockError:
355 return locker
361 return locker
356
362
357 def testlock(self):
363 def testlock(self):
358 """return id of locker if lock is valid, else None.
364 """return id of locker if lock is valid, else None.
359
365
360 If old-style lock, we cannot tell what machine locker is on.
366 If old-style lock, we cannot tell what machine locker is on.
361 with new-style lock, if locker is on this machine, we can
367 with new-style lock, if locker is on this machine, we can
362 see if locker is alive. If locker is on this machine but
368 see if locker is alive. If locker is on this machine but
363 not alive, we can safely break lock.
369 not alive, we can safely break lock.
364
370
365 The lock file is only deleted when None is returned.
371 The lock file is only deleted when None is returned.
366
372
367 """
373 """
368 locker = self._readlock()
374 locker = self._readlock()
369 return self._testlock(locker)
375 return self._testlock(locker)
370
376
371 def release(self, success=True):
377 def release(self, success=True):
372 """release the lock and execute callback function if any
378 """release the lock and execute callback function if any
373
379
374 If the lock has been acquired multiple times, the actual release is
380 If the lock has been acquired multiple times, the actual release is
375 delayed to the last release call."""
381 delayed to the last release call."""
376 if self.held > 1:
382 if self.held > 1:
377 self.held -= 1
383 self.held -= 1
378 elif self.held == 1:
384 elif self.held == 1:
379 self.held = 0
385 self.held = 0
380 if self._getpid() != self.pid:
386 if self._getpid() != self.pid:
381 # we forked, and are not the parent
387 # we forked, and are not the parent
382 return
388 return
383 try:
389 try:
384 if self.releasefn:
390 if self.releasefn:
385 self.releasefn()
391 self.releasefn()
386 finally:
392 finally:
387 try:
393 try:
388 self.vfs.unlink(self.f)
394 self.vfs.unlink(self.f)
389 except OSError:
395 except OSError:
390 pass
396 pass
391 # The postrelease functions typically assume the lock is not held
397 # The postrelease functions typically assume the lock is not held
392 # at all.
398 # at all.
393 for callback in self.postrelease:
399 for callback in self.postrelease:
394 callback(success)
400 callback(success)
395 # Prevent double usage and help clear cycles.
401 # Prevent double usage and help clear cycles.
396 self.postrelease = None
402 self.postrelease = None
397
403
398
404
399 def release(*locks):
405 def release(*locks):
400 for lock in locks:
406 for lock in locks:
401 if lock is not None:
407 if lock is not None:
402 lock.release()
408 lock.release()
@@ -1,141 +1,178 b''
1 #require unix-permissions no-root no-windows
1 #require unix-permissions no-root no-windows
2
2
3 Prepare
3 Prepare
4
4
5 $ hg init a
5 $ hg init a
6 $ echo a > a/a
6 $ echo a > a/a
7 $ hg -R a ci -A -m a
7 $ hg -R a ci -A -m a
8 adding a
8 adding a
9
9
10 $ hg clone a b
10 $ hg clone a b
11 updating to branch default
11 updating to branch default
12 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
12 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
13
13
14 Test that raising an exception in the release function doesn't cause the lock to choke
14 Test that raising an exception in the release function doesn't cause the lock to choke
15
15
16 $ cat > testlock.py << EOF
16 $ cat > testlock.py << EOF
17 > from mercurial import error, registrar
17 > from mercurial import error, registrar
18 >
18 >
19 > cmdtable = {}
19 > cmdtable = {}
20 > command = registrar.command(cmdtable)
20 > command = registrar.command(cmdtable)
21 >
21 >
22 > def acquiretestlock(repo, releaseexc):
22 > def acquiretestlock(repo, releaseexc):
23 > def unlock():
23 > def unlock():
24 > if releaseexc:
24 > if releaseexc:
25 > raise error.Abort(b'expected release exception')
25 > raise error.Abort(b'expected release exception')
26 > l = repo._lock(repo.vfs, b'testlock', False, unlock, None, b'test lock')
26 > l = repo._lock(repo.vfs, b'testlock', False, unlock, None, b'test lock')
27 > return l
27 > return l
28 >
28 >
29 > @command(b'testlockexc')
29 > @command(b'testlockexc')
30 > def testlockexc(ui, repo):
30 > def testlockexc(ui, repo):
31 > testlock = acquiretestlock(repo, True)
31 > testlock = acquiretestlock(repo, True)
32 > try:
32 > try:
33 > testlock.release()
33 > testlock.release()
34 > finally:
34 > finally:
35 > try:
35 > try:
36 > testlock = acquiretestlock(repo, False)
36 > testlock = acquiretestlock(repo, False)
37 > except error.LockHeld:
37 > except error.LockHeld:
38 > raise error.Abort(b'lockfile on disk even after releasing!')
38 > raise error.Abort(b'lockfile on disk even after releasing!')
39 > testlock.release()
39 > testlock.release()
40 > EOF
40 > EOF
41 $ cat >> $HGRCPATH << EOF
41 $ cat >> $HGRCPATH << EOF
42 > [extensions]
42 > [extensions]
43 > testlock=$TESTTMP/testlock.py
43 > testlock=$TESTTMP/testlock.py
44 > EOF
44 > EOF
45
45
46 $ hg -R b testlockexc
46 $ hg -R b testlockexc
47 abort: expected release exception
47 abort: expected release exception
48 [255]
48 [255]
49
49
50 One process waiting for another
50 One process waiting for another
51
51
52 $ cat > hooks.py << EOF
52 $ SYNC_FILE_LOCKED="$TESTTMP/sync-file-locked"
53 > import time
53 $ export SYNC_FILE_LOCKED
54 > def sleepone(**x): time.sleep(1)
54 $ SYNC_FILE_TRYING_LOCK="$TESTTMP/sync-file-trying-lock"
55 > def sleephalf(**x): time.sleep(0.5)
55 $ export SYNC_FILE_TRYING_LOCK
56 $ cat << EOF > locker.sh
57 > $RUNTESTDIR/testlib/wait-on-file 10 $SYNC_FILE_TRYING_LOCK $SYNC_FILE_LOCKED;
58 > EOF
59 $ cat << EOF > waiter.sh
60 > $RUNTESTDIR/testlib/wait-on-file 10 $SYNC_FILE_LOCKED;
56 > EOF
61 > EOF
62 $ clean_sync() {
63 > rm -f "$SYNC_FILE_LOCKED"
64 > rm -f "$SYNC_FILE_TRYING_LOCK"
65 > }
66
67
68 $ clean_sync
57 $ echo b > b/b
69 $ echo b > b/b
58 $ hg -R b ci -A -m b --config hooks.precommit="python:`pwd`/hooks.py:sleepone" > stdout &
70 $ hg -R b ci -A -m b \
59 $ hg -R b up -q --config ui.timeout.warn=0 --config hooks.pre-update="python:`pwd`/hooks.py:sleephalf" \
71 > --config hooks.precommit="sh $TESTTMP/locker.sh" \
72 > > stdout &
73 $ hg -R b up -q \
74 > --config ui.timeout.warn=0 \
75 > --config hooks.pre-update="sh $TESTTMP/waiter.sh" \
76 > --config devel.lock-wait-sync-file="$SYNC_FILE_TRYING_LOCK" \
60 > > preup-stdout 2>preup-stderr
77 > > preup-stdout 2> preup-stderr
61 $ wait
78 $ wait
62 $ cat preup-stdout
79 $ cat preup-stdout
63 $ cat preup-stderr
80 $ cat preup-stderr
64 waiting for lock on working directory of b held by process '*' on host '*' (glob)
81 waiting for lock on working directory of b held by process '*' on host '*' (glob)
65 got lock after * seconds (glob)
82 got lock after * seconds (glob)
66 $ cat stdout
83 $ cat stdout
67 adding b
84 adding b
68
85
69 On processs waiting on another, warning after a long time.
86 On processs waiting on another, warning after a long time.
70
87
88 $ clean_sync
71 $ echo b > b/c
89 $ echo b > b/c
72 $ hg -R b ci -A -m b --config hooks.precommit="python:`pwd`/hooks.py:sleepone" > stdout &
90 $ hg -R b ci -A -m b \
73 $ hg -R b up -q --config hooks.pre-update="python:`pwd`/hooks.py:sleephalf" \
91 > --config hooks.precommit="sh $TESTTMP/locker.sh" \
92 > > stdout &
93 $ hg -R b up -q \
94 > --config hooks.pre-update="sh $TESTTMP/waiter.sh" \
95 > --config devel.lock-wait-sync-file="$SYNC_FILE_TRYING_LOCK" \
74 > --config ui.timeout.warn=250 \
96 > --config ui.timeout.warn=250 \
75 > > preup-stdout 2>preup-stderr
97 > > preup-stdout 2> preup-stderr
76 $ wait
98 $ wait
77 $ cat preup-stdout
99 $ cat preup-stdout
78 $ cat preup-stderr
100 $ cat preup-stderr
79 $ cat stdout
101 $ cat stdout
80 adding c
102 adding c
81
103
82 On processs waiting on another, warning disabled.
104 On processs waiting on another, warning disabled.
83
105
106 $ clean_sync
84 $ echo b > b/d
107 $ echo b > b/d
85 $ hg -R b ci -A -m b --config hooks.precommit="python:`pwd`/hooks.py:sleepone" > stdout &
108 $ hg -R b ci -A -m b \
86 $ hg -R b up -q --config hooks.pre-update="python:`pwd`/hooks.py:sleephalf" \
109 > --config hooks.precommit="sh $TESTTMP/locker.sh" \
110 > > stdout &
111 $ hg -R b up -q \
112 > --config hooks.pre-update="sh $TESTTMP/waiter.sh" \
113 > --config devel.lock-wait-sync-file="$SYNC_FILE_TRYING_LOCK" \
87 > --config ui.timeout.warn=-1 \
114 > --config ui.timeout.warn=-1 \
88 > > preup-stdout 2>preup-stderr
115 > > preup-stdout 2>preup-stderr
89 $ wait
116 $ wait
90 $ cat preup-stdout
117 $ cat preup-stdout
91 $ cat preup-stderr
118 $ cat preup-stderr
92 $ cat stdout
119 $ cat stdout
93 adding d
120 adding d
94
121
95 check we still print debug output
122 check we still print debug output
96
123
97 On processs waiting on another, warning after a long time (debug output on)
124 On processs waiting on another, warning after a long time (debug output on)
98
125
126 $ clean_sync
99 $ echo b > b/e
127 $ echo b > b/e
100 $ hg -R b ci -A -m b --config hooks.precommit="python:`pwd`/hooks.py:sleepone" > stdout &
128 $ hg -R b ci -A -m b \
101 $ hg -R b up --config hooks.pre-update="python:`pwd`/hooks.py:sleephalf" \
129 > --config hooks.precommit="sh $TESTTMP/locker.sh" \
130 > > stdout &
131 $ hg -R b up \
132 > --config hooks.pre-update="sh $TESTTMP/waiter.sh" \
133 > --config devel.lock-wait-sync-file="$SYNC_FILE_TRYING_LOCK" \
102 > --config ui.timeout.warn=250 --debug\
134 > --config ui.timeout.warn=250 --debug \
103 > > preup-stdout 2>preup-stderr
135 > > preup-stdout 2>preup-stderr
104 $ wait
136 $ wait
105 $ cat preup-stdout
137 $ cat preup-stdout
106 calling hook pre-update: hghook_pre-update.sleephalf
138 running hook pre-update: sh $TESTTMP/waiter.sh
107 waiting for lock on working directory of b held by process '*' on host '*' (glob)
139 waiting for lock on working directory of b held by process '*' on host '*' (glob)
108 got lock after * seconds (glob)
140 got lock after * seconds (glob)
109 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
141 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
110 $ cat preup-stderr
142 $ cat preup-stderr
111 $ cat stdout
143 $ cat stdout
112 adding e
144 adding e
113
145
114 On processs waiting on another, warning disabled, (debug output on)
146 On processs waiting on another, warning disabled, (debug output on)
115
147
148 $ clean_sync
116 $ echo b > b/f
149 $ echo b > b/f
117 $ hg -R b ci -A -m b --config hooks.precommit="python:`pwd`/hooks.py:sleepone" > stdout &
150 $ hg -R b ci -A -m b \
118 $ hg -R b up --config hooks.pre-update="python:`pwd`/hooks.py:sleephalf" \
151 > --config hooks.precommit="sh $TESTTMP/locker.sh" \
152 > > stdout &
153 $ hg -R b up \
154 > --config hooks.pre-update="sh $TESTTMP/waiter.sh" \
155 > --config devel.lock-wait-sync-file="$SYNC_FILE_TRYING_LOCK" \
119 > --config ui.timeout.warn=-1 --debug\
156 > --config ui.timeout.warn=-1 --debug\
120 > > preup-stdout 2>preup-stderr
157 > > preup-stdout 2>preup-stderr
121 $ wait
158 $ wait
122 $ cat preup-stdout
159 $ cat preup-stdout
123 calling hook pre-update: hghook_pre-update.sleephalf
160 running hook pre-update: sh $TESTTMP/waiter.sh
124 waiting for lock on working directory of b held by process '*' on host '*' (glob)
161 waiting for lock on working directory of b held by process '*' on host '*' (glob)
125 got lock after * seconds (glob)
162 got lock after * seconds (glob)
126 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
163 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
127 $ cat preup-stderr
164 $ cat preup-stderr
128 $ cat stdout
165 $ cat stdout
129 adding f
166 adding f
130
167
131 Pushing to a local read-only repo that can't be locked
168 Pushing to a local read-only repo that can't be locked
132
169
133 $ chmod 100 a/.hg/store
170 $ chmod 100 a/.hg/store
134
171
135 $ hg -R b push a
172 $ hg -R b push a
136 pushing to a
173 pushing to a
137 searching for changes
174 searching for changes
138 abort: could not lock repository a: $EACCES$
175 abort: could not lock repository a: $EACCES$
139 [20]
176 [20]
140
177
141 $ chmod 700 a/.hg/store
178 $ chmod 700 a/.hg/store
General Comments 0
You need to be logged in to leave comments. Login now