configitems: register 'merge.checkunknown' and 'merge.checkignored'...
Boris Feld
changeset r34523:bed1d2ea (default branch)
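Both options default to 'abort' and live in the [merge] section; they govern how Mercurial reacts when an update or merge would overwrite untracked ("unknown") or ignored files in the working directory. A minimal sketch of reading them back through the registered defaults, assuming an existing repository (the exact call sites inside merge.py are not part of this excerpt):

    from mercurial import hg, ui as uimod

    # With no [merge] overrides in hgrc, ui.config() falls back to the
    # defaults declared centrally in configitems.py.
    ui = uimod.ui.load()
    repo = hg.repository(ui, '.')
    print(repo.ui.config('merge', 'checkunknown'))   # 'abort'
    print(repo.ui.config('merge', 'checkignored'))   # 'abort'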
mercurial/configitems.py
@@ -1,737 +1,743 @@
# configitems.py - centralized declaration of configuration option
#
# Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import functools

from . import (
    encoding,
    error,
)

def loadconfigtable(ui, extname, configtable):
    """update config item known to the ui with the extension ones"""
    for section, items in configtable.items():
        knownitems = ui._knownconfig.setdefault(section, {})
        knownkeys = set(knownitems)
        newkeys = set(items)
        for key in sorted(knownkeys & newkeys):
            msg = "extension '%s' overwrite config item '%s.%s'"
            msg %= (extname, section, key)
            ui.develwarn(msg, config='warn-config')

        knownitems.update(items)

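# Example (illustrative sketch): an extension would typically build its own
# table with the same helpers and have it merged into the ui at load time,
# roughly:
#
#     configtable = {}
#     configitem = getitemregister(configtable)
#     configitem('myext', 'some-knob', default=False)   # hypothetical item
#     loadconfigtable(ui, 'myext', configtable)
#
# A clash with an already-known item only triggers develwarn(); the
# extension's definition then wins via knownitems.update(items).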
class configitem(object):
    """represent a known config item

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    :alias: optional list of tuples as alternatives.
    """

    def __init__(self, section, name, default=None, alias=()):
        self.section = section
        self.name = name
        self.default = default
        self.alias = list(alias)

coreitems = {}

def _register(configtable, *args, **kwargs):
    item = configitem(*args, **kwargs)
    section = configtable.setdefault(item.section, {})
    if item.name in section:
        msg = "duplicated config item registration for '%s.%s'"
        raise error.ProgrammingError(msg % (item.section, item.name))
    section[item.name] = item

# special value for case where the default is derived from other values
dynamicdefault = object()

# Registering actual config items

def getitemregister(configtable):
    return functools.partial(_register, configtable)

coreconfigitem = getitemregister(coreitems)

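# Example (illustrative sketch): registering the same 'section.name' twice
# raises error.ProgrammingError, and `dynamicdefault` is a sentinel that
# callers are expected to test by identity when the real default has to be
# computed elsewhere, e.g.:
#
#     item = coreitems['color']['pagermode']
#     if item.default is dynamicdefault:
#         ...  # derive the effective default from other settings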
coreconfigitem('auth', 'cookiefile',
    default=None,
)
# bookmarks.pushing: internal hack for discovery
coreconfigitem('bookmarks', 'pushing',
    default=list,
)
# bundle.mainreporoot: internal hack for bundlerepo
coreconfigitem('bundle', 'mainreporoot',
    default='',
)
# bundle.reorder: experimental config
coreconfigitem('bundle', 'reorder',
    default='auto',
)
coreconfigitem('censor', 'policy',
    default='abort',
)
coreconfigitem('chgserver', 'idletimeout',
    default=3600,
)
coreconfigitem('chgserver', 'skiphash',
    default=False,
)
coreconfigitem('cmdserver', 'log',
    default=None,
)
coreconfigitem('color', 'mode',
    default='auto',
)
coreconfigitem('color', 'pagermode',
    default=dynamicdefault,
)
coreconfigitem('commands', 'status.relative',
    default=False,
)
coreconfigitem('commands', 'status.skipstates',
    default=[],
)
coreconfigitem('commands', 'status.verbose',
    default=False,
)
coreconfigitem('commands', 'update.requiredest',
    default=False,
)
coreconfigitem('debug', 'dirstate.delaywrite',
    default=0,
)
coreconfigitem('devel', 'all-warnings',
    default=False,
)
coreconfigitem('devel', 'bundle2.debug',
    default=False,
)
coreconfigitem('devel', 'check-locks',
    default=False,
)
coreconfigitem('devel', 'check-relroot',
    default=False,
)
coreconfigitem('devel', 'default-date',
    default=None,
)
coreconfigitem('devel', 'deprec-warn',
    default=False,
)
coreconfigitem('devel', 'disableloaddefaultcerts',
    default=False,
)
coreconfigitem('devel', 'legacy.exchange',
    default=list,
)
coreconfigitem('devel', 'servercafile',
    default='',
)
coreconfigitem('devel', 'serverexactprotocol',
    default='',
)
coreconfigitem('devel', 'serverrequirecert',
    default=False,
)
coreconfigitem('devel', 'strip-obsmarkers',
    default=True,
)
coreconfigitem('diff', 'nodates',
    default=None,
)
coreconfigitem('diff', 'showfunc',
    default=None,
)
coreconfigitem('diff', 'unified',
    default=None,
)
coreconfigitem('diff', 'git',
    default=None,
)
coreconfigitem('diff', 'ignorews',
    default=None,
)
coreconfigitem('diff', 'ignorewsamount',
    default=None,
)
coreconfigitem('diff', 'ignoreblanklines',
    default=None,
)
coreconfigitem('diff', 'ignorewseol',
    default=None,
)
coreconfigitem('diff', 'nobinary',
    default=None,
)
coreconfigitem('diff', 'noprefix',
    default=None,
)
coreconfigitem('email', 'charsets',
    default=list,
)
coreconfigitem('email', 'from',
    default=None,
)
coreconfigitem('email', 'method',
    default='smtp',
)
coreconfigitem('experimental', 'allowdivergence',
    default=False,
)
coreconfigitem('experimental', 'bundle-phases',
    default=False,
)
coreconfigitem('experimental', 'bundle2-advertise',
    default=True,
)
coreconfigitem('experimental', 'bundle2-output-capture',
    default=False,
)
coreconfigitem('experimental', 'bundle2.pushback',
    default=False,
)
coreconfigitem('experimental', 'bundle2lazylocking',
    default=False,
)
coreconfigitem('experimental', 'bundlecomplevel',
    default=None,
)
coreconfigitem('experimental', 'changegroup3',
    default=False,
)
coreconfigitem('experimental', 'clientcompressionengines',
    default=list,
)
coreconfigitem('experimental', 'copytrace',
    default='on',
)
coreconfigitem('experimental', 'copytrace.sourcecommitlimit',
    default=100,
)
coreconfigitem('experimental', 'crecordtest',
    default=None,
)
coreconfigitem('experimental', 'editortmpinhg',
    default=False,
)
coreconfigitem('experimental', 'maxdeltachainspan',
    default=-1,
)
coreconfigitem('experimental', 'mmapindexthreshold',
    default=None,
)
coreconfigitem('experimental', 'nonnormalparanoidcheck',
    default=False,
)
coreconfigitem('experimental', 'stabilization',
    default=list,
    alias=[('experimental', 'evolution')],
)
coreconfigitem('experimental', 'stabilization.bundle-obsmarker',
    default=False,
    alias=[('experimental', 'evolution.bundle-obsmarker')],
)
coreconfigitem('experimental', 'stabilization.track-operation',
    default=True,
    alias=[('experimental', 'evolution.track-operation')]
)
coreconfigitem('experimental', 'exportableenviron',
    default=list,
)
coreconfigitem('experimental', 'extendedheader.index',
    default=None,
)
coreconfigitem('experimental', 'extendedheader.similarity',
    default=False,
)
coreconfigitem('experimental', 'format.compression',
    default='zlib',
)
coreconfigitem('experimental', 'graphshorten',
    default=False,
)
coreconfigitem('experimental', 'hook-track-tags',
    default=False,
)
coreconfigitem('experimental', 'httppostargs',
    default=False,
)
coreconfigitem('experimental', 'manifestv2',
    default=False,
)
coreconfigitem('experimental', 'mergedriver',
    default=None,
)
coreconfigitem('experimental', 'obsmarkers-exchange-debug',
    default=False,
)
coreconfigitem('experimental', 'rebase.multidest',
    default=False,
)
coreconfigitem('experimental', 'revertalternateinteractivemode',
    default=True,
)
coreconfigitem('experimental', 'revlogv2',
    default=None,
)
coreconfigitem('experimental', 'spacemovesdown',
    default=False,
)
coreconfigitem('experimental', 'treemanifest',
    default=False,
)
coreconfigitem('experimental', 'updatecheck',
    default=None,
)
coreconfigitem('format', 'aggressivemergedeltas',
    default=False,
)
coreconfigitem('format', 'chunkcachesize',
    default=None,
)
coreconfigitem('format', 'dotencode',
    default=True,
)
coreconfigitem('format', 'generaldelta',
    default=False,
)
coreconfigitem('format', 'manifestcachesize',
    default=None,
)
coreconfigitem('format', 'maxchainlen',
    default=None,
)
coreconfigitem('format', 'obsstore-version',
    default=None,
)
coreconfigitem('format', 'usefncache',
    default=True,
)
coreconfigitem('format', 'usegeneraldelta',
    default=True,
)
coreconfigitem('format', 'usestore',
    default=True,
)
coreconfigitem('hostsecurity', 'ciphers',
    default=None,
)
coreconfigitem('hostsecurity', 'disabletls10warning',
    default=False,
)
coreconfigitem('http_proxy', 'always',
    default=False,
)
coreconfigitem('http_proxy', 'host',
    default=None,
)
coreconfigitem('http_proxy', 'no',
    default=list,
)
coreconfigitem('http_proxy', 'passwd',
    default=None,
)
coreconfigitem('http_proxy', 'user',
    default=None,
)
+coreconfigitem('merge', 'checkunknown',
+    default='abort',
+)
+coreconfigitem('merge', 'checkignored',
+    default='abort',
+)
coreconfigitem('merge', 'followcopies',
    default=True,
)
coreconfigitem('merge', 'preferancestor',
    default=lambda: ['*'],
)
coreconfigitem('pager', 'ignore',
    default=list,
)
coreconfigitem('patch', 'eol',
    default='strict',
)
coreconfigitem('patch', 'fuzz',
    default=2,
)
coreconfigitem('paths', 'default',
    default=None,
)
coreconfigitem('paths', 'default-push',
    default=None,
)
coreconfigitem('phases', 'checksubrepos',
    default='follow',
)
coreconfigitem('phases', 'new-commit',
    default=dynamicdefault,
)
coreconfigitem('phases', 'publish',
    default=True,
)
coreconfigitem('profiling', 'enabled',
    default=False,
)
coreconfigitem('profiling', 'format',
    default='text',
)
coreconfigitem('profiling', 'freq',
    default=1000,
)
coreconfigitem('profiling', 'limit',
    default=30,
)
coreconfigitem('profiling', 'nested',
    default=0,
)
coreconfigitem('profiling', 'output',
    default=None,
)
coreconfigitem('profiling', 'showmax',
    default=0.999,
)
coreconfigitem('profiling', 'showmin',
    default=dynamicdefault,
)
coreconfigitem('profiling', 'sort',
    default='inlinetime',
)
coreconfigitem('profiling', 'statformat',
    default='hotpath',
)
coreconfigitem('profiling', 'type',
    default='stat',
)
coreconfigitem('progress', 'assume-tty',
    default=False,
)
coreconfigitem('progress', 'changedelay',
    default=1,
)
coreconfigitem('progress', 'clear-complete',
    default=True,
)
coreconfigitem('progress', 'debug',
    default=False,
)
coreconfigitem('progress', 'delay',
    default=3,
)
coreconfigitem('progress', 'disable',
    default=False,
)
coreconfigitem('progress', 'estimateinterval',
    default=60.0,
)
coreconfigitem('progress', 'refresh',
    default=0.1,
)
coreconfigitem('progress', 'width',
    default=dynamicdefault,
)
coreconfigitem('push', 'pushvars.server',
    default=False,
)
coreconfigitem('server', 'bundle1',
    default=True,
)
coreconfigitem('server', 'bundle1gd',
    default=None,
)
coreconfigitem('server', 'compressionengines',
    default=list,
)
coreconfigitem('server', 'concurrent-push-mode',
    default='strict',
)
coreconfigitem('server', 'disablefullbundle',
    default=False,
)
coreconfigitem('server', 'maxhttpheaderlen',
    default=1024,
)
coreconfigitem('server', 'preferuncompressed',
    default=False,
)
coreconfigitem('server', 'uncompressed',
    default=True,
)
coreconfigitem('server', 'uncompressedallowsecret',
    default=False,
)
coreconfigitem('server', 'validate',
    default=False,
)
coreconfigitem('server', 'zliblevel',
    default=-1,
)
coreconfigitem('smtp', 'host',
    default=None,
)
coreconfigitem('smtp', 'local_hostname',
    default=None,
)
coreconfigitem('smtp', 'password',
    default=None,
)
coreconfigitem('smtp', 'port',
    default=dynamicdefault,
)
coreconfigitem('smtp', 'tls',
    default='none',
)
coreconfigitem('smtp', 'username',
    default=None,
)
coreconfigitem('sparse', 'missingwarning',
    default=True,
)
coreconfigitem('trusted', 'groups',
    default=list,
)
coreconfigitem('trusted', 'users',
    default=list,
)
coreconfigitem('ui', '_usedassubrepo',
    default=False,
)
coreconfigitem('ui', 'allowemptycommit',
    default=False,
)
coreconfigitem('ui', 'archivemeta',
    default=True,
)
coreconfigitem('ui', 'askusername',
    default=False,
)
coreconfigitem('ui', 'clonebundlefallback',
    default=False,
)
coreconfigitem('ui', 'clonebundleprefers',
    default=list,
)
coreconfigitem('ui', 'clonebundles',
    default=True,
)
coreconfigitem('ui', 'color',
    default='auto',
)
coreconfigitem('ui', 'commitsubrepos',
    default=False,
)
coreconfigitem('ui', 'debug',
    default=False,
)
coreconfigitem('ui', 'debugger',
    default=None,
)
coreconfigitem('ui', 'fallbackencoding',
    default=None,
)
coreconfigitem('ui', 'forcecwd',
    default=None,
)
coreconfigitem('ui', 'forcemerge',
    default=None,
)
coreconfigitem('ui', 'formatdebug',
    default=False,
)
coreconfigitem('ui', 'formatjson',
    default=False,
)
coreconfigitem('ui', 'formatted',
    default=None,
)
coreconfigitem('ui', 'graphnodetemplate',
    default=None,
)
coreconfigitem('ui', 'http2debuglevel',
    default=None,
)
coreconfigitem('ui', 'interactive',
    default=None,
)
coreconfigitem('ui', 'interface',
    default=None,
)
coreconfigitem('ui', 'logblockedtimes',
    default=False,
)
coreconfigitem('ui', 'logtemplate',
    default=None,
)
coreconfigitem('ui', 'merge',
    default=None,
)
coreconfigitem('ui', 'mergemarkers',
    default='basic',
)
coreconfigitem('ui', 'mergemarkertemplate',
    default=('{node|short} '
             '{ifeq(tags, "tip", "", '
             'ifeq(tags, "", "", "{tags} "))}'
             '{if(bookmarks, "{bookmarks} ")}'
             '{ifeq(branch, "default", "", "{branch} ")}'
             '- {author|user}: {desc|firstline}')
)
coreconfigitem('ui', 'nontty',
    default=False,
)
coreconfigitem('ui', 'origbackuppath',
    default=None,
)
coreconfigitem('ui', 'paginate',
    default=True,
)
coreconfigitem('ui', 'patch',
    default=None,
)
coreconfigitem('ui', 'portablefilenames',
    default='warn',
)
coreconfigitem('ui', 'promptecho',
    default=False,
)
coreconfigitem('ui', 'quiet',
    default=False,
)
coreconfigitem('ui', 'quietbookmarkmove',
    default=False,
)
coreconfigitem('ui', 'remotecmd',
    default='hg',
)
coreconfigitem('ui', 'report_untrusted',
    default=True,
)
coreconfigitem('ui', 'rollback',
    default=True,
)
coreconfigitem('ui', 'slash',
    default=False,
)
coreconfigitem('ui', 'ssh',
    default='ssh',
)
coreconfigitem('ui', 'statuscopies',
    default=False,
)
coreconfigitem('ui', 'strict',
    default=False,
)
coreconfigitem('ui', 'style',
    default='',
)
coreconfigitem('ui', 'supportcontact',
    default=None,
)
coreconfigitem('ui', 'textwidth',
    default=78,
)
coreconfigitem('ui', 'timeout',
    default='600',
)
coreconfigitem('ui', 'traceback',
    default=False,
)
coreconfigitem('ui', 'tweakdefaults',
    default=False,
)
coreconfigitem('ui', 'usehttp2',
    default=False,
)
coreconfigitem('ui', 'username',
    alias=[('ui', 'user')]
)
coreconfigitem('ui', 'verbose',
    default=False,
)
coreconfigitem('verify', 'skipflags',
    default=None,
)
coreconfigitem('web', 'accesslog',
    default='-',
)
coreconfigitem('web', 'address',
    default='',
)
coreconfigitem('web', 'allow_archive',
    default=list,
)
coreconfigitem('web', 'allow_read',
    default=list,
)
coreconfigitem('web', 'baseurl',
    default=None,
)
coreconfigitem('web', 'cacerts',
    default=None,
)
coreconfigitem('web', 'certificate',
    default=None,
)
coreconfigitem('web', 'collapse',
    default=False,
)
coreconfigitem('web', 'csp',
    default=None,
)
coreconfigitem('web', 'deny_read',
    default=list,
)
coreconfigitem('web', 'descend',
    default=True,
)
coreconfigitem('web', 'description',
    default="",
)
coreconfigitem('web', 'encoding',
    default=lambda: encoding.encoding,
)
coreconfigitem('web', 'errorlog',
    default='-',
)
coreconfigitem('web', 'ipv6',
    default=False,
)
coreconfigitem('web', 'port',
    default=8000,
)
coreconfigitem('web', 'prefix',
    default='',
)
coreconfigitem('web', 'refreshinterval',
    default=20,
)
coreconfigitem('web', 'stripes',
    default=1,
)
coreconfigitem('web', 'style',
    default='paper',
)
coreconfigitem('web', 'templates',
    default=None,
)
coreconfigitem('worker', 'backgroundclose',
    default=dynamicdefault,
)
# Windows defaults to a limit of 512 open files. A buffer of 128
# should give us enough headway.
coreconfigitem('worker', 'backgroundclosemaxqueue',
    default=384,
)
coreconfigitem('worker', 'backgroundcloseminfilecount',
    default=2048,
)
coreconfigitem('worker', 'backgroundclosethreadcount',
    default=4,
)
coreconfigitem('worker', 'numcpus',
    default=None,
)
mercurial/merge.py
@@ -1,1775 +1,1775 @@
# merge.py - directory-level update/merge handling for Mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import shutil
import struct

from .i18n import _
from .node import (
    addednodeid,
    bin,
    hex,
    modifiednodeid,
    nullhex,
    nullid,
    nullrev,
)
from . import (
    copies,
    error,
    filemerge,
    match as matchmod,
    obsutil,
    pycompat,
    scmutil,
    subrepo,
    util,
    worker,
)

_pack = struct.pack
_unpack = struct.unpack

def _droponode(data):
    # used for compatibility for v1
    bits = data.split('\0')
    bits = bits[:-2] + bits[-1:]
    return '\0'.join(bits)

class mergestate(object):
    '''track 3-way merge state of individual files

    The merge state is stored on disk when needed. Two files are used: one with
    an old format (version 1), and one with a new format (version 2). Version 2
    stores a superset of the data in version 1, including new kinds of records
    in the future. For more about the new format, see the documentation for
    `_readrecordsv2`.

    Each record can contain arbitrary content, and has an associated type. This
    `type` should be a letter. If `type` is uppercase, the record is mandatory:
    versions of Mercurial that don't support it should abort. If `type` is
    lowercase, the record can be safely ignored.

    Currently known records:

    L: the node of the "local" part of the merge (hexified version)
    O: the node of the "other" part of the merge (hexified version)
    F: a file to be merged entry
    C: a change/delete or delete/change conflict
    D: a file that the external merge driver will merge internally
       (experimental)
    m: the external merge driver defined for this merge plus its run state
       (experimental)
    f: a (filename, dictionary) tuple of optional values for a given file
    X: unsupported mandatory record type (used in tests)
    x: unsupported advisory record type (used in tests)
    l: the labels for the parts of the merge.

    Merge driver run states (experimental):
    u: driver-resolved files unmarked -- needs to be run next time we're about
       to resolve or commit
    m: driver-resolved files marked -- only needs to be run before commit
    s: success/skipped -- does not need to be run any more

    '''
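    # Illustrative example: a paused merge of 'foo.txt' carrying the usual
    # 'working copy'/'merge rev' labels could serialize roughly as
    #   ('L', '<40-hex node of the local parent>')
    #   ('O', '<40-hex node of the other parent>')
    #   ('F', 'foo.txt\0<NUL-separated per-file fields>')
    #   ('l', 'working copy\0merge rev')
    # (the field layout of the 'F' payload is abridged here).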
    statepathv1 = 'merge/state'
    statepathv2 = 'merge/state2'

    @staticmethod
    def clean(repo, node=None, other=None, labels=None):
        """Initialize a brand new merge state, removing any existing state on
        disk."""
        ms = mergestate(repo)
        ms.reset(node, other, labels)
        return ms

    @staticmethod
    def read(repo):
        """Initialize the merge state, reading it from disk."""
        ms = mergestate(repo)
        ms._read()
        return ms

    def __init__(self, repo):
        """Initialize the merge state.

        Do not use this directly! Instead call read() or clean()."""
        self._repo = repo
        self._dirty = False
        self._labels = None

    def reset(self, node=None, other=None, labels=None):
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        self._labels = labels
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        if node:
            self._local = node
            self._other = other
        self._readmergedriver = None
        if self.mergedriver:
            self._mdstate = 's'
        else:
            self._mdstate = 'u'
        shutil.rmtree(self._repo.vfs.join('merge'), True)
        self._results = {}
        self._dirty = False

    def _read(self):
        """Analyse each record content to restore a serialized state from disk

        This function process "record" entry produced by the de-serialization
        of on disk file.
        """
        self._state = {}
        self._stateextras = {}
        self._local = None
        self._other = None
        for var in ('localctx', 'otherctx'):
            if var in vars(self):
                delattr(self, var)
        self._readmergedriver = None
        self._mdstate = 's'
        unsupported = set()
        records = self._readrecords()
        for rtype, record in records:
            if rtype == 'L':
                self._local = bin(record)
            elif rtype == 'O':
                self._other = bin(record)
            elif rtype == 'm':
                bits = record.split('\0', 1)
                mdstate = bits[1]
                if len(mdstate) != 1 or mdstate not in 'ums':
                    # the merge driver should be idempotent, so just rerun it
                    mdstate = 'u'

                self._readmergedriver = bits[0]
                self._mdstate = mdstate
            elif rtype in 'FDC':
                bits = record.split('\0')
                self._state[bits[0]] = bits[1:]
            elif rtype == 'f':
                filename, rawextras = record.split('\0', 1)
                extraparts = rawextras.split('\0')
                extras = {}
                i = 0
                while i < len(extraparts):
                    extras[extraparts[i]] = extraparts[i + 1]
                    i += 2

                self._stateextras[filename] = extras
            elif rtype == 'l':
                labels = record.split('\0', 2)
                self._labels = [l for l in labels if len(l) > 0]
            elif not rtype.islower():
                unsupported.add(rtype)
        self._results = {}
        self._dirty = False

        if unsupported:
            raise error.UnsupportedMergeRecords(unsupported)

    def _readrecords(self):
        """Read merge state from disk and return a list of record (TYPE, data)

        We read data from both v1 and v2 files and decide which one to use.

        V1 has been used by version prior to 2.9.1 and contains less data than
        v2. We read both versions and check if no data in v2 contradicts
        v1. If there is not contradiction we can safely assume that both v1
        and v2 were written at the same time and use the extract data in v2. If
        there is contradiction we ignore v2 content as we assume an old version
        of Mercurial has overwritten the mergestate file and left an old v2
        file around.

        returns list of record [(TYPE, data), ...]"""
        v1records = self._readrecordsv1()
        v2records = self._readrecordsv2()
        if self._v1v2match(v1records, v2records):
            return v2records
        else:
            # v1 file is newer than v2 file, use it
            # we have to infer the "other" changeset of the merge
            # we cannot do better than that with v1 of the format
            mctx = self._repo[None].parents()[-1]
            v1records.append(('O', mctx.hex()))
            # add place holder "other" file node information
            # nobody is using it yet so we do no need to fetch the data
            # if mctx was wrong `mctx[bits[-2]]` may fails.
            for idx, r in enumerate(v1records):
                if r[0] == 'F':
                    bits = r[1].split('\0')
                    bits.insert(-2, '')
                    v1records[idx] = (r[0], '\0'.join(bits))
            return v1records

    def _v1v2match(self, v1records, v2records):
        oldv2 = set() # old format version of v2 record
        for rec in v2records:
            if rec[0] == 'L':
                oldv2.add(rec)
            elif rec[0] == 'F':
                # drop the onode data (not contained in v1)
                oldv2.add(('F', _droponode(rec[1])))
        for rec in v1records:
            if rec not in oldv2:
                return False
        else:
            return True

    def _readrecordsv1(self):
        """read on disk merge state for version 1 file

        returns list of record [(TYPE, data), ...]

        Note: the "F" data from this file are one entry short
              (no "other file node" entry)
        """
        records = []
        try:
            f = self._repo.vfs(self.statepathv1)
            for i, l in enumerate(f):
                if i == 0:
                    records.append(('L', l[:-1]))
                else:
                    records.append(('F', l[:-1]))
            f.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return records

    def _readrecordsv2(self):
        """read on disk merge state for version 2 file

        This format is a list of arbitrary records of the form:

          [type][length][content]

        `type` is a single character, `length` is a 4 byte integer, and
        `content` is an arbitrary byte sequence of length `length`.

        Mercurial versions prior to 3.7 have a bug where if there are
        unsupported mandatory merge records, attempting to clear out the merge
        state with hg update --clean or similar aborts. The 't' record type
        works around that by writing out what those versions treat as an
        advisory record, but later versions interpret as special: the first
        character is the 'real' record type and everything onwards is the data.

        Returns list of records [(TYPE, data), ...]."""
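        # Worked example (illustrative): suppose a label record ('l',
        # 'local\0other') was written through the 't' escape described above.
        # The bytes on disk would be
        #   't' + _pack('>I', 12) + 'l' + 'local\0other'
        # and the loop below reads the one-byte type 't', the 4-byte big-endian
        # length (12), then the payload, and finally unwraps it back into
        # ('l', 'local\0other').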
273 records = []
273 records = []
274 try:
274 try:
275 f = self._repo.vfs(self.statepathv2)
275 f = self._repo.vfs(self.statepathv2)
276 data = f.read()
276 data = f.read()
277 off = 0
277 off = 0
278 end = len(data)
278 end = len(data)
279 while off < end:
279 while off < end:
280 rtype = data[off]
280 rtype = data[off]
281 off += 1
281 off += 1
282 length = _unpack('>I', data[off:(off + 4)])[0]
282 length = _unpack('>I', data[off:(off + 4)])[0]
283 off += 4
283 off += 4
284 record = data[off:(off + length)]
284 record = data[off:(off + length)]
285 off += length
285 off += length
286 if rtype == 't':
286 if rtype == 't':
287 rtype, record = record[0], record[1:]
287 rtype, record = record[0], record[1:]
288 records.append((rtype, record))
288 records.append((rtype, record))
289 f.close()
289 f.close()
290 except IOError as err:
290 except IOError as err:
291 if err.errno != errno.ENOENT:
291 if err.errno != errno.ENOENT:
292 raise
292 raise
293 return records
293 return records
294
294
295 @util.propertycache
295 @util.propertycache
296 def mergedriver(self):
296 def mergedriver(self):
297 # protect against the following:
297 # protect against the following:
298 # - A configures a malicious merge driver in their hgrc, then
298 # - A configures a malicious merge driver in their hgrc, then
299 # pauses the merge
299 # pauses the merge
300 # - A edits their hgrc to remove references to the merge driver
300 # - A edits their hgrc to remove references to the merge driver
301 # - A gives a copy of their entire repo, including .hg, to B
301 # - A gives a copy of their entire repo, including .hg, to B
302 # - B inspects .hgrc and finds it to be clean
302 # - B inspects .hgrc and finds it to be clean
303 # - B then continues the merge and the malicious merge driver
303 # - B then continues the merge and the malicious merge driver
304 # gets invoked
304 # gets invoked
305 configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
305 configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
306 if (self._readmergedriver is not None
306 if (self._readmergedriver is not None
307 and self._readmergedriver != configmergedriver):
307 and self._readmergedriver != configmergedriver):
308 raise error.ConfigError(
308 raise error.ConfigError(
309 _("merge driver changed since merge started"),
309 _("merge driver changed since merge started"),
310 hint=_("revert merge driver change or abort merge"))
310 hint=_("revert merge driver change or abort merge"))
311
311
312 return configmergedriver
312 return configmergedriver
313
313
314 @util.propertycache
314 @util.propertycache
315 def localctx(self):
315 def localctx(self):
316 if self._local is None:
316 if self._local is None:
317 msg = "localctx accessed but self._local isn't set"
317 msg = "localctx accessed but self._local isn't set"
318 raise error.ProgrammingError(msg)
318 raise error.ProgrammingError(msg)
319 return self._repo[self._local]
319 return self._repo[self._local]
320
320
321 @util.propertycache
321 @util.propertycache
322 def otherctx(self):
322 def otherctx(self):
323 if self._other is None:
323 if self._other is None:
324 msg = "otherctx accessed but self._other isn't set"
324 msg = "otherctx accessed but self._other isn't set"
325 raise error.ProgrammingError(msg)
325 raise error.ProgrammingError(msg)
326 return self._repo[self._other]
326 return self._repo[self._other]
327
327
328 def active(self):
328 def active(self):
329 """Whether mergestate is active.
329 """Whether mergestate is active.
330
330
331 Returns True if there appears to be mergestate. This is a rough proxy
331 Returns True if there appears to be mergestate. This is a rough proxy
332 for "is a merge in progress."
332 for "is a merge in progress."
333 """
333 """
334 # Check local variables before looking at filesystem for performance
334 # Check local variables before looking at filesystem for performance
335 # reasons.
335 # reasons.
336 return bool(self._local) or bool(self._state) or \
336 return bool(self._local) or bool(self._state) or \
337 self._repo.vfs.exists(self.statepathv1) or \
337 self._repo.vfs.exists(self.statepathv1) or \
338 self._repo.vfs.exists(self.statepathv2)
338 self._repo.vfs.exists(self.statepathv2)
339
339
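As a usage note, callers typically treat active() as a guard before starting work that cannot coexist with an in-progress merge; a hypothetical sketch, assuming `ms` is a mergestate instance as above:

    if ms.active():
        raise RuntimeError('outstanding merge in progress '
                           '(resolve and commit, or abort, first)')
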
340 def commit(self):
340 def commit(self):
341 """Write current state on disk (if necessary)"""
341 """Write current state on disk (if necessary)"""
342 if self._dirty:
342 if self._dirty:
343 records = self._makerecords()
343 records = self._makerecords()
344 self._writerecords(records)
344 self._writerecords(records)
345 self._dirty = False
345 self._dirty = False
346
346
347 def _makerecords(self):
347 def _makerecords(self):
348 records = []
348 records = []
349 records.append(('L', hex(self._local)))
349 records.append(('L', hex(self._local)))
350 records.append(('O', hex(self._other)))
350 records.append(('O', hex(self._other)))
351 if self.mergedriver:
351 if self.mergedriver:
352 records.append(('m', '\0'.join([
352 records.append(('m', '\0'.join([
353 self.mergedriver, self._mdstate])))
353 self.mergedriver, self._mdstate])))
354 for d, v in self._state.iteritems():
354 for d, v in self._state.iteritems():
355 if v[0] == 'd':
355 if v[0] == 'd':
356 records.append(('D', '\0'.join([d] + v)))
356 records.append(('D', '\0'.join([d] + v)))
357 # v[1] == local ('cd'), v[6] == other ('dc') -- not supported by
357 # v[1] == local ('cd'), v[6] == other ('dc') -- not supported by
358 # older versions of Mercurial
358 # older versions of Mercurial
359 elif v[1] == nullhex or v[6] == nullhex:
359 elif v[1] == nullhex or v[6] == nullhex:
360 records.append(('C', '\0'.join([d] + v)))
360 records.append(('C', '\0'.join([d] + v)))
361 else:
361 else:
362 records.append(('F', '\0'.join([d] + v)))
362 records.append(('F', '\0'.join([d] + v)))
363 for filename, extras in sorted(self._stateextras.iteritems()):
363 for filename, extras in sorted(self._stateextras.iteritems()):
364 rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
364 rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
365 extras.iteritems())
365 extras.iteritems())
366 records.append(('f', '%s\0%s' % (filename, rawextras)))
366 records.append(('f', '%s\0%s' % (filename, rawextras)))
367 if self._labels is not None:
367 if self._labels is not None:
368 labels = '\0'.join(self._labels)
368 labels = '\0'.join(self._labels)
369 records.append(('l', labels))
369 records.append(('l', labels))
370 return records
370 return records
371
371
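For reference, the single-character record types emitted by _makerecords above can be summarized as follows; this is an illustrative table derived from the code, not an exhaustive format specification:

    # record types written by _makerecords (see the code above)
    RECORD_TYPES = {
        'L': 'node of the local changeset',
        'O': 'node of the other changeset',
        'm': 'merge driver name plus its state',
        'D': 'driver-resolved file entry',
        'C': 'change/delete conflict entry (one side is nullhex)',
        'F': 'regular merged-file entry',
        'f': 'per-file extras, e.g. ancestorlinknode',
        'l': 'conflict marker labels',
    }
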
372 def _writerecords(self, records):
372 def _writerecords(self, records):
373 """Write current state on disk (both v1 and v2)"""
373 """Write current state on disk (both v1 and v2)"""
374 self._writerecordsv1(records)
374 self._writerecordsv1(records)
375 self._writerecordsv2(records)
375 self._writerecordsv2(records)
376
376
377 def _writerecordsv1(self, records):
377 def _writerecordsv1(self, records):
378 """Write current state on disk in a version 1 file"""
378 """Write current state on disk in a version 1 file"""
379 f = self._repo.vfs(self.statepathv1, 'w')
379 f = self._repo.vfs(self.statepathv1, 'w')
380 irecords = iter(records)
380 irecords = iter(records)
381 lrecords = next(irecords)
381 lrecords = next(irecords)
382 assert lrecords[0] == 'L'
382 assert lrecords[0] == 'L'
383 f.write(hex(self._local) + '\n')
383 f.write(hex(self._local) + '\n')
384 for rtype, data in irecords:
384 for rtype, data in irecords:
385 if rtype == 'F':
385 if rtype == 'F':
386 f.write('%s\n' % _droponode(data))
386 f.write('%s\n' % _droponode(data))
387 f.close()
387 f.close()
388
388
389 def _writerecordsv2(self, records):
389 def _writerecordsv2(self, records):
390 """Write current state on disk in a version 2 file
390 """Write current state on disk in a version 2 file
391
391
392 See the docstring for _readrecordsv2 for why we use 't'."""
392 See the docstring for _readrecordsv2 for why we use 't'."""
393 # these are the records that all version 2 clients can read
393 # these are the records that all version 2 clients can read
394 whitelist = 'LOF'
394 whitelist = 'LOF'
395 f = self._repo.vfs(self.statepathv2, 'w')
395 f = self._repo.vfs(self.statepathv2, 'w')
396 for key, data in records:
396 for key, data in records:
397 assert len(key) == 1
397 assert len(key) == 1
398 if key not in whitelist:
398 if key not in whitelist:
399 key, data = 't', '%s%s' % (key, data)
399 key, data = 't', '%s%s' % (key, data)
400 format = '>sI%is' % len(data)
400 format = '>sI%is' % len(data)
401 f.write(_pack(format, key, len(data), data))
401 f.write(_pack(format, key, len(data), data))
402 f.close()
402 f.close()
403
403
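The '>sI%is' format above means each v2 record is a one-byte type, a four-byte big-endian length, and then the payload; record types outside the whitelist are wrapped in a 't' record so older clients can skip them. A small standalone sketch of that layout using plain struct (not Mercurial code):

    import struct

    def pack_record(rtype, data):
        # one-byte type, 4-byte big-endian length, then the payload
        assert len(rtype) == 1
        return struct.pack('>sI%ds' % len(data), rtype, len(data), data)

    def unpack_records(blob):
        # inverse walk over a concatenation of packed records
        offset = 0
        while offset < len(blob):
            rtype, length = struct.unpack_from('>sI', blob, offset)
            offset += struct.calcsize('>sI')
            yield rtype, blob[offset:offset + length]
            offset += length

    raw = pack_record(b'L', b'0' * 40) + pack_record(b'O', b'f' * 40)
    print(list(unpack_records(raw)))
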
404 def add(self, fcl, fco, fca, fd):
404 def add(self, fcl, fco, fca, fd):
405 """add a new (potentially?) conflicting file the merge state
405 """add a new (potentially?) conflicting file the merge state
406 fcl: file context for local,
406 fcl: file context for local,
407 fco: file context for remote,
407 fco: file context for remote,
408 fca: file context for ancestors,
408 fca: file context for ancestors,
409 fd: file path of the resulting merge.
409 fd: file path of the resulting merge.
410
410
411 note: also write the local version to the `.hg/merge` directory.
411 note: also write the local version to the `.hg/merge` directory.
412 """
412 """
413 if fcl.isabsent():
413 if fcl.isabsent():
414 hash = nullhex
414 hash = nullhex
415 else:
415 else:
416 hash = hex(hashlib.sha1(fcl.path()).digest())
416 hash = hex(hashlib.sha1(fcl.path()).digest())
417 self._repo.vfs.write('merge/' + hash, fcl.data())
417 self._repo.vfs.write('merge/' + hash, fcl.data())
418 self._state[fd] = ['u', hash, fcl.path(),
418 self._state[fd] = ['u', hash, fcl.path(),
419 fca.path(), hex(fca.filenode()),
419 fca.path(), hex(fca.filenode()),
420 fco.path(), hex(fco.filenode()),
420 fco.path(), hex(fco.filenode()),
421 fcl.flags()]
421 fcl.flags()]
422 self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())}
422 self._stateextras[fd] = {'ancestorlinknode': hex(fca.node())}
423 self._dirty = True
423 self._dirty = True
424
424
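As add() shows, the local version of each conflicting file is stashed under .hg/merge/ using the SHA-1 of its path as the file name. A short illustrative sketch of just that naming scheme, mirroring hex(hashlib.sha1(fcl.path()).digest()) above:

    import hashlib

    def backupname(path):
        # path is the repo-relative path of the conflicting file, as bytes
        return 'merge/' + hashlib.sha1(path).hexdigest()

    print(backupname(b'src/app.py'))
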
425 def __contains__(self, dfile):
425 def __contains__(self, dfile):
426 return dfile in self._state
426 return dfile in self._state
427
427
428 def __getitem__(self, dfile):
428 def __getitem__(self, dfile):
429 return self._state[dfile][0]
429 return self._state[dfile][0]
430
430
431 def __iter__(self):
431 def __iter__(self):
432 return iter(sorted(self._state))
432 return iter(sorted(self._state))
433
433
434 def files(self):
434 def files(self):
435 return self._state.keys()
435 return self._state.keys()
436
436
437 def mark(self, dfile, state):
437 def mark(self, dfile, state):
438 self._state[dfile][0] = state
438 self._state[dfile][0] = state
439 self._dirty = True
439 self._dirty = True
440
440
441 def mdstate(self):
441 def mdstate(self):
442 return self._mdstate
442 return self._mdstate
443
443
444 def unresolved(self):
444 def unresolved(self):
445 """Obtain the paths of unresolved files."""
445 """Obtain the paths of unresolved files."""
446
446
447 for f, entry in self._state.iteritems():
447 for f, entry in self._state.iteritems():
448 if entry[0] == 'u':
448 if entry[0] == 'u':
449 yield f
449 yield f
450
450
451 def driverresolved(self):
451 def driverresolved(self):
452 """Obtain the paths of driver-resolved files."""
452 """Obtain the paths of driver-resolved files."""
453
453
454 for f, entry in self._state.items():
454 for f, entry in self._state.items():
455 if entry[0] == 'd':
455 if entry[0] == 'd':
456 yield f
456 yield f
457
457
458 def extras(self, filename):
458 def extras(self, filename):
459 return self._stateextras.setdefault(filename, {})
459 return self._stateextras.setdefault(filename, {})
460
460
461 def _resolve(self, preresolve, dfile, wctx):
461 def _resolve(self, preresolve, dfile, wctx):
462 """rerun merge process for file path `dfile`"""
462 """rerun merge process for file path `dfile`"""
463 if self[dfile] in 'rd':
463 if self[dfile] in 'rd':
464 return True, 0
464 return True, 0
465 stateentry = self._state[dfile]
465 stateentry = self._state[dfile]
466 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
466 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
467 octx = self._repo[self._other]
467 octx = self._repo[self._other]
468 extras = self.extras(dfile)
468 extras = self.extras(dfile)
469 anccommitnode = extras.get('ancestorlinknode')
469 anccommitnode = extras.get('ancestorlinknode')
470 if anccommitnode:
470 if anccommitnode:
471 actx = self._repo[anccommitnode]
471 actx = self._repo[anccommitnode]
472 else:
472 else:
473 actx = None
473 actx = None
474 fcd = self._filectxorabsent(hash, wctx, dfile)
474 fcd = self._filectxorabsent(hash, wctx, dfile)
475 fco = self._filectxorabsent(onode, octx, ofile)
475 fco = self._filectxorabsent(onode, octx, ofile)
476 # TODO: move this to filectxorabsent
476 # TODO: move this to filectxorabsent
477 fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
477 fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
478 # "premerge" x flags
478 # "premerge" x flags
479 flo = fco.flags()
479 flo = fco.flags()
480 fla = fca.flags()
480 fla = fca.flags()
481 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
481 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
482 if fca.node() == nullid and flags != flo:
482 if fca.node() == nullid and flags != flo:
483 if preresolve:
483 if preresolve:
484 self._repo.ui.warn(
484 self._repo.ui.warn(
485 _('warning: cannot merge flags for %s '
485 _('warning: cannot merge flags for %s '
486 'without common ancestor - keeping local flags\n')
486 'without common ancestor - keeping local flags\n')
487 % afile)
487 % afile)
488 elif flags == fla:
488 elif flags == fla:
489 flags = flo
489 flags = flo
490 if preresolve:
490 if preresolve:
491 # restore local
491 # restore local
492 if hash != nullhex:
492 if hash != nullhex:
493 f = self._repo.vfs('merge/' + hash)
493 f = self._repo.vfs('merge/' + hash)
494 wctx[dfile].write(f.read(), flags)
494 wctx[dfile].write(f.read(), flags)
495 f.close()
495 f.close()
496 else:
496 else:
497 wctx[dfile].remove(ignoremissing=True)
497 wctx[dfile].remove(ignoremissing=True)
498 complete, r, deleted = filemerge.premerge(self._repo, wctx,
498 complete, r, deleted = filemerge.premerge(self._repo, wctx,
499 self._local, lfile, fcd,
499 self._local, lfile, fcd,
500 fco, fca,
500 fco, fca,
501 labels=self._labels)
501 labels=self._labels)
502 else:
502 else:
503 complete, r, deleted = filemerge.filemerge(self._repo, wctx,
503 complete, r, deleted = filemerge.filemerge(self._repo, wctx,
504 self._local, lfile, fcd,
504 self._local, lfile, fcd,
505 fco, fca,
505 fco, fca,
506 labels=self._labels)
506 labels=self._labels)
507 if r is None:
507 if r is None:
508 # no real conflict
508 # no real conflict
509 del self._state[dfile]
509 del self._state[dfile]
510 self._stateextras.pop(dfile, None)
510 self._stateextras.pop(dfile, None)
511 self._dirty = True
511 self._dirty = True
512 elif not r:
512 elif not r:
513 self.mark(dfile, 'r')
513 self.mark(dfile, 'r')
514
514
515 if complete:
515 if complete:
516 action = None
516 action = None
517 if deleted:
517 if deleted:
518 if fcd.isabsent():
518 if fcd.isabsent():
519 # dc: local picked. Need to drop if present, which may
519 # dc: local picked. Need to drop if present, which may
520 # happen on re-resolves.
520 # happen on re-resolves.
521 action = 'f'
521 action = 'f'
522 else:
522 else:
523 # cd: remote picked (or otherwise deleted)
523 # cd: remote picked (or otherwise deleted)
524 action = 'r'
524 action = 'r'
525 else:
525 else:
526 if fcd.isabsent(): # dc: remote picked
526 if fcd.isabsent(): # dc: remote picked
527 action = 'g'
527 action = 'g'
528 elif fco.isabsent(): # cd: local picked
528 elif fco.isabsent(): # cd: local picked
529 if dfile in self.localctx:
529 if dfile in self.localctx:
530 action = 'am'
530 action = 'am'
531 else:
531 else:
532 action = 'a'
532 action = 'a'
533 # else: regular merges (no action necessary)
533 # else: regular merges (no action necessary)
534 self._results[dfile] = r, action
534 self._results[dfile] = r, action
535
535
536 return complete, r
536 return complete, r
537
537
538 def _filectxorabsent(self, hexnode, ctx, f):
538 def _filectxorabsent(self, hexnode, ctx, f):
539 if hexnode == nullhex:
539 if hexnode == nullhex:
540 return filemerge.absentfilectx(ctx, f)
540 return filemerge.absentfilectx(ctx, f)
541 else:
541 else:
542 return ctx[f]
542 return ctx[f]
543
543
544 def preresolve(self, dfile, wctx):
544 def preresolve(self, dfile, wctx):
545 """run premerge process for dfile
545 """run premerge process for dfile
546
546
547 Returns whether the merge is complete, and the exit code."""
547 Returns whether the merge is complete, and the exit code."""
548 return self._resolve(True, dfile, wctx)
548 return self._resolve(True, dfile, wctx)
549
549
550 def resolve(self, dfile, wctx):
550 def resolve(self, dfile, wctx):
551 """run merge process (assuming premerge was run) for dfile
551 """run merge process (assuming premerge was run) for dfile
552
552
553 Returns the exit code of the merge."""
553 Returns the exit code of the merge."""
554 return self._resolve(False, dfile, wctx)[1]
554 return self._resolve(False, dfile, wctx)[1]
555
555
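preresolve() and resolve() are meant to be driven as two passes per file, the way the resolve machinery does; a hypothetical driver loop using only the mergestate methods defined above (assuming `ms` is a mergestate and `wctx` the working context, as elsewhere in this file):

    for f in list(ms.unresolved()):
        complete, r = ms.preresolve(f, wctx)
        if not complete:
            r = ms.resolve(f, wctx)
        if r:
            print('unresolved conflicts remain in %s' % f)
    ms.commit()
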
556 def counts(self):
556 def counts(self):
557 """return counts for updated, merged and removed files in this
557 """return counts for updated, merged and removed files in this
558 session"""
558 session"""
559 updated, merged, removed = 0, 0, 0
559 updated, merged, removed = 0, 0, 0
560 for r, action in self._results.itervalues():
560 for r, action in self._results.itervalues():
561 if r is None:
561 if r is None:
562 updated += 1
562 updated += 1
563 elif r == 0:
563 elif r == 0:
564 if action == 'r':
564 if action == 'r':
565 removed += 1
565 removed += 1
566 else:
566 else:
567 merged += 1
567 merged += 1
568 return updated, merged, removed
568 return updated, merged, removed
569
569
570 def unresolvedcount(self):
570 def unresolvedcount(self):
571 """get unresolved count for this merge (persistent)"""
571 """get unresolved count for this merge (persistent)"""
572 return len(list(self.unresolved()))
572 return len(list(self.unresolved()))
573
573
574 def actions(self):
574 def actions(self):
575 """return lists of actions to perform on the dirstate"""
575 """return lists of actions to perform on the dirstate"""
576 actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
576 actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
577 for f, (r, action) in self._results.iteritems():
577 for f, (r, action) in self._results.iteritems():
578 if action is not None:
578 if action is not None:
579 actions[action].append((f, None, "merge result"))
579 actions[action].append((f, None, "merge result"))
580 return actions
580 return actions
581
581
582 def recordactions(self):
582 def recordactions(self):
583 """record remove/add/get actions in the dirstate"""
583 """record remove/add/get actions in the dirstate"""
584 branchmerge = self._repo.dirstate.p2() != nullid
584 branchmerge = self._repo.dirstate.p2() != nullid
585 recordupdates(self._repo, self.actions(), branchmerge)
585 recordupdates(self._repo, self.actions(), branchmerge)
586
586
587 def queueremove(self, f):
587 def queueremove(self, f):
588 """queues a file to be removed from the dirstate
588 """queues a file to be removed from the dirstate
589
589
590 Meant for use by custom merge drivers."""
590 Meant for use by custom merge drivers."""
591 self._results[f] = 0, 'r'
591 self._results[f] = 0, 'r'
592
592
593 def queueadd(self, f):
593 def queueadd(self, f):
594 """queues a file to be added to the dirstate
594 """queues a file to be added to the dirstate
595
595
596 Meant for use by custom merge drivers."""
596 Meant for use by custom merge drivers."""
597 self._results[f] = 0, 'a'
597 self._results[f] = 0, 'a'
598
598
599 def queueget(self, f):
599 def queueget(self, f):
600 """queues a file to be marked modified in the dirstate
600 """queues a file to be marked modified in the dirstate
601
601
602 Meant for use by custom merge drivers."""
602 Meant for use by custom merge drivers."""
603 self._results[f] = 0, 'g'
603 self._results[f] = 0, 'g'
604
604
605 def _getcheckunknownconfig(repo, section, name):
605 def _getcheckunknownconfig(repo, section, name):
606 config = repo.ui.config(section, name, default='abort')
606 config = repo.ui.config(section, name)
607 valid = ['abort', 'ignore', 'warn']
607 valid = ['abort', 'ignore', 'warn']
608 if config not in valid:
608 if config not in valid:
609 validstr = ', '.join(["'" + v + "'" for v in valid])
609 validstr = ', '.join(["'" + v + "'" for v in valid])
610 raise error.ConfigError(_("%s.%s not valid "
610 raise error.ConfigError(_("%s.%s not valid "
611 "('%s' is none of %s)")
611 "('%s' is none of %s)")
612 % (section, name, config, validstr))
612 % (section, name, config, validstr))
613 return config
613 return config
614
614
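merge.checkunknown and merge.checkignored, the options this helper reads, only accept 'abort', 'ignore' or 'warn'; anything else raises a ConfigError. A standalone sketch of the same validation, with the hgrc spelling shown in a comment:

    VALID = ('abort', 'ignore', 'warn')

    def checkunknownpolicy(value, name='merge.checkunknown'):
        # e.g. in an hgrc:
        #   [merge]
        #   checkunknown = warn
        #   checkignored = ignore
        if value not in VALID:
            raise ValueError("%s not valid ('%s' is none of %s)"
                             % (name, value, ', '.join(VALID)))
        return value

    print(checkunknownpolicy('warn'))
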
615 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
615 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
616 if f2 is None:
616 if f2 is None:
617 f2 = f
617 f2 = f
618 return (repo.wvfs.audit.check(f)
618 return (repo.wvfs.audit.check(f)
619 and repo.wvfs.isfileorlink(f)
619 and repo.wvfs.isfileorlink(f)
620 and repo.dirstate.normalize(f) not in repo.dirstate
620 and repo.dirstate.normalize(f) not in repo.dirstate
621 and mctx[f2].cmp(wctx[f]))
621 and mctx[f2].cmp(wctx[f]))
622
622
623 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
623 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
624 """
624 """
625 Considers any actions that care about the presence of conflicting unknown
625 Considers any actions that care about the presence of conflicting unknown
626 files. For some actions, the result is to abort; for others, it is to
626 files. For some actions, the result is to abort; for others, it is to
627 choose a different action.
627 choose a different action.
628 """
628 """
629 conflicts = set()
629 conflicts = set()
630 warnconflicts = set()
630 warnconflicts = set()
631 abortconflicts = set()
631 abortconflicts = set()
632 unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
632 unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
633 ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
633 ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
634 if not force:
634 if not force:
635 def collectconflicts(conflicts, config):
635 def collectconflicts(conflicts, config):
636 if config == 'abort':
636 if config == 'abort':
637 abortconflicts.update(conflicts)
637 abortconflicts.update(conflicts)
638 elif config == 'warn':
638 elif config == 'warn':
639 warnconflicts.update(conflicts)
639 warnconflicts.update(conflicts)
640
640
641 for f, (m, args, msg) in actions.iteritems():
641 for f, (m, args, msg) in actions.iteritems():
642 if m in ('c', 'dc'):
642 if m in ('c', 'dc'):
643 if _checkunknownfile(repo, wctx, mctx, f):
643 if _checkunknownfile(repo, wctx, mctx, f):
644 conflicts.add(f)
644 conflicts.add(f)
645 elif m == 'dg':
645 elif m == 'dg':
646 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
646 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
647 conflicts.add(f)
647 conflicts.add(f)
648
648
649 ignoredconflicts = set([c for c in conflicts
649 ignoredconflicts = set([c for c in conflicts
650 if repo.dirstate._ignore(c)])
650 if repo.dirstate._ignore(c)])
651 unknownconflicts = conflicts - ignoredconflicts
651 unknownconflicts = conflicts - ignoredconflicts
652 collectconflicts(ignoredconflicts, ignoredconfig)
652 collectconflicts(ignoredconflicts, ignoredconfig)
653 collectconflicts(unknownconflicts, unknownconfig)
653 collectconflicts(unknownconflicts, unknownconfig)
654 else:
654 else:
655 for f, (m, args, msg) in actions.iteritems():
655 for f, (m, args, msg) in actions.iteritems():
656 if m == 'cm':
656 if m == 'cm':
657 fl2, anc = args
657 fl2, anc = args
658 different = _checkunknownfile(repo, wctx, mctx, f)
658 different = _checkunknownfile(repo, wctx, mctx, f)
659 if repo.dirstate._ignore(f):
659 if repo.dirstate._ignore(f):
660 config = ignoredconfig
660 config = ignoredconfig
661 else:
661 else:
662 config = unknownconfig
662 config = unknownconfig
663
663
664 # The behavior when force is True is described by this table:
664 # The behavior when force is True is described by this table:
665 # config different mergeforce | action backup
665 # config different mergeforce | action backup
666 # * n * | get n
666 # * n * | get n
667 # * y y | merge -
667 # * y y | merge -
668 # abort y n | merge - (1)
668 # abort y n | merge - (1)
669 # warn y n | warn + get y
669 # warn y n | warn + get y
670 # ignore y n | get y
670 # ignore y n | get y
671 #
671 #
672 # (1) this is probably the wrong behavior here -- we should
672 # (1) this is probably the wrong behavior here -- we should
673 # probably abort, but some actions like rebases currently
673 # probably abort, but some actions like rebases currently
674 # don't like an abort happening in the middle of
674 # don't like an abort happening in the middle of
675 # merge.update.
675 # merge.update.
676 if not different:
676 if not different:
677 actions[f] = ('g', (fl2, False), "remote created")
677 actions[f] = ('g', (fl2, False), "remote created")
678 elif mergeforce or config == 'abort':
678 elif mergeforce or config == 'abort':
679 actions[f] = ('m', (f, f, None, False, anc),
679 actions[f] = ('m', (f, f, None, False, anc),
680 "remote differs from untracked local")
680 "remote differs from untracked local")
681 elif config == 'abort':
681 elif config == 'abort':
682 abortconflicts.add(f)
682 abortconflicts.add(f)
683 else:
683 else:
684 if config == 'warn':
684 if config == 'warn':
685 warnconflicts.add(f)
685 warnconflicts.add(f)
686 actions[f] = ('g', (fl2, True), "remote created")
686 actions[f] = ('g', (fl2, True), "remote created")
687
687
688 for f in sorted(abortconflicts):
688 for f in sorted(abortconflicts):
689 repo.ui.warn(_("%s: untracked file differs\n") % f)
689 repo.ui.warn(_("%s: untracked file differs\n") % f)
690 if abortconflicts:
690 if abortconflicts:
691 raise error.Abort(_("untracked files in working directory "
691 raise error.Abort(_("untracked files in working directory "
692 "differ from files in requested revision"))
692 "differ from files in requested revision"))
693
693
694 for f in sorted(warnconflicts):
694 for f in sorted(warnconflicts):
695 repo.ui.warn(_("%s: replacing untracked file\n") % f)
695 repo.ui.warn(_("%s: replacing untracked file\n") % f)
696
696
697 for f, (m, args, msg) in actions.iteritems():
697 for f, (m, args, msg) in actions.iteritems():
698 backup = f in conflicts
698 backup = f in conflicts
699 if m == 'c':
699 if m == 'c':
700 flags, = args
700 flags, = args
701 actions[f] = ('g', (flags, backup), msg)
701 actions[f] = ('g', (flags, backup), msg)
702
702
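The behaviour table for force=True in _checkunknownfiles above boils down to a small decision function; an illustrative sketch of that mapping, carrying the same caveat as note (1) in the comments:

    def cm_decision(config, different, mergeforce):
        # returns (action, needs_backup) following the force=True table above
        if not different:
            return 'get', False
        if mergeforce or config == 'abort':
            return 'merge', None      # note (1): arguably should abort
        # 'warn' and 'ignore' both take the file from the remote side but
        # keep a backup of the untracked local version; 'warn' also reports it
        return 'get', True

    print(cm_decision('warn', True, False))
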
703 def _forgetremoved(wctx, mctx, branchmerge):
703 def _forgetremoved(wctx, mctx, branchmerge):
704 """
704 """
705 Forget removed files
705 Forget removed files
706
706
707 If we're jumping between revisions (as opposed to merging), and if
707 If we're jumping between revisions (as opposed to merging), and if
708 neither the working directory nor the target rev has the file,
708 neither the working directory nor the target rev has the file,
709 then we need to remove it from the dirstate, to prevent the
709 then we need to remove it from the dirstate, to prevent the
710 dirstate from listing the file when it is no longer in the
710 dirstate from listing the file when it is no longer in the
711 manifest.
711 manifest.
712
712
713 If we're merging, and the other revision has removed a file
713 If we're merging, and the other revision has removed a file
714 that is not present in the working directory, we need to mark it
714 that is not present in the working directory, we need to mark it
715 as removed.
715 as removed.
716 """
716 """
717
717
718 actions = {}
718 actions = {}
719 m = 'f'
719 m = 'f'
720 if branchmerge:
720 if branchmerge:
721 m = 'r'
721 m = 'r'
722 for f in wctx.deleted():
722 for f in wctx.deleted():
723 if f not in mctx:
723 if f not in mctx:
724 actions[f] = m, None, "forget deleted"
724 actions[f] = m, None, "forget deleted"
725
725
726 if not branchmerge:
726 if not branchmerge:
727 for f in wctx.removed():
727 for f in wctx.removed():
728 if f not in mctx:
728 if f not in mctx:
729 actions[f] = 'f', None, "forget removed"
729 actions[f] = 'f', None, "forget removed"
730
730
731 return actions
731 return actions
732
732
733 def _checkcollision(repo, wmf, actions):
733 def _checkcollision(repo, wmf, actions):
734 # build provisional merged manifest up
734 # build provisional merged manifest up
735 pmmf = set(wmf)
735 pmmf = set(wmf)
736
736
737 if actions:
737 if actions:
738 # k, dr, e and rd are no-op
738 # k, dr, e and rd are no-op
739 for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
739 for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
740 for f, args, msg in actions[m]:
740 for f, args, msg in actions[m]:
741 pmmf.add(f)
741 pmmf.add(f)
742 for f, args, msg in actions['r']:
742 for f, args, msg in actions['r']:
743 pmmf.discard(f)
743 pmmf.discard(f)
744 for f, args, msg in actions['dm']:
744 for f, args, msg in actions['dm']:
745 f2, flags = args
745 f2, flags = args
746 pmmf.discard(f2)
746 pmmf.discard(f2)
747 pmmf.add(f)
747 pmmf.add(f)
748 for f, args, msg in actions['dg']:
748 for f, args, msg in actions['dg']:
749 pmmf.add(f)
749 pmmf.add(f)
750 for f, args, msg in actions['m']:
750 for f, args, msg in actions['m']:
751 f1, f2, fa, move, anc = args
751 f1, f2, fa, move, anc = args
752 if move:
752 if move:
753 pmmf.discard(f1)
753 pmmf.discard(f1)
754 pmmf.add(f)
754 pmmf.add(f)
755
755
756 # check case-folding collision in provisional merged manifest
756 # check case-folding collision in provisional merged manifest
757 foldmap = {}
757 foldmap = {}
758 for f in pmmf:
758 for f in pmmf:
759 fold = util.normcase(f)
759 fold = util.normcase(f)
760 if fold in foldmap:
760 if fold in foldmap:
761 raise error.Abort(_("case-folding collision between %s and %s")
761 raise error.Abort(_("case-folding collision between %s and %s")
762 % (f, foldmap[fold]))
762 % (f, foldmap[fold]))
763 foldmap[fold] = f
763 foldmap[fold] = f
764
764
765 # check case-folding of directories
765 # check case-folding of directories
766 foldprefix = unfoldprefix = lastfull = ''
766 foldprefix = unfoldprefix = lastfull = ''
767 for fold, f in sorted(foldmap.items()):
767 for fold, f in sorted(foldmap.items()):
768 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
768 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
769 # the folded prefix matches but actual casing is different
769 # the folded prefix matches but actual casing is different
770 raise error.Abort(_("case-folding collision between "
770 raise error.Abort(_("case-folding collision between "
771 "%s and directory of %s") % (lastfull, f))
771 "%s and directory of %s") % (lastfull, f))
772 foldprefix = fold + '/'
772 foldprefix = fold + '/'
773 unfoldprefix = f + '/'
773 unfoldprefix = f + '/'
774 lastfull = f
774 lastfull = f
775
775
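_checkcollision builds a fold map (normalized case -> actual path) and aborts on the first clash; a minimal standalone version of the same idea, using str.lower() as a stand-in for util.normcase:

    def casecollisions(paths):
        # yield pairs of paths that collide once case is folded
        seen = {}
        for p in paths:
            fold = p.lower()
            if fold in seen and seen[fold] != p:
                yield seen[fold], p
            else:
                seen[fold] = p

    print(list(casecollisions(['README', 'readme', 'docs/a.txt'])))
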
776 def driverpreprocess(repo, ms, wctx, labels=None):
776 def driverpreprocess(repo, ms, wctx, labels=None):
777 """run the preprocess step of the merge driver, if any
777 """run the preprocess step of the merge driver, if any
778
778
779 This is currently not implemented -- it's an extension point."""
779 This is currently not implemented -- it's an extension point."""
780 return True
780 return True
781
781
782 def driverconclude(repo, ms, wctx, labels=None):
782 def driverconclude(repo, ms, wctx, labels=None):
783 """run the conclude step of the merge driver, if any
783 """run the conclude step of the merge driver, if any
784
784
785 This is currently not implemented -- it's an extension point."""
785 This is currently not implemented -- it's an extension point."""
786 return True
786 return True
787
787
788 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
788 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
789 acceptremote, followcopies, forcefulldiff=False):
789 acceptremote, followcopies, forcefulldiff=False):
790 """
790 """
791 Merge wctx and p2 with ancestor pa and generate merge action list
791 Merge wctx and p2 with ancestor pa and generate merge action list
792
792
793 branchmerge and force are as passed in to update
793 branchmerge and force are as passed in to update
794 matcher = matcher to filter file lists
794 matcher = matcher to filter file lists
795 acceptremote = accept the incoming changes without prompting
795 acceptremote = accept the incoming changes without prompting
796 """
796 """
797 if matcher is not None and matcher.always():
797 if matcher is not None and matcher.always():
798 matcher = None
798 matcher = None
799
799
800 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
800 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
801
801
802 # manifests fetched in order are going to be faster, so prime the caches
802 # manifests fetched in order are going to be faster, so prime the caches
803 [x.manifest() for x in
803 [x.manifest() for x in
804 sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]
804 sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]
805
805
806 if followcopies:
806 if followcopies:
807 ret = copies.mergecopies(repo, wctx, p2, pa)
807 ret = copies.mergecopies(repo, wctx, p2, pa)
808 copy, movewithdir, diverge, renamedelete, dirmove = ret
808 copy, movewithdir, diverge, renamedelete, dirmove = ret
809
809
810 boolbm = pycompat.bytestr(bool(branchmerge))
810 boolbm = pycompat.bytestr(bool(branchmerge))
811 boolf = pycompat.bytestr(bool(force))
811 boolf = pycompat.bytestr(bool(force))
812 boolm = pycompat.bytestr(bool(matcher))
812 boolm = pycompat.bytestr(bool(matcher))
813 repo.ui.note(_("resolving manifests\n"))
813 repo.ui.note(_("resolving manifests\n"))
814 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
814 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
815 % (boolbm, boolf, boolm))
815 % (boolbm, boolf, boolm))
816 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
816 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
817
817
818 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
818 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
819 copied = set(copy.values())
819 copied = set(copy.values())
820 copied.update(movewithdir.values())
820 copied.update(movewithdir.values())
821
821
822 if '.hgsubstate' in m1:
822 if '.hgsubstate' in m1:
823 # check whether sub state is modified
823 # check whether sub state is modified
824 if any(wctx.sub(s).dirty() for s in wctx.substate):
824 if any(wctx.sub(s).dirty() for s in wctx.substate):
825 m1['.hgsubstate'] = modifiednodeid
825 m1['.hgsubstate'] = modifiednodeid
826
826
827 # Don't use m2-vs-ma optimization if:
827 # Don't use m2-vs-ma optimization if:
828 # - ma is the same as m1 or m2, which we're just going to diff again later
828 # - ma is the same as m1 or m2, which we're just going to diff again later
829 # - The caller specifically asks for a full diff, which is useful during bid
829 # - The caller specifically asks for a full diff, which is useful during bid
830 # merge.
830 # merge.
831 if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
831 if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
832 # Identify which files are relevant to the merge, so we can limit the
832 # Identify which files are relevant to the merge, so we can limit the
833 # total m1-vs-m2 diff to just those files. This has significant
833 # total m1-vs-m2 diff to just those files. This has significant
834 # performance benefits in large repositories.
834 # performance benefits in large repositories.
835 relevantfiles = set(ma.diff(m2).keys())
835 relevantfiles = set(ma.diff(m2).keys())
836
836
837 # For copied and moved files, we need to add the source file too.
837 # For copied and moved files, we need to add the source file too.
838 for copykey, copyvalue in copy.iteritems():
838 for copykey, copyvalue in copy.iteritems():
839 if copyvalue in relevantfiles:
839 if copyvalue in relevantfiles:
840 relevantfiles.add(copykey)
840 relevantfiles.add(copykey)
841 for movedirkey in movewithdir:
841 for movedirkey in movewithdir:
842 relevantfiles.add(movedirkey)
842 relevantfiles.add(movedirkey)
843 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
843 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
844 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
844 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
845
845
846 diff = m1.diff(m2, match=matcher)
846 diff = m1.diff(m2, match=matcher)
847
847
848 if matcher is None:
848 if matcher is None:
849 matcher = matchmod.always('', '')
849 matcher = matchmod.always('', '')
850
850
851 actions = {}
851 actions = {}
852 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
852 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
853 if n1 and n2: # file exists on both local and remote side
853 if n1 and n2: # file exists on both local and remote side
854 if f not in ma:
854 if f not in ma:
855 fa = copy.get(f, None)
855 fa = copy.get(f, None)
856 if fa is not None:
856 if fa is not None:
857 actions[f] = ('m', (f, f, fa, False, pa.node()),
857 actions[f] = ('m', (f, f, fa, False, pa.node()),
858 "both renamed from " + fa)
858 "both renamed from " + fa)
859 else:
859 else:
860 actions[f] = ('m', (f, f, None, False, pa.node()),
860 actions[f] = ('m', (f, f, None, False, pa.node()),
861 "both created")
861 "both created")
862 else:
862 else:
863 a = ma[f]
863 a = ma[f]
864 fla = ma.flags(f)
864 fla = ma.flags(f)
865 nol = 'l' not in fl1 + fl2 + fla
865 nol = 'l' not in fl1 + fl2 + fla
866 if n2 == a and fl2 == fla:
866 if n2 == a and fl2 == fla:
867 actions[f] = ('k', (), "remote unchanged")
867 actions[f] = ('k', (), "remote unchanged")
868 elif n1 == a and fl1 == fla: # local unchanged - use remote
868 elif n1 == a and fl1 == fla: # local unchanged - use remote
869 if n1 == n2: # optimization: keep local content
869 if n1 == n2: # optimization: keep local content
870 actions[f] = ('e', (fl2,), "update permissions")
870 actions[f] = ('e', (fl2,), "update permissions")
871 else:
871 else:
872 actions[f] = ('g', (fl2, False), "remote is newer")
872 actions[f] = ('g', (fl2, False), "remote is newer")
873 elif nol and n2 == a: # remote only changed 'x'
873 elif nol and n2 == a: # remote only changed 'x'
874 actions[f] = ('e', (fl2,), "update permissions")
874 actions[f] = ('e', (fl2,), "update permissions")
875 elif nol and n1 == a: # local only changed 'x'
875 elif nol and n1 == a: # local only changed 'x'
876 actions[f] = ('g', (fl1, False), "remote is newer")
876 actions[f] = ('g', (fl1, False), "remote is newer")
877 else: # both changed something
877 else: # both changed something
878 actions[f] = ('m', (f, f, f, False, pa.node()),
878 actions[f] = ('m', (f, f, f, False, pa.node()),
879 "versions differ")
879 "versions differ")
880 elif n1: # file exists only on local side
880 elif n1: # file exists only on local side
881 if f in copied:
881 if f in copied:
882 pass # we'll deal with it on m2 side
882 pass # we'll deal with it on m2 side
883 elif f in movewithdir: # directory rename, move local
883 elif f in movewithdir: # directory rename, move local
884 f2 = movewithdir[f]
884 f2 = movewithdir[f]
885 if f2 in m2:
885 if f2 in m2:
886 actions[f2] = ('m', (f, f2, None, True, pa.node()),
886 actions[f2] = ('m', (f, f2, None, True, pa.node()),
887 "remote directory rename, both created")
887 "remote directory rename, both created")
888 else:
888 else:
889 actions[f2] = ('dm', (f, fl1),
889 actions[f2] = ('dm', (f, fl1),
890 "remote directory rename - move from " + f)
890 "remote directory rename - move from " + f)
891 elif f in copy:
891 elif f in copy:
892 f2 = copy[f]
892 f2 = copy[f]
893 actions[f] = ('m', (f, f2, f2, False, pa.node()),
893 actions[f] = ('m', (f, f2, f2, False, pa.node()),
894 "local copied/moved from " + f2)
894 "local copied/moved from " + f2)
895 elif f in ma: # clean, a different, no remote
895 elif f in ma: # clean, a different, no remote
896 if n1 != ma[f]:
896 if n1 != ma[f]:
897 if acceptremote:
897 if acceptremote:
898 actions[f] = ('r', None, "remote delete")
898 actions[f] = ('r', None, "remote delete")
899 else:
899 else:
900 actions[f] = ('cd', (f, None, f, False, pa.node()),
900 actions[f] = ('cd', (f, None, f, False, pa.node()),
901 "prompt changed/deleted")
901 "prompt changed/deleted")
902 elif n1 == addednodeid:
902 elif n1 == addednodeid:
903 # This extra 'a' is added by working copy manifest to mark
903 # This extra 'a' is added by working copy manifest to mark
904 # the file as locally added. We should forget it instead of
904 # the file as locally added. We should forget it instead of
905 # deleting it.
905 # deleting it.
906 actions[f] = ('f', None, "remote deleted")
906 actions[f] = ('f', None, "remote deleted")
907 else:
907 else:
908 actions[f] = ('r', None, "other deleted")
908 actions[f] = ('r', None, "other deleted")
909 elif n2: # file exists only on remote side
909 elif n2: # file exists only on remote side
910 if f in copied:
910 if f in copied:
911 pass # we'll deal with it on m1 side
911 pass # we'll deal with it on m1 side
912 elif f in movewithdir:
912 elif f in movewithdir:
913 f2 = movewithdir[f]
913 f2 = movewithdir[f]
914 if f2 in m1:
914 if f2 in m1:
915 actions[f2] = ('m', (f2, f, None, False, pa.node()),
915 actions[f2] = ('m', (f2, f, None, False, pa.node()),
916 "local directory rename, both created")
916 "local directory rename, both created")
917 else:
917 else:
918 actions[f2] = ('dg', (f, fl2),
918 actions[f2] = ('dg', (f, fl2),
919 "local directory rename - get from " + f)
919 "local directory rename - get from " + f)
920 elif f in copy:
920 elif f in copy:
921 f2 = copy[f]
921 f2 = copy[f]
922 if f2 in m2:
922 if f2 in m2:
923 actions[f] = ('m', (f2, f, f2, False, pa.node()),
923 actions[f] = ('m', (f2, f, f2, False, pa.node()),
924 "remote copied from " + f2)
924 "remote copied from " + f2)
925 else:
925 else:
926 actions[f] = ('m', (f2, f, f2, True, pa.node()),
926 actions[f] = ('m', (f2, f, f2, True, pa.node()),
927 "remote moved from " + f2)
927 "remote moved from " + f2)
928 elif f not in ma:
928 elif f not in ma:
929 # local unknown, remote created: the logic is described by the
929 # local unknown, remote created: the logic is described by the
930 # following table:
930 # following table:
931 #
931 #
932 # force branchmerge different | action
932 # force branchmerge different | action
933 # n * * | create
933 # n * * | create
934 # y n * | create
934 # y n * | create
935 # y y n | create
935 # y y n | create
936 # y y y | merge
936 # y y y | merge
937 #
937 #
938 # Checking whether the files are different is expensive, so we
938 # Checking whether the files are different is expensive, so we
939 # don't do that when we can avoid it.
939 # don't do that when we can avoid it.
940 if not force:
940 if not force:
941 actions[f] = ('c', (fl2,), "remote created")
941 actions[f] = ('c', (fl2,), "remote created")
942 elif not branchmerge:
942 elif not branchmerge:
943 actions[f] = ('c', (fl2,), "remote created")
943 actions[f] = ('c', (fl2,), "remote created")
944 else:
944 else:
945 actions[f] = ('cm', (fl2, pa.node()),
945 actions[f] = ('cm', (fl2, pa.node()),
946 "remote created, get or merge")
946 "remote created, get or merge")
947 elif n2 != ma[f]:
947 elif n2 != ma[f]:
948 df = None
948 df = None
949 for d in dirmove:
949 for d in dirmove:
950 if f.startswith(d):
950 if f.startswith(d):
951 # new file added in a directory that was moved
951 # new file added in a directory that was moved
952 df = dirmove[d] + f[len(d):]
952 df = dirmove[d] + f[len(d):]
953 break
953 break
954 if df is not None and df in m1:
954 if df is not None and df in m1:
955 actions[df] = ('m', (df, f, f, False, pa.node()),
955 actions[df] = ('m', (df, f, f, False, pa.node()),
956 "local directory rename - respect move from " + f)
956 "local directory rename - respect move from " + f)
957 elif acceptremote:
957 elif acceptremote:
958 actions[f] = ('c', (fl2,), "remote recreating")
958 actions[f] = ('c', (fl2,), "remote recreating")
959 else:
959 else:
960 actions[f] = ('dc', (None, f, f, False, pa.node()),
960 actions[f] = ('dc', (None, f, f, False, pa.node()),
961 "prompt deleted/changed")
961 "prompt deleted/changed")
962
962
963 return actions, diverge, renamedelete
963 return actions, diverge, renamedelete
964
964
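manifestmerge returns a plain dict mapping each affected path to an (action, args, message) tuple, which the rest of this module consumes; a few illustrative entries (the values are made up, but the codes and argument shapes follow the assignments above):

    actions = {
        'a.txt': ('g', ('', False), 'remote is newer'),
        'b.txt': ('m', ('b.txt', 'b.txt', None, False, b'\x00' * 20),
                  'both created'),
        'c.txt': ('r', None, 'other deleted'),
    }
    for f, (m, args, msg) in sorted(actions.items()):
        print(' %s: %s -> %s' % (f, msg, m))
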
965 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
965 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
966 """Resolves false conflicts where the nodeid changed but the content
966 """Resolves false conflicts where the nodeid changed but the content
967 remained the same."""
967 remained the same."""
968
968
969 for f, (m, args, msg) in actions.items():
969 for f, (m, args, msg) in actions.items():
970 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
970 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
971 # local did change but ended up with same content
971 # local did change but ended up with same content
972 actions[f] = 'r', None, "prompt same"
972 actions[f] = 'r', None, "prompt same"
973 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
973 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
974 # remote did change but ended up with same content
974 # remote did change but ended up with same content
975 del actions[f] # don't get = keep local deleted
975 del actions[f] # don't get = keep local deleted
976
976
977 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
977 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
978 acceptremote, followcopies, matcher=None,
978 acceptremote, followcopies, matcher=None,
979 mergeforce=False):
979 mergeforce=False):
980 """Calculate the actions needed to merge mctx into wctx using ancestors"""
980 """Calculate the actions needed to merge mctx into wctx using ancestors"""
981 # Avoid cycle.
981 # Avoid cycle.
982 from . import sparse
982 from . import sparse
983
983
984 if len(ancestors) == 1: # default
984 if len(ancestors) == 1: # default
985 actions, diverge, renamedelete = manifestmerge(
985 actions, diverge, renamedelete = manifestmerge(
986 repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
986 repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
987 acceptremote, followcopies)
987 acceptremote, followcopies)
988 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
988 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
989
989
990 else: # only when merge.preferancestor=* - the default
990 else: # only when merge.preferancestor=* - the default
991 repo.ui.note(
991 repo.ui.note(
992 _("note: merging %s and %s using bids from ancestors %s\n") %
992 _("note: merging %s and %s using bids from ancestors %s\n") %
993 (wctx, mctx, _(' and ').join(pycompat.bytestr(anc)
993 (wctx, mctx, _(' and ').join(pycompat.bytestr(anc)
994 for anc in ancestors)))
994 for anc in ancestors)))
995
995
996 # Call for bids
996 # Call for bids
997 fbids = {} # mapping filename to bids (action method to list of actions)
997 fbids = {} # mapping filename to bids (action method to list of actions)
998 diverge, renamedelete = None, None
998 diverge, renamedelete = None, None
999 for ancestor in ancestors:
999 for ancestor in ancestors:
1000 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
1000 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
1001 actions, diverge1, renamedelete1 = manifestmerge(
1001 actions, diverge1, renamedelete1 = manifestmerge(
1002 repo, wctx, mctx, ancestor, branchmerge, force, matcher,
1002 repo, wctx, mctx, ancestor, branchmerge, force, matcher,
1003 acceptremote, followcopies, forcefulldiff=True)
1003 acceptremote, followcopies, forcefulldiff=True)
1004 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1004 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1005
1005
1006 # Track the shortest set of warnings on the theory that bid
1006 # Track the shortest set of warnings on the theory that bid
1007 # merge will correctly incorporate more information
1007 # merge will correctly incorporate more information
1008 if diverge is None or len(diverge1) < len(diverge):
1008 if diverge is None or len(diverge1) < len(diverge):
1009 diverge = diverge1
1009 diverge = diverge1
1010 if renamedelete is None or len(renamedelete) < len(renamedelete1):
1010 if renamedelete is None or len(renamedelete) < len(renamedelete1):
1011 renamedelete = renamedelete1
1011 renamedelete = renamedelete1
1012
1012
1013 for f, a in sorted(actions.iteritems()):
1013 for f, a in sorted(actions.iteritems()):
1014 m, args, msg = a
1014 m, args, msg = a
1015 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
1015 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
1016 if f in fbids:
1016 if f in fbids:
1017 d = fbids[f]
1017 d = fbids[f]
1018 if m in d:
1018 if m in d:
1019 d[m].append(a)
1019 d[m].append(a)
1020 else:
1020 else:
1021 d[m] = [a]
1021 d[m] = [a]
1022 else:
1022 else:
1023 fbids[f] = {m: [a]}
1023 fbids[f] = {m: [a]}
1024
1024
1025 # Pick the best bid for each file
1025 # Pick the best bid for each file
1026 repo.ui.note(_('\nauction for merging merge bids\n'))
1026 repo.ui.note(_('\nauction for merging merge bids\n'))
1027 actions = {}
1027 actions = {}
1028 dms = [] # filenames that have dm actions
1028 dms = [] # filenames that have dm actions
1029 for f, bids in sorted(fbids.items()):
1029 for f, bids in sorted(fbids.items()):
1030 # bids is a mapping from action method to list of actions
1030 # bids is a mapping from action method to list of actions
1031 # Consensus?
1031 # Consensus?
1032 if len(bids) == 1: # all bids are the same kind of method
1032 if len(bids) == 1: # all bids are the same kind of method
1033 m, l = list(bids.items())[0]
1033 m, l = list(bids.items())[0]
1034 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1034 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1035 repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
1035 repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
1036 actions[f] = l[0]
1036 actions[f] = l[0]
1037 if m == 'dm':
1037 if m == 'dm':
1038 dms.append(f)
1038 dms.append(f)
1039 continue
1039 continue
1040 # If keep is an option, just do it.
1040 # If keep is an option, just do it.
1041 if 'k' in bids:
1041 if 'k' in bids:
1042 repo.ui.note(_(" %s: picking 'keep' action\n") % f)
1042 repo.ui.note(_(" %s: picking 'keep' action\n") % f)
1043 actions[f] = bids['k'][0]
1043 actions[f] = bids['k'][0]
1044 continue
1044 continue
1045 # If there are gets and they all agree [how could they not?], do it.
1045 # If there are gets and they all agree [how could they not?], do it.
1046 if 'g' in bids:
1046 if 'g' in bids:
1047 ga0 = bids['g'][0]
1047 ga0 = bids['g'][0]
1048 if all(a == ga0 for a in bids['g'][1:]):
1048 if all(a == ga0 for a in bids['g'][1:]):
1049 repo.ui.note(_(" %s: picking 'get' action\n") % f)
1049 repo.ui.note(_(" %s: picking 'get' action\n") % f)
1050 actions[f] = ga0
1050 actions[f] = ga0
1051 continue
1051 continue
1052 # TODO: Consider other simple actions such as mode changes
1052 # TODO: Consider other simple actions such as mode changes
1053 # Handle inefficient democrazy.
1053 # Handle inefficient democrazy.
1054 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
1054 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
1055 for m, l in sorted(bids.items()):
1055 for m, l in sorted(bids.items()):
1056 for _f, args, msg in l:
1056 for _f, args, msg in l:
1057 repo.ui.note(' %s -> %s\n' % (msg, m))
1057 repo.ui.note(' %s -> %s\n' % (msg, m))
1058 # Pick random action. TODO: Instead, prompt user when resolving
1058 # Pick random action. TODO: Instead, prompt user when resolving
1059 m, l = list(bids.items())[0]
1059 m, l = list(bids.items())[0]
1060 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
1060 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
1061 (f, m))
1061 (f, m))
1062 actions[f] = l[0]
1062 actions[f] = l[0]
1063 if m == 'dm':
1063 if m == 'dm':
1064 dms.append(f)
1064 dms.append(f)
1065 continue
1065 continue
1066 # Work around 'dm' that can cause multiple actions for the same file
1066 # Work around 'dm' that can cause multiple actions for the same file
1067 for f in dms:
1067 for f in dms:
1068 dm, (f0, flags), msg = actions[f]
1068 dm, (f0, flags), msg = actions[f]
1069 assert dm == 'dm', dm
1069 assert dm == 'dm', dm
1070 if f0 in actions and actions[f0][0] == 'r':
1070 if f0 in actions and actions[f0][0] == 'r':
1071 # We have one bid for removing a file and another for moving it.
1071 # We have one bid for removing a file and another for moving it.
1072 # These two could be merged as first move and then delete ...
1072 # These two could be merged as first move and then delete ...
1073 # but instead drop moving and just delete.
1073 # but instead drop moving and just delete.
1074 del actions[f]
1074 del actions[f]
1075 repo.ui.note(_('end of auction\n\n'))
1075 repo.ui.note(_('end of auction\n\n'))
1076
1076
1077 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1077 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1078
1078
1079 if wctx.rev() is None:
1079 if wctx.rev() is None:
1080 fractions = _forgetremoved(wctx, mctx, branchmerge)
1080 fractions = _forgetremoved(wctx, mctx, branchmerge)
1081 actions.update(fractions)
1081 actions.update(fractions)
1082
1082
1083 prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
1083 prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
1084 actions)
1084 actions)
1085
1085
1086 return prunedactions, diverge, renamedelete
1086 return prunedactions, diverge, renamedelete
1087
1087
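The auction above can be read as a per-file preference order: unanimous consensus, then a 'keep' bid, then an agreed-upon 'get', and finally an arbitrary pick. A condensed sketch of that rule (not the real implementation, which also tracks the 'dm' work-around):

    def pickbid(bids):
        # bids maps an action code to the list of bids of that kind
        if len(bids) == 1:
            m, l = next(iter(bids.items()))
            if all(a == l[0] for a in l[1:]):
                return l[0]                      # consensus
        if 'k' in bids:
            return bids['k'][0]                  # keeping is always safe
        if 'g' in bids and all(a == bids['g'][0] for a in bids['g'][1:]):
            return bids['g'][0]                  # everyone agrees on the get
        return next(iter(bids.values()))[0]      # ambiguous: arbitrary pick
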
1088 def _getcwd():
1088 def _getcwd():
1089 try:
1089 try:
1090 return pycompat.getcwd()
1090 return pycompat.getcwd()
1091 except OSError as err:
1091 except OSError as err:
1092 if err.errno == errno.ENOENT:
1092 if err.errno == errno.ENOENT:
1093 return None
1093 return None
1094 raise
1094 raise
1095
1095
1096 def batchremove(repo, wctx, actions):
1096 def batchremove(repo, wctx, actions):
1097 """apply removes to the working directory
1097 """apply removes to the working directory
1098
1098
1099 yields tuples for progress updates
1099 yields tuples for progress updates
1100 """
1100 """
1101 verbose = repo.ui.verbose
1101 verbose = repo.ui.verbose
1102 cwd = _getcwd()
1102 cwd = _getcwd()
1103 i = 0
1103 i = 0
1104 for f, args, msg in actions:
1104 for f, args, msg in actions:
1105 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
1105 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
1106 if verbose:
1106 if verbose:
1107 repo.ui.note(_("removing %s\n") % f)
1107 repo.ui.note(_("removing %s\n") % f)
1108 wctx[f].audit()
1108 wctx[f].audit()
1109 try:
1109 try:
1110 wctx[f].remove(ignoremissing=True)
1110 wctx[f].remove(ignoremissing=True)
1111 except OSError as inst:
1111 except OSError as inst:
1112 repo.ui.warn(_("update failed to remove %s: %s!\n") %
1112 repo.ui.warn(_("update failed to remove %s: %s!\n") %
1113 (f, inst.strerror))
1113 (f, inst.strerror))
1114 if i == 100:
1114 if i == 100:
1115 yield i, f
1115 yield i, f
1116 i = 0
1116 i = 0
1117 i += 1
1117 i += 1
1118 if i > 0:
1118 if i > 0:
1119 yield i, f
1119 yield i, f
1120
1120
1121 if cwd and not _getcwd():
1121 if cwd and not _getcwd():
1122 # cwd was removed in the course of removing files; print a helpful
1122 # cwd was removed in the course of removing files; print a helpful
1123 # warning.
1123 # warning.
1124 repo.ui.warn(_("current directory was removed\n"
1124 repo.ui.warn(_("current directory was removed\n"
1125 "(consider changing to repo root: %s)\n") % repo.root)
1125 "(consider changing to repo root: %s)\n") % repo.root)
1126
1126
1127 # It's necessary to flush here in case we're inside a worker fork and will
1127 # It's necessary to flush here in case we're inside a worker fork and will
1128 # quit after this function.
1128 # quit after this function.
1129 wctx.flushall()
1129 wctx.flushall()
1130
1130
1131 def batchget(repo, mctx, wctx, actions):
1131 def batchget(repo, mctx, wctx, actions):
1132 """apply gets to the working directory
1132 """apply gets to the working directory
1133
1133
1134 mctx is the context to get from
1134 mctx is the context to get from
1135
1135
1136 yields tuples for progress updates
1136 yields tuples for progress updates
1137 """
1137 """
1138 verbose = repo.ui.verbose
1138 verbose = repo.ui.verbose
1139 fctx = mctx.filectx
1139 fctx = mctx.filectx
1140 ui = repo.ui
1140 ui = repo.ui
1141 i = 0
1141 i = 0
1142 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1142 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1143 for f, (flags, backup), msg in actions:
1143 for f, (flags, backup), msg in actions:
1144 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
1144 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
1145 if verbose:
1145 if verbose:
1146 repo.ui.note(_("getting %s\n") % f)
1146 repo.ui.note(_("getting %s\n") % f)
1147
1147
1148 if backup:
1148 if backup:
1149 absf = repo.wjoin(f)
1149 absf = repo.wjoin(f)
1150 orig = scmutil.origpath(ui, repo, absf)
1150 orig = scmutil.origpath(ui, repo, absf)
1151 try:
1151 try:
1152 if repo.wvfs.isfileorlink(f):
1152 if repo.wvfs.isfileorlink(f):
1153 util.rename(absf, orig)
1153 util.rename(absf, orig)
1154 except OSError as e:
1154 except OSError as e:
1155 if e.errno != errno.ENOENT:
1155 if e.errno != errno.ENOENT:
1156 raise
1156 raise
1157 wctx[f].clearunknown()
1157 wctx[f].clearunknown()
1158 wctx[f].write(fctx(f).data(), flags, backgroundclose=True)
1158 wctx[f].write(fctx(f).data(), flags, backgroundclose=True)
1159 if i == 100:
1159 if i == 100:
1160 yield i, f
1160 yield i, f
1161 i = 0
1161 i = 0
1162 i += 1
1162 i += 1
1163 if i > 0:
1163 if i > 0:
1164 yield i, f
1164 yield i, f
1165
1165
1166 # It's necessary to flush here in case we're inside a worker fork and will
1166 # It's necessary to flush here in case we're inside a worker fork and will
1167 # quit after this function.
1167 # quit after this function.
1168 wctx.flushall()
1168 wctx.flushall()
1169
1169
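batchremove and batchget share the same reporting idiom: do the per-file work, then yield a (count, last-file) tuple roughly every hundred files plus one final partial report, so the worker-driving caller can advance the progress bar. A generic sketch of that pattern:

    def batchedwork(items, work, chunk=100):
        # apply `work` to each item, yielding (processed, lastitem) roughly
        # every `chunk` items, mirroring batchremove/batchget above
        i = 0
        for item in items:
            work(item)
            if i == chunk:
                yield i, item
                i = 0
            i += 1
        if i > 0:
            yield i, item

    for count, last in batchedwork(range(250), lambda x: None):
        print('processed %d more, up to %r' % (count, last))
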
def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy

    Return a tuple of counts (updated, merged, removed, unresolved) that
    describes how many files were affected by the update.
    """

    updated, merged, removed = 0, 0, 0
    ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
    moves = []
    for m, l in actions.items():
        l.sort()

    # 'cd' and 'dc' actions are treated like other merge conflicts
    mergeactions = sorted(actions['cd'])
    mergeactions.extend(sorted(actions['dc']))
    mergeactions.extend(actions['m'])
    for f, args, msg in mergeactions:
        f1, f2, fa, move, anc = args
        if f == '.hgsubstate': # merged internally
            continue
        if f1 is None:
            fcl = filemerge.absentfilectx(wctx, fa)
        else:
            repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
            fcl = wctx[f1]
        if f2 is None:
            fco = filemerge.absentfilectx(mctx, fa)
        else:
            fco = mctx[f2]
        actx = repo[anc]
        if fa in actx:
            fca = actx[fa]
        else:
            # TODO: move to absentfilectx
            fca = repo.filectx(f1, fileid=nullrev)
        ms.add(fcl, fco, fca, f)
        if f1 != f and move:
            moves.append(f1)

    _updating = _('updating')
    _files = _('files')
    progress = repo.ui.progress

    # remove renamed files after safely stored
    for f in moves:
        if wctx[f].lexists():
            repo.ui.debug("removing %s\n" % f)
            wctx[f].audit()
            wctx[f].remove()

    numupdates = sum(len(l) for m, l in actions.items() if m != 'k')

    if [a for a in actions['r'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # remove in parallel (must come first)
    z = 0
    prog = worker.worker(repo.ui, 0.001, batchremove, (repo, wctx),
                         actions['r'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    removed = len(actions['r'])

    # We should flush before forking into worker processes, since those workers
    # flush when they complete, and we don't want to duplicate work.
    wctx.flushall()

    # get in parallel
    prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx, wctx),
                         actions['g'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    updated = len(actions['g'])

    if [a for a in actions['g'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # forget (manifest only, just log it) (must come first)
    for f, args, msg in actions['f']:
        repo.ui.debug(" %s: %s -> f\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add (manifest only, just log it)
    for f, args, msg in actions['a']:
        repo.ui.debug(" %s: %s -> a\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add/mark as modified (manifest only, just log it)
    for f, args, msg in actions['am']:
        repo.ui.debug(" %s: %s -> am\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # keep (noop, just log it)
    for f, args, msg in actions['k']:
        repo.ui.debug(" %s: %s -> k\n" % (f, msg))
        # no progress

    # directory rename, move local
    for f, args, msg in actions['dm']:
        repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("moving %s to %s\n") % (f0, f))
        wctx[f].audit()
        wctx[f].write(wctx.filectx(f0).data(), flags)
        wctx[f0].remove()
        updated += 1

    # local directory rename, get
    for f, args, msg in actions['dg']:
        repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("getting %s to %s\n") % (f0, f))
        wctx[f].write(mctx.filectx(f0).data(), flags)
        updated += 1

    # exec
    for f, args, msg in actions['e']:
        repo.ui.debug(" %s: %s -> e\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        flags, = args
        wctx[f].audit()
        wctx[f].setflags('l' in flags, 'x' in flags)
        updated += 1

    # the ordering is important here -- ms.mergedriver will raise if the merge
    # driver has changed, and we want to be able to bypass it when overwrite is
    # True
    usemergedriver = not overwrite and mergeactions and ms.mergedriver

    if usemergedriver:
        ms.commit()
        proceed = driverpreprocess(repo, ms, wctx, labels=labels)
        # the driver might leave some files unresolved
        unresolvedf = set(ms.unresolved())
        if not proceed:
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            return updated, merged, removed, max(len(unresolvedf), 1)
        newactions = []
        for f, args, msg in mergeactions:
            if f in unresolvedf:
                newactions.append((f, args, msg))
        mergeactions = newactions

    # premerge
    tocomplete = []
    for f, args, msg in mergeactions:
        repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        if f == '.hgsubstate': # subrepo states need updating
            subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
                             overwrite, labels)
            continue
        wctx[f].audit()
        complete, r = ms.preresolve(f, wctx)
        if not complete:
            numupdates += 1
            tocomplete.append((f, args, msg))

    # merge
    for f, args, msg in tocomplete:
        repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        ms.resolve(f, wctx)

    ms.commit()

    unresolved = ms.unresolvedcount()

    if usemergedriver and not unresolved and ms.mdstate() != 's':
        if not driverconclude(repo, ms, wctx, labels=labels):
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            unresolved = max(unresolved, 1)

        ms.commit()

    msupdated, msmerged, msremoved = ms.counts()
    updated += msupdated
    merged += msmerged
    removed += msremoved

    extraactions = ms.actions()
    if extraactions:
        mfiles = set(a[0] for a in actions['m'])
        for k, acts in extraactions.iteritems():
            actions[k].extend(acts)
            # Remove these files from actions['m'] as well. This is important
            # because in recordupdates, files in actions['m'] are processed
            # after files in other actions, and the merge driver might add
            # files to those actions via extraactions above. This can lead to a
            # file being recorded twice, with poor results. This is especially
            # problematic for actions['r'] (currently only possible with the
            # merge driver in the initial merge process; interrupted merges
            # don't go through this flow).
            #
            # The real fix here is to have indexes by both file and action so
            # that when the action for a file is changed it is automatically
            # reflected in the other action lists. But that involves a more
            # complex data structure, so this will do for now.
            #
            # We don't need to do the same operation for 'dc' and 'cd' because
            # those lists aren't consulted again.
            mfiles.difference_update(a[0] for a in acts)

        actions['m'] = [a for a in actions['m'] if a[0] in mfiles]

    progress(_updating, None, total=numupdates, unit=_files)

    return updated, merged, removed, unresolved

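# Illustrative sketch (not part of the original file): the shape of the
# ``actions`` mapping that applyupdates() and recordupdates() consume, as
# inferred from the unpacking done in the loops above. File names, flag
# values and the ``anc`` node below are hypothetical placeholders.
#
#     actions = {
#         'g': [('a.txt', ('', False), 'remote created')],        # get
#         'r': [('b.txt', None, 'other deleted')],                # remove
#         'e': [('run.sh', ('x',), 'update permissions')],        # exec bit
#         'm': [('c.txt', ('c.txt', 'c.txt', 'c.txt', False, anc),
#                'versions differ')],                             # merge
#     }
#
# Every value is a list of (filename, args, message) tuples; the contents of
# the args tuple depend on the action code, as the per-action loops show.
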
def recordupdates(repo, actions, branchmerge):
    "record merge actions to the dirstate"
    # remove (must come first)
    for f, args, msg in actions.get('r', []):
        if branchmerge:
            repo.dirstate.remove(f)
        else:
            repo.dirstate.drop(f)

    # forget (must come first)
    for f, args, msg in actions.get('f', []):
        repo.dirstate.drop(f)

    # re-add
    for f, args, msg in actions.get('a', []):
        repo.dirstate.add(f)

    # re-add/mark as modified
    for f, args, msg in actions.get('am', []):
        if branchmerge:
            repo.dirstate.normallookup(f)
        else:
            repo.dirstate.add(f)

    # exec change
    for f, args, msg in actions.get('e', []):
        repo.dirstate.normallookup(f)

    # keep
    for f, args, msg in actions.get('k', []):
        pass

    # get
    for f, args, msg in actions.get('g', []):
        if branchmerge:
            repo.dirstate.otherparent(f)
        else:
            repo.dirstate.normal(f)

    # merge
    for f, args, msg in actions.get('m', []):
        f1, f2, fa, move, anc = args
        if branchmerge:
            # We've done a branch merge, mark this file as merged
            # so that we properly record the merger later
            repo.dirstate.merge(f)
            if f1 != f2: # copy/rename
                if move:
                    repo.dirstate.remove(f1)
                if f1 != f:
                    repo.dirstate.copy(f1, f)
                else:
                    repo.dirstate.copy(f2, f)
        else:
            # We've update-merged a locally modified file, so
            # we set the dirstate to emulate a normal checkout
            # of that file some time in the past. Thus our
            # merge will appear as a normal local file
            # modification.
            if f2 == f: # file not locally copied/moved
                repo.dirstate.normallookup(f)
            if move:
                repo.dirstate.drop(f1)

    # directory rename, move local
    for f, args, msg in actions.get('dm', []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.remove(f0)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
            repo.dirstate.drop(f0)

    # directory rename, get
    for f, args, msg in actions.get('dg', []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)

def update(repo, node, branchmerge, force, ancestor=None,
           mergeancestor=False, labels=None, matcher=None, mergeforce=False,
           updatecheck=None, wc=None):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    matcher = a matcher to filter file lists (dirstate not updated)
    mergeancestor = whether it is merging with an ancestor. If true,
      we should accept the incoming changes for any prompts that occur.
      If false, merging with an ancestor (fast-forward) is only allowed
      between different named branches. This flag is used by the rebase
      extension as a temporary fix and should be avoided in general.
    labels = labels to use for base, local and other
    mergeforce = whether the merge was run with 'merge --force' (deprecated):
      if this is True, then 'force' should be True as well.

    The table below shows all the behaviors of the update command
    given the -c and -C or no options, whether the working directory
    is dirty, whether a revision is specified, and the relationship of
    the parent rev to the target rev (linear or not). Match from top first.
    The -n option doesn't exist on the command line, but represents the
    experimental.updatecheck=noconflict option.

    This logic is tested by test-update-branches.t.

    -c  -C  -n  -m  dirty  rev  linear  |  result
     y   y   *   *    *     *     *     |    (1)
     y   *   y   *    *     *     *     |    (1)
     y   *   *   y    *     *     *     |    (1)
     *   y   y   *    *     *     *     |    (1)
     *   y   *   y    *     *     *     |    (1)
     *   *   y   y    *     *     *     |    (1)
     *   *   *   *    *     n     n     |     x
     *   *   *   *    n     *     *     |    ok
     n   n   n   n    y     *     y     |  merge
     n   n   n   n    y     y     n     |    (2)
     n   n   n   y    y     *     *     |  merge
     n   n   y   n    y     *     *     |  merge if no conflict
     n   y   n   n    y     *     *     |  discard
     y   n   n   n    y     *     *     |    (3)

    x = can't happen
    * = don't-care
    1 = incompatible options (checked in commands.py)
    2 = abort: uncommitted changes (commit or update --clean to discard changes)
    3 = abort: uncommitted changes (checked in commands.py)

    The merge is performed inside ``wc``, a workingctx-like object. It defaults
    to repo[None] if None is passed.

    Return the same tuple as applyupdates().
    """
    # Avoid cycle.
    from . import sparse

    # This function used to find the default destination if node was None, but
    # that's now in destutil.py.
    assert node is not None
    if not branchmerge and not force:
        # TODO: remove the default once all callers that pass branchmerge=False
        # and force=False pass a value for updatecheck. We may want to allow
        # updatecheck='abort' to better support some of these callers.
        if updatecheck is None:
            updatecheck = 'linear'
        assert updatecheck in ('none', 'linear', 'noconflict')
    # If we're doing a partial update, we need to skip updating
    # the dirstate, so make a note of any partial-ness to the
    # update here.
    if matcher is None or matcher.always():
        partial = False
    else:
        partial = True
    with repo.wlock():
        if wc is None:
            wc = repo[None]
        pl = wc.parents()
        p1 = pl[0]
        pas = [None]
        if ancestor is not None:
            pas = [repo[ancestor]]

        overwrite = force and not branchmerge

        p2 = repo[node]
        if pas[0] is None:
            if repo.ui.configlist('merge', 'preferancestor') == ['*']:
                cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
                pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
            else:
                pas = [p1.ancestor(p2, warn=branchmerge)]

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)

        ### check phase
        if not overwrite:
            if len(pl) > 1:
                raise error.Abort(_("outstanding uncommitted merge"))
            ms = mergestate.read(repo)
            if list(ms.unresolved()):
                raise error.Abort(_("outstanding merge conflicts"))
        if branchmerge:
            if pas == [p2]:
                raise error.Abort(_("merging with a working directory ancestor"
                                    " has no effect"))
            elif pas == [p1]:
                if not mergeancestor and wc.branch() == p2.branch():
                    raise error.Abort(_("nothing to merge"),
                                      hint=_("use 'hg update' "
                                             "or check 'hg heads'"))
            if not force and (wc.files() or wc.deleted()):
                raise error.Abort(_("uncommitted changes"),
                                  hint=_("use 'hg status' to list changes"))
            for s in sorted(wc.substate):
                wc.sub(s).bailifchanged()

        elif not overwrite:
            if p1 == p2: # no-op update
                # call the hooks and exit early
                repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
                repo.hook('update', parent1=xp2, parent2='', error=0)
                return 0, 0, 0, 0

            if (updatecheck == 'linear' and
                    pas not in ([p1], [p2])): # nonlinear
                dirty = wc.dirty(missing=True)
                if dirty:
                    # Branching is a bit strange to ensure we do the minimal
                    # amount of call to obsutil.foreground.
                    foreground = obsutil.foreground(repo, [p1.node()])
                    # note: the <node> variable contains a random identifier
                    if repo[node].node() in foreground:
                        pass # allow updating to successors
                    else:
                        msg = _("uncommitted changes")
                        hint = _("commit or update --clean to discard changes")
                        raise error.UpdateAbort(msg, hint=hint)
                else:
                    # Allow jumping branches if clean and specific rev given
                    pass

        if overwrite:
            pas = [wc]
        elif not branchmerge:
            pas = [p1]

        # deprecated config: merge.followcopies
        followcopies = repo.ui.configbool('merge', 'followcopies')
        if overwrite:
            followcopies = False
        elif not pas[0]:
            followcopies = False
        if not branchmerge and not wc.dirty(missing=True):
            followcopies = False

        ### calculate phase
        actionbyfile, diverge, renamedelete = calculateupdates(
            repo, wc, p2, pas, branchmerge, force, mergeancestor,
            followcopies, matcher=matcher, mergeforce=mergeforce)

        if updatecheck == 'noconflict':
            for f, (m, args, msg) in actionbyfile.iteritems():
                if m not in ('g', 'k', 'e', 'r'):
                    msg = _("conflicting changes")
                    hint = _("commit or update --clean to discard changes")
                    raise error.Abort(msg, hint=hint)

        # Prompt and create actions. Most of this is in the resolve phase
        # already, but we can't handle .hgsubstate in filemerge or
        # subrepo.submerge yet so we have to keep prompting for it.
        if '.hgsubstate' in actionbyfile:
            f = '.hgsubstate'
            m, args, msg = actionbyfile[f]
            prompts = filemerge.partextras(labels)
            prompts['f'] = f
            if m == 'cd':
                if repo.ui.promptchoice(
                    _("local%(l)s changed %(f)s which other%(o)s deleted\n"
                      "use (c)hanged version or (d)elete?"
                      "$$ &Changed $$ &Delete") % prompts, 0):
                    actionbyfile[f] = ('r', None, "prompt delete")
                elif f in p1:
                    actionbyfile[f] = ('am', None, "prompt keep")
                else:
                    actionbyfile[f] = ('a', None, "prompt keep")
            elif m == 'dc':
                f1, f2, fa, move, anc = args
                flags = p2[f2].flags()
                if repo.ui.promptchoice(
                    _("other%(o)s changed %(f)s which local%(l)s deleted\n"
                      "use (c)hanged version or leave (d)eleted?"
                      "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
                    actionbyfile[f] = ('g', (flags, False), "prompt recreating")
                else:
                    del actionbyfile[f]

        # Convert to dictionary-of-lists format
        actions = dict((m, []) for m in 'a am f g cd dc r dm dg m e k'.split())
        for f, (m, args, msg) in actionbyfile.iteritems():
            if m not in actions:
                actions[m] = []
            actions[m].append((f, args, msg))

        if not util.fscasesensitive(repo.path):
            # check collision between files only in p2 for clean update
            if (not branchmerge and
                (force or not wc.dirty(missing=True, branch=False))):
                _checkcollision(repo, p2.manifest(), None)
            else:
                _checkcollision(repo, wc.manifest(), actions)

        # divergent renames
        for f, fl in sorted(diverge.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was renamed "
                           "multiple times to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        # rename and delete
        for f, fl in sorted(renamedelete.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was deleted "
                           "and renamed to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        ### apply phase
        if not branchmerge: # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
        if not partial:
            repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
            # note that we're in the middle of an update
            repo.vfs.write('updatestate', p2.hex())

        stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
        wc.flushall()

        if not partial:
            with repo.dirstate.parentchange():
                repo.setparents(fp1, fp2)
                recordupdates(repo, actions, branchmerge)
                # update completed, clear state
                util.unlink(repo.vfs.join('updatestate'))

                if not branchmerge:
                    repo.dirstate.setbranch(p2.branch())

    # If we're updating to a location, clean up any stale temporary includes
    # (ex: this happens during hg rebase --abort).
    if not branchmerge:
        sparse.prunetemporaryincludes(repo)

    if not partial:
        repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
    return stats

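# Illustrative sketch (an assumption for exposition, not code from this file):
# a plain working-directory update, as a caller such as hg.updaterepo() might
# perform it, reaches the function above roughly like this::
#
#     stats = update(repo, node, branchmerge=False, force=False,
#                    updatecheck='linear')
#     updated, merged, removed, unresolved = stats
#
# With branchmerge=False and force=False the call takes the 'linear'
# updatecheck path described in the docstring; hg.updaterepo() is named here
# only as a plausible caller.
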
def graft(repo, ctx, pctx, labels, keepparent=False):
    """Do a graft-like merge.

    This is a merge where the merge ancestor is chosen such that one
    or more changesets are grafted onto the current changeset. In
    addition to the merge, this fixes up the dirstate to include only
    a single parent (if keepparent is False) and tries to duplicate any
    renames/copies appropriately.

    ctx - changeset to rebase
    pctx - merge base, usually ctx.p1()
    labels - merge labels eg ['local', 'graft']
    keepparent - keep second parent if any

    """
    # If we're grafting a descendant onto an ancestor, be sure to pass
    # mergeancestor=True to update. This does two things: 1) allows the merge if
    # the destination is the same as the parent of the ctx (so we can use graft
    # to copy commits), and 2) informs update that the incoming changes are
    # newer than the destination so it doesn't prompt about "remote changed foo
    # which local deleted".
    mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())

    stats = update(repo, ctx.node(), True, True, pctx.node(),
                   mergeancestor=mergeancestor, labels=labels)

    pother = nullid
    parents = ctx.parents()
    if keepparent and len(parents) == 2 and pctx in parents:
        parents.remove(pctx)
        pother = parents[0].node()

    with repo.dirstate.parentchange():
        repo.setparents(repo['.'].node(), pother)
        repo.dirstate.write(repo.currenttransaction())
        # fix up dirstate for copies and renames
        copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
    return stats
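
# Illustrative sketch (assumed caller, not part of this file): a grafting
# command would drive the helper above roughly as follows, grafting ``ctx``
# onto the current working directory parent::
#
#     stats = graft(repo, ctx, ctx.p1(), ['local', 'graft'])
#     if stats[3] > 0:
#         # unresolved conflicts; left for the user to settle with 'hg resolve'
#         pass
#
# The labels list mirrors the docstring's example; stats is the
# (updated, merged, removed, unresolved) tuple returned by update().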