##// END OF EJS Templates
configitems: register the 'merge.preferancestor' config
Boris Feld -
r34481:99c3dee3 default
parent child Browse files
Show More
@@ -1,689 +1,692 b''
1 # configitems.py - centralized declaration of configuration option
1 # configitems.py - centralized declaration of configuration option
2 #
2 #
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import functools
10 import functools
11
11
12 from . import (
12 from . import (
13 encoding,
13 encoding,
14 error,
14 error,
15 )
15 )
16
16
def loadconfigtable(ui, extname, configtable):
    """Merge an extension's declared config items into the ui's known set.

    For every section in ``configtable``, any item the extension declares
    on top of an already-known one triggers a devel warning; the
    extension's declaration then wins via the final ``update``.
    """
    for section, newitems in configtable.items():
        registered = ui._knownconfig.setdefault(section, {})
        # warn (deterministically, hence sorted) about redefinitions
        overlap = set(registered) & set(newitems)
        for key in sorted(overlap):
            ui.develwarn("extension '%s' overwrite config item '%s.%s'"
                         % (extname, section, key),
                         config='warn-config')

        registered.update(newitems)
29
29
class configitem(object):
    """Declaration of a single known config item.

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    :alias: optional list of tuples as alternatives.
    """

    def __init__(self, section, name, default=None, alias=()):
        self.section, self.name = section, name
        self.default = default
        # private copy: a caller-shared sequence cannot be mutated
        # through this instance afterwards
        self.alias = list(alias)
44
44
# table of every config item known to mercurial core, filled below
coreitems = {}

def _register(configtable, *args, **kwargs):
    """Build a configitem from the arguments and file it in configtable.

    Declaring the same section/name pair twice is a programming error.
    """
    item = configitem(*args, **kwargs)
    sectionitems = configtable.setdefault(item.section, {})
    if item.name in sectionitems:
        raise error.ProgrammingError(
            "duplicated config item registration for '%s.%s'"
            % (item.section, item.name))
    sectionitems[item.name] = item

# special value for case where the default is derived from other values
dynamicdefault = object()

# Registering actual config items

def getitemregister(configtable):
    """Return a registration function bound to *configtable*."""
    return functools.partial(_register, configtable)

coreconfigitem = getitemregister(coreitems)
64
64
# Core config items from the 'auth' through the 'merge' sections.
# Alias-free items are declared as (section, name, default) triples and
# registered in one pass; items carrying aliases follow, one call each.
_plainitems = [
    ('auth', 'cookiefile', None),
    # bookmarks.pushing: internal hack for discovery
    ('bookmarks', 'pushing', list),
    # bundle.mainreporoot: internal hack for bundlerepo
    ('bundle', 'mainreporoot', ''),
    # bundle.reorder: experimental config
    ('bundle', 'reorder', 'auto'),
    ('censor', 'policy', 'abort'),
    ('chgserver', 'idletimeout', 3600),
    ('chgserver', 'skiphash', False),
    ('cmdserver', 'log', None),
    ('color', 'mode', 'auto'),
    ('color', 'pagermode', dynamicdefault),
    ('commands', 'status.relative', False),
    ('commands', 'status.skipstates', []),
    ('commands', 'status.verbose', False),
    ('commands', 'update.requiredest', False),
    ('devel', 'all-warnings', False),
    ('devel', 'bundle2.debug', False),
    ('devel', 'check-locks', False),
    ('devel', 'check-relroot', False),
    ('devel', 'default-date', None),
    ('devel', 'deprec-warn', False),
    ('devel', 'disableloaddefaultcerts', False),
    ('devel', 'legacy.exchange', list),
    ('devel', 'servercafile', ''),
    ('devel', 'serverexactprotocol', ''),
    ('devel', 'serverrequirecert', False),
    ('devel', 'strip-obsmarkers', True),
    ('email', 'charsets', list),
    ('email', 'from', None),
    ('email', 'method', 'smtp'),
    ('experimental', 'bundle-phases', False),
    ('experimental', 'bundle2-advertise', True),
    ('experimental', 'bundle2-output-capture', False),
    ('experimental', 'bundle2.pushback', False),
    ('experimental', 'bundle2lazylocking', False),
    ('experimental', 'bundlecomplevel', None),
    ('experimental', 'changegroup3', False),
    ('experimental', 'clientcompressionengines', list),
    ('experimental', 'copytrace', 'on'),
    ('experimental', 'copytrace.sourcecommitlimit', 100),
    ('experimental', 'crecordtest', None),
    ('experimental', 'editortmpinhg', False),
    ('experimental', 'exportableenviron', list),
    ('experimental', 'extendedheader.index', None),
    ('experimental', 'extendedheader.similarity', False),
    ('experimental', 'format.compression', 'zlib'),
    ('experimental', 'graphshorten', False),
    ('experimental', 'hook-track-tags', False),
    ('experimental', 'httppostargs', False),
    ('experimental', 'manifestv2', False),
    ('experimental', 'mergedriver', None),
    ('experimental', 'obsmarkers-exchange-debug', False),
    ('experimental', 'rebase.multidest', False),
    ('experimental', 'revertalternateinteractivemode', True),
    ('experimental', 'revlogv2', None),
    ('experimental', 'spacemovesdown', False),
    ('experimental', 'treemanifest', False),
    ('experimental', 'updatecheck', None),
    ('format', 'aggressivemergedeltas', False),
    ('format', 'chunkcachesize', None),
    ('format', 'dotencode', True),
    ('format', 'generaldelta', False),
    ('format', 'manifestcachesize', None),
    ('format', 'maxchainlen', None),
    ('format', 'obsstore-version', None),
    ('format', 'usefncache', True),
    ('format', 'usegeneraldelta', True),
    ('format', 'usestore', True),
    ('hostsecurity', 'ciphers', None),
    ('hostsecurity', 'disabletls10warning', False),
    ('http_proxy', 'always', False),
    ('http_proxy', 'host', None),
    ('http_proxy', 'no', list),
    ('http_proxy', 'passwd', None),
    ('http_proxy', 'user', None),
    ('merge', 'followcopies', True),
    ('merge', 'preferancestor', lambda: ['*']),
]
for _section, _name, _default in _plainitems:
    coreconfigitem(_section, _name, default=_default)
del _plainitems, _section, _name, _default

# 'stabilization' items keep their historical 'evolution' names as aliases.
coreconfigitem('experimental', 'stabilization',
    default=list,
    alias=[('experimental', 'evolution')],
)
coreconfigitem('experimental', 'stabilization.bundle-obsmarker',
    default=False,
    alias=[('experimental', 'evolution.bundle-obsmarker')],
)
coreconfigitem('experimental', 'stabilization.track-operation',
    default=True,
    alias=[('experimental', 'evolution.track-operation')]
)
# Core config items from the 'pager' through the 'worker' sections.
# Alias-free items are declared as (section, name, default) triples and
# registered in one pass; the lone aliased item ('ui.username') follows.
_simpleitems = [
    ('pager', 'ignore', list),
    ('patch', 'eol', 'strict'),
    ('patch', 'fuzz', 2),
    ('paths', 'default', None),
    ('paths', 'default-push', None),
    ('phases', 'checksubrepos', 'follow'),
    ('phases', 'new-commit', dynamicdefault),
    ('phases', 'publish', True),
    ('profiling', 'enabled', False),
    ('profiling', 'format', 'text'),
    ('profiling', 'freq', 1000),
    ('profiling', 'limit', 30),
    ('profiling', 'nested', 0),
    ('profiling', 'output', None),
    ('profiling', 'showmax', 0.999),
    ('profiling', 'showmin', dynamicdefault),
    ('profiling', 'sort', 'inlinetime'),
    ('profiling', 'statformat', 'hotpath'),
    ('profiling', 'type', 'stat'),
    ('progress', 'assume-tty', False),
    ('progress', 'changedelay', 1),
    ('progress', 'clear-complete', True),
    ('progress', 'debug', False),
    ('progress', 'delay', 3),
    ('progress', 'disable', False),
    ('progress', 'estimateinterval', 60.0),
    ('progress', 'refresh', 0.1),
    ('progress', 'width', dynamicdefault),
    ('push', 'pushvars.server', False),
    ('server', 'bundle1', True),
    ('server', 'bundle1gd', None),
    ('server', 'compressionengines', list),
    ('server', 'concurrent-push-mode', 'strict'),
    ('server', 'disablefullbundle', False),
    ('server', 'maxhttpheaderlen', 1024),
    ('server', 'preferuncompressed', False),
    ('server', 'uncompressed', True),
    ('server', 'uncompressedallowsecret', False),
    ('server', 'validate', False),
    ('server', 'zliblevel', -1),
    ('smtp', 'host', None),
    ('smtp', 'local_hostname', None),
    ('smtp', 'password', None),
    ('smtp', 'port', dynamicdefault),
    ('smtp', 'tls', 'none'),
    ('smtp', 'username', None),
    ('sparse', 'missingwarning', True),
    ('trusted', 'groups', list),
    ('trusted', 'users', list),
    ('ui', '_usedassubrepo', False),
    ('ui', 'allowemptycommit', False),
    ('ui', 'archivemeta', True),
    ('ui', 'askusername', False),
    ('ui', 'clonebundlefallback', False),
    ('ui', 'clonebundleprefers', list),
    ('ui', 'clonebundles', True),
    ('ui', 'color', 'auto'),
    ('ui', 'commitsubrepos', False),
    ('ui', 'debug', False),
    ('ui', 'debugger', None),
    ('ui', 'fallbackencoding', None),
    ('ui', 'forcecwd', None),
    ('ui', 'forcemerge', None),
    ('ui', 'formatdebug', False),
    ('ui', 'formatjson', False),
    ('ui', 'formatted', None),
    ('ui', 'graphnodetemplate', None),
    ('ui', 'http2debuglevel', None),
    ('ui', 'interactive', None),
    ('ui', 'interface', None),
    ('ui', 'logblockedtimes', False),
    ('ui', 'logtemplate', None),
    ('ui', 'merge', None),
    ('ui', 'mergemarkers', 'basic'),
    ('ui', 'mergemarkertemplate',
     ('{node|short} '
      '{ifeq(tags, "tip", "", '
      'ifeq(tags, "", "", "{tags} "))}'
      '{if(bookmarks, "{bookmarks} ")}'
      '{ifeq(branch, "default", "", "{branch} ")}'
      '- {author|user}: {desc|firstline}')),
    ('ui', 'nontty', False),
    ('ui', 'origbackuppath', None),
    ('ui', 'paginate', True),
    ('ui', 'patch', None),
    ('ui', 'portablefilenames', 'warn'),
    ('ui', 'promptecho', False),
    ('ui', 'quiet', False),
    ('ui', 'quietbookmarkmove', False),
    ('ui', 'remotecmd', 'hg'),
    ('ui', 'report_untrusted', True),
    ('ui', 'rollback', True),
    ('ui', 'slash', False),
    ('ui', 'ssh', 'ssh'),
    ('ui', 'statuscopies', False),
    ('ui', 'strict', False),
    ('ui', 'style', ''),
    ('ui', 'supportcontact', None),
    ('ui', 'textwidth', 78),
    ('ui', 'timeout', '600'),
    ('ui', 'traceback', False),
    ('ui', 'tweakdefaults', False),
    ('ui', 'usehttp2', False),
    ('ui', 'verbose', False),
    ('verify', 'skipflags', None),
    ('web', 'accesslog', '-'),
    ('web', 'address', ''),
    ('web', 'allow_archive', list),
    ('web', 'allow_read', list),
    ('web', 'baseurl', None),
    ('web', 'cacerts', None),
    ('web', 'certificate', None),
    ('web', 'collapse', False),
    ('web', 'csp', None),
    ('web', 'deny_read', list),
    ('web', 'descend', True),
    ('web', 'description', ""),
    ('web', 'encoding', lambda: encoding.encoding),
    ('web', 'errorlog', '-'),
    ('web', 'ipv6', False),
    ('web', 'port', 8000),
    ('web', 'prefix', ''),
    ('web', 'refreshinterval', 20),
    ('web', 'stripes', 1),
    ('web', 'style', 'paper'),
    ('web', 'templates', None),
    ('worker', 'backgroundclose', dynamicdefault),
    # Windows defaults to a limit of 512 open files. A buffer of 128
    # should give us enough headway.
    ('worker', 'backgroundclosemaxqueue', 384),
    ('worker', 'backgroundcloseminfilecount', 2048),
    ('worker', 'backgroundclosethreadcount', 4),
    ('worker', 'numcpus', None),
]
for _sec, _key, _dflt in _simpleitems:
    coreconfigitem(_sec, _key, default=_dflt)
del _simpleitems, _sec, _key, _dflt

# 'ui.username' has no default and keeps 'ui.user' as an alias.
coreconfigitem('ui', 'username',
    alias=[('ui', 'user')]
)
@@ -1,2568 +1,2568 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import re
12 import re
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 bin,
18 bin,
19 hex,
19 hex,
20 modifiednodeid,
20 modifiednodeid,
21 nullid,
21 nullid,
22 nullrev,
22 nullrev,
23 short,
23 short,
24 wdirid,
24 wdirid,
25 wdirnodes,
25 wdirnodes,
26 wdirrev,
26 wdirrev,
27 )
27 )
28 from .thirdparty import (
28 from .thirdparty import (
29 attr,
29 attr,
30 )
30 )
31 from . import (
31 from . import (
32 encoding,
32 encoding,
33 error,
33 error,
34 fileset,
34 fileset,
35 match as matchmod,
35 match as matchmod,
36 mdiff,
36 mdiff,
37 obsolete as obsmod,
37 obsolete as obsmod,
38 patch,
38 patch,
39 pathutil,
39 pathutil,
40 phases,
40 phases,
41 pycompat,
41 pycompat,
42 repoview,
42 repoview,
43 revlog,
43 revlog,
44 scmutil,
44 scmutil,
45 sparse,
45 sparse,
46 subrepo,
46 subrepo,
47 util,
47 util,
48 )
48 )
49
49
50 propertycache = util.propertycache
50 propertycache = util.propertycache
51
51
52 nonascii = re.compile(r'[^\x21-\x7f]').search
52 nonascii = re.compile(r'[^\x21-\x7f]').search
53
53
54 class basectx(object):
54 class basectx(object):
55 """A basectx object represents the common logic for its children:
55 """A basectx object represents the common logic for its children:
56 changectx: read-only context that is already present in the repo,
56 changectx: read-only context that is already present in the repo,
57 workingctx: a context that represents the working directory and can
57 workingctx: a context that represents the working directory and can
58 be committed,
58 be committed,
59 memctx: a context that represents changes in-memory and can also
59 memctx: a context that represents changes in-memory and can also
60 be committed."""
60 be committed."""
61 def __new__(cls, repo, changeid='', *args, **kwargs):
61 def __new__(cls, repo, changeid='', *args, **kwargs):
62 if isinstance(changeid, basectx):
62 if isinstance(changeid, basectx):
63 return changeid
63 return changeid
64
64
65 o = super(basectx, cls).__new__(cls)
65 o = super(basectx, cls).__new__(cls)
66
66
67 o._repo = repo
67 o._repo = repo
68 o._rev = nullrev
68 o._rev = nullrev
69 o._node = nullid
69 o._node = nullid
70
70
71 return o
71 return o
72
72
73 def __bytes__(self):
73 def __bytes__(self):
74 return short(self.node())
74 return short(self.node())
75
75
76 __str__ = encoding.strmethod(__bytes__)
76 __str__ = encoding.strmethod(__bytes__)
77
77
78 def __int__(self):
78 def __int__(self):
79 return self.rev()
79 return self.rev()
80
80
81 def __repr__(self):
81 def __repr__(self):
82 return r"<%s %s>" % (type(self).__name__, str(self))
82 return r"<%s %s>" % (type(self).__name__, str(self))
83
83
84 def __eq__(self, other):
84 def __eq__(self, other):
85 try:
85 try:
86 return type(self) == type(other) and self._rev == other._rev
86 return type(self) == type(other) and self._rev == other._rev
87 except AttributeError:
87 except AttributeError:
88 return False
88 return False
89
89
90 def __ne__(self, other):
90 def __ne__(self, other):
91 return not (self == other)
91 return not (self == other)
92
92
93 def __contains__(self, key):
93 def __contains__(self, key):
94 return key in self._manifest
94 return key in self._manifest
95
95
96 def __getitem__(self, key):
96 def __getitem__(self, key):
97 return self.filectx(key)
97 return self.filectx(key)
98
98
99 def __iter__(self):
99 def __iter__(self):
100 return iter(self._manifest)
100 return iter(self._manifest)
101
101
102 def _buildstatusmanifest(self, status):
102 def _buildstatusmanifest(self, status):
103 """Builds a manifest that includes the given status results, if this is
103 """Builds a manifest that includes the given status results, if this is
104 a working copy context. For non-working copy contexts, it just returns
104 a working copy context. For non-working copy contexts, it just returns
105 the normal manifest."""
105 the normal manifest."""
106 return self.manifest()
106 return self.manifest()
107
107
108 def _matchstatus(self, other, match):
108 def _matchstatus(self, other, match):
109 """This internal method provides a way for child objects to override the
109 """This internal method provides a way for child objects to override the
110 match operator.
110 match operator.
111 """
111 """
112 return match
112 return match
113
113
114 def _buildstatus(self, other, s, match, listignored, listclean,
114 def _buildstatus(self, other, s, match, listignored, listclean,
115 listunknown):
115 listunknown):
116 """build a status with respect to another context"""
116 """build a status with respect to another context"""
117 # Load earliest manifest first for caching reasons. More specifically,
117 # Load earliest manifest first for caching reasons. More specifically,
118 # if you have revisions 1000 and 1001, 1001 is probably stored as a
118 # if you have revisions 1000 and 1001, 1001 is probably stored as a
119 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
119 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
120 # 1000 and cache it so that when you read 1001, we just need to apply a
120 # 1000 and cache it so that when you read 1001, we just need to apply a
121 # delta to what's in the cache. So that's one full reconstruction + one
121 # delta to what's in the cache. So that's one full reconstruction + one
122 # delta application.
122 # delta application.
123 mf2 = None
123 mf2 = None
124 if self.rev() is not None and self.rev() < other.rev():
124 if self.rev() is not None and self.rev() < other.rev():
125 mf2 = self._buildstatusmanifest(s)
125 mf2 = self._buildstatusmanifest(s)
126 mf1 = other._buildstatusmanifest(s)
126 mf1 = other._buildstatusmanifest(s)
127 if mf2 is None:
127 if mf2 is None:
128 mf2 = self._buildstatusmanifest(s)
128 mf2 = self._buildstatusmanifest(s)
129
129
130 modified, added = [], []
130 modified, added = [], []
131 removed = []
131 removed = []
132 clean = []
132 clean = []
133 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
133 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
134 deletedset = set(deleted)
134 deletedset = set(deleted)
135 d = mf1.diff(mf2, match=match, clean=listclean)
135 d = mf1.diff(mf2, match=match, clean=listclean)
136 for fn, value in d.iteritems():
136 for fn, value in d.iteritems():
137 if fn in deletedset:
137 if fn in deletedset:
138 continue
138 continue
139 if value is None:
139 if value is None:
140 clean.append(fn)
140 clean.append(fn)
141 continue
141 continue
142 (node1, flag1), (node2, flag2) = value
142 (node1, flag1), (node2, flag2) = value
143 if node1 is None:
143 if node1 is None:
144 added.append(fn)
144 added.append(fn)
145 elif node2 is None:
145 elif node2 is None:
146 removed.append(fn)
146 removed.append(fn)
147 elif flag1 != flag2:
147 elif flag1 != flag2:
148 modified.append(fn)
148 modified.append(fn)
149 elif node2 not in wdirnodes:
149 elif node2 not in wdirnodes:
150 # When comparing files between two commits, we save time by
150 # When comparing files between two commits, we save time by
151 # not comparing the file contents when the nodeids differ.
151 # not comparing the file contents when the nodeids differ.
152 # Note that this means we incorrectly report a reverted change
152 # Note that this means we incorrectly report a reverted change
153 # to a file as a modification.
153 # to a file as a modification.
154 modified.append(fn)
154 modified.append(fn)
155 elif self[fn].cmp(other[fn]):
155 elif self[fn].cmp(other[fn]):
156 modified.append(fn)
156 modified.append(fn)
157 else:
157 else:
158 clean.append(fn)
158 clean.append(fn)
159
159
160 if removed:
160 if removed:
161 # need to filter files if they are already reported as removed
161 # need to filter files if they are already reported as removed
162 unknown = [fn for fn in unknown if fn not in mf1 and
162 unknown = [fn for fn in unknown if fn not in mf1 and
163 (not match or match(fn))]
163 (not match or match(fn))]
164 ignored = [fn for fn in ignored if fn not in mf1 and
164 ignored = [fn for fn in ignored if fn not in mf1 and
165 (not match or match(fn))]
165 (not match or match(fn))]
166 # if they're deleted, don't report them as removed
166 # if they're deleted, don't report them as removed
167 removed = [fn for fn in removed if fn not in deletedset]
167 removed = [fn for fn in removed if fn not in deletedset]
168
168
169 return scmutil.status(modified, added, removed, deleted, unknown,
169 return scmutil.status(modified, added, removed, deleted, unknown,
170 ignored, clean)
170 ignored, clean)
171
171
172 @propertycache
172 @propertycache
173 def substate(self):
173 def substate(self):
174 return subrepo.state(self, self._repo.ui)
174 return subrepo.state(self, self._repo.ui)
175
175
176 def subrev(self, subpath):
176 def subrev(self, subpath):
177 return self.substate[subpath][1]
177 return self.substate[subpath][1]
178
178
179 def rev(self):
179 def rev(self):
180 return self._rev
180 return self._rev
181 def node(self):
181 def node(self):
182 return self._node
182 return self._node
183 def hex(self):
183 def hex(self):
184 return hex(self.node())
184 return hex(self.node())
185 def manifest(self):
185 def manifest(self):
186 return self._manifest
186 return self._manifest
187 def manifestctx(self):
187 def manifestctx(self):
188 return self._manifestctx
188 return self._manifestctx
189 def repo(self):
189 def repo(self):
190 return self._repo
190 return self._repo
191 def phasestr(self):
191 def phasestr(self):
192 return phases.phasenames[self.phase()]
192 return phases.phasenames[self.phase()]
193 def mutable(self):
193 def mutable(self):
194 return self.phase() > phases.public
194 return self.phase() > phases.public
195
195
196 def getfileset(self, expr):
196 def getfileset(self, expr):
197 return fileset.getfileset(self, expr)
197 return fileset.getfileset(self, expr)
198
198
199 def obsolete(self):
199 def obsolete(self):
200 """True if the changeset is obsolete"""
200 """True if the changeset is obsolete"""
201 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
201 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
202
202
203 def extinct(self):
203 def extinct(self):
204 """True if the changeset is extinct"""
204 """True if the changeset is extinct"""
205 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
205 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
206
206
207 def unstable(self):
207 def unstable(self):
208 msg = ("'context.unstable' is deprecated, "
208 msg = ("'context.unstable' is deprecated, "
209 "use 'context.orphan'")
209 "use 'context.orphan'")
210 self._repo.ui.deprecwarn(msg, '4.4')
210 self._repo.ui.deprecwarn(msg, '4.4')
211 return self.orphan()
211 return self.orphan()
212
212
213 def orphan(self):
213 def orphan(self):
214 """True if the changeset is not obsolete but it's ancestor are"""
214 """True if the changeset is not obsolete but it's ancestor are"""
215 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
215 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
216
216
217 def bumped(self):
217 def bumped(self):
218 msg = ("'context.bumped' is deprecated, "
218 msg = ("'context.bumped' is deprecated, "
219 "use 'context.phasedivergent'")
219 "use 'context.phasedivergent'")
220 self._repo.ui.deprecwarn(msg, '4.4')
220 self._repo.ui.deprecwarn(msg, '4.4')
221 return self.phasedivergent()
221 return self.phasedivergent()
222
222
223 def phasedivergent(self):
223 def phasedivergent(self):
224 """True if the changeset try to be a successor of a public changeset
224 """True if the changeset try to be a successor of a public changeset
225
225
226 Only non-public and non-obsolete changesets may be bumped.
226 Only non-public and non-obsolete changesets may be bumped.
227 """
227 """
228 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
228 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
229
229
230 def divergent(self):
230 def divergent(self):
231 msg = ("'context.divergent' is deprecated, "
231 msg = ("'context.divergent' is deprecated, "
232 "use 'context.contentdivergent'")
232 "use 'context.contentdivergent'")
233 self._repo.ui.deprecwarn(msg, '4.4')
233 self._repo.ui.deprecwarn(msg, '4.4')
234 return self.contentdivergent()
234 return self.contentdivergent()
235
235
236 def contentdivergent(self):
236 def contentdivergent(self):
237 """Is a successors of a changeset with multiple possible successors set
237 """Is a successors of a changeset with multiple possible successors set
238
238
239 Only non-public and non-obsolete changesets may be divergent.
239 Only non-public and non-obsolete changesets may be divergent.
240 """
240 """
241 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
241 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
242
242
243 def troubled(self):
243 def troubled(self):
244 msg = ("'context.troubled' is deprecated, "
244 msg = ("'context.troubled' is deprecated, "
245 "use 'context.isunstable'")
245 "use 'context.isunstable'")
246 self._repo.ui.deprecwarn(msg, '4.4')
246 self._repo.ui.deprecwarn(msg, '4.4')
247 return self.isunstable()
247 return self.isunstable()
248
248
249 def isunstable(self):
249 def isunstable(self):
250 """True if the changeset is either unstable, bumped or divergent"""
250 """True if the changeset is either unstable, bumped or divergent"""
251 return self.orphan() or self.phasedivergent() or self.contentdivergent()
251 return self.orphan() or self.phasedivergent() or self.contentdivergent()
252
252
253 def troubles(self):
253 def troubles(self):
254 """Keep the old version around in order to avoid breaking extensions
254 """Keep the old version around in order to avoid breaking extensions
255 about different return values.
255 about different return values.
256 """
256 """
257 msg = ("'context.troubles' is deprecated, "
257 msg = ("'context.troubles' is deprecated, "
258 "use 'context.instabilities'")
258 "use 'context.instabilities'")
259 self._repo.ui.deprecwarn(msg, '4.4')
259 self._repo.ui.deprecwarn(msg, '4.4')
260
260
261 troubles = []
261 troubles = []
262 if self.orphan():
262 if self.orphan():
263 troubles.append('orphan')
263 troubles.append('orphan')
264 if self.phasedivergent():
264 if self.phasedivergent():
265 troubles.append('bumped')
265 troubles.append('bumped')
266 if self.contentdivergent():
266 if self.contentdivergent():
267 troubles.append('divergent')
267 troubles.append('divergent')
268 return troubles
268 return troubles
269
269
270 def instabilities(self):
270 def instabilities(self):
271 """return the list of instabilities affecting this changeset.
271 """return the list of instabilities affecting this changeset.
272
272
273 Instabilities are returned as strings. possible values are:
273 Instabilities are returned as strings. possible values are:
274 - orphan,
274 - orphan,
275 - phase-divergent,
275 - phase-divergent,
276 - content-divergent.
276 - content-divergent.
277 """
277 """
278 instabilities = []
278 instabilities = []
279 if self.orphan():
279 if self.orphan():
280 instabilities.append('orphan')
280 instabilities.append('orphan')
281 if self.phasedivergent():
281 if self.phasedivergent():
282 instabilities.append('phase-divergent')
282 instabilities.append('phase-divergent')
283 if self.contentdivergent():
283 if self.contentdivergent():
284 instabilities.append('content-divergent')
284 instabilities.append('content-divergent')
285 return instabilities
285 return instabilities
286
286
287 def parents(self):
287 def parents(self):
288 """return contexts for each parent changeset"""
288 """return contexts for each parent changeset"""
289 return self._parents
289 return self._parents
290
290
291 def p1(self):
291 def p1(self):
292 return self._parents[0]
292 return self._parents[0]
293
293
294 def p2(self):
294 def p2(self):
295 parents = self._parents
295 parents = self._parents
296 if len(parents) == 2:
296 if len(parents) == 2:
297 return parents[1]
297 return parents[1]
298 return changectx(self._repo, nullrev)
298 return changectx(self._repo, nullrev)
299
299
300 def _fileinfo(self, path):
300 def _fileinfo(self, path):
301 if r'_manifest' in self.__dict__:
301 if r'_manifest' in self.__dict__:
302 try:
302 try:
303 return self._manifest[path], self._manifest.flags(path)
303 return self._manifest[path], self._manifest.flags(path)
304 except KeyError:
304 except KeyError:
305 raise error.ManifestLookupError(self._node, path,
305 raise error.ManifestLookupError(self._node, path,
306 _('not found in manifest'))
306 _('not found in manifest'))
307 if r'_manifestdelta' in self.__dict__ or path in self.files():
307 if r'_manifestdelta' in self.__dict__ or path in self.files():
308 if path in self._manifestdelta:
308 if path in self._manifestdelta:
309 return (self._manifestdelta[path],
309 return (self._manifestdelta[path],
310 self._manifestdelta.flags(path))
310 self._manifestdelta.flags(path))
311 mfl = self._repo.manifestlog
311 mfl = self._repo.manifestlog
312 try:
312 try:
313 node, flag = mfl[self._changeset.manifest].find(path)
313 node, flag = mfl[self._changeset.manifest].find(path)
314 except KeyError:
314 except KeyError:
315 raise error.ManifestLookupError(self._node, path,
315 raise error.ManifestLookupError(self._node, path,
316 _('not found in manifest'))
316 _('not found in manifest'))
317
317
318 return node, flag
318 return node, flag
319
319
320 def filenode(self, path):
320 def filenode(self, path):
321 return self._fileinfo(path)[0]
321 return self._fileinfo(path)[0]
322
322
323 def flags(self, path):
323 def flags(self, path):
324 try:
324 try:
325 return self._fileinfo(path)[1]
325 return self._fileinfo(path)[1]
326 except error.LookupError:
326 except error.LookupError:
327 return ''
327 return ''
328
328
329 def sub(self, path, allowcreate=True):
329 def sub(self, path, allowcreate=True):
330 '''return a subrepo for the stored revision of path, never wdir()'''
330 '''return a subrepo for the stored revision of path, never wdir()'''
331 return subrepo.subrepo(self, path, allowcreate=allowcreate)
331 return subrepo.subrepo(self, path, allowcreate=allowcreate)
332
332
333 def nullsub(self, path, pctx):
333 def nullsub(self, path, pctx):
334 return subrepo.nullsubrepo(self, path, pctx)
334 return subrepo.nullsubrepo(self, path, pctx)
335
335
336 def workingsub(self, path):
336 def workingsub(self, path):
337 '''return a subrepo for the stored revision, or wdir if this is a wdir
337 '''return a subrepo for the stored revision, or wdir if this is a wdir
338 context.
338 context.
339 '''
339 '''
340 return subrepo.subrepo(self, path, allowwdir=True)
340 return subrepo.subrepo(self, path, allowwdir=True)
341
341
342 def match(self, pats=None, include=None, exclude=None, default='glob',
342 def match(self, pats=None, include=None, exclude=None, default='glob',
343 listsubrepos=False, badfn=None):
343 listsubrepos=False, badfn=None):
344 r = self._repo
344 r = self._repo
345 return matchmod.match(r.root, r.getcwd(), pats,
345 return matchmod.match(r.root, r.getcwd(), pats,
346 include, exclude, default,
346 include, exclude, default,
347 auditor=r.nofsauditor, ctx=self,
347 auditor=r.nofsauditor, ctx=self,
348 listsubrepos=listsubrepos, badfn=badfn)
348 listsubrepos=listsubrepos, badfn=badfn)
349
349
350 def diff(self, ctx2=None, match=None, **opts):
350 def diff(self, ctx2=None, match=None, **opts):
351 """Returns a diff generator for the given contexts and matcher"""
351 """Returns a diff generator for the given contexts and matcher"""
352 if ctx2 is None:
352 if ctx2 is None:
353 ctx2 = self.p1()
353 ctx2 = self.p1()
354 if ctx2 is not None:
354 if ctx2 is not None:
355 ctx2 = self._repo[ctx2]
355 ctx2 = self._repo[ctx2]
356 diffopts = patch.diffopts(self._repo.ui, opts)
356 diffopts = patch.diffopts(self._repo.ui, opts)
357 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
357 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
358
358
359 def dirs(self):
359 def dirs(self):
360 return self._manifest.dirs()
360 return self._manifest.dirs()
361
361
362 def hasdir(self, dir):
362 def hasdir(self, dir):
363 return self._manifest.hasdir(dir)
363 return self._manifest.hasdir(dir)
364
364
365 def status(self, other=None, match=None, listignored=False,
365 def status(self, other=None, match=None, listignored=False,
366 listclean=False, listunknown=False, listsubrepos=False):
366 listclean=False, listunknown=False, listsubrepos=False):
367 """return status of files between two nodes or node and working
367 """return status of files between two nodes or node and working
368 directory.
368 directory.
369
369
370 If other is None, compare this node with working directory.
370 If other is None, compare this node with working directory.
371
371
372 returns (modified, added, removed, deleted, unknown, ignored, clean)
372 returns (modified, added, removed, deleted, unknown, ignored, clean)
373 """
373 """
374
374
375 ctx1 = self
375 ctx1 = self
376 ctx2 = self._repo[other]
376 ctx2 = self._repo[other]
377
377
378 # This next code block is, admittedly, fragile logic that tests for
378 # This next code block is, admittedly, fragile logic that tests for
379 # reversing the contexts and wouldn't need to exist if it weren't for
379 # reversing the contexts and wouldn't need to exist if it weren't for
380 # the fast (and common) code path of comparing the working directory
380 # the fast (and common) code path of comparing the working directory
381 # with its first parent.
381 # with its first parent.
382 #
382 #
383 # What we're aiming for here is the ability to call:
383 # What we're aiming for here is the ability to call:
384 #
384 #
385 # workingctx.status(parentctx)
385 # workingctx.status(parentctx)
386 #
386 #
387 # If we always built the manifest for each context and compared those,
387 # If we always built the manifest for each context and compared those,
388 # then we'd be done. But the special case of the above call means we
388 # then we'd be done. But the special case of the above call means we
389 # just copy the manifest of the parent.
389 # just copy the manifest of the parent.
390 reversed = False
390 reversed = False
391 if (not isinstance(ctx1, changectx)
391 if (not isinstance(ctx1, changectx)
392 and isinstance(ctx2, changectx)):
392 and isinstance(ctx2, changectx)):
393 reversed = True
393 reversed = True
394 ctx1, ctx2 = ctx2, ctx1
394 ctx1, ctx2 = ctx2, ctx1
395
395
396 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
396 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
397 match = ctx2._matchstatus(ctx1, match)
397 match = ctx2._matchstatus(ctx1, match)
398 r = scmutil.status([], [], [], [], [], [], [])
398 r = scmutil.status([], [], [], [], [], [], [])
399 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
399 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
400 listunknown)
400 listunknown)
401
401
402 if reversed:
402 if reversed:
403 # Reverse added and removed. Clear deleted, unknown and ignored as
403 # Reverse added and removed. Clear deleted, unknown and ignored as
404 # these make no sense to reverse.
404 # these make no sense to reverse.
405 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
405 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
406 r.clean)
406 r.clean)
407
407
408 if listsubrepos:
408 if listsubrepos:
409 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
409 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
410 try:
410 try:
411 rev2 = ctx2.subrev(subpath)
411 rev2 = ctx2.subrev(subpath)
412 except KeyError:
412 except KeyError:
413 # A subrepo that existed in node1 was deleted between
413 # A subrepo that existed in node1 was deleted between
414 # node1 and node2 (inclusive). Thus, ctx2's substate
414 # node1 and node2 (inclusive). Thus, ctx2's substate
415 # won't contain that subpath. The best we can do ignore it.
415 # won't contain that subpath. The best we can do ignore it.
416 rev2 = None
416 rev2 = None
417 submatch = matchmod.subdirmatcher(subpath, match)
417 submatch = matchmod.subdirmatcher(subpath, match)
418 s = sub.status(rev2, match=submatch, ignored=listignored,
418 s = sub.status(rev2, match=submatch, ignored=listignored,
419 clean=listclean, unknown=listunknown,
419 clean=listclean, unknown=listunknown,
420 listsubrepos=True)
420 listsubrepos=True)
421 for rfiles, sfiles in zip(r, s):
421 for rfiles, sfiles in zip(r, s):
422 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
422 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
423
423
424 for l in r:
424 for l in r:
425 l.sort()
425 l.sort()
426
426
427 return r
427 return r
428
428
429 def _filterederror(repo, changeid):
429 def _filterederror(repo, changeid):
430 """build an exception to be raised about a filtered changeid
430 """build an exception to be raised about a filtered changeid
431
431
432 This is extracted in a function to help extensions (eg: evolve) to
432 This is extracted in a function to help extensions (eg: evolve) to
433 experiment with various message variants."""
433 experiment with various message variants."""
434 if repo.filtername.startswith('visible'):
434 if repo.filtername.startswith('visible'):
435 msg = _("hidden revision '%s'") % changeid
435 msg = _("hidden revision '%s'") % changeid
436 hint = _('use --hidden to access hidden revisions')
436 hint = _('use --hidden to access hidden revisions')
437 return error.FilteredRepoLookupError(msg, hint=hint)
437 return error.FilteredRepoLookupError(msg, hint=hint)
438 msg = _("filtered revision '%s' (not in '%s' subset)")
438 msg = _("filtered revision '%s' (not in '%s' subset)")
439 msg %= (changeid, repo.filtername)
439 msg %= (changeid, repo.filtername)
440 return error.FilteredRepoLookupError(msg)
440 return error.FilteredRepoLookupError(msg)
441
441
442 class changectx(basectx):
442 class changectx(basectx):
443 """A changecontext object makes access to data related to a particular
443 """A changecontext object makes access to data related to a particular
444 changeset convenient. It represents a read-only context already present in
444 changeset convenient. It represents a read-only context already present in
445 the repo."""
445 the repo."""
446 def __init__(self, repo, changeid=''):
446 def __init__(self, repo, changeid=''):
447 """changeid is a revision number, node, or tag"""
447 """changeid is a revision number, node, or tag"""
448
448
449 # since basectx.__new__ already took care of copying the object, we
449 # since basectx.__new__ already took care of copying the object, we
450 # don't need to do anything in __init__, so we just exit here
450 # don't need to do anything in __init__, so we just exit here
451 if isinstance(changeid, basectx):
451 if isinstance(changeid, basectx):
452 return
452 return
453
453
454 if changeid == '':
454 if changeid == '':
455 changeid = '.'
455 changeid = '.'
456 self._repo = repo
456 self._repo = repo
457
457
458 try:
458 try:
459 if isinstance(changeid, int):
459 if isinstance(changeid, int):
460 self._node = repo.changelog.node(changeid)
460 self._node = repo.changelog.node(changeid)
461 self._rev = changeid
461 self._rev = changeid
462 return
462 return
463 if not pycompat.ispy3 and isinstance(changeid, long):
463 if not pycompat.ispy3 and isinstance(changeid, long):
464 changeid = str(changeid)
464 changeid = str(changeid)
465 if changeid == 'null':
465 if changeid == 'null':
466 self._node = nullid
466 self._node = nullid
467 self._rev = nullrev
467 self._rev = nullrev
468 return
468 return
469 if changeid == 'tip':
469 if changeid == 'tip':
470 self._node = repo.changelog.tip()
470 self._node = repo.changelog.tip()
471 self._rev = repo.changelog.rev(self._node)
471 self._rev = repo.changelog.rev(self._node)
472 return
472 return
473 if changeid == '.' or changeid == repo.dirstate.p1():
473 if changeid == '.' or changeid == repo.dirstate.p1():
474 # this is a hack to delay/avoid loading obsmarkers
474 # this is a hack to delay/avoid loading obsmarkers
475 # when we know that '.' won't be hidden
475 # when we know that '.' won't be hidden
476 self._node = repo.dirstate.p1()
476 self._node = repo.dirstate.p1()
477 self._rev = repo.unfiltered().changelog.rev(self._node)
477 self._rev = repo.unfiltered().changelog.rev(self._node)
478 return
478 return
479 if len(changeid) == 20:
479 if len(changeid) == 20:
480 try:
480 try:
481 self._node = changeid
481 self._node = changeid
482 self._rev = repo.changelog.rev(changeid)
482 self._rev = repo.changelog.rev(changeid)
483 return
483 return
484 except error.FilteredRepoLookupError:
484 except error.FilteredRepoLookupError:
485 raise
485 raise
486 except LookupError:
486 except LookupError:
487 pass
487 pass
488
488
489 try:
489 try:
490 r = int(changeid)
490 r = int(changeid)
491 if '%d' % r != changeid:
491 if '%d' % r != changeid:
492 raise ValueError
492 raise ValueError
493 l = len(repo.changelog)
493 l = len(repo.changelog)
494 if r < 0:
494 if r < 0:
495 r += l
495 r += l
496 if r < 0 or r >= l and r != wdirrev:
496 if r < 0 or r >= l and r != wdirrev:
497 raise ValueError
497 raise ValueError
498 self._rev = r
498 self._rev = r
499 self._node = repo.changelog.node(r)
499 self._node = repo.changelog.node(r)
500 return
500 return
501 except error.FilteredIndexError:
501 except error.FilteredIndexError:
502 raise
502 raise
503 except (ValueError, OverflowError, IndexError):
503 except (ValueError, OverflowError, IndexError):
504 pass
504 pass
505
505
506 if len(changeid) == 40:
506 if len(changeid) == 40:
507 try:
507 try:
508 self._node = bin(changeid)
508 self._node = bin(changeid)
509 self._rev = repo.changelog.rev(self._node)
509 self._rev = repo.changelog.rev(self._node)
510 return
510 return
511 except error.FilteredLookupError:
511 except error.FilteredLookupError:
512 raise
512 raise
513 except (TypeError, LookupError):
513 except (TypeError, LookupError):
514 pass
514 pass
515
515
516 # lookup bookmarks through the name interface
516 # lookup bookmarks through the name interface
517 try:
517 try:
518 self._node = repo.names.singlenode(repo, changeid)
518 self._node = repo.names.singlenode(repo, changeid)
519 self._rev = repo.changelog.rev(self._node)
519 self._rev = repo.changelog.rev(self._node)
520 return
520 return
521 except KeyError:
521 except KeyError:
522 pass
522 pass
523 except error.FilteredRepoLookupError:
523 except error.FilteredRepoLookupError:
524 raise
524 raise
525 except error.RepoLookupError:
525 except error.RepoLookupError:
526 pass
526 pass
527
527
528 self._node = repo.unfiltered().changelog._partialmatch(changeid)
528 self._node = repo.unfiltered().changelog._partialmatch(changeid)
529 if self._node is not None:
529 if self._node is not None:
530 self._rev = repo.changelog.rev(self._node)
530 self._rev = repo.changelog.rev(self._node)
531 return
531 return
532
532
533 # lookup failed
533 # lookup failed
534 # check if it might have come from damaged dirstate
534 # check if it might have come from damaged dirstate
535 #
535 #
536 # XXX we could avoid the unfiltered if we had a recognizable
536 # XXX we could avoid the unfiltered if we had a recognizable
537 # exception for filtered changeset access
537 # exception for filtered changeset access
538 if changeid in repo.unfiltered().dirstate.parents():
538 if changeid in repo.unfiltered().dirstate.parents():
539 msg = _("working directory has unknown parent '%s'!")
539 msg = _("working directory has unknown parent '%s'!")
540 raise error.Abort(msg % short(changeid))
540 raise error.Abort(msg % short(changeid))
541 try:
541 try:
542 if len(changeid) == 20 and nonascii(changeid):
542 if len(changeid) == 20 and nonascii(changeid):
543 changeid = hex(changeid)
543 changeid = hex(changeid)
544 except TypeError:
544 except TypeError:
545 pass
545 pass
546 except (error.FilteredIndexError, error.FilteredLookupError,
546 except (error.FilteredIndexError, error.FilteredLookupError,
547 error.FilteredRepoLookupError):
547 error.FilteredRepoLookupError):
548 raise _filterederror(repo, changeid)
548 raise _filterederror(repo, changeid)
549 except IndexError:
549 except IndexError:
550 pass
550 pass
551 raise error.RepoLookupError(
551 raise error.RepoLookupError(
552 _("unknown revision '%s'") % changeid)
552 _("unknown revision '%s'") % changeid)
553
553
554 def __hash__(self):
554 def __hash__(self):
555 try:
555 try:
556 return hash(self._rev)
556 return hash(self._rev)
557 except AttributeError:
557 except AttributeError:
558 return id(self)
558 return id(self)
559
559
560 def __nonzero__(self):
560 def __nonzero__(self):
561 return self._rev != nullrev
561 return self._rev != nullrev
562
562
563 __bool__ = __nonzero__
563 __bool__ = __nonzero__
564
564
565 @propertycache
565 @propertycache
566 def _changeset(self):
566 def _changeset(self):
567 return self._repo.changelog.changelogrevision(self.rev())
567 return self._repo.changelog.changelogrevision(self.rev())
568
568
569 @propertycache
569 @propertycache
570 def _manifest(self):
570 def _manifest(self):
571 return self._manifestctx.read()
571 return self._manifestctx.read()
572
572
573 @property
573 @property
574 def _manifestctx(self):
574 def _manifestctx(self):
575 return self._repo.manifestlog[self._changeset.manifest]
575 return self._repo.manifestlog[self._changeset.manifest]
576
576
577 @propertycache
577 @propertycache
578 def _manifestdelta(self):
578 def _manifestdelta(self):
579 return self._manifestctx.readdelta()
579 return self._manifestctx.readdelta()
580
580
581 @propertycache
581 @propertycache
582 def _parents(self):
582 def _parents(self):
583 repo = self._repo
583 repo = self._repo
584 p1, p2 = repo.changelog.parentrevs(self._rev)
584 p1, p2 = repo.changelog.parentrevs(self._rev)
585 if p2 == nullrev:
585 if p2 == nullrev:
586 return [changectx(repo, p1)]
586 return [changectx(repo, p1)]
587 return [changectx(repo, p1), changectx(repo, p2)]
587 return [changectx(repo, p1), changectx(repo, p2)]
588
588
589 def changeset(self):
589 def changeset(self):
590 c = self._changeset
590 c = self._changeset
591 return (
591 return (
592 c.manifest,
592 c.manifest,
593 c.user,
593 c.user,
594 c.date,
594 c.date,
595 c.files,
595 c.files,
596 c.description,
596 c.description,
597 c.extra,
597 c.extra,
598 )
598 )
599 def manifestnode(self):
599 def manifestnode(self):
600 return self._changeset.manifest
600 return self._changeset.manifest
601
601
602 def user(self):
602 def user(self):
603 return self._changeset.user
603 return self._changeset.user
604 def date(self):
604 def date(self):
605 return self._changeset.date
605 return self._changeset.date
606 def files(self):
606 def files(self):
607 return self._changeset.files
607 return self._changeset.files
608 def description(self):
608 def description(self):
609 return self._changeset.description
609 return self._changeset.description
610 def branch(self):
610 def branch(self):
611 return encoding.tolocal(self._changeset.extra.get("branch"))
611 return encoding.tolocal(self._changeset.extra.get("branch"))
612 def closesbranch(self):
612 def closesbranch(self):
613 return 'close' in self._changeset.extra
613 return 'close' in self._changeset.extra
614 def extra(self):
614 def extra(self):
615 return self._changeset.extra
615 return self._changeset.extra
616 def tags(self):
616 def tags(self):
617 return self._repo.nodetags(self._node)
617 return self._repo.nodetags(self._node)
618 def bookmarks(self):
618 def bookmarks(self):
619 return self._repo.nodebookmarks(self._node)
619 return self._repo.nodebookmarks(self._node)
620 def phase(self):
620 def phase(self):
621 return self._repo._phasecache.phase(self._repo, self._rev)
621 return self._repo._phasecache.phase(self._repo, self._rev)
622 def hidden(self):
622 def hidden(self):
623 return self._rev in repoview.filterrevs(self._repo, 'visible')
623 return self._rev in repoview.filterrevs(self._repo, 'visible')
624
624
625 def children(self):
625 def children(self):
626 """return contexts for each child changeset"""
626 """return contexts for each child changeset"""
627 c = self._repo.changelog.children(self._node)
627 c = self._repo.changelog.children(self._node)
628 return [changectx(self._repo, x) for x in c]
628 return [changectx(self._repo, x) for x in c]
629
629
630 def ancestors(self):
630 def ancestors(self):
631 for a in self._repo.changelog.ancestors([self._rev]):
631 for a in self._repo.changelog.ancestors([self._rev]):
632 yield changectx(self._repo, a)
632 yield changectx(self._repo, a)
633
633
634 def descendants(self):
634 def descendants(self):
635 for d in self._repo.changelog.descendants([self._rev]):
635 for d in self._repo.changelog.descendants([self._rev]):
636 yield changectx(self._repo, d)
636 yield changectx(self._repo, d)
637
637
638 def filectx(self, path, fileid=None, filelog=None):
638 def filectx(self, path, fileid=None, filelog=None):
639 """get a file context from this changeset"""
639 """get a file context from this changeset"""
640 if fileid is None:
640 if fileid is None:
641 fileid = self.filenode(path)
641 fileid = self.filenode(path)
642 return filectx(self._repo, path, fileid=fileid,
642 return filectx(self._repo, path, fileid=fileid,
643 changectx=self, filelog=filelog)
643 changectx=self, filelog=filelog)
644
644
645 def ancestor(self, c2, warn=False):
645 def ancestor(self, c2, warn=False):
646 """return the "best" ancestor context of self and c2
646 """return the "best" ancestor context of self and c2
647
647
648 If there are multiple candidates, it will show a message and check
648 If there are multiple candidates, it will show a message and check
649 merge.preferancestor configuration before falling back to the
649 merge.preferancestor configuration before falling back to the
650 revlog ancestor."""
650 revlog ancestor."""
651 # deal with workingctxs
651 # deal with workingctxs
652 n2 = c2._node
652 n2 = c2._node
653 if n2 is None:
653 if n2 is None:
654 n2 = c2._parents[0]._node
654 n2 = c2._parents[0]._node
655 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
655 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
656 if not cahs:
656 if not cahs:
657 anc = nullid
657 anc = nullid
658 elif len(cahs) == 1:
658 elif len(cahs) == 1:
659 anc = cahs[0]
659 anc = cahs[0]
660 else:
660 else:
661 # experimental config: merge.preferancestor
661 # experimental config: merge.preferancestor
662 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
662 for r in self._repo.ui.configlist('merge', 'preferancestor'):
663 try:
663 try:
664 ctx = changectx(self._repo, r)
664 ctx = changectx(self._repo, r)
665 except error.RepoLookupError:
665 except error.RepoLookupError:
666 continue
666 continue
667 anc = ctx.node()
667 anc = ctx.node()
668 if anc in cahs:
668 if anc in cahs:
669 break
669 break
670 else:
670 else:
671 anc = self._repo.changelog.ancestor(self._node, n2)
671 anc = self._repo.changelog.ancestor(self._node, n2)
672 if warn:
672 if warn:
673 self._repo.ui.status(
673 self._repo.ui.status(
674 (_("note: using %s as ancestor of %s and %s\n") %
674 (_("note: using %s as ancestor of %s and %s\n") %
675 (short(anc), short(self._node), short(n2))) +
675 (short(anc), short(self._node), short(n2))) +
676 ''.join(_(" alternatively, use --config "
676 ''.join(_(" alternatively, use --config "
677 "merge.preferancestor=%s\n") %
677 "merge.preferancestor=%s\n") %
678 short(n) for n in sorted(cahs) if n != anc))
678 short(n) for n in sorted(cahs) if n != anc))
679 return changectx(self._repo, anc)
679 return changectx(self._repo, anc)
680
680
681 def descendant(self, other):
681 def descendant(self, other):
682 """True if other is descendant of this changeset"""
682 """True if other is descendant of this changeset"""
683 return self._repo.changelog.descendant(self._rev, other._rev)
683 return self._repo.changelog.descendant(self._rev, other._rev)
684
684
685 def walk(self, match):
685 def walk(self, match):
686 '''Generates matching file names.'''
686 '''Generates matching file names.'''
687
687
688 # Wrap match.bad method to have message with nodeid
688 # Wrap match.bad method to have message with nodeid
689 def bad(fn, msg):
689 def bad(fn, msg):
690 # The manifest doesn't know about subrepos, so don't complain about
690 # The manifest doesn't know about subrepos, so don't complain about
691 # paths into valid subrepos.
691 # paths into valid subrepos.
692 if any(fn == s or fn.startswith(s + '/')
692 if any(fn == s or fn.startswith(s + '/')
693 for s in self.substate):
693 for s in self.substate):
694 return
694 return
695 match.bad(fn, _('no such file in rev %s') % self)
695 match.bad(fn, _('no such file in rev %s') % self)
696
696
697 m = matchmod.badmatch(match, bad)
697 m = matchmod.badmatch(match, bad)
698 return self._manifest.walk(m)
698 return self._manifest.walk(m)
699
699
700 def matches(self, match):
700 def matches(self, match):
701 return self.walk(match)
701 return self.walk(match)
702
702
703 class basefilectx(object):
703 class basefilectx(object):
704 """A filecontext object represents the common logic for its children:
704 """A filecontext object represents the common logic for its children:
705 filectx: read-only access to a filerevision that is already present
705 filectx: read-only access to a filerevision that is already present
706 in the repo,
706 in the repo,
707 workingfilectx: a filecontext that represents files from the working
707 workingfilectx: a filecontext that represents files from the working
708 directory,
708 directory,
709 memfilectx: a filecontext that represents files in-memory,
709 memfilectx: a filecontext that represents files in-memory,
710 overlayfilectx: duplicate another filecontext with some fields overridden.
710 overlayfilectx: duplicate another filecontext with some fields overridden.
711 """
711 """
712 @propertycache
712 @propertycache
713 def _filelog(self):
713 def _filelog(self):
714 return self._repo.file(self._path)
714 return self._repo.file(self._path)
715
715
716 @propertycache
716 @propertycache
717 def _changeid(self):
717 def _changeid(self):
718 if r'_changeid' in self.__dict__:
718 if r'_changeid' in self.__dict__:
719 return self._changeid
719 return self._changeid
720 elif r'_changectx' in self.__dict__:
720 elif r'_changectx' in self.__dict__:
721 return self._changectx.rev()
721 return self._changectx.rev()
722 elif r'_descendantrev' in self.__dict__:
722 elif r'_descendantrev' in self.__dict__:
723 # this file context was created from a revision with a known
723 # this file context was created from a revision with a known
724 # descendant, we can (lazily) correct for linkrev aliases
724 # descendant, we can (lazily) correct for linkrev aliases
725 return self._adjustlinkrev(self._descendantrev)
725 return self._adjustlinkrev(self._descendantrev)
726 else:
726 else:
727 return self._filelog.linkrev(self._filerev)
727 return self._filelog.linkrev(self._filerev)
728
728
729 @propertycache
729 @propertycache
730 def _filenode(self):
730 def _filenode(self):
731 if r'_fileid' in self.__dict__:
731 if r'_fileid' in self.__dict__:
732 return self._filelog.lookup(self._fileid)
732 return self._filelog.lookup(self._fileid)
733 else:
733 else:
734 return self._changectx.filenode(self._path)
734 return self._changectx.filenode(self._path)
735
735
736 @propertycache
736 @propertycache
737 def _filerev(self):
737 def _filerev(self):
738 return self._filelog.rev(self._filenode)
738 return self._filelog.rev(self._filenode)
739
739
740 @propertycache
740 @propertycache
741 def _repopath(self):
741 def _repopath(self):
742 return self._path
742 return self._path
743
743
744 def __nonzero__(self):
744 def __nonzero__(self):
745 try:
745 try:
746 self._filenode
746 self._filenode
747 return True
747 return True
748 except error.LookupError:
748 except error.LookupError:
749 # file is missing
749 # file is missing
750 return False
750 return False
751
751
752 __bool__ = __nonzero__
752 __bool__ = __nonzero__
753
753
754 def __bytes__(self):
754 def __bytes__(self):
755 try:
755 try:
756 return "%s@%s" % (self.path(), self._changectx)
756 return "%s@%s" % (self.path(), self._changectx)
757 except error.LookupError:
757 except error.LookupError:
758 return "%s@???" % self.path()
758 return "%s@???" % self.path()
759
759
760 __str__ = encoding.strmethod(__bytes__)
760 __str__ = encoding.strmethod(__bytes__)
761
761
762 def __repr__(self):
762 def __repr__(self):
763 return "<%s %s>" % (type(self).__name__, str(self))
763 return "<%s %s>" % (type(self).__name__, str(self))
764
764
765 def __hash__(self):
765 def __hash__(self):
766 try:
766 try:
767 return hash((self._path, self._filenode))
767 return hash((self._path, self._filenode))
768 except AttributeError:
768 except AttributeError:
769 return id(self)
769 return id(self)
770
770
771 def __eq__(self, other):
771 def __eq__(self, other):
772 try:
772 try:
773 return (type(self) == type(other) and self._path == other._path
773 return (type(self) == type(other) and self._path == other._path
774 and self._filenode == other._filenode)
774 and self._filenode == other._filenode)
775 except AttributeError:
775 except AttributeError:
776 return False
776 return False
777
777
778 def __ne__(self, other):
778 def __ne__(self, other):
779 return not (self == other)
779 return not (self == other)
780
780
781 def filerev(self):
781 def filerev(self):
782 return self._filerev
782 return self._filerev
783 def filenode(self):
783 def filenode(self):
784 return self._filenode
784 return self._filenode
785 @propertycache
785 @propertycache
786 def _flags(self):
786 def _flags(self):
787 return self._changectx.flags(self._path)
787 return self._changectx.flags(self._path)
788 def flags(self):
788 def flags(self):
789 return self._flags
789 return self._flags
790 def filelog(self):
790 def filelog(self):
791 return self._filelog
791 return self._filelog
792 def rev(self):
792 def rev(self):
793 return self._changeid
793 return self._changeid
794 def linkrev(self):
794 def linkrev(self):
795 return self._filelog.linkrev(self._filerev)
795 return self._filelog.linkrev(self._filerev)
796 def node(self):
796 def node(self):
797 return self._changectx.node()
797 return self._changectx.node()
798 def hex(self):
798 def hex(self):
799 return self._changectx.hex()
799 return self._changectx.hex()
800 def user(self):
800 def user(self):
801 return self._changectx.user()
801 return self._changectx.user()
802 def date(self):
802 def date(self):
803 return self._changectx.date()
803 return self._changectx.date()
804 def files(self):
804 def files(self):
805 return self._changectx.files()
805 return self._changectx.files()
806 def description(self):
806 def description(self):
807 return self._changectx.description()
807 return self._changectx.description()
808 def branch(self):
808 def branch(self):
809 return self._changectx.branch()
809 return self._changectx.branch()
810 def extra(self):
810 def extra(self):
811 return self._changectx.extra()
811 return self._changectx.extra()
812 def phase(self):
812 def phase(self):
813 return self._changectx.phase()
813 return self._changectx.phase()
814 def phasestr(self):
814 def phasestr(self):
815 return self._changectx.phasestr()
815 return self._changectx.phasestr()
816 def manifest(self):
816 def manifest(self):
817 return self._changectx.manifest()
817 return self._changectx.manifest()
818 def changectx(self):
818 def changectx(self):
819 return self._changectx
819 return self._changectx
820 def renamed(self):
820 def renamed(self):
821 return self._copied
821 return self._copied
822 def repo(self):
822 def repo(self):
823 return self._repo
823 return self._repo
824 def size(self):
824 def size(self):
825 return len(self.data())
825 return len(self.data())
826
826
827 def path(self):
827 def path(self):
828 return self._path
828 return self._path
829
829
830 def isbinary(self):
830 def isbinary(self):
831 try:
831 try:
832 return util.binary(self.data())
832 return util.binary(self.data())
833 except IOError:
833 except IOError:
834 return False
834 return False
835 def isexec(self):
835 def isexec(self):
836 return 'x' in self.flags()
836 return 'x' in self.flags()
837 def islink(self):
837 def islink(self):
838 return 'l' in self.flags()
838 return 'l' in self.flags()
839
839
840 def isabsent(self):
840 def isabsent(self):
841 """whether this filectx represents a file not in self._changectx
841 """whether this filectx represents a file not in self._changectx
842
842
843 This is mainly for merge code to detect change/delete conflicts. This is
843 This is mainly for merge code to detect change/delete conflicts. This is
844 expected to be True for all subclasses of basectx."""
844 expected to be True for all subclasses of basectx."""
845 return False
845 return False
846
846
847 _customcmp = False
847 _customcmp = False
848 def cmp(self, fctx):
848 def cmp(self, fctx):
849 """compare with other file context
849 """compare with other file context
850
850
851 returns True if different than fctx.
851 returns True if different than fctx.
852 """
852 """
853 if fctx._customcmp:
853 if fctx._customcmp:
854 return fctx.cmp(self)
854 return fctx.cmp(self)
855
855
856 if (fctx._filenode is None
856 if (fctx._filenode is None
857 and (self._repo._encodefilterpats
857 and (self._repo._encodefilterpats
858 # if file data starts with '\1\n', empty metadata block is
858 # if file data starts with '\1\n', empty metadata block is
859 # prepended, which adds 4 bytes to filelog.size().
859 # prepended, which adds 4 bytes to filelog.size().
860 or self.size() - 4 == fctx.size())
860 or self.size() - 4 == fctx.size())
861 or self.size() == fctx.size()):
861 or self.size() == fctx.size()):
862 return self._filelog.cmp(self._filenode, fctx.data())
862 return self._filelog.cmp(self._filenode, fctx.data())
863
863
864 return True
864 return True
865
865
866 def _adjustlinkrev(self, srcrev, inclusive=False):
866 def _adjustlinkrev(self, srcrev, inclusive=False):
867 """return the first ancestor of <srcrev> introducing <fnode>
867 """return the first ancestor of <srcrev> introducing <fnode>
868
868
869 If the linkrev of the file revision does not point to an ancestor of
869 If the linkrev of the file revision does not point to an ancestor of
870 srcrev, we'll walk down the ancestors until we find one introducing
870 srcrev, we'll walk down the ancestors until we find one introducing
871 this file revision.
871 this file revision.
872
872
873 :srcrev: the changeset revision we search ancestors from
873 :srcrev: the changeset revision we search ancestors from
874 :inclusive: if true, the src revision will also be checked
874 :inclusive: if true, the src revision will also be checked
875 """
875 """
876 repo = self._repo
876 repo = self._repo
877 cl = repo.unfiltered().changelog
877 cl = repo.unfiltered().changelog
878 mfl = repo.manifestlog
878 mfl = repo.manifestlog
879 # fetch the linkrev
879 # fetch the linkrev
880 lkr = self.linkrev()
880 lkr = self.linkrev()
881 # hack to reuse ancestor computation when searching for renames
881 # hack to reuse ancestor computation when searching for renames
882 memberanc = getattr(self, '_ancestrycontext', None)
882 memberanc = getattr(self, '_ancestrycontext', None)
883 iteranc = None
883 iteranc = None
884 if srcrev is None:
884 if srcrev is None:
885 # wctx case, used by workingfilectx during mergecopy
885 # wctx case, used by workingfilectx during mergecopy
886 revs = [p.rev() for p in self._repo[None].parents()]
886 revs = [p.rev() for p in self._repo[None].parents()]
887 inclusive = True # we skipped the real (revless) source
887 inclusive = True # we skipped the real (revless) source
888 else:
888 else:
889 revs = [srcrev]
889 revs = [srcrev]
890 if memberanc is None:
890 if memberanc is None:
891 memberanc = iteranc = cl.ancestors(revs, lkr,
891 memberanc = iteranc = cl.ancestors(revs, lkr,
892 inclusive=inclusive)
892 inclusive=inclusive)
893 # check if this linkrev is an ancestor of srcrev
893 # check if this linkrev is an ancestor of srcrev
894 if lkr not in memberanc:
894 if lkr not in memberanc:
895 if iteranc is None:
895 if iteranc is None:
896 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
896 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
897 fnode = self._filenode
897 fnode = self._filenode
898 path = self._path
898 path = self._path
899 for a in iteranc:
899 for a in iteranc:
900 ac = cl.read(a) # get changeset data (we avoid object creation)
900 ac = cl.read(a) # get changeset data (we avoid object creation)
901 if path in ac[3]: # checking the 'files' field.
901 if path in ac[3]: # checking the 'files' field.
902 # The file has been touched, check if the content is
902 # The file has been touched, check if the content is
903 # similar to the one we search for.
903 # similar to the one we search for.
904 if fnode == mfl[ac[0]].readfast().get(path):
904 if fnode == mfl[ac[0]].readfast().get(path):
905 return a
905 return a
906 # In theory, we should never get out of that loop without a result.
906 # In theory, we should never get out of that loop without a result.
907 # But if manifest uses a buggy file revision (not children of the
907 # But if manifest uses a buggy file revision (not children of the
908 # one it replaces) we could. Such a buggy situation will likely
908 # one it replaces) we could. Such a buggy situation will likely
909 # result is crash somewhere else at to some point.
909 # result is crash somewhere else at to some point.
910 return lkr
910 return lkr
911
911
912 def introrev(self):
912 def introrev(self):
913 """return the rev of the changeset which introduced this file revision
913 """return the rev of the changeset which introduced this file revision
914
914
915 This method is different from linkrev because it take into account the
915 This method is different from linkrev because it take into account the
916 changeset the filectx was created from. It ensures the returned
916 changeset the filectx was created from. It ensures the returned
917 revision is one of its ancestors. This prevents bugs from
917 revision is one of its ancestors. This prevents bugs from
918 'linkrev-shadowing' when a file revision is used by multiple
918 'linkrev-shadowing' when a file revision is used by multiple
919 changesets.
919 changesets.
920 """
920 """
921 lkr = self.linkrev()
921 lkr = self.linkrev()
922 attrs = vars(self)
922 attrs = vars(self)
923 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
923 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
924 if noctx or self.rev() == lkr:
924 if noctx or self.rev() == lkr:
925 return self.linkrev()
925 return self.linkrev()
926 return self._adjustlinkrev(self.rev(), inclusive=True)
926 return self._adjustlinkrev(self.rev(), inclusive=True)
927
927
928 def _parentfilectx(self, path, fileid, filelog):
928 def _parentfilectx(self, path, fileid, filelog):
929 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
929 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
930 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
930 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
931 if '_changeid' in vars(self) or '_changectx' in vars(self):
931 if '_changeid' in vars(self) or '_changectx' in vars(self):
932 # If self is associated with a changeset (probably explicitly
932 # If self is associated with a changeset (probably explicitly
933 # fed), ensure the created filectx is associated with a
933 # fed), ensure the created filectx is associated with a
934 # changeset that is an ancestor of self.changectx.
934 # changeset that is an ancestor of self.changectx.
935 # This lets us later use _adjustlinkrev to get a correct link.
935 # This lets us later use _adjustlinkrev to get a correct link.
936 fctx._descendantrev = self.rev()
936 fctx._descendantrev = self.rev()
937 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
937 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
938 elif '_descendantrev' in vars(self):
938 elif '_descendantrev' in vars(self):
939 # Otherwise propagate _descendantrev if we have one associated.
939 # Otherwise propagate _descendantrev if we have one associated.
940 fctx._descendantrev = self._descendantrev
940 fctx._descendantrev = self._descendantrev
941 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
941 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
942 return fctx
942 return fctx
943
943
944 def parents(self):
944 def parents(self):
945 _path = self._path
945 _path = self._path
946 fl = self._filelog
946 fl = self._filelog
947 parents = self._filelog.parents(self._filenode)
947 parents = self._filelog.parents(self._filenode)
948 pl = [(_path, node, fl) for node in parents if node != nullid]
948 pl = [(_path, node, fl) for node in parents if node != nullid]
949
949
950 r = fl.renamed(self._filenode)
950 r = fl.renamed(self._filenode)
951 if r:
951 if r:
952 # - In the simple rename case, both parent are nullid, pl is empty.
952 # - In the simple rename case, both parent are nullid, pl is empty.
953 # - In case of merge, only one of the parent is null id and should
953 # - In case of merge, only one of the parent is null id and should
954 # be replaced with the rename information. This parent is -always-
954 # be replaced with the rename information. This parent is -always-
955 # the first one.
955 # the first one.
956 #
956 #
957 # As null id have always been filtered out in the previous list
957 # As null id have always been filtered out in the previous list
958 # comprehension, inserting to 0 will always result in "replacing
958 # comprehension, inserting to 0 will always result in "replacing
959 # first nullid parent with rename information.
959 # first nullid parent with rename information.
960 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
960 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
961
961
962 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
962 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
963
963
964 def p1(self):
964 def p1(self):
965 return self.parents()[0]
965 return self.parents()[0]
966
966
967 def p2(self):
967 def p2(self):
968 p = self.parents()
968 p = self.parents()
969 if len(p) == 2:
969 if len(p) == 2:
970 return p[1]
970 return p[1]
971 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
971 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
972
972
973 def annotate(self, follow=False, linenumber=False, skiprevs=None,
973 def annotate(self, follow=False, linenumber=False, skiprevs=None,
974 diffopts=None):
974 diffopts=None):
975 '''returns a list of tuples of ((ctx, number), line) for each line
975 '''returns a list of tuples of ((ctx, number), line) for each line
976 in the file, where ctx is the filectx of the node where
976 in the file, where ctx is the filectx of the node where
977 that line was last changed; if linenumber parameter is true, number is
977 that line was last changed; if linenumber parameter is true, number is
978 the line number at the first appearance in the managed file, otherwise,
978 the line number at the first appearance in the managed file, otherwise,
979 number has a fixed value of False.
979 number has a fixed value of False.
980 '''
980 '''
981
981
982 def lines(text):
982 def lines(text):
983 if text.endswith("\n"):
983 if text.endswith("\n"):
984 return text.count("\n")
984 return text.count("\n")
985 return text.count("\n") + int(bool(text))
985 return text.count("\n") + int(bool(text))
986
986
987 if linenumber:
987 if linenumber:
988 def decorate(text, rev):
988 def decorate(text, rev):
989 return ([annotateline(fctx=rev, lineno=i)
989 return ([annotateline(fctx=rev, lineno=i)
990 for i in xrange(1, lines(text) + 1)], text)
990 for i in xrange(1, lines(text) + 1)], text)
991 else:
991 else:
992 def decorate(text, rev):
992 def decorate(text, rev):
993 return ([annotateline(fctx=rev)] * lines(text), text)
993 return ([annotateline(fctx=rev)] * lines(text), text)
994
994
995 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
995 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
996
996
997 def parents(f):
997 def parents(f):
998 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
998 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
999 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
999 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1000 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1000 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1001 # isn't an ancestor of the srcrev.
1001 # isn't an ancestor of the srcrev.
1002 f._changeid
1002 f._changeid
1003 pl = f.parents()
1003 pl = f.parents()
1004
1004
1005 # Don't return renamed parents if we aren't following.
1005 # Don't return renamed parents if we aren't following.
1006 if not follow:
1006 if not follow:
1007 pl = [p for p in pl if p.path() == f.path()]
1007 pl = [p for p in pl if p.path() == f.path()]
1008
1008
1009 # renamed filectx won't have a filelog yet, so set it
1009 # renamed filectx won't have a filelog yet, so set it
1010 # from the cache to save time
1010 # from the cache to save time
1011 for p in pl:
1011 for p in pl:
1012 if not '_filelog' in p.__dict__:
1012 if not '_filelog' in p.__dict__:
1013 p._filelog = getlog(p.path())
1013 p._filelog = getlog(p.path())
1014
1014
1015 return pl
1015 return pl
1016
1016
1017 # use linkrev to find the first changeset where self appeared
1017 # use linkrev to find the first changeset where self appeared
1018 base = self
1018 base = self
1019 introrev = self.introrev()
1019 introrev = self.introrev()
1020 if self.rev() != introrev:
1020 if self.rev() != introrev:
1021 base = self.filectx(self.filenode(), changeid=introrev)
1021 base = self.filectx(self.filenode(), changeid=introrev)
1022 if getattr(base, '_ancestrycontext', None) is None:
1022 if getattr(base, '_ancestrycontext', None) is None:
1023 cl = self._repo.changelog
1023 cl = self._repo.changelog
1024 if introrev is None:
1024 if introrev is None:
1025 # wctx is not inclusive, but works because _ancestrycontext
1025 # wctx is not inclusive, but works because _ancestrycontext
1026 # is used to test filelog revisions
1026 # is used to test filelog revisions
1027 ac = cl.ancestors([p.rev() for p in base.parents()],
1027 ac = cl.ancestors([p.rev() for p in base.parents()],
1028 inclusive=True)
1028 inclusive=True)
1029 else:
1029 else:
1030 ac = cl.ancestors([introrev], inclusive=True)
1030 ac = cl.ancestors([introrev], inclusive=True)
1031 base._ancestrycontext = ac
1031 base._ancestrycontext = ac
1032
1032
1033 # This algorithm would prefer to be recursive, but Python is a
1033 # This algorithm would prefer to be recursive, but Python is a
1034 # bit recursion-hostile. Instead we do an iterative
1034 # bit recursion-hostile. Instead we do an iterative
1035 # depth-first search.
1035 # depth-first search.
1036
1036
1037 # 1st DFS pre-calculates pcache and needed
1037 # 1st DFS pre-calculates pcache and needed
1038 visit = [base]
1038 visit = [base]
1039 pcache = {}
1039 pcache = {}
1040 needed = {base: 1}
1040 needed = {base: 1}
1041 while visit:
1041 while visit:
1042 f = visit.pop()
1042 f = visit.pop()
1043 if f in pcache:
1043 if f in pcache:
1044 continue
1044 continue
1045 pl = parents(f)
1045 pl = parents(f)
1046 pcache[f] = pl
1046 pcache[f] = pl
1047 for p in pl:
1047 for p in pl:
1048 needed[p] = needed.get(p, 0) + 1
1048 needed[p] = needed.get(p, 0) + 1
1049 if p not in pcache:
1049 if p not in pcache:
1050 visit.append(p)
1050 visit.append(p)
1051
1051
1052 # 2nd DFS does the actual annotate
1052 # 2nd DFS does the actual annotate
1053 visit[:] = [base]
1053 visit[:] = [base]
1054 hist = {}
1054 hist = {}
1055 while visit:
1055 while visit:
1056 f = visit[-1]
1056 f = visit[-1]
1057 if f in hist:
1057 if f in hist:
1058 visit.pop()
1058 visit.pop()
1059 continue
1059 continue
1060
1060
1061 ready = True
1061 ready = True
1062 pl = pcache[f]
1062 pl = pcache[f]
1063 for p in pl:
1063 for p in pl:
1064 if p not in hist:
1064 if p not in hist:
1065 ready = False
1065 ready = False
1066 visit.append(p)
1066 visit.append(p)
1067 if ready:
1067 if ready:
1068 visit.pop()
1068 visit.pop()
1069 curr = decorate(f.data(), f)
1069 curr = decorate(f.data(), f)
1070 skipchild = False
1070 skipchild = False
1071 if skiprevs is not None:
1071 if skiprevs is not None:
1072 skipchild = f._changeid in skiprevs
1072 skipchild = f._changeid in skiprevs
1073 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1073 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1074 diffopts)
1074 diffopts)
1075 for p in pl:
1075 for p in pl:
1076 if needed[p] == 1:
1076 if needed[p] == 1:
1077 del hist[p]
1077 del hist[p]
1078 del needed[p]
1078 del needed[p]
1079 else:
1079 else:
1080 needed[p] -= 1
1080 needed[p] -= 1
1081
1081
1082 hist[f] = curr
1082 hist[f] = curr
1083 del pcache[f]
1083 del pcache[f]
1084
1084
1085 return zip(hist[base][0], hist[base][1].splitlines(True))
1085 return zip(hist[base][0], hist[base][1].splitlines(True))
1086
1086
def ancestors(self, followfirst=False):
    """Yield the ancestor filectxs of this filectx, newest first.

    Candidates are ordered by their (linkrev, filenode) key and the
    largest pending key is emitted next.  If *followfirst* is true,
    only first parents are followed.
    """
    # Restrict the parent walk to the first parent when requested.
    limit = 1 if followfirst else None
    pending = {}
    current = self
    while True:
        for parent in current.parents()[:limit]:
            pending[(parent.linkrev(), parent.filenode())] = parent
        if not pending:
            return
        # Pop the candidate with the highest key so output is
        # newest-first.
        current = pending.pop(max(pending))
        yield current
1102
1102
def decodeddata(self):
    """Returns `data()` after running repository decoding filters.

    This is often equivalent to how the data would be expressed on disk.
    """
    path = self.path()
    rawdata = self.data()
    return self._repo.wwritedata(path, rawdata)
1109
1109
@attr.s(slots=True, frozen=True)
class annotateline(object):
    """One line of annotate output: originating fctx plus per-line flags."""
    # filectx the line is attributed to
    fctx = attr.ib()
    # NOTE(review): defaults to False rather than None/0 — presumably a
    # "no line number recorded" placeholder; confirm against callers.
    lineno = attr.ib(default=False)
    # Whether this annotation was the result of a skip-annotate.
    skip = attr.ib(default=False)
1116
1116
def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    See test-annotate.py for unit tests.
    '''
    # assumes each annotate datum is a (list-of-annotateline, text) pair as
    # produced by decorate() — TODO confirm against the caller in annotate()
    pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
               for parent in parents]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if t == '=':
                child[0][b1:b2] = parent[0][a1:a2]

    if skipchild:
        # Now try and match up anything that couldn't be matched,
        # Reversing pblocks maintains bias towards p2, matching above
        # behavior.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with -U0).
        # This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines in
        #   diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last line.
        remaining = [(parent, []) for parent, _blocks in pblocks]
        for idx, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _t in blocks:
                if a2 - a1 >= b2 - b1:
                    for bk in xrange(b1, b2):
                        # Only lines still blamed on the child are rewritten.
                        if child[0][bk].fctx == childfctx:
                            ak = min(a1 + (bk - b1), a2 - 1)
                            child[0][bk] = attr.evolve(parent[0][ak], skip=True)
                else:
                    # Parent hunk is shorter than the child hunk; defer so the
                    # repeat-last-line fallback below can handle it.
                    remaining[idx][1].append((a1, a2, b1, b2))

        # Then, look at anything left, which might involve repeating the last
        # line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bk in xrange(b1, b2):
                    if child[0][bk].fctx == childfctx:
                        # min() clamps to a2 - 1, i.e. repeats the parent's
                        # last hunk line when the child hunk is longer.
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child[0][bk] = attr.evolve(parent[0][ak], skip=True)
    return child
1177
1177
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # At least one anchor is required to locate the file revision.
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        # Only pre-populate the propertycache slots that were supplied;
        # the rest are computed lazily on first access.
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        # Raw revlog data, bypassing flag processors (raw=True).
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            # A censored revision either reads as empty (policy "ignore")
            # or aborts with a hint.
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        # If either changeset parent already has this exact file revision,
        # the rename does not belong to this changeset.
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1283
1283
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        # Not committed yet: no revision number or node hash exists.
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        # "<p1>+" marks an uncommitted context in reprs.
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    # Python 3 spelling of __nonzero__.
    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        # Lazily computed unless `changes` was passed to __init__.
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date allows tests to pin the commit date.
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = util.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        # An uncommitted context can carry no tags.
        return []

    def bookmarks(self):
        # Union of the parents' bookmarks.
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        # Prefer the materialized manifest when it has already been built.
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match,
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        # Yield direct parents first, then the changelog ancestors.
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
                [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        # Base implementation; subclasses (e.g. workingctx) override.
        return False
1481
1481
1482 class workingctx(committablectx):
1482 class workingctx(committablectx):
1483 """A workingctx object makes access to data related to
1483 """A workingctx object makes access to data related to
1484 the current working directory convenient.
1484 the current working directory convenient.
1485 date - any valid date string or (unixtime, offset), or None.
1485 date - any valid date string or (unixtime, offset), or None.
1486 user - username string, or None.
1486 user - username string, or None.
1487 extra - a dictionary of extra values, or None.
1487 extra - a dictionary of extra values, or None.
1488 changes - a list of file lists as returned by localrepo.status()
1488 changes - a list of file lists as returned by localrepo.status()
1489 or None to use the repository status.
1489 or None to use the repository status.
1490 """
1490 """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        # All state handling lives in committablectx; workingctx only
        # specializes behaviour via overridden methods.
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1494
1494
def __iter__(self):
    """Iterate over files tracked in the working directory.

    Entries whose dirstate status is 'r' (removed) are excluded.
    """
    dirstate = self._repo.dirstate
    for path in dirstate:
        if dirstate[path] != 'r':
            yield path
1500
1500
def __contains__(self, key):
    """Return True unless the dirstate reports *key* as unknown ('?')
    or removed ('r')."""
    state = self._repo.dirstate[key]
    return state != '?' and state != 'r'
1503
1503
    def hex(self):
        # The working directory has no real node; report the sentinel
        # working-directory id.
        return hex(wdirid)
1506
1506
@propertycache
def _parents(self):
    """Working directory parents as a list of changectx objects.

    The null second parent is dropped, so a non-merge working
    directory yields a single-element list.
    """
    dirstateparents = self._repo.dirstate.parents()
    if dirstateparents[1] == nullid:
        dirstateparents = dirstateparents[:-1]
    return [changectx(self._repo, node) for node in dirstateparents]
1513
1513
    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        # Wrap in workingfilectx so uncommitted on-disk content is what
        # callers read, not the last committed revision.
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)
1518
1518
def dirty(self, missing=False, merge=True, branch=True):
    "check whether a working directory is modified"
    # Subrepos are consulted first: any dirty subrepo dirties the parent.
    for subpath in sorted(self.substate):
        if self.sub(subpath).dirty(missing=missing):
            return True
    # Then the working directory itself.  The final expression's truthy
    # value (not a coerced bool) is returned, preserving the historical
    # contract for callers that inspect it.
    return ((merge and self.p2()) or
            (branch and self.branch() != self.p1().branch()) or
            self.modified() or self.added() or self.removed() or
            (missing and self.deleted()))
1530
1530
    def add(self, list, prefix=""):
        """Schedule the given paths for addition to the dirstate.

        Returns the list of rejected paths (missing files, or entries
        that are not regular files or symlinks).  Warnings are emitted
        via ui for rejected and already-tracked files.
        """
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                # Warn (but still add) when the file is large enough that
                # managing it may need significant RAM.
                if st.st_size > 10000000:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    # Already added, merged or normal: nothing to do.
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    # Previously removed: resurrect rather than re-add.
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
1565
1565
1566 def forget(self, files, prefix=""):
1566 def forget(self, files, prefix=""):
1567 with self._repo.wlock():
1567 with self._repo.wlock():
1568 ds = self._repo.dirstate
1568 ds = self._repo.dirstate
1569 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1569 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1570 rejected = []
1570 rejected = []
1571 for f in files:
1571 for f in files:
1572 if f not in self._repo.dirstate:
1572 if f not in self._repo.dirstate:
1573 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1573 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1574 rejected.append(f)
1574 rejected.append(f)
1575 elif self._repo.dirstate[f] != 'a':
1575 elif self._repo.dirstate[f] != 'a':
1576 self._repo.dirstate.remove(f)
1576 self._repo.dirstate.remove(f)
1577 else:
1577 else:
1578 self._repo.dirstate.drop(f)
1578 self._repo.dirstate.drop(f)
1579 return rejected
1579 return rejected
1580
1580
1581 def undelete(self, list):
1581 def undelete(self, list):
1582 pctxs = self.parents()
1582 pctxs = self.parents()
1583 with self._repo.wlock():
1583 with self._repo.wlock():
1584 ds = self._repo.dirstate
1584 ds = self._repo.dirstate
1585 for f in list:
1585 for f in list:
1586 if self._repo.dirstate[f] != 'r':
1586 if self._repo.dirstate[f] != 'r':
1587 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1587 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1588 else:
1588 else:
1589 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1589 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1590 t = fctx.data()
1590 t = fctx.data()
1591 self._repo.wwrite(f, t, fctx.flags())
1591 self._repo.wwrite(f, t, fctx.flags())
1592 self._repo.dirstate.normal(f)
1592 self._repo.dirstate.normal(f)
1593
1593
1594 def copy(self, source, dest):
1594 def copy(self, source, dest):
1595 try:
1595 try:
1596 st = self._repo.wvfs.lstat(dest)
1596 st = self._repo.wvfs.lstat(dest)
1597 except OSError as err:
1597 except OSError as err:
1598 if err.errno != errno.ENOENT:
1598 if err.errno != errno.ENOENT:
1599 raise
1599 raise
1600 self._repo.ui.warn(_("%s does not exist!\n")
1600 self._repo.ui.warn(_("%s does not exist!\n")
1601 % self._repo.dirstate.pathto(dest))
1601 % self._repo.dirstate.pathto(dest))
1602 return
1602 return
1603 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1603 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1604 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1604 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1605 "symbolic link\n")
1605 "symbolic link\n")
1606 % self._repo.dirstate.pathto(dest))
1606 % self._repo.dirstate.pathto(dest))
1607 else:
1607 else:
1608 with self._repo.wlock():
1608 with self._repo.wlock():
1609 if self._repo.dirstate[dest] in '?':
1609 if self._repo.dirstate[dest] in '?':
1610 self._repo.dirstate.add(dest)
1610 self._repo.dirstate.add(dest)
1611 elif self._repo.dirstate[dest] in 'r':
1611 elif self._repo.dirstate[dest] in 'r':
1612 self._repo.dirstate.normallookup(dest)
1612 self._repo.dirstate.normallookup(dest)
1613 self._repo.dirstate.copy(source, dest)
1613 self._repo.dirstate.copy(source, dest)
1614
1614
1615 def match(self, pats=None, include=None, exclude=None, default='glob',
1615 def match(self, pats=None, include=None, exclude=None, default='glob',
1616 listsubrepos=False, badfn=None):
1616 listsubrepos=False, badfn=None):
1617 r = self._repo
1617 r = self._repo
1618
1618
1619 # Only a case insensitive filesystem needs magic to translate user input
1619 # Only a case insensitive filesystem needs magic to translate user input
1620 # to actual case in the filesystem.
1620 # to actual case in the filesystem.
1621 icasefs = not util.fscasesensitive(r.root)
1621 icasefs = not util.fscasesensitive(r.root)
1622 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1622 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1623 default, auditor=r.auditor, ctx=self,
1623 default, auditor=r.auditor, ctx=self,
1624 listsubrepos=listsubrepos, badfn=badfn,
1624 listsubrepos=listsubrepos, badfn=badfn,
1625 icasefs=icasefs)
1625 icasefs=icasefs)
1626
1626
1627 def flushall(self):
1627 def flushall(self):
1628 pass # For overlayworkingfilectx compatibility.
1628 pass # For overlayworkingfilectx compatibility.
1629
1629
1630 def _filtersuspectsymlink(self, files):
1630 def _filtersuspectsymlink(self, files):
1631 if not files or self._repo.dirstate._checklink:
1631 if not files or self._repo.dirstate._checklink:
1632 return files
1632 return files
1633
1633
1634 # Symlink placeholders may get non-symlink-like contents
1634 # Symlink placeholders may get non-symlink-like contents
1635 # via user error or dereferencing by NFS or Samba servers,
1635 # via user error or dereferencing by NFS or Samba servers,
1636 # so we filter out any placeholders that don't look like a
1636 # so we filter out any placeholders that don't look like a
1637 # symlink
1637 # symlink
1638 sane = []
1638 sane = []
1639 for f in files:
1639 for f in files:
1640 if self.flags(f) == 'l':
1640 if self.flags(f) == 'l':
1641 d = self[f].data()
1641 d = self[f].data()
1642 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1642 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1643 self._repo.ui.debug('ignoring suspect symlink placeholder'
1643 self._repo.ui.debug('ignoring suspect symlink placeholder'
1644 ' "%s"\n' % f)
1644 ' "%s"\n' % f)
1645 continue
1645 continue
1646 sane.append(f)
1646 sane.append(f)
1647 return sane
1647 return sane
1648
1648
1649 def _checklookup(self, files):
1649 def _checklookup(self, files):
1650 # check for any possibly clean files
1650 # check for any possibly clean files
1651 if not files:
1651 if not files:
1652 return [], [], []
1652 return [], [], []
1653
1653
1654 modified = []
1654 modified = []
1655 deleted = []
1655 deleted = []
1656 fixup = []
1656 fixup = []
1657 pctx = self._parents[0]
1657 pctx = self._parents[0]
1658 # do a full compare of any files that might have changed
1658 # do a full compare of any files that might have changed
1659 for f in sorted(files):
1659 for f in sorted(files):
1660 try:
1660 try:
1661 # This will return True for a file that got replaced by a
1661 # This will return True for a file that got replaced by a
1662 # directory in the interim, but fixing that is pretty hard.
1662 # directory in the interim, but fixing that is pretty hard.
1663 if (f not in pctx or self.flags(f) != pctx.flags(f)
1663 if (f not in pctx or self.flags(f) != pctx.flags(f)
1664 or pctx[f].cmp(self[f])):
1664 or pctx[f].cmp(self[f])):
1665 modified.append(f)
1665 modified.append(f)
1666 else:
1666 else:
1667 fixup.append(f)
1667 fixup.append(f)
1668 except (IOError, OSError):
1668 except (IOError, OSError):
1669 # A file become inaccessible in between? Mark it as deleted,
1669 # A file become inaccessible in between? Mark it as deleted,
1670 # matching dirstate behavior (issue5584).
1670 # matching dirstate behavior (issue5584).
1671 # The dirstate has more complex behavior around whether a
1671 # The dirstate has more complex behavior around whether a
1672 # missing file matches a directory, etc, but we don't need to
1672 # missing file matches a directory, etc, but we don't need to
1673 # bother with that: if f has made it to this point, we're sure
1673 # bother with that: if f has made it to this point, we're sure
1674 # it's in the dirstate.
1674 # it's in the dirstate.
1675 deleted.append(f)
1675 deleted.append(f)
1676
1676
1677 return modified, deleted, fixup
1677 return modified, deleted, fixup
1678
1678
1679 def _poststatusfixup(self, status, fixup):
1679 def _poststatusfixup(self, status, fixup):
1680 """update dirstate for files that are actually clean"""
1680 """update dirstate for files that are actually clean"""
1681 poststatus = self._repo.postdsstatus()
1681 poststatus = self._repo.postdsstatus()
1682 if fixup or poststatus:
1682 if fixup or poststatus:
1683 try:
1683 try:
1684 oldid = self._repo.dirstate.identity()
1684 oldid = self._repo.dirstate.identity()
1685
1685
1686 # updating the dirstate is optional
1686 # updating the dirstate is optional
1687 # so we don't wait on the lock
1687 # so we don't wait on the lock
1688 # wlock can invalidate the dirstate, so cache normal _after_
1688 # wlock can invalidate the dirstate, so cache normal _after_
1689 # taking the lock
1689 # taking the lock
1690 with self._repo.wlock(False):
1690 with self._repo.wlock(False):
1691 if self._repo.dirstate.identity() == oldid:
1691 if self._repo.dirstate.identity() == oldid:
1692 if fixup:
1692 if fixup:
1693 normal = self._repo.dirstate.normal
1693 normal = self._repo.dirstate.normal
1694 for f in fixup:
1694 for f in fixup:
1695 normal(f)
1695 normal(f)
1696 # write changes out explicitly, because nesting
1696 # write changes out explicitly, because nesting
1697 # wlock at runtime may prevent 'wlock.release()'
1697 # wlock at runtime may prevent 'wlock.release()'
1698 # after this block from doing so for subsequent
1698 # after this block from doing so for subsequent
1699 # changing files
1699 # changing files
1700 tr = self._repo.currenttransaction()
1700 tr = self._repo.currenttransaction()
1701 self._repo.dirstate.write(tr)
1701 self._repo.dirstate.write(tr)
1702
1702
1703 if poststatus:
1703 if poststatus:
1704 for ps in poststatus:
1704 for ps in poststatus:
1705 ps(self, status)
1705 ps(self, status)
1706 else:
1706 else:
1707 # in this case, writing changes out breaks
1707 # in this case, writing changes out breaks
1708 # consistency, because .hg/dirstate was
1708 # consistency, because .hg/dirstate was
1709 # already changed simultaneously after last
1709 # already changed simultaneously after last
1710 # caching (see also issue5584 for detail)
1710 # caching (see also issue5584 for detail)
1711 self._repo.ui.debug('skip updating dirstate: '
1711 self._repo.ui.debug('skip updating dirstate: '
1712 'identity mismatch\n')
1712 'identity mismatch\n')
1713 except error.LockError:
1713 except error.LockError:
1714 pass
1714 pass
1715 finally:
1715 finally:
1716 # Even if the wlock couldn't be grabbed, clear out the list.
1716 # Even if the wlock couldn't be grabbed, clear out the list.
1717 self._repo.clearpostdsstatus()
1717 self._repo.clearpostdsstatus()
1718
1718
1719 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1719 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1720 '''Gets the status from the dirstate -- internal use only.'''
1720 '''Gets the status from the dirstate -- internal use only.'''
1721 subrepos = []
1721 subrepos = []
1722 if '.hgsub' in self:
1722 if '.hgsub' in self:
1723 subrepos = sorted(self.substate)
1723 subrepos = sorted(self.substate)
1724 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1724 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1725 clean=clean, unknown=unknown)
1725 clean=clean, unknown=unknown)
1726
1726
1727 # check for any possibly clean files
1727 # check for any possibly clean files
1728 fixup = []
1728 fixup = []
1729 if cmp:
1729 if cmp:
1730 modified2, deleted2, fixup = self._checklookup(cmp)
1730 modified2, deleted2, fixup = self._checklookup(cmp)
1731 s.modified.extend(modified2)
1731 s.modified.extend(modified2)
1732 s.deleted.extend(deleted2)
1732 s.deleted.extend(deleted2)
1733
1733
1734 if fixup and clean:
1734 if fixup and clean:
1735 s.clean.extend(fixup)
1735 s.clean.extend(fixup)
1736
1736
1737 self._poststatusfixup(s, fixup)
1737 self._poststatusfixup(s, fixup)
1738
1738
1739 if match.always():
1739 if match.always():
1740 # cache for performance
1740 # cache for performance
1741 if s.unknown or s.ignored or s.clean:
1741 if s.unknown or s.ignored or s.clean:
1742 # "_status" is cached with list*=False in the normal route
1742 # "_status" is cached with list*=False in the normal route
1743 self._status = scmutil.status(s.modified, s.added, s.removed,
1743 self._status = scmutil.status(s.modified, s.added, s.removed,
1744 s.deleted, [], [], [])
1744 s.deleted, [], [], [])
1745 else:
1745 else:
1746 self._status = s
1746 self._status = s
1747
1747
1748 return s
1748 return s
1749
1749
1750 @propertycache
1750 @propertycache
1751 def _manifest(self):
1751 def _manifest(self):
1752 """generate a manifest corresponding to the values in self._status
1752 """generate a manifest corresponding to the values in self._status
1753
1753
1754 This reuse the file nodeid from parent, but we use special node
1754 This reuse the file nodeid from parent, but we use special node
1755 identifiers for added and modified files. This is used by manifests
1755 identifiers for added and modified files. This is used by manifests
1756 merge to see that files are different and by update logic to avoid
1756 merge to see that files are different and by update logic to avoid
1757 deleting newly added files.
1757 deleting newly added files.
1758 """
1758 """
1759 return self._buildstatusmanifest(self._status)
1759 return self._buildstatusmanifest(self._status)
1760
1760
1761 def _buildstatusmanifest(self, status):
1761 def _buildstatusmanifest(self, status):
1762 """Builds a manifest that includes the given status results."""
1762 """Builds a manifest that includes the given status results."""
1763 parents = self.parents()
1763 parents = self.parents()
1764
1764
1765 man = parents[0].manifest().copy()
1765 man = parents[0].manifest().copy()
1766
1766
1767 ff = self._flagfunc
1767 ff = self._flagfunc
1768 for i, l in ((addednodeid, status.added),
1768 for i, l in ((addednodeid, status.added),
1769 (modifiednodeid, status.modified)):
1769 (modifiednodeid, status.modified)):
1770 for f in l:
1770 for f in l:
1771 man[f] = i
1771 man[f] = i
1772 try:
1772 try:
1773 man.setflag(f, ff(f))
1773 man.setflag(f, ff(f))
1774 except OSError:
1774 except OSError:
1775 pass
1775 pass
1776
1776
1777 for f in status.deleted + status.removed:
1777 for f in status.deleted + status.removed:
1778 if f in man:
1778 if f in man:
1779 del man[f]
1779 del man[f]
1780
1780
1781 return man
1781 return man
1782
1782
1783 def _buildstatus(self, other, s, match, listignored, listclean,
1783 def _buildstatus(self, other, s, match, listignored, listclean,
1784 listunknown):
1784 listunknown):
1785 """build a status with respect to another context
1785 """build a status with respect to another context
1786
1786
1787 This includes logic for maintaining the fast path of status when
1787 This includes logic for maintaining the fast path of status when
1788 comparing the working directory against its parent, which is to skip
1788 comparing the working directory against its parent, which is to skip
1789 building a new manifest if self (working directory) is not comparing
1789 building a new manifest if self (working directory) is not comparing
1790 against its parent (repo['.']).
1790 against its parent (repo['.']).
1791 """
1791 """
1792 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1792 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1793 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1793 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1794 # might have accidentally ended up with the entire contents of the file
1794 # might have accidentally ended up with the entire contents of the file
1795 # they are supposed to be linking to.
1795 # they are supposed to be linking to.
1796 s.modified[:] = self._filtersuspectsymlink(s.modified)
1796 s.modified[:] = self._filtersuspectsymlink(s.modified)
1797 if other != self._repo['.']:
1797 if other != self._repo['.']:
1798 s = super(workingctx, self)._buildstatus(other, s, match,
1798 s = super(workingctx, self)._buildstatus(other, s, match,
1799 listignored, listclean,
1799 listignored, listclean,
1800 listunknown)
1800 listunknown)
1801 return s
1801 return s
1802
1802
1803 def _matchstatus(self, other, match):
1803 def _matchstatus(self, other, match):
1804 """override the match method with a filter for directory patterns
1804 """override the match method with a filter for directory patterns
1805
1805
1806 We use inheritance to customize the match.bad method only in cases of
1806 We use inheritance to customize the match.bad method only in cases of
1807 workingctx since it belongs only to the working directory when
1807 workingctx since it belongs only to the working directory when
1808 comparing against the parent changeset.
1808 comparing against the parent changeset.
1809
1809
1810 If we aren't comparing against the working directory's parent, then we
1810 If we aren't comparing against the working directory's parent, then we
1811 just use the default match object sent to us.
1811 just use the default match object sent to us.
1812 """
1812 """
1813 if other != self._repo['.']:
1813 if other != self._repo['.']:
1814 def bad(f, msg):
1814 def bad(f, msg):
1815 # 'f' may be a directory pattern from 'match.files()',
1815 # 'f' may be a directory pattern from 'match.files()',
1816 # so 'f not in ctx1' is not enough
1816 # so 'f not in ctx1' is not enough
1817 if f not in other and not other.hasdir(f):
1817 if f not in other and not other.hasdir(f):
1818 self._repo.ui.warn('%s: %s\n' %
1818 self._repo.ui.warn('%s: %s\n' %
1819 (self._repo.dirstate.pathto(f), msg))
1819 (self._repo.dirstate.pathto(f), msg))
1820 match.bad = bad
1820 match.bad = bad
1821 return match
1821 return match
1822
1822
1823 def markcommitted(self, node):
1823 def markcommitted(self, node):
1824 super(workingctx, self).markcommitted(node)
1824 super(workingctx, self).markcommitted(node)
1825
1825
1826 sparse.aftercommit(self._repo, node)
1826 sparse.aftercommit(self._repo, node)
1827
1827
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None
        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # a committable file context is always truthy
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        filelog = self._filelog
        parentctxs = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(parentctxs[0], path), filelog)]
        for pctx in parentctxs[1:]:
            pl.append((path, filenode(pctx, path), filelog))

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        # uncommitted files have no children
        return []
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        """Read the file's current on-disk contents."""
        return self._repo.wread(self._path)

    def renamed(self):
        """Return (source, sourcenode) when this file is a recorded copy,
        else None."""
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def date(self):
        """Return (mtime, tzoffset); falls back to the changectx's timestamp
        when the file no longer exists."""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags, backgroundclose=False):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        if wvfs.isdir(self._path) and not wvfs.islink(self._path):
            wvfs.removedirs(self._path)

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)
1942 class overlayworkingctx(workingctx):
1942 class overlayworkingctx(workingctx):
1943 """Wraps another mutable context with a write-back cache that can be flushed
1943 """Wraps another mutable context with a write-back cache that can be flushed
1944 at a later time.
1944 at a later time.
1945
1945
1946 self._cache[path] maps to a dict with keys: {
1946 self._cache[path] maps to a dict with keys: {
1947 'exists': bool?
1947 'exists': bool?
1948 'date': date?
1948 'date': date?
1949 'data': str?
1949 'data': str?
1950 'flags': str?
1950 'flags': str?
1951 }
1951 }
1952 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1952 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1953 is `False`, the file was deleted.
1953 is `False`, the file was deleted.
1954 """
1954 """
1955
1955
1956 def __init__(self, repo, wrappedctx):
1956 def __init__(self, repo, wrappedctx):
1957 super(overlayworkingctx, self).__init__(repo)
1957 super(overlayworkingctx, self).__init__(repo)
1958 self._repo = repo
1958 self._repo = repo
1959 self._wrappedctx = wrappedctx
1959 self._wrappedctx = wrappedctx
1960 self._clean()
1960 self._clean()
1961
1961
1962 def data(self, path):
1962 def data(self, path):
1963 if self.isdirty(path):
1963 if self.isdirty(path):
1964 if self._cache[path]['exists']:
1964 if self._cache[path]['exists']:
1965 if self._cache[path]['data']:
1965 if self._cache[path]['data']:
1966 return self._cache[path]['data']
1966 return self._cache[path]['data']
1967 else:
1967 else:
1968 # Must fallback here, too, because we only set flags.
1968 # Must fallback here, too, because we only set flags.
1969 return self._wrappedctx[path].data()
1969 return self._wrappedctx[path].data()
1970 else:
1970 else:
1971 raise error.ProgrammingError("No such file or directory: %s" %
1971 raise error.ProgrammingError("No such file or directory: %s" %
1972 self._path)
1972 self._path)
1973 else:
1973 else:
1974 return self._wrappedctx[path].data()
1974 return self._wrappedctx[path].data()
1975
1975
1976 def filedate(self, path):
1976 def filedate(self, path):
1977 if self.isdirty(path):
1977 if self.isdirty(path):
1978 return self._cache[path]['date']
1978 return self._cache[path]['date']
1979 else:
1979 else:
1980 return self._wrappedctx[path].date()
1980 return self._wrappedctx[path].date()
1981
1981
1982 def flags(self, path):
1982 def flags(self, path):
1983 if self.isdirty(path):
1983 if self.isdirty(path):
1984 if self._cache[path]['exists']:
1984 if self._cache[path]['exists']:
1985 return self._cache[path]['flags']
1985 return self._cache[path]['flags']
1986 else:
1986 else:
1987 raise error.ProgrammingError("No such file or directory: %s" %
1987 raise error.ProgrammingError("No such file or directory: %s" %
1988 self._path)
1988 self._path)
1989 else:
1989 else:
1990 return self._wrappedctx[path].flags()
1990 return self._wrappedctx[path].flags()
1991
1991
1992 def write(self, path, data, flags=''):
1992 def write(self, path, data, flags=''):
1993 if data is None:
1993 if data is None:
1994 raise error.ProgrammingError("data must be non-None")
1994 raise error.ProgrammingError("data must be non-None")
1995 self._markdirty(path, exists=True, data=data, date=util.makedate(),
1995 self._markdirty(path, exists=True, data=data, date=util.makedate(),
1996 flags=flags)
1996 flags=flags)
1997
1997
1998 def setflags(self, path, l, x):
1998 def setflags(self, path, l, x):
1999 self._markdirty(path, exists=True, date=util.makedate(),
1999 self._markdirty(path, exists=True, date=util.makedate(),
2000 flags=(l and 'l' or '') + (x and 'x' or ''))
2000 flags=(l and 'l' or '') + (x and 'x' or ''))
2001
2001
2002 def remove(self, path):
2002 def remove(self, path):
2003 self._markdirty(path, exists=False)
2003 self._markdirty(path, exists=False)
2004
2004
2005 def exists(self, path):
2005 def exists(self, path):
2006 """exists behaves like `lexists`, but needs to follow symlinks and
2006 """exists behaves like `lexists`, but needs to follow symlinks and
2007 return False if they are broken.
2007 return False if they are broken.
2008 """
2008 """
2009 if self.isdirty(path):
2009 if self.isdirty(path):
2010 # If this path exists and is a symlink, "follow" it by calling
2010 # If this path exists and is a symlink, "follow" it by calling
2011 # exists on the destination path.
2011 # exists on the destination path.
2012 if (self._cache[path]['exists'] and
2012 if (self._cache[path]['exists'] and
2013 'l' in self._cache[path]['flags']):
2013 'l' in self._cache[path]['flags']):
2014 return self.exists(self._cache[path]['data'].strip())
2014 return self.exists(self._cache[path]['data'].strip())
2015 else:
2015 else:
2016 return self._cache[path]['exists']
2016 return self._cache[path]['exists']
2017 return self._wrappedctx[path].exists()
2017 return self._wrappedctx[path].exists()
2018
2018
2019 def lexists(self, path):
2019 def lexists(self, path):
2020 """lexists returns True if the path exists"""
2020 """lexists returns True if the path exists"""
2021 if self.isdirty(path):
2021 if self.isdirty(path):
2022 return self._cache[path]['exists']
2022 return self._cache[path]['exists']
2023 return self._wrappedctx[path].lexists()
2023 return self._wrappedctx[path].lexists()
2024
2024
2025 def size(self, path):
2025 def size(self, path):
2026 if self.isdirty(path):
2026 if self.isdirty(path):
2027 if self._cache[path]['exists']:
2027 if self._cache[path]['exists']:
2028 return len(self._cache[path]['data'])
2028 return len(self._cache[path]['data'])
2029 else:
2029 else:
2030 raise error.ProgrammingError("No such file or directory: %s" %
2030 raise error.ProgrammingError("No such file or directory: %s" %
2031 self._path)
2031 self._path)
2032 return self._wrappedctx[path].size()
2032 return self._wrappedctx[path].size()
2033
2033
def flushall(self):
    """Replay every cached change onto the wrapped context, then reset.

    Entries are written through in the order they were first dirtied
    (``self._writeorder``) so that flushes are deterministic.
    """
    for path in self._writeorder:
        entry = self._cache[path]
        if not entry['exists']:
            # file was removed in the overlay
            self._wrappedctx[path].remove(path)
            continue
        self._wrappedctx[path].clearunknown()
        if entry['data'] is None:
            # flags-only change
            self._wrappedctx[path].setflags(
                'l' in entry['flags'],
                'x' in entry['flags'])
        else:
            if entry['flags'] is None:
                raise error.ProgrammingError('data set but not flags')
            self._wrappedctx[path].write(
                entry['data'],
                entry['flags'])
    self._clean()
2052
2052
def isdirty(self, path):
    """Report whether ``path`` has uncommitted in-memory changes."""
    dirty = self._cache
    return path in dirty
2055
2055
2056 def _clean(self):
2056 def _clean(self):
2057 self._cache = {}
2057 self._cache = {}
2058 self._writeorder = []
2058 self._writeorder = []
2059
2059
2060 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2060 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2061 if path not in self._cache:
2061 if path not in self._cache:
2062 self._writeorder.append(path)
2062 self._writeorder.append(path)
2063
2063
2064 self._cache[path] = {
2064 self._cache[path] = {
2065 'exists': exists,
2065 'exists': exists,
2066 'data': data,
2066 'data': data,
2067 'date': date,
2067 'date': date,
2068 'flags': flags,
2068 'flags': flags,
2069 }
2069 }
2070
2070
def filectx(self, path, filelog=None):
    """Return an overlay file context for ``path`` bound to this ctx."""
    return overlayworkingfilectx(self._repo, path, filelog=filelog,
                                 parent=self)
2074
2074
class overlayworkingfilectx(workingfilectx):
    """A ``workingfilectx`` variant whose writes land in an in-memory cache.

    Every mutating operation is forwarded to the parent
    ``overlayworkingctx``; nothing reaches disk until the parent's cache
    is flushed.
    """

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def ctx(self):
        """Return the owning overlay context."""
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        # symlink following is handled by the parent's exists()
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        # Copies are currently tracked in the dirstate as before; this is
        # a straight copy of workingfilectx behavior.
        renamedpath = self._repo.dirstate.copied(self._path)
        if not renamedpath:
            return None
        return renamedpath, self._changectx._parents[0]._manifest.get(
            renamedpath, nullid)

    def size(self):
        return self._parent.size(self._path)

    def audit(self):
        # path auditing is unnecessary for purely in-memory files
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False):
        return self._parent.write(self._path, data, flags)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)
2126
2126
class workingcommitctx(workingctx):
    """Convenient access to the data of the revision being committed.

    Working directory changes that are not part of this commit are
    hidden by this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # NOTE: deliberately names ``workingctx`` (not this class) in
        # super() to skip workingctx.__init__ and call the grandparent
        # initializer with the precomputed ``changes``.
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return only files from ``self._status`` accepted by ``match``.

        Through this context, uncommitted files look "clean" even if they
        are actually modified in the working directory.
        """
        if clean:
            touched = self._changedset
            clean = [f for f in self._manifest if f not in touched]
        else:
            clean = []
        matched = lambda files: [f for f in files if match(f)]
        return scmutil.status(matched(self._status.modified),
                              matched(self._status.added),
                              matched(self._status.removed),
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """The set of files modified, added, or removed in this context."""
        st = self._status
        return set(st.modified) | set(st.added) | set(st.removed)
2162
2162
def makecachingfilectxfn(func):
    """Wrap ``func`` in a filectxfn that memoizes on the path only.

    ``util.cachefunc`` is unsuitable here: keying on every argument
    would hold the repo and memctx in the cache key, creating a
    reference cycle.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        try:
            return cache[path]
        except KeyError:
            result = cache[path] = func(repo, memctx, path)
            return result

    return getfilectx
2178
2178
def memfilefromctx(ctx):
    """Return a filectxfn yielding memfilectx objects backed by ``ctx``.

    Convenience helper for building a memctx from another context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        copysource = fctx.renamed()
        if copysource:
            # renamed() yields (path, filenode); only the source path is
            # kept here — apparently only one parent is ever tracked.
            copysource = copysource[0]
        return memfilectx(repo, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copysource, memctx=memctx)

    return getfilectx
2197
2197
def memfilefrompatch(patchstore):
    """Return a filectxfn that reads file data from ``patchstore``.

    Convenience helper for building a memctx from a patch store
    (e.g. a patch.filestore object).
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            # the patch removed this file
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink,
                          isexec=isexec, copied=copied,
                          memctx=memctx)

    return getfilectx
2213
2213
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        # rev/node stay None until the commit actually lands
        self._rev = None
        self._node = None
        # a None parent means "null revision"
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        # dedupe and normalize ordering of the touched-file list
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        # accept a patch store or a plain context in place of a callable
        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            # hash over (data, parents) mirrors what the filelog would store
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                # filectxfn supplied content -> the file changed
                modified.append(f)
            else:
                # filectxfn returned None -> the file was removed
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2336
2336
class memfilectx(committablefilectx):
    """An in-memory file staged to be committed as part of a memctx.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        flags = ''
        if islink:
            flags += 'l'
        if isexec:
            flags += 'x'
        self._flags = flags
        if copied:
            self._copied = (copied, nullid)
        else:
            self._copied = None

    def data(self):
        """Return the staged file content."""
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
2369
2369
class overlayfilectx(committablefilectx):
    """Like memfilectx but take an original filectx and optional parameters to
    override parts of it. This is useful when fctx.data() is expensive (i.e.
    flag processor is expensive) and raw data, flags, and filenode could be
    reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is a
        function to be lazy. path, flags, copied, ctx: None or overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        # fill in defaults from the original fctx; the *match lambdas below
        # lazily test whether an override actually changes anything
        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            ctxmatch = lambda: True
        else:
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they do
        # not affect reusable here.
        #
        # If ctx or copied is overridden to a same value with originalfctx,
        # still consider it's reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            # keep data lazy: store the bound method, not its result
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr_ in attrs:
                if util.safehasattr(originalfctx, attr_):
                    setattr(self, attr_, getattr(originalfctx, attr_))

    def data(self):
        # invoke the (possibly overridden, possibly lazy) data function
        return self._datafunc()
2440
2440
2441 class metadataonlyctx(committablectx):
2441 class metadataonlyctx(committablectx):
2442 """Like memctx but it's reusing the manifest of different commit.
2442 """Like memctx but it's reusing the manifest of different commit.
2443 Intended to be used by lightweight operations that are creating
2443 Intended to be used by lightweight operations that are creating
2444 metadata-only changes.
2444 metadata-only changes.
2445
2445
2446 Revision information is supplied at initialization time. 'repo' is the
2446 Revision information is supplied at initialization time. 'repo' is the
2447 current localrepo, 'ctx' is original revision which manifest we're reuisng
2447 current localrepo, 'ctx' is original revision which manifest we're reuisng
2448 'parents' is a sequence of two parent revisions identifiers (pass None for
2448 'parents' is a sequence of two parent revisions identifiers (pass None for
2449 every missing parent), 'text' is the commit.
2449 every missing parent), 'text' is the commit.
2450
2450
2451 user receives the committer name and defaults to current repository
2451 user receives the committer name and defaults to current repository
2452 username, date is the commit date in any format supported by
2452 username, date is the commit date in any format supported by
2453 util.parsedate() and defaults to current date, extra is a dictionary of
2453 util.parsedate() and defaults to current date, extra is a dictionary of
2454 metadata or is left empty.
2454 metadata or is left empty.
2455 """
2455 """
2456 def __new__(cls, repo, originalctx, *args, **kwargs):
2456 def __new__(cls, repo, originalctx, *args, **kwargs):
2457 return super(metadataonlyctx, cls).__new__(cls, repo)
2457 return super(metadataonlyctx, cls).__new__(cls, repo)
2458
2458
2459 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2459 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2460 date=None, extra=None, editor=False):
2460 date=None, extra=None, editor=False):
2461 if text is None:
2461 if text is None:
2462 text = originalctx.description()
2462 text = originalctx.description()
2463 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2463 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2464 self._rev = None
2464 self._rev = None
2465 self._node = None
2465 self._node = None
2466 self._originalctx = originalctx
2466 self._originalctx = originalctx
2467 self._manifestnode = originalctx.manifestnode()
2467 self._manifestnode = originalctx.manifestnode()
2468 if parents is None:
2468 if parents is None:
2469 parents = originalctx.parents()
2469 parents = originalctx.parents()
2470 else:
2470 else:
2471 parents = [repo[p] for p in parents if p is not None]
2471 parents = [repo[p] for p in parents if p is not None]
2472 parents = parents[:]
2472 parents = parents[:]
2473 while len(parents) < 2:
2473 while len(parents) < 2:
2474 parents.append(repo[nullid])
2474 parents.append(repo[nullid])
2475 p1, p2 = self._parents = parents
2475 p1, p2 = self._parents = parents
2476
2476
2477 # sanity check to ensure that the reused manifest parents are
2477 # sanity check to ensure that the reused manifest parents are
2478 # manifests of our commit parents
2478 # manifests of our commit parents
2479 mp1, mp2 = self.manifestctx().parents
2479 mp1, mp2 = self.manifestctx().parents
2480 if p1 != nullid and p1.manifestnode() != mp1:
2480 if p1 != nullid and p1.manifestnode() != mp1:
2481 raise RuntimeError('can\'t reuse the manifest: '
2481 raise RuntimeError('can\'t reuse the manifest: '
2482 'its p1 doesn\'t match the new ctx p1')
2482 'its p1 doesn\'t match the new ctx p1')
2483 if p2 != nullid and p2.manifestnode() != mp2:
2483 if p2 != nullid and p2.manifestnode() != mp2:
2484 raise RuntimeError('can\'t reuse the manifest: '
2484 raise RuntimeError('can\'t reuse the manifest: '
2485 'its p2 doesn\'t match the new ctx p2')
2485 'its p2 doesn\'t match the new ctx p2')
2486
2486
2487 self._files = originalctx.files()
2487 self._files = originalctx.files()
2488 self.substate = {}
2488 self.substate = {}
2489
2489
2490 if editor:
2490 if editor:
2491 self._text = editor(self._repo, self, [])
2491 self._text = editor(self._repo, self, [])
2492 self._repo.savecommitmessage(self._text)
2492 self._repo.savecommitmessage(self._text)
2493
2493
2494 def manifestnode(self):
2494 def manifestnode(self):
2495 return self._manifestnode
2495 return self._manifestnode
2496
2496
2497 @property
2497 @property
2498 def _manifestctx(self):
2498 def _manifestctx(self):
2499 return self._repo.manifestlog[self._manifestnode]
2499 return self._repo.manifestlog[self._manifestnode]
2500
2500
2501 def filectx(self, path, filelog=None):
2501 def filectx(self, path, filelog=None):
2502 return self._originalctx.filectx(path, filelog=filelog)
2502 return self._originalctx.filectx(path, filelog=filelog)
2503
2503
2504 def commit(self):
2504 def commit(self):
2505 """commit context to the repo"""
2505 """commit context to the repo"""
2506 return self._repo.commitctx(self)
2506 return self._repo.commitctx(self)
2507
2507
2508 @property
2508 @property
2509 def _manifest(self):
2509 def _manifest(self):
2510 return self._originalctx.manifest()
2510 return self._originalctx.manifest()
2511
2511
2512 @propertycache
2512 @propertycache
2513 def _status(self):
2513 def _status(self):
2514 """Calculate exact status from ``files`` specified in the ``origctx``
2514 """Calculate exact status from ``files`` specified in the ``origctx``
2515 and parents manifests.
2515 and parents manifests.
2516 """
2516 """
2517 man1 = self.p1().manifest()
2517 man1 = self.p1().manifest()
2518 p2 = self._parents[1]
2518 p2 = self._parents[1]
2519 # "1 < len(self._parents)" can't be used for checking
2519 # "1 < len(self._parents)" can't be used for checking
2520 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2520 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2521 # explicitly initialized by the list, of which length is 2.
2521 # explicitly initialized by the list, of which length is 2.
2522 if p2.node() != nullid:
2522 if p2.node() != nullid:
2523 man2 = p2.manifest()
2523 man2 = p2.manifest()
2524 managing = lambda f: f in man1 or f in man2
2524 managing = lambda f: f in man1 or f in man2
2525 else:
2525 else:
2526 managing = lambda f: f in man1
2526 managing = lambda f: f in man1
2527
2527
2528 modified, added, removed = [], [], []
2528 modified, added, removed = [], [], []
2529 for f in self._files:
2529 for f in self._files:
2530 if not managing(f):
2530 if not managing(f):
2531 added.append(f)
2531 added.append(f)
2532 elif f in self:
2532 elif f in self:
2533 modified.append(f)
2533 modified.append(f)
2534 else:
2534 else:
2535 removed.append(f)
2535 removed.append(f)
2536
2536
2537 return scmutil.status(modified, added, removed, [], [], [], [])
2537 return scmutil.status(modified, added, removed, [], [], [], [])
2538
2538
2539 class arbitraryfilectx(object):
2539 class arbitraryfilectx(object):
2540 """Allows you to use filectx-like functions on a file in an arbitrary
2540 """Allows you to use filectx-like functions on a file in an arbitrary
2541 location on disk, possibly not in the working directory.
2541 location on disk, possibly not in the working directory.
2542 """
2542 """
2543 def __init__(self, path):
2543 def __init__(self, path):
2544 self._path = path
2544 self._path = path
2545
2545
2546 def cmp(self, otherfilectx):
2546 def cmp(self, otherfilectx):
2547 return self.data() != otherfilectx.data()
2547 return self.data() != otherfilectx.data()
2548
2548
2549 def path(self):
2549 def path(self):
2550 return self._path
2550 return self._path
2551
2551
2552 def flags(self):
2552 def flags(self):
2553 return ''
2553 return ''
2554
2554
2555 def data(self):
2555 def data(self):
2556 return util.readfile(self._path)
2556 return util.readfile(self._path)
2557
2557
2558 def decodeddata(self):
2558 def decodeddata(self):
2559 with open(self._path, "rb") as f:
2559 with open(self._path, "rb") as f:
2560 return f.read()
2560 return f.read()
2561
2561
2562 def remove(self):
2562 def remove(self):
2563 util.unlink(self._path)
2563 util.unlink(self._path)
2564
2564
2565 def write(self, data, flags):
2565 def write(self, data, flags):
2566 assert not flags
2566 assert not flags
2567 with open(self._path, "w") as f:
2567 with open(self._path, "w") as f:
2568 f.write(data)
2568 f.write(data)
@@ -1,1775 +1,1775 b''
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import shutil
12 import shutil
13 import struct
13 import struct
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 bin,
18 bin,
19 hex,
19 hex,
20 modifiednodeid,
20 modifiednodeid,
21 nullhex,
21 nullhex,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 )
24 )
25 from . import (
25 from . import (
26 copies,
26 copies,
27 error,
27 error,
28 filemerge,
28 filemerge,
29 match as matchmod,
29 match as matchmod,
30 obsutil,
30 obsutil,
31 pycompat,
31 pycompat,
32 scmutil,
32 scmutil,
33 subrepo,
33 subrepo,
34 util,
34 util,
35 worker,
35 worker,
36 )
36 )
37
37
38 _pack = struct.pack
38 _pack = struct.pack
39 _unpack = struct.unpack
39 _unpack = struct.unpack
40
40
41 def _droponode(data):
41 def _droponode(data):
42 # used for compatibility for v1
42 # used for compatibility for v1
43 bits = data.split('\0')
43 bits = data.split('\0')
44 bits = bits[:-2] + bits[-1:]
44 bits = bits[:-2] + bits[-1:]
45 return '\0'.join(bits)
45 return '\0'.join(bits)
46
46
47 class mergestate(object):
47 class mergestate(object):
48 '''track 3-way merge state of individual files
48 '''track 3-way merge state of individual files
49
49
50 The merge state is stored on disk when needed. Two files are used: one with
50 The merge state is stored on disk when needed. Two files are used: one with
51 an old format (version 1), and one with a new format (version 2). Version 2
51 an old format (version 1), and one with a new format (version 2). Version 2
52 stores a superset of the data in version 1, including new kinds of records
52 stores a superset of the data in version 1, including new kinds of records
53 in the future. For more about the new format, see the documentation for
53 in the future. For more about the new format, see the documentation for
54 `_readrecordsv2`.
54 `_readrecordsv2`.
55
55
56 Each record can contain arbitrary content, and has an associated type. This
56 Each record can contain arbitrary content, and has an associated type. This
57 `type` should be a letter. If `type` is uppercase, the record is mandatory:
57 `type` should be a letter. If `type` is uppercase, the record is mandatory:
58 versions of Mercurial that don't support it should abort. If `type` is
58 versions of Mercurial that don't support it should abort. If `type` is
59 lowercase, the record can be safely ignored.
59 lowercase, the record can be safely ignored.
60
60
61 Currently known records:
61 Currently known records:
62
62
63 L: the node of the "local" part of the merge (hexified version)
63 L: the node of the "local" part of the merge (hexified version)
64 O: the node of the "other" part of the merge (hexified version)
64 O: the node of the "other" part of the merge (hexified version)
65 F: a file to be merged entry
65 F: a file to be merged entry
66 C: a change/delete or delete/change conflict
66 C: a change/delete or delete/change conflict
67 D: a file that the external merge driver will merge internally
67 D: a file that the external merge driver will merge internally
68 (experimental)
68 (experimental)
69 m: the external merge driver defined for this merge plus its run state
69 m: the external merge driver defined for this merge plus its run state
70 (experimental)
70 (experimental)
71 f: a (filename, dictionary) tuple of optional values for a given file
71 f: a (filename, dictionary) tuple of optional values for a given file
72 X: unsupported mandatory record type (used in tests)
72 X: unsupported mandatory record type (used in tests)
73 x: unsupported advisory record type (used in tests)
73 x: unsupported advisory record type (used in tests)
74 l: the labels for the parts of the merge.
74 l: the labels for the parts of the merge.
75
75
76 Merge driver run states (experimental):
76 Merge driver run states (experimental):
77 u: driver-resolved files unmarked -- needs to be run next time we're about
77 u: driver-resolved files unmarked -- needs to be run next time we're about
78 to resolve or commit
78 to resolve or commit
79 m: driver-resolved files marked -- only needs to be run before commit
79 m: driver-resolved files marked -- only needs to be run before commit
80 s: success/skipped -- does not need to be run any more
80 s: success/skipped -- does not need to be run any more
81
81
82 '''
82 '''
83 statepathv1 = 'merge/state'
83 statepathv1 = 'merge/state'
84 statepathv2 = 'merge/state2'
84 statepathv2 = 'merge/state2'
85
85
86 @staticmethod
86 @staticmethod
87 def clean(repo, node=None, other=None, labels=None):
87 def clean(repo, node=None, other=None, labels=None):
88 """Initialize a brand new merge state, removing any existing state on
88 """Initialize a brand new merge state, removing any existing state on
89 disk."""
89 disk."""
90 ms = mergestate(repo)
90 ms = mergestate(repo)
91 ms.reset(node, other, labels)
91 ms.reset(node, other, labels)
92 return ms
92 return ms
93
93
94 @staticmethod
94 @staticmethod
95 def read(repo):
95 def read(repo):
96 """Initialize the merge state, reading it from disk."""
96 """Initialize the merge state, reading it from disk."""
97 ms = mergestate(repo)
97 ms = mergestate(repo)
98 ms._read()
98 ms._read()
99 return ms
99 return ms
100
100
101 def __init__(self, repo):
101 def __init__(self, repo):
102 """Initialize the merge state.
102 """Initialize the merge state.
103
103
104 Do not use this directly! Instead call read() or clean()."""
104 Do not use this directly! Instead call read() or clean()."""
105 self._repo = repo
105 self._repo = repo
106 self._dirty = False
106 self._dirty = False
107 self._labels = None
107 self._labels = None
108
108
109 def reset(self, node=None, other=None, labels=None):
109 def reset(self, node=None, other=None, labels=None):
110 self._state = {}
110 self._state = {}
111 self._stateextras = {}
111 self._stateextras = {}
112 self._local = None
112 self._local = None
113 self._other = None
113 self._other = None
114 self._labels = labels
114 self._labels = labels
115 for var in ('localctx', 'otherctx'):
115 for var in ('localctx', 'otherctx'):
116 if var in vars(self):
116 if var in vars(self):
117 delattr(self, var)
117 delattr(self, var)
118 if node:
118 if node:
119 self._local = node
119 self._local = node
120 self._other = other
120 self._other = other
121 self._readmergedriver = None
121 self._readmergedriver = None
122 if self.mergedriver:
122 if self.mergedriver:
123 self._mdstate = 's'
123 self._mdstate = 's'
124 else:
124 else:
125 self._mdstate = 'u'
125 self._mdstate = 'u'
126 shutil.rmtree(self._repo.vfs.join('merge'), True)
126 shutil.rmtree(self._repo.vfs.join('merge'), True)
127 self._results = {}
127 self._results = {}
128 self._dirty = False
128 self._dirty = False
129
129
130 def _read(self):
130 def _read(self):
131 """Analyse each record content to restore a serialized state from disk
131 """Analyse each record content to restore a serialized state from disk
132
132
133 This function process "record" entry produced by the de-serialization
133 This function process "record" entry produced by the de-serialization
134 of on disk file.
134 of on disk file.
135 """
135 """
136 self._state = {}
136 self._state = {}
137 self._stateextras = {}
137 self._stateextras = {}
138 self._local = None
138 self._local = None
139 self._other = None
139 self._other = None
140 for var in ('localctx', 'otherctx'):
140 for var in ('localctx', 'otherctx'):
141 if var in vars(self):
141 if var in vars(self):
142 delattr(self, var)
142 delattr(self, var)
143 self._readmergedriver = None
143 self._readmergedriver = None
144 self._mdstate = 's'
144 self._mdstate = 's'
145 unsupported = set()
145 unsupported = set()
146 records = self._readrecords()
146 records = self._readrecords()
147 for rtype, record in records:
147 for rtype, record in records:
148 if rtype == 'L':
148 if rtype == 'L':
149 self._local = bin(record)
149 self._local = bin(record)
150 elif rtype == 'O':
150 elif rtype == 'O':
151 self._other = bin(record)
151 self._other = bin(record)
152 elif rtype == 'm':
152 elif rtype == 'm':
153 bits = record.split('\0', 1)
153 bits = record.split('\0', 1)
154 mdstate = bits[1]
154 mdstate = bits[1]
155 if len(mdstate) != 1 or mdstate not in 'ums':
155 if len(mdstate) != 1 or mdstate not in 'ums':
156 # the merge driver should be idempotent, so just rerun it
156 # the merge driver should be idempotent, so just rerun it
157 mdstate = 'u'
157 mdstate = 'u'
158
158
159 self._readmergedriver = bits[0]
159 self._readmergedriver = bits[0]
160 self._mdstate = mdstate
160 self._mdstate = mdstate
161 elif rtype in 'FDC':
161 elif rtype in 'FDC':
162 bits = record.split('\0')
162 bits = record.split('\0')
163 self._state[bits[0]] = bits[1:]
163 self._state[bits[0]] = bits[1:]
164 elif rtype == 'f':
164 elif rtype == 'f':
165 filename, rawextras = record.split('\0', 1)
165 filename, rawextras = record.split('\0', 1)
166 extraparts = rawextras.split('\0')
166 extraparts = rawextras.split('\0')
167 extras = {}
167 extras = {}
168 i = 0
168 i = 0
169 while i < len(extraparts):
169 while i < len(extraparts):
170 extras[extraparts[i]] = extraparts[i + 1]
170 extras[extraparts[i]] = extraparts[i + 1]
171 i += 2
171 i += 2
172
172
173 self._stateextras[filename] = extras
173 self._stateextras[filename] = extras
174 elif rtype == 'l':
174 elif rtype == 'l':
175 labels = record.split('\0', 2)
175 labels = record.split('\0', 2)
176 self._labels = [l for l in labels if len(l) > 0]
176 self._labels = [l for l in labels if len(l) > 0]
177 elif not rtype.islower():
177 elif not rtype.islower():
178 unsupported.add(rtype)
178 unsupported.add(rtype)
179 self._results = {}
179 self._results = {}
180 self._dirty = False
180 self._dirty = False
181
181
182 if unsupported:
182 if unsupported:
183 raise error.UnsupportedMergeRecords(unsupported)
183 raise error.UnsupportedMergeRecords(unsupported)
184
184
185 def _readrecords(self):
185 def _readrecords(self):
186 """Read merge state from disk and return a list of record (TYPE, data)
186 """Read merge state from disk and return a list of record (TYPE, data)
187
187
188 We read data from both v1 and v2 files and decide which one to use.
188 We read data from both v1 and v2 files and decide which one to use.
189
189
190 V1 has been used by version prior to 2.9.1 and contains less data than
190 V1 has been used by version prior to 2.9.1 and contains less data than
191 v2. We read both versions and check if no data in v2 contradicts
191 v2. We read both versions and check if no data in v2 contradicts
192 v1. If there is not contradiction we can safely assume that both v1
192 v1. If there is not contradiction we can safely assume that both v1
193 and v2 were written at the same time and use the extract data in v2. If
193 and v2 were written at the same time and use the extract data in v2. If
194 there is contradiction we ignore v2 content as we assume an old version
194 there is contradiction we ignore v2 content as we assume an old version
195 of Mercurial has overwritten the mergestate file and left an old v2
195 of Mercurial has overwritten the mergestate file and left an old v2
196 file around.
196 file around.
197
197
198 returns list of record [(TYPE, data), ...]"""
198 returns list of record [(TYPE, data), ...]"""
199 v1records = self._readrecordsv1()
199 v1records = self._readrecordsv1()
200 v2records = self._readrecordsv2()
200 v2records = self._readrecordsv2()
201 if self._v1v2match(v1records, v2records):
201 if self._v1v2match(v1records, v2records):
202 return v2records
202 return v2records
203 else:
203 else:
204 # v1 file is newer than v2 file, use it
204 # v1 file is newer than v2 file, use it
205 # we have to infer the "other" changeset of the merge
205 # we have to infer the "other" changeset of the merge
206 # we cannot do better than that with v1 of the format
206 # we cannot do better than that with v1 of the format
207 mctx = self._repo[None].parents()[-1]
207 mctx = self._repo[None].parents()[-1]
208 v1records.append(('O', mctx.hex()))
208 v1records.append(('O', mctx.hex()))
209 # add place holder "other" file node information
209 # add place holder "other" file node information
210 # nobody is using it yet so we do no need to fetch the data
210 # nobody is using it yet so we do no need to fetch the data
211 # if mctx was wrong `mctx[bits[-2]]` may fails.
211 # if mctx was wrong `mctx[bits[-2]]` may fails.
212 for idx, r in enumerate(v1records):
212 for idx, r in enumerate(v1records):
213 if r[0] == 'F':
213 if r[0] == 'F':
214 bits = r[1].split('\0')
214 bits = r[1].split('\0')
215 bits.insert(-2, '')
215 bits.insert(-2, '')
216 v1records[idx] = (r[0], '\0'.join(bits))
216 v1records[idx] = (r[0], '\0'.join(bits))
217 return v1records
217 return v1records
218
218
219 def _v1v2match(self, v1records, v2records):
219 def _v1v2match(self, v1records, v2records):
220 oldv2 = set() # old format version of v2 record
220 oldv2 = set() # old format version of v2 record
221 for rec in v2records:
221 for rec in v2records:
222 if rec[0] == 'L':
222 if rec[0] == 'L':
223 oldv2.add(rec)
223 oldv2.add(rec)
224 elif rec[0] == 'F':
224 elif rec[0] == 'F':
225 # drop the onode data (not contained in v1)
225 # drop the onode data (not contained in v1)
226 oldv2.add(('F', _droponode(rec[1])))
226 oldv2.add(('F', _droponode(rec[1])))
227 for rec in v1records:
227 for rec in v1records:
228 if rec not in oldv2:
228 if rec not in oldv2:
229 return False
229 return False
230 else:
230 else:
231 return True
231 return True
232
232
233 def _readrecordsv1(self):
233 def _readrecordsv1(self):
234 """read on disk merge state for version 1 file
234 """read on disk merge state for version 1 file
235
235
236 returns list of record [(TYPE, data), ...]
236 returns list of record [(TYPE, data), ...]
237
237
238 Note: the "F" data from this file are one entry short
238 Note: the "F" data from this file are one entry short
239 (no "other file node" entry)
239 (no "other file node" entry)
240 """
240 """
241 records = []
241 records = []
242 try:
242 try:
243 f = self._repo.vfs(self.statepathv1)
243 f = self._repo.vfs(self.statepathv1)
244 for i, l in enumerate(f):
244 for i, l in enumerate(f):
245 if i == 0:
245 if i == 0:
246 records.append(('L', l[:-1]))
246 records.append(('L', l[:-1]))
247 else:
247 else:
248 records.append(('F', l[:-1]))
248 records.append(('F', l[:-1]))
249 f.close()
249 f.close()
250 except IOError as err:
250 except IOError as err:
251 if err.errno != errno.ENOENT:
251 if err.errno != errno.ENOENT:
252 raise
252 raise
253 return records
253 return records
254
254
255 def _readrecordsv2(self):
255 def _readrecordsv2(self):
256 """read on disk merge state for version 2 file
256 """read on disk merge state for version 2 file
257
257
258 This format is a list of arbitrary records of the form:
258 This format is a list of arbitrary records of the form:
259
259
260 [type][length][content]
260 [type][length][content]
261
261
262 `type` is a single character, `length` is a 4 byte integer, and
262 `type` is a single character, `length` is a 4 byte integer, and
263 `content` is an arbitrary byte sequence of length `length`.
263 `content` is an arbitrary byte sequence of length `length`.
264
264
265 Mercurial versions prior to 3.7 have a bug where if there are
265 Mercurial versions prior to 3.7 have a bug where if there are
266 unsupported mandatory merge records, attempting to clear out the merge
266 unsupported mandatory merge records, attempting to clear out the merge
267 state with hg update --clean or similar aborts. The 't' record type
267 state with hg update --clean or similar aborts. The 't' record type
268 works around that by writing out what those versions treat as an
268 works around that by writing out what those versions treat as an
269 advisory record, but later versions interpret as special: the first
269 advisory record, but later versions interpret as special: the first
270 character is the 'real' record type and everything onwards is the data.
270 character is the 'real' record type and everything onwards is the data.
271
271
272 Returns list of records [(TYPE, data), ...]."""
272 Returns list of records [(TYPE, data), ...]."""
273 records = []
273 records = []
274 try:
274 try:
275 f = self._repo.vfs(self.statepathv2)
275 f = self._repo.vfs(self.statepathv2)
276 data = f.read()
276 data = f.read()
277 off = 0
277 off = 0
278 end = len(data)
278 end = len(data)
279 while off < end:
279 while off < end:
280 rtype = data[off]
280 rtype = data[off]
281 off += 1
281 off += 1
282 length = _unpack('>I', data[off:(off + 4)])[0]
282 length = _unpack('>I', data[off:(off + 4)])[0]
283 off += 4
283 off += 4
284 record = data[off:(off + length)]
284 record = data[off:(off + length)]
285 off += length
285 off += length
286 if rtype == 't':
286 if rtype == 't':
287 rtype, record = record[0], record[1:]
287 rtype, record = record[0], record[1:]
288 records.append((rtype, record))
288 records.append((rtype, record))
289 f.close()
289 f.close()
290 except IOError as err:
290 except IOError as err:
291 if err.errno != errno.ENOENT:
291 if err.errno != errno.ENOENT:
292 raise
292 raise
293 return records
293 return records
294
294
295 @util.propertycache
295 @util.propertycache
296 def mergedriver(self):
296 def mergedriver(self):
297 # protect against the following:
297 # protect against the following:
298 # - A configures a malicious merge driver in their hgrc, then
298 # - A configures a malicious merge driver in their hgrc, then
299 # pauses the merge
299 # pauses the merge
300 # - A edits their hgrc to remove references to the merge driver
300 # - A edits their hgrc to remove references to the merge driver
301 # - A gives a copy of their entire repo, including .hg, to B
301 # - A gives a copy of their entire repo, including .hg, to B
302 # - B inspects .hgrc and finds it to be clean
302 # - B inspects .hgrc and finds it to be clean
303 # - B then continues the merge and the malicious merge driver
303 # - B then continues the merge and the malicious merge driver
304 # gets invoked
304 # gets invoked
305 configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
305 configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
306 if (self._readmergedriver is not None
306 if (self._readmergedriver is not None
307 and self._readmergedriver != configmergedriver):
307 and self._readmergedriver != configmergedriver):
308 raise error.ConfigError(
308 raise error.ConfigError(
309 _("merge driver changed since merge started"),
309 _("merge driver changed since merge started"),
310 hint=_("revert merge driver change or abort merge"))
310 hint=_("revert merge driver change or abort merge"))
311
311
312 return configmergedriver
312 return configmergedriver
313
313
314 @util.propertycache
314 @util.propertycache
315 def localctx(self):
315 def localctx(self):
316 if self._local is None:
316 if self._local is None:
317 msg = "localctx accessed but self._local isn't set"
317 msg = "localctx accessed but self._local isn't set"
318 raise error.ProgrammingError(msg)
318 raise error.ProgrammingError(msg)
319 return self._repo[self._local]
319 return self._repo[self._local]
320
320
321 @util.propertycache
321 @util.propertycache
322 def otherctx(self):
322 def otherctx(self):
323 if self._other is None:
323 if self._other is None:
324 msg = "otherctx accessed but self._other isn't set"
324 msg = "otherctx accessed but self._other isn't set"
325 raise error.ProgrammingError(msg)
325 raise error.ProgrammingError(msg)
326 return self._repo[self._other]
326 return self._repo[self._other]
327
327
328 def active(self):
328 def active(self):
329 """Whether mergestate is active.
329 """Whether mergestate is active.
330
330
331 Returns True if there appears to be mergestate. This is a rough proxy
331 Returns True if there appears to be mergestate. This is a rough proxy
332 for "is a merge in progress."
332 for "is a merge in progress."
333 """
333 """
334 # Check local variables before looking at filesystem for performance
334 # Check local variables before looking at filesystem for performance
335 # reasons.
335 # reasons.
336 return bool(self._local) or bool(self._state) or \
336 return bool(self._local) or bool(self._state) or \
337 self._repo.vfs.exists(self.statepathv1) or \
337 self._repo.vfs.exists(self.statepathv1) or \
338 self._repo.vfs.exists(self.statepathv2)
338 self._repo.vfs.exists(self.statepathv2)
339
339
340 def commit(self):
340 def commit(self):
341 """Write current state on disk (if necessary)"""
341 """Write current state on disk (if necessary)"""
342 if self._dirty:
342 if self._dirty:
343 records = self._makerecords()
343 records = self._makerecords()
344 self._writerecords(records)
344 self._writerecords(records)
345 self._dirty = False
345 self._dirty = False
346
346
347 def _makerecords(self):
347 def _makerecords(self):
348 records = []
348 records = []
349 records.append(('L', hex(self._local)))
349 records.append(('L', hex(self._local)))
350 records.append(('O', hex(self._other)))
350 records.append(('O', hex(self._other)))
351 if self.mergedriver:
351 if self.mergedriver:
352 records.append(('m', '\0'.join([
352 records.append(('m', '\0'.join([
353 self.mergedriver, self._mdstate])))
353 self.mergedriver, self._mdstate])))
354 for d, v in self._state.iteritems():
354 for d, v in self._state.iteritems():
355 if v[0] == 'd':
355 if v[0] == 'd':
356 records.append(('D', '\0'.join([d] + v)))
356 records.append(('D', '\0'.join([d] + v)))
357 # v[1] == local ('cd'), v[6] == other ('dc') -- not supported by
357 # v[1] == local ('cd'), v[6] == other ('dc') -- not supported by
358 # older versions of Mercurial
358 # older versions of Mercurial
359 elif v[1] == nullhex or v[6] == nullhex:
359 elif v[1] == nullhex or v[6] == nullhex:
360 records.append(('C', '\0'.join([d] + v)))
360 records.append(('C', '\0'.join([d] + v)))
361 else:
361 else:
362 records.append(('F', '\0'.join([d] + v)))
362 records.append(('F', '\0'.join([d] + v)))
363 for filename, extras in sorted(self._stateextras.iteritems()):
363 for filename, extras in sorted(self._stateextras.iteritems()):
364 rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
364 rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in
365 extras.iteritems())
365 extras.iteritems())
366 records.append(('f', '%s\0%s' % (filename, rawextras)))
366 records.append(('f', '%s\0%s' % (filename, rawextras)))
367 if self._labels is not None:
367 if self._labels is not None:
368 labels = '\0'.join(self._labels)
368 labels = '\0'.join(self._labels)
369 records.append(('l', labels))
369 records.append(('l', labels))
370 return records
370 return records
371
371
372 def _writerecords(self, records):
372 def _writerecords(self, records):
373 """Write current state on disk (both v1 and v2)"""
373 """Write current state on disk (both v1 and v2)"""
374 self._writerecordsv1(records)
374 self._writerecordsv1(records)
375 self._writerecordsv2(records)
375 self._writerecordsv2(records)
376
376
377 def _writerecordsv1(self, records):
377 def _writerecordsv1(self, records):
378 """Write current state on disk in a version 1 file"""
378 """Write current state on disk in a version 1 file"""
379 f = self._repo.vfs(self.statepathv1, 'w')
379 f = self._repo.vfs(self.statepathv1, 'w')
380 irecords = iter(records)
380 irecords = iter(records)
381 lrecords = next(irecords)
381 lrecords = next(irecords)
382 assert lrecords[0] == 'L'
382 assert lrecords[0] == 'L'
383 f.write(hex(self._local) + '\n')
383 f.write(hex(self._local) + '\n')
384 for rtype, data in irecords:
384 for rtype, data in irecords:
385 if rtype == 'F':
385 if rtype == 'F':
386 f.write('%s\n' % _droponode(data))
386 f.write('%s\n' % _droponode(data))
387 f.close()
387 f.close()
388
388
389 def _writerecordsv2(self, records):
389 def _writerecordsv2(self, records):
390 """Write current state on disk in a version 2 file
390 """Write current state on disk in a version 2 file
391
391
392 See the docstring for _readrecordsv2 for why we use 't'."""
392 See the docstring for _readrecordsv2 for why we use 't'."""
393 # these are the records that all version 2 clients can read
393 # these are the records that all version 2 clients can read
394 whitelist = 'LOF'
394 whitelist = 'LOF'
395 f = self._repo.vfs(self.statepathv2, 'w')
395 f = self._repo.vfs(self.statepathv2, 'w')
396 for key, data in records:
396 for key, data in records:
397 assert len(key) == 1
397 assert len(key) == 1
398 if key not in whitelist:
398 if key not in whitelist:
399 key, data = 't', '%s%s' % (key, data)
399 key, data = 't', '%s%s' % (key, data)
400 format = '>sI%is' % len(data)
400 format = '>sI%is' % len(data)
401 f.write(_pack(format, key, len(data), data))
401 f.write(_pack(format, key, len(data), data))
402 f.close()
402 f.close()
403
403
404 def add(self, fcl, fco, fca, fd):
404 def add(self, fcl, fco, fca, fd):
405 """add a new (potentially?) conflicting file the merge state
405 """add a new (potentially?) conflicting file the merge state
406 fcl: file context for local,
406 fcl: file context for local,
407 fco: file context for remote,
407 fco: file context for remote,
408 fca: file context for ancestors,
408 fca: file context for ancestors,
409 fd: file path of the resulting merge.
409 fd: file path of the resulting merge.
410
410
411 note: also write the local version to the `.hg/merge` directory.
411 note: also write the local version to the `.hg/merge` directory.
412 """
412 """
413 if fcl.isabsent():
413 if fcl.isabsent():
414 hash = nullhex
414 hash = nullhex
415 else:
415 else:
416 hash = hex(hashlib.sha1(fcl.path()).digest())
416 hash = hex(hashlib.sha1(fcl.path()).digest())
417 self._repo.vfs.write('merge/' + hash, fcl.data())
417 self._repo.vfs.write('merge/' + hash, fcl.data())
418 self._state[fd] = ['u', hash, fcl.path(),
418 self._state[fd] = ['u', hash, fcl.path(),
419 fca.path(), hex(fca.filenode()),
419 fca.path(), hex(fca.filenode()),
420 fco.path(), hex(fco.filenode()),
420 fco.path(), hex(fco.filenode()),
421 fcl.flags()]
421 fcl.flags()]
422 self._stateextras[fd] = { 'ancestorlinknode' : hex(fca.node()) }
422 self._stateextras[fd] = { 'ancestorlinknode' : hex(fca.node()) }
423 self._dirty = True
423 self._dirty = True
424
424
425 def __contains__(self, dfile):
425 def __contains__(self, dfile):
426 return dfile in self._state
426 return dfile in self._state
427
427
428 def __getitem__(self, dfile):
428 def __getitem__(self, dfile):
429 return self._state[dfile][0]
429 return self._state[dfile][0]
430
430
431 def __iter__(self):
431 def __iter__(self):
432 return iter(sorted(self._state))
432 return iter(sorted(self._state))
433
433
434 def files(self):
434 def files(self):
435 return self._state.keys()
435 return self._state.keys()
436
436
437 def mark(self, dfile, state):
437 def mark(self, dfile, state):
438 self._state[dfile][0] = state
438 self._state[dfile][0] = state
439 self._dirty = True
439 self._dirty = True
440
440
441 def mdstate(self):
441 def mdstate(self):
442 return self._mdstate
442 return self._mdstate
443
443
444 def unresolved(self):
444 def unresolved(self):
445 """Obtain the paths of unresolved files."""
445 """Obtain the paths of unresolved files."""
446
446
447 for f, entry in self._state.iteritems():
447 for f, entry in self._state.iteritems():
448 if entry[0] == 'u':
448 if entry[0] == 'u':
449 yield f
449 yield f
450
450
451 def driverresolved(self):
451 def driverresolved(self):
452 """Obtain the paths of driver-resolved files."""
452 """Obtain the paths of driver-resolved files."""
453
453
454 for f, entry in self._state.items():
454 for f, entry in self._state.items():
455 if entry[0] == 'd':
455 if entry[0] == 'd':
456 yield f
456 yield f
457
457
458 def extras(self, filename):
458 def extras(self, filename):
459 return self._stateextras.setdefault(filename, {})
459 return self._stateextras.setdefault(filename, {})
460
460
    def _resolve(self, preresolve, dfile, wctx):
        """rerun merge process for file path `dfile`

        preresolve: True to run the premerge step, False for the real merge.

        Returns a (complete, exit code) pair; updates self._state,
        self._stateextras and self._results as a side effect.
        """
        # already resolved (or driver-resolved): nothing to do
        if self[dfile] in 'rd':
            return True, 0
        stateentry = self._state[dfile]
        state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
        octx = self._repo[self._other]
        extras = self.extras(dfile)
        anccommitnode = extras.get('ancestorlinknode')
        if anccommitnode:
            actx = self._repo[anccommitnode]
        else:
            # no recorded ancestor changeset; filectx will locate one by fileid
            actx = None
        fcd = self._filectxorabsent(hash, wctx, dfile)
        fco = self._filectxorabsent(onode, octx, ofile)
        # TODO: move this to filectxorabsent
        fca = self._repo.filectx(afile, fileid=anode, changeid=actx)
        # "premerge" x flags: merge the exec bit when no symlink is involved
        flo = fco.flags()
        fla = fca.flags()
        if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
            if fca.node() == nullid and flags != flo:
                # no common ancestor to arbitrate a flag conflict
                if preresolve:
                    self._repo.ui.warn(
                        _('warning: cannot merge flags for %s '
                          'without common ancestor - keeping local flags\n')
                        % afile)
            elif flags == fla:
                # local side unchanged vs ancestor: take the other side's flags
                flags = flo
        if preresolve:
            # restore local version saved by add() before re-merging
            if hash != nullhex:
                f = self._repo.vfs('merge/' + hash)
                wctx[dfile].write(f.read(), flags)
                f.close()
            else:
                # locally absent file: make sure nothing is left in the wdir
                wctx[dfile].remove(ignoremissing=True)
            complete, r, deleted = filemerge.premerge(self._repo, wctx,
                                                      self._local, lfile, fcd,
                                                      fco, fca,
                                                      labels=self._labels)
        else:
            complete, r, deleted = filemerge.filemerge(self._repo, wctx,
                                                       self._local, lfile, fcd,
                                                       fco, fca,
                                                       labels=self._labels)
        if r is None:
            # no real conflict
            del self._state[dfile]
            self._stateextras.pop(dfile, None)
            self._dirty = True
        elif not r:
            # zero exit code: merged cleanly, mark resolved
            self.mark(dfile, 'r')

        if complete:
            # record the dirstate action implied by the merge outcome
            action = None
            if deleted:
                if fcd.isabsent():
                    # dc: local picked. Need to drop if present, which may
                    # happen on re-resolves.
                    action = 'f'
                else:
                    # cd: remote picked (or otherwise deleted)
                    action = 'r'
            else:
                if fcd.isabsent(): # dc: remote picked
                    action = 'g'
                elif fco.isabsent(): # cd: local picked
                    if dfile in self.localctx:
                        action = 'am'
                    else:
                        action = 'a'
                # else: regular merges (no action necessary)
            self._results[dfile] = r, action

        return complete, r
537
537
538 def _filectxorabsent(self, hexnode, ctx, f):
538 def _filectxorabsent(self, hexnode, ctx, f):
539 if hexnode == nullhex:
539 if hexnode == nullhex:
540 return filemerge.absentfilectx(ctx, f)
540 return filemerge.absentfilectx(ctx, f)
541 else:
541 else:
542 return ctx[f]
542 return ctx[f]
543
543
    def preresolve(self, dfile, wctx):
        """run premerge process for dfile

        Returns whether the merge is complete, and the exit code."""
        # premerge pass only: delegate with preresolve=True
        return self._resolve(True, dfile, wctx)
549
549
550 def resolve(self, dfile, wctx):
550 def resolve(self, dfile, wctx):
551 """run merge process (assuming premerge was run) for dfile
551 """run merge process (assuming premerge was run) for dfile
552
552
553 Returns the exit code of the merge."""
553 Returns the exit code of the merge."""
554 return self._resolve(False, dfile, wctx)[1]
554 return self._resolve(False, dfile, wctx)[1]
555
555
556 def counts(self):
556 def counts(self):
557 """return counts for updated, merged and removed files in this
557 """return counts for updated, merged and removed files in this
558 session"""
558 session"""
559 updated, merged, removed = 0, 0, 0
559 updated, merged, removed = 0, 0, 0
560 for r, action in self._results.itervalues():
560 for r, action in self._results.itervalues():
561 if r is None:
561 if r is None:
562 updated += 1
562 updated += 1
563 elif r == 0:
563 elif r == 0:
564 if action == 'r':
564 if action == 'r':
565 removed += 1
565 removed += 1
566 else:
566 else:
567 merged += 1
567 merged += 1
568 return updated, merged, removed
568 return updated, merged, removed
569
569
570 def unresolvedcount(self):
570 def unresolvedcount(self):
571 """get unresolved count for this merge (persistent)"""
571 """get unresolved count for this merge (persistent)"""
572 return len(list(self.unresolved()))
572 return len(list(self.unresolved()))
573
573
574 def actions(self):
574 def actions(self):
575 """return lists of actions to perform on the dirstate"""
575 """return lists of actions to perform on the dirstate"""
576 actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
576 actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
577 for f, (r, action) in self._results.iteritems():
577 for f, (r, action) in self._results.iteritems():
578 if action is not None:
578 if action is not None:
579 actions[action].append((f, None, "merge result"))
579 actions[action].append((f, None, "merge result"))
580 return actions
580 return actions
581
581
582 def recordactions(self):
582 def recordactions(self):
583 """record remove/add/get actions in the dirstate"""
583 """record remove/add/get actions in the dirstate"""
584 branchmerge = self._repo.dirstate.p2() != nullid
584 branchmerge = self._repo.dirstate.p2() != nullid
585 recordupdates(self._repo, self.actions(), branchmerge)
585 recordupdates(self._repo, self.actions(), branchmerge)
586
586
587 def queueremove(self, f):
587 def queueremove(self, f):
588 """queues a file to be removed from the dirstate
588 """queues a file to be removed from the dirstate
589
589
590 Meant for use by custom merge drivers."""
590 Meant for use by custom merge drivers."""
591 self._results[f] = 0, 'r'
591 self._results[f] = 0, 'r'
592
592
593 def queueadd(self, f):
593 def queueadd(self, f):
594 """queues a file to be added to the dirstate
594 """queues a file to be added to the dirstate
595
595
596 Meant for use by custom merge drivers."""
596 Meant for use by custom merge drivers."""
597 self._results[f] = 0, 'a'
597 self._results[f] = 0, 'a'
598
598
599 def queueget(self, f):
599 def queueget(self, f):
600 """queues a file to be marked modified in the dirstate
600 """queues a file to be marked modified in the dirstate
601
601
602 Meant for use by custom merge drivers."""
602 Meant for use by custom merge drivers."""
603 self._results[f] = 0, 'g'
603 self._results[f] = 0, 'g'
604
604
def _getcheckunknownconfig(repo, section, name):
    """Read config `section.name`, which must be one of 'abort', 'ignore'
    or 'warn' (default 'abort'); raise ConfigError otherwise."""
    valid = ['abort', 'ignore', 'warn']
    config = repo.ui.config(section, name, default='abort')
    if config in valid:
        return config
    validstr = ', '.join(["'" + v + "'" for v in valid])
    raise error.ConfigError(_("%s.%s not valid "
                              "('%s' is none of %s)")
                            % (section, name, config, validstr))
614
614
def _checkunknownfile(repo, wctx, mctx, f, f2=None):
    """Return truthy if working-dir file `f` is untracked (not in the
    dirstate) and its content differs from `f2` in `mctx`.

    `f2` defaults to `f`; it differs when a rename/copy maps the paths.
    """
    if f2 is None:
        f2 = f
    # NOTE(review): wvfs.audit.check presumably rejects unsafe/invalid
    # paths before the filesystem is consulted -- confirm against pathutil.
    return (repo.wvfs.audit.check(f)
            and repo.wvfs.isfileorlink(f)
            and repo.dirstate.normalize(f) not in repo.dirstate
            and mctx[f2].cmp(wctx[f]))
622
622
def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.

    Mutates `actions` in place; may raise error.Abort when the configured
    policy is 'abort' and conflicting untracked files exist.
    """
    conflicts = set()
    warnconflicts = set()
    abortconflicts = set()
    unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown')
    ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored')
    if not force:
        # route each conflicting path into the abort or warn bucket
        # according to the relevant config value
        def collectconflicts(conflicts, config):
            if config == 'abort':
                abortconflicts.update(conflicts)
            elif config == 'warn':
                warnconflicts.update(conflicts)

        for f, (m, args, msg) in actions.iteritems():
            if m in ('c', 'dc'):
                if _checkunknownfile(repo, wctx, mctx, f):
                    conflicts.add(f)
            elif m == 'dg':
                # directory-rename get: compare against the source path
                if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                    conflicts.add(f)

        # ignored files get their own config knob (merge.checkignored)
        ignoredconflicts = set([c for c in conflicts
                                if repo.dirstate._ignore(c)])
        unknownconflicts = conflicts - ignoredconflicts
        collectconflicts(ignoredconflicts, ignoredconfig)
        collectconflicts(unknownconflicts, unknownconfig)
    else:
        for f, (m, args, msg) in actions.iteritems():
            if m == 'cm':
                fl2, anc = args
                different = _checkunknownfile(repo, wctx, mctx, f)
                if repo.dirstate._ignore(f):
                    config = ignoredconfig
                else:
                    config = unknownconfig

                # The behavior when force is True is described by this table:
                #  config  different  mergeforce  |    action    backup
                #    *         n          *       |      get        n
                #    *         y          y       |     merge       -
                #   abort      y          n       |     merge       -   (1)
                #   warn       y          n       |  warn + get     y
                #  ignore      y          n       |      get        y
                #
                # (1) this is probably the wrong behavior here -- we should
                #     probably abort, but some actions like rebases currently
                #     don't like an abort happening in the middle of
                #     merge.update.
                if not different:
                    actions[f] = ('g', (fl2, False), "remote created")
                elif mergeforce or config == 'abort':
                    actions[f] = ('m', (f, f, None, False, anc),
                                  "remote differs from untracked local")
                elif config == 'abort':
                    # NOTE(review): this branch is unreachable -- the
                    # preceding `elif mergeforce or config == 'abort'`
                    # already captures config == 'abort' (see note (1)
                    # above, which documents the resulting behavior).
                    abortconflicts.add(f)
                else:
                    if config == 'warn':
                        warnconflicts.add(f)
                    actions[f] = ('g', (fl2, True), "remote created")

    for f in sorted(abortconflicts):
        repo.ui.warn(_("%s: untracked file differs\n") % f)
    if abortconflicts:
        raise error.Abort(_("untracked files in working directory "
                            "differ from files in requested revision"))

    for f in sorted(warnconflicts):
        repo.ui.warn(_("%s: replacing untracked file\n") % f)

    # rewrite remaining 'c' (create) actions into 'g' (get), backing up
    # the working-directory copy when it conflicted
    for f, (m, args, msg) in actions.iteritems():
        backup = f in conflicts
        if m == 'c':
            flags, = args
            actions[f] = ('g', (flags, backup), msg)
702
702
def _forgetremoved(wctx, mctx, branchmerge):
    """
    Forget removed files

    If we're jumping between revisions (as opposed to merging), and if
    neither the working directory nor the target rev has the file,
    then we need to remove it from the dirstate, to prevent the
    dirstate from listing the file when it is no longer in the
    manifest.

    If we're merging, and the other revision has removed a file
    that is not present in the working directory, we need to mark it
    as removed.
    """
    actions = {}
    # when merging, mark as removed ('r'); otherwise just forget ('f')
    m = 'r' if branchmerge else 'f'
    for f in wctx.deleted():
        if f not in mctx:
            actions[f] = m, None, "forget deleted"

    if not branchmerge:
        for f in wctx.removed():
            if f not in mctx:
                actions[f] = 'f', None, "forget removed"

    return actions
732
732
def _checkcollision(repo, wmf, actions):
    """Abort if the post-merge manifest would contain case-folding
    collisions (two paths, or a path and a directory, identical after
    case normalization).

    wmf: working manifest; actions: action dict as built by
    manifestmerge (may be None/empty).
    """
    # build provisional merged manifest up
    pmmf = set(wmf)

    if actions:
        # k, dr, e and rd are no-op
        for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
            for f, args, msg in actions[m]:
                pmmf.add(f)
        for f, args, msg in actions['r']:
            pmmf.discard(f)
        for f, args, msg in actions['dm']:
            # directory-rename move: source disappears, destination appears
            f2, flags = args
            pmmf.discard(f2)
            pmmf.add(f)
        for f, args, msg in actions['dg']:
            pmmf.add(f)
        for f, args, msg in actions['m']:
            f1, f2, fa, move, anc = args
            if move:
                pmmf.discard(f1)
            pmmf.add(f)

    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for f in pmmf:
        fold = util.normcase(f)
        if fold in foldmap:
            raise error.Abort(_("case-folding collision between %s and %s")
                              % (f, foldmap[fold]))
        foldmap[fold] = f

    # check case-folding of directories: iterate folded names in sorted
    # order so a directory immediately precedes the files inside it
    foldprefix = unfoldprefix = lastfull = ''
    for fold, f in sorted(foldmap.items()):
        if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            raise error.Abort(_("case-folding collision between "
                                "%s and directory of %s") % (lastfull, f))
        foldprefix = fold + '/'
        unfoldprefix = f + '/'
        lastfull = f
775
775
def driverpreprocess(repo, ms, wctx, labels=None):
    """run the preprocess step of the merge driver, if any

    This is currently not implemented -- it's an extension point.
    Returns True (success / nothing to do)."""
    return True
781
781
def driverconclude(repo, ms, wctx, labels=None):
    """run the conclude step of the merge driver, if any

    This is currently not implemented -- it's an extension point.
    Returns True (success / nothing to do)."""
    return True
787
787
def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
                  acceptremote, followcopies, forcefulldiff=False):
    """
    Merge wctx and p2 with ancestor pa and generate merge action list

    branchmerge and force are as passed in to update
    matcher = matcher to filter file lists
    acceptremote = accept the incoming changes without prompting
    forcefulldiff = disable the m2-vs-ma diff-narrowing optimization

    Returns (actions, diverge, renamedelete) where actions maps file path
    to an (action-type, args, message) tuple and the last two come from
    copies.mergecopies.
    """
    if matcher is not None and matcher.always():
        # an always-matcher filters nothing; drop it so the fast path is used
        matcher = None

    copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}

    # manifests fetched in order are going to be faster, so prime the caches
    [x.manifest() for x in
     sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)]

    if followcopies:
        ret = copies.mergecopies(repo, wctx, p2, pa)
        copy, movewithdir, diverge, renamedelete, dirmove = ret

    boolbm = pycompat.bytestr(bool(branchmerge))
    boolf = pycompat.bytestr(bool(force))
    boolm = pycompat.bytestr(bool(matcher))
    repo.ui.note(_("resolving manifests\n"))
    repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
                  % (boolbm, boolf, boolm))
    repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))

    m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
    # files that are copy/move *destinations*; the source-side entries are
    # skipped below because the destination entry handles them
    copied = set(copy.values())
    copied.update(movewithdir.values())

    if '.hgsubstate' in m1:
        # check whether sub state is modified
        if any(wctx.sub(s).dirty() for s in wctx.substate):
            m1['.hgsubstate'] = modifiednodeid

    # Don't use m2-vs-ma optimization if:
    # - ma is the same as m1 or m2, which we're just going to diff again later
    # - The caller specifically asks for a full diff, which is useful during bid
    #   merge.
    if (pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff):
        # Identify which files are relevant to the merge, so we can limit the
        # total m1-vs-m2 diff to just those files. This has significant
        # performance benefits in large repositories.
        relevantfiles = set(ma.diff(m2).keys())

        # For copied and moved files, we need to add the source file too.
        for copykey, copyvalue in copy.iteritems():
            if copyvalue in relevantfiles:
                relevantfiles.add(copykey)
        for movedirkey in movewithdir:
            relevantfiles.add(movedirkey)
        filesmatcher = scmutil.matchfiles(repo, relevantfiles)
        matcher = matchmod.intersectmatchers(matcher, filesmatcher)

    diff = m1.diff(m2, match=matcher)

    if matcher is None:
        matcher = matchmod.always('', '')

    actions = {}
    for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
        if n1 and n2: # file exists on both local and remote side
            if f not in ma:
                # not in the ancestor: both sides created it
                fa = copy.get(f, None)
                if fa is not None:
                    actions[f] = ('m', (f, f, fa, False, pa.node()),
                                  "both renamed from " + fa)
                else:
                    actions[f] = ('m', (f, f, None, False, pa.node()),
                                  "both created")
            else:
                a = ma[f]
                fla = ma.flags(f)
                # nol: no symlink flag anywhere, so flags can be merged
                nol = 'l' not in fl1 + fl2 + fla
                if n2 == a and fl2 == fla:
                    actions[f] = ('k' , (), "remote unchanged")
                elif n1 == a and fl1 == fla: # local unchanged - use remote
                    if n1 == n2: # optimization: keep local content
                        actions[f] = ('e', (fl2,), "update permissions")
                    else:
                        actions[f] = ('g', (fl2, False), "remote is newer")
                elif nol and n2 == a: # remote only changed 'x'
                    actions[f] = ('e', (fl2,), "update permissions")
                elif nol and n1 == a: # local only changed 'x'
                    actions[f] = ('g', (fl1, False), "remote is newer")
                else: # both changed something
                    actions[f] = ('m', (f, f, f, False, pa.node()),
                                  "versions differ")
        elif n1: # file exists only on local side
            if f in copied:
                pass # we'll deal with it on m2 side
            elif f in movewithdir: # directory rename, move local
                f2 = movewithdir[f]
                if f2 in m2:
                    actions[f2] = ('m', (f, f2, None, True, pa.node()),
                                   "remote directory rename, both created")
                else:
                    actions[f2] = ('dm', (f, fl1),
                                   "remote directory rename - move from " + f)
            elif f in copy:
                f2 = copy[f]
                actions[f] = ('m', (f, f2, f2, False, pa.node()),
                              "local copied/moved from " + f2)
            elif f in ma: # clean, a different, no remote
                if n1 != ma[f]:
                    # changed locally, deleted remotely
                    if acceptremote:
                        actions[f] = ('r', None, "remote delete")
                    else:
                        actions[f] = ('cd', (f, None, f, False, pa.node()),
                                      "prompt changed/deleted")
                elif n1 == addednodeid:
                    # This extra 'a' is added by working copy manifest to mark
                    # the file as locally added. We should forget it instead of
                    # deleting it.
                    actions[f] = ('f', None, "remote deleted")
                else:
                    actions[f] = ('r', None, "other deleted")
        elif n2: # file exists only on remote side
            if f in copied:
                pass # we'll deal with it on m1 side
            elif f in movewithdir:
                f2 = movewithdir[f]
                if f2 in m1:
                    actions[f2] = ('m', (f2, f, None, False, pa.node()),
                                   "local directory rename, both created")
                else:
                    actions[f2] = ('dg', (f, fl2),
                                   "local directory rename - get from " + f)
            elif f in copy:
                f2 = copy[f]
                if f2 in m2:
                    actions[f] = ('m', (f2, f, f2, False, pa.node()),
                                  "remote copied from " + f2)
                else:
                    actions[f] = ('m', (f2, f, f2, True, pa.node()),
                                  "remote moved from " + f2)
            elif f not in ma:
                # local unknown, remote created: the logic is described by the
                # following table:
                #
                # force  branchmerge  different  |  action
                #   n         *           *      |  create
                #   y         n           *      |  create
                #   y         y           n      |  create
                #   y         y           y      |  merge
                #
                # Checking whether the files are different is expensive, so we
                # don't do that when we can avoid it.
                if not force:
                    actions[f] = ('c', (fl2,), "remote created")
                elif not branchmerge:
                    actions[f] = ('c', (fl2,), "remote created")
                else:
                    # defer the different-check to _checkunknownfiles
                    actions[f] = ('cm', (fl2, pa.node()),
                                  "remote created, get or merge")
            elif n2 != ma[f]:
                # deleted locally, changed remotely
                df = None
                for d in dirmove:
                    if f.startswith(d):
                        # new file added in a directory that was moved
                        df = dirmove[d] + f[len(d):]
                        break
                if df is not None and df in m1:
                    actions[df] = ('m', (df, f, f, False, pa.node()),
                                   "local directory rename - respect move from " + f)
                elif acceptremote:
                    actions[f] = ('c', (fl2,), "remote recreating")
                else:
                    actions[f] = ('dc', (None, f, f, False, pa.node()),
                                  "prompt deleted/changed")

    return actions, diverge, renamedelete
964
964
965 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
965 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
966 """Resolves false conflicts where the nodeid changed but the content
966 """Resolves false conflicts where the nodeid changed but the content
967 remained the same."""
967 remained the same."""
968
968
969 for f, (m, args, msg) in actions.items():
969 for f, (m, args, msg) in actions.items():
970 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
970 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
971 # local did change but ended up with same content
971 # local did change but ended up with same content
972 actions[f] = 'r', None, "prompt same"
972 actions[f] = 'r', None, "prompt same"
973 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
973 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
974 # remote did change but ended up with same content
974 # remote did change but ended up with same content
975 del actions[f] # don't get = keep local deleted
975 del actions[f] # don't get = keep local deleted
976
976
def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
                     acceptremote, followcopies, matcher=None,
                     mergeforce=False):
    """Calculate the actions needed to merge mctx into wctx using ancestors

    With a single ancestor this is a thin wrapper around manifestmerge().
    With several ancestors (merge.preferancestor=*), runs a "bid merge":
    manifestmerge() is called once per ancestor, each resulting action is a
    bid, and an auction picks the best bid per file.

    Returns a tuple (actions, diverge, renamedelete) where actions maps
    filename -> (method, args, message).
    """
    # Avoid cycle.
    from . import sparse

    if len(ancestors) == 1: # default
        actions, diverge, renamedelete = manifestmerge(
            repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
            acceptremote, followcopies)
        _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

    else: # only when merge.preferancestor=* - the default
        repo.ui.note(
            _("note: merging %s and %s using bids from ancestors %s\n") %
            (wctx, mctx, _(' and ').join(pycompat.bytestr(anc)
                                         for anc in ancestors)))

        # Call for bids
        fbids = {} # mapping filename to bids (action method to list af actions)
        diverge, renamedelete = None, None
        for ancestor in ancestors:
            repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
            actions, diverge1, renamedelete1 = manifestmerge(
                repo, wctx, mctx, ancestor, branchmerge, force, matcher,
                acceptremote, followcopies, forcefulldiff=True)
            _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

            # Track the shortest set of warning on the theory that bid
            # merge will correctly incorporate more information
            if diverge is None or len(diverge1) < len(diverge):
                diverge = diverge1
            # NOTE(review): the comparison here used to be inverted
            # (len(renamedelete) < len(renamedelete1)), which kept the
            # *longest* renamedelete set, contradicting both the comment
            # above and the diverge handling. Keep the shortest instead.
            if renamedelete is None or len(renamedelete1) < len(renamedelete):
                renamedelete = renamedelete1

            for f, a in sorted(actions.iteritems()):
                m, args, msg = a
                repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
                if f in fbids:
                    d = fbids[f]
                    if m in d:
                        d[m].append(a)
                    else:
                        d[m] = [a]
                else:
                    fbids[f] = {m: [a]}

        # Pick the best bid for each file
        repo.ui.note(_('\nauction for merging merge bids\n'))
        actions = {}
        dms = [] # filenames that have dm actions
        for f, bids in sorted(fbids.items()):
            # bids is a mapping from action method to list af actions
            # Consensus?
            if len(bids) == 1: # all bids are the same kind of method
                m, l = list(bids.items())[0]
                if all(a == l[0] for a in l[1:]): # len(bids) is > 1
                    repo.ui.note(_(" %s: consensus for %s\n") % (f, m))
                    actions[f] = l[0]
                    if m == 'dm':
                        dms.append(f)
                    continue
            # If keep is an option, just do it.
            if 'k' in bids:
                repo.ui.note(_(" %s: picking 'keep' action\n") % f)
                actions[f] = bids['k'][0]
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if 'g' in bids:
                ga0 = bids['g'][0]
                if all(a == ga0 for a in bids['g'][1:]):
                    repo.ui.note(_(" %s: picking 'get' action\n") % f)
                    actions[f] = ga0
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note(' %s -> %s\n' % (msg, m))
            # Pick random action. TODO: Instead, prompt user when resolving
            m, l = list(bids.items())[0]
            repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
                         (f, m))
            actions[f] = l[0]
            if m == 'dm':
                dms.append(f)
            continue
        # Work around 'dm' that can cause multiple actions for the same file
        for f in dms:
            dm, (f0, flags), msg = actions[f]
            assert dm == 'dm', dm
            if f0 in actions and actions[f0][0] == 'r':
                # We have one bid for removing a file and another for moving it.
                # These two could be merged as first move and then delete ...
                # but instead drop moving and just delete.
                del actions[f]
        repo.ui.note(_('end of auction\n\n'))

    _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)

    if wctx.rev() is None:
        fractions = _forgetremoved(wctx, mctx, branchmerge)
        actions.update(fractions)

    prunedactions = sparse.filterupdatesactions(repo, wctx, mctx, branchmerge,
                                                actions)

    return prunedactions, diverge, renamedelete
1087
1087
def _getcwd():
    """Return the current working directory, or None if it no longer exists.

    pycompat.getcwd() raises OSError(ENOENT) when the cwd was removed out
    from under us; callers use the None sentinel to detect that case. Any
    other OSError is re-raised unchanged.
    """
    try:
        return pycompat.getcwd()
    except OSError as err:
        if err.errno == errno.ENOENT:
            return None
        raise
1095
1095
def batchremove(repo, wctx, actions):
    """apply removes to the working directory

    ``actions`` is an iterable of (filename, args, message) remove actions.

    yields tuples (count, filename) for progress updates, batching roughly
    100 removals per yield so worker processes report progress cheaply.
    """
    verbose = repo.ui.verbose
    # Remember the cwd so we can warn below if the removals deleted it.
    cwd = _getcwd()
    i = 0
    for f, args, msg in actions:
        repo.ui.debug(" %s: %s -> r\n" % (f, msg))
        if verbose:
            repo.ui.note(_("removing %s\n") % f)
        wctx[f].audit()
        try:
            wctx[f].remove(ignoremissing=True)
        except OSError as inst:
            # Best effort: report the failure but keep removing the rest.
            repo.ui.warn(_("update failed to remove %s: %s!\n") %
                         (f, inst.strerror))
        if i == 100:
            yield i, f
            i = 0
        i += 1
    if i > 0:
        yield i, f

    if cwd and not _getcwd():
        # cwd was removed in the course of removing files; print a helpful
        # warning.
        repo.ui.warn(_("current directory was removed\n"
                       "(consider changing to repo root: %s)\n") % repo.root)

    # It's necessary to flush here in case we're inside a worker fork and will
    # quit after this function.
    wctx.flushall()
1130
1130
def batchget(repo, mctx, wctx, actions):
    """apply gets to the working directory

    mctx is the context to get from

    ``actions`` is an iterable of (filename, (flags, backup), message); when
    ``backup`` is true an existing working-copy file/symlink is first renamed
    to its .orig path before being overwritten.

    yields tuples (count, filename) for progress updates, batching roughly
    100 gets per yield.
    """
    verbose = repo.ui.verbose
    fctx = mctx.filectx
    ui = repo.ui
    i = 0
    with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
        for f, (flags, backup), msg in actions:
            repo.ui.debug(" %s: %s -> g\n" % (f, msg))
            if verbose:
                repo.ui.note(_("getting %s\n") % f)

            if backup:
                absf = repo.wjoin(f)
                orig = scmutil.origpath(ui, repo, absf)
                try:
                    if repo.wvfs.isfileorlink(f):
                        util.rename(absf, orig)
                except OSError as e:
                    # The file vanished between the check and the rename;
                    # anything else is a real error.
                    if e.errno != errno.ENOENT:
                        raise
            wctx[f].clearunknown()
            wctx[f].write(fctx(f).data(), flags, backgroundclose=True)
            if i == 100:
                yield i, f
                i = 0
            i += 1
    if i > 0:
        yield i, f

    # It's necessary to flush here in case we're inside a worker fork and will
    # quit after this function.
    wctx.flushall()
1169
1169
def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy

    ``actions`` maps action method (e.g. 'r', 'g', 'm', ...) to a list of
    (filename, args, message) tuples. The per-method loops below are
    order-sensitive: removes must precede gets, and merge-driver handling
    must precede premerge/merge.

    Return a tuple of counts (updated, merged, removed, unresolved) that
    describes how many files were affected by the update.
    """

    updated, merged, removed = 0, 0, 0
    ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
    moves = []
    # Sort each action list in place for deterministic processing order.
    for m, l in actions.items():
        l.sort()

    # 'cd' and 'dc' actions are treated like other merge conflicts
    mergeactions = sorted(actions['cd'])
    mergeactions.extend(sorted(actions['dc']))
    mergeactions.extend(actions['m'])
    for f, args, msg in mergeactions:
        f1, f2, fa, move, anc = args
        if f == '.hgsubstate': # merged internally
            continue
        if f1 is None:
            fcl = filemerge.absentfilectx(wctx, fa)
        else:
            repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
            fcl = wctx[f1]
        if f2 is None:
            fco = filemerge.absentfilectx(mctx, fa)
        else:
            fco = mctx[f2]
        actx = repo[anc]
        if fa in actx:
            fca = actx[fa]
        else:
            # TODO: move to absentfilectx
            fca = repo.filectx(f1, fileid=nullrev)
        ms.add(fcl, fco, fca, f)
        if f1 != f and move:
            moves.append(f1)

    _updating = _('updating')
    _files = _('files')
    progress = repo.ui.progress

    # remove renamed files after safely stored
    for f in moves:
        if wctx[f].lexists():
            repo.ui.debug("removing %s\n" % f)
            wctx[f].audit()
            wctx[f].remove()

    # 'k' (keep) actions are no-ops and excluded from the progress total.
    numupdates = sum(len(l) for m, l in actions.items() if m != 'k')

    if [a for a in actions['r'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # remove in parallel (must come first)
    z = 0
    prog = worker.worker(repo.ui, 0.001, batchremove, (repo, wctx),
                         actions['r'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    removed = len(actions['r'])

    # We should flush before forking into worker processes, since those workers
    # flush when they complete, and we don't want to duplicate work.
    wctx.flushall()

    # get in parallel
    prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx, wctx),
                         actions['g'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    updated = len(actions['g'])

    if [a for a in actions['g'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # forget (manifest only, just log it) (must come first)
    for f, args, msg in actions['f']:
        repo.ui.debug(" %s: %s -> f\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add (manifest only, just log it)
    for f, args, msg in actions['a']:
        repo.ui.debug(" %s: %s -> a\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add/mark as modified (manifest only, just log it)
    for f, args, msg in actions['am']:
        repo.ui.debug(" %s: %s -> am\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # keep (noop, just log it)
    for f, args, msg in actions['k']:
        repo.ui.debug(" %s: %s -> k\n" % (f, msg))
        # no progress

    # directory rename, move local
    for f, args, msg in actions['dm']:
        repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("moving %s to %s\n") % (f0, f))
        wctx[f].audit()
        wctx[f].write(wctx.filectx(f0).data(), flags)
        wctx[f0].remove()
        updated += 1

    # local directory rename, get
    for f, args, msg in actions['dg']:
        repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("getting %s to %s\n") % (f0, f))
        wctx[f].write(mctx.filectx(f0).data(), flags)
        updated += 1

    # exec
    for f, args, msg in actions['e']:
        repo.ui.debug(" %s: %s -> e\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        flags, = args
        wctx[f].audit()
        wctx[f].setflags('l' in flags, 'x' in flags)
        updated += 1

    # the ordering is important here -- ms.mergedriver will raise if the merge
    # driver has changed, and we want to be able to bypass it when overwrite is
    # True
    usemergedriver = not overwrite and mergeactions and ms.mergedriver

    if usemergedriver:
        ms.commit()
        proceed = driverpreprocess(repo, ms, wctx, labels=labels)
        # the driver might leave some files unresolved
        unresolvedf = set(ms.unresolved())
        if not proceed:
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            return updated, merged, removed, max(len(unresolvedf), 1)
        newactions = []
        for f, args, msg in mergeactions:
            if f in unresolvedf:
                newactions.append((f, args, msg))
        mergeactions = newactions

    # premerge
    tocomplete = []
    for f, args, msg in mergeactions:
        repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        if f == '.hgsubstate': # subrepo states need updating
            subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
                             overwrite, labels)
            continue
        wctx[f].audit()
        complete, r = ms.preresolve(f, wctx)
        if not complete:
            # preresolve left work to do; account for the extra merge step
            # in the progress total and finish it below.
            numupdates += 1
            tocomplete.append((f, args, msg))

    # merge
    for f, args, msg in tocomplete:
        repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        ms.resolve(f, wctx)

    ms.commit()

    unresolved = ms.unresolvedcount()

    if usemergedriver and not unresolved and ms.mdstate() != 's':
        if not driverconclude(repo, ms, wctx, labels=labels):
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            unresolved = max(unresolved, 1)

        ms.commit()

    msupdated, msmerged, msremoved = ms.counts()
    updated += msupdated
    merged += msmerged
    removed += msremoved

    extraactions = ms.actions()
    if extraactions:
        mfiles = set(a[0] for a in actions['m'])
        for k, acts in extraactions.iteritems():
            actions[k].extend(acts)
            # Remove these files from actions['m'] as well. This is important
            # because in recordupdates, files in actions['m'] are processed
            # after files in other actions, and the merge driver might add
            # files to those actions via extraactions above. This can lead to a
            # file being recorded twice, with poor results. This is especially
            # problematic for actions['r'] (currently only possible with the
            # merge driver in the initial merge process; interrupted merges
            # don't go through this flow).
            #
            # The real fix here is to have indexes by both file and action so
            # that when the action for a file is changed it is automatically
            # reflected in the other action lists. But that involves a more
            # complex data structure, so this will do for now.
            #
            # We don't need to do the same operation for 'dc' and 'cd' because
            # those lists aren't consulted again.
            mfiles.difference_update(a[0] for a in acts)

        actions['m'] = [a for a in actions['m'] if a[0] in mfiles]

    progress(_updating, None, total=numupdates, unit=_files)

    return updated, merged, removed, unresolved
1396
1396
def recordupdates(repo, actions, branchmerge):
    """Record merge actions in the dirstate.

    ``actions`` maps single/two-letter action codes to lists of
    ``(file, args, message)`` tuples; ``branchmerge`` tells whether we
    merged between branches (two parents) or just updated the working
    directory (one parent), which changes how each file is recorded.
    """
    ds = repo.dirstate
    get = actions.get

    # removals must be recorded before anything else
    for fname, _args, _msg in get('r', []):
        if branchmerge:
            ds.remove(fname)
        else:
            ds.drop(fname)

    # forgets also must come first
    for fname, _args, _msg in get('f', []):
        ds.drop(fname)

    # files re-added on our side
    for fname, _args, _msg in get('a', []):
        ds.add(fname)

    # files re-added or marked as modified
    for fname, _args, _msg in get('am', []):
        if branchmerge:
            ds.normallookup(fname)
        else:
            ds.add(fname)

    # executable-bit changes
    for fname, _args, _msg in get('e', []):
        ds.normallookup(fname)

    # kept files need no dirstate change
    for fname, _args, _msg in get('k', []):
        pass

    # files taken from the other side
    for fname, _args, _msg in get('g', []):
        if branchmerge:
            ds.otherparent(fname)
        else:
            ds.normal(fname)

    # three-way merged files
    for fname, margs, _msg in get('m', []):
        src1, src2, ancfile, moved, anc = margs
        if branchmerge:
            # We've done a branch merge: mark the file as merged so
            # that the merger is properly recorded later.
            ds.merge(fname)
            if src1 != src2: # copy/rename
                if moved:
                    ds.remove(src1)
                if src1 != fname:
                    ds.copy(src1, fname)
                else:
                    ds.copy(src2, fname)
        else:
            # We've update-merged a locally modified file: set the
            # dirstate to emulate a normal checkout of that file some
            # time in the past, so the merge appears as a plain local
            # modification.
            if src2 == fname: # file not locally copied/moved
                ds.normallookup(fname)
            if moved:
                ds.drop(src1)

    # directory rename: move the local file to the renamed location
    for fname, dmargs, _msg in get('dm', []):
        src, _flag = dmargs
        if branchmerge:
            ds.add(fname)
            ds.remove(src)
            ds.copy(src, fname)
        else:
            ds.normal(fname)
            ds.drop(src)

    # directory rename: fetch the remote file under its renamed path
    for fname, dgargs, _msg in get('dg', []):
        src, _flag = dgargs
        if branchmerge:
            ds.add(fname)
            ds.copy(src, fname)
        else:
            ds.normal(fname)
1480
def update(repo, node, branchmerge, force, ancestor=None,
           mergeancestor=False, labels=None, matcher=None, mergeforce=False,
           updatecheck=None, wc=None):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    matcher = a matcher to filter file lists (dirstate not updated)
    mergeancestor = whether it is merging with an ancestor. If true,
      we should accept the incoming changes for any prompts that occur.
      If false, merging with an ancestor (fast-forward) is only allowed
      between different named branches. This flag is used by rebase extension
      as a temporary fix and should be avoided in general.
    labels = labels to use for base, local and other
    mergeforce = whether the merge was run with 'merge --force' (deprecated): if
      this is True, then 'force' should be True as well.

    The table below shows all the behaviors of the update command
    given the -c and -C or no options, whether the working directory
    is dirty, whether a revision is specified, and the relationship of
    the parent rev to the target rev (linear or not). Match from top first. The
    -n option doesn't exist on the command line, but represents the
    experimental.updatecheck=noconflict option.

    This logic is tested by test-update-branches.t.

    -c  -C  -n  -m  dirty  rev  linear  |  result
     y   y   *   *    *     *     *     |    (1)
     y   *   y   *    *     *     *     |    (1)
     y   *   *   y    *     *     *     |    (1)
     *   y   y   *    *     *     *     |    (1)
     *   y   *   y    *     *     *     |    (1)
     *   *   y   y    *     *     *     |    (1)
     *   *   *   *    *     n     n     |     x
     *   *   *   *    n     *     *     |    ok
     n   n   n   n    y     *     y     |   merge
     n   n   n   n    y     y     n     |    (2)
     n   n   n   y    y     *     *     |   merge
     n   n   y   n    y     *     *     |   merge if no conflict
     n   y   n   n    y     *     *     |   discard
     y   n   n   n    y     *     *     |    (3)

    x = can't happen
    * = don't-care
    1 = incompatible options (checked in commands.py)
    2 = abort: uncommitted changes (commit or update --clean to discard changes)
    3 = abort: uncommitted changes (checked in commands.py)

    The merge is performed inside ``wc``, a workingctx-like objects. It defaults
    to repo[None] if None is passed.

    Return the same tuple as applyupdates().
    """
    # Avoid cycle.
    from . import sparse

    # This function used to find the default destination if node was None, but
    # that's now in destutil.py.
    assert node is not None
    if not branchmerge and not force:
        # TODO: remove the default once all callers that pass branchmerge=False
        # and force=False pass a value for updatecheck. We may want to allow
        # updatecheck='abort' to better suppport some of these callers.
        if updatecheck is None:
            updatecheck = 'linear'
        assert updatecheck in ('none', 'linear', 'noconflict')
    # If we're doing a partial update, we need to skip updating
    # the dirstate, so make a note of any partial-ness to the
    # update here.
    if matcher is None or matcher.always():
        partial = False
    else:
        partial = True
    with repo.wlock():
        if wc is None:
            wc = repo[None]
        pl = wc.parents()
        p1 = pl[0]
        pas = [None]
        if ancestor is not None:
            pas = [repo[ancestor]]

        overwrite = force and not branchmerge

        p2 = repo[node]
        if pas[0] is None:
            # 'merge.preferancestor' is a registered config; the default
            # value '*' means "use all common ancestor heads" (bid merge).
            if repo.ui.configlist('merge', 'preferancestor') == ['*']:
                cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
                pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
            else:
                pas = [p1.ancestor(p2, warn=branchmerge)]

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)

        ### check phase
        if not overwrite:
            if len(pl) > 1:
                raise error.Abort(_("outstanding uncommitted merge"))
            ms = mergestate.read(repo)
            if list(ms.unresolved()):
                raise error.Abort(_("outstanding merge conflicts"))
        if branchmerge:
            if pas == [p2]:
                raise error.Abort(_("merging with a working directory ancestor"
                                    " has no effect"))
            elif pas == [p1]:
                if not mergeancestor and wc.branch() == p2.branch():
                    raise error.Abort(_("nothing to merge"),
                                      hint=_("use 'hg update' "
                                             "or check 'hg heads'"))
            if not force and (wc.files() or wc.deleted()):
                raise error.Abort(_("uncommitted changes"),
                                  hint=_("use 'hg status' to list changes"))
            for s in sorted(wc.substate):
                wc.sub(s).bailifchanged()

        elif not overwrite:
            if p1 == p2: # no-op update
                # call the hooks and exit early
                repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
                repo.hook('update', parent1=xp2, parent2='', error=0)
                return 0, 0, 0, 0

            if (updatecheck == 'linear' and
                    pas not in ([p1], [p2])): # nonlinear
                dirty = wc.dirty(missing=True)
                if dirty:
                    # Branching is a bit strange to ensure we do the minimal
                    # amount of call to obsutil.foreground.
                    foreground = obsutil.foreground(repo, [p1.node()])
                    # note: the <node> variable contains a random identifier
                    if repo[node].node() in foreground:
                        pass # allow updating to successors
                    else:
                        msg = _("uncommitted changes")
                        hint = _("commit or update --clean to discard changes")
                        raise error.UpdateAbort(msg, hint=hint)
                else:
                    # Allow jumping branches if clean and specific rev given
                    pass

        if overwrite:
            pas = [wc]
        elif not branchmerge:
            pas = [p1]

        # deprecated config: merge.followcopies
        followcopies = repo.ui.configbool('merge', 'followcopies')
        if overwrite:
            followcopies = False
        elif not pas[0]:
            followcopies = False
        if not branchmerge and not wc.dirty(missing=True):
            followcopies = False

        ### calculate phase
        actionbyfile, diverge, renamedelete = calculateupdates(
            repo, wc, p2, pas, branchmerge, force, mergeancestor,
            followcopies, matcher=matcher, mergeforce=mergeforce)

        if updatecheck == 'noconflict':
            # abort as soon as any action would touch a locally-changed file
            for f, (m, args, msg) in actionbyfile.iteritems():
                if m not in ('g', 'k', 'e', 'r'):
                    msg = _("conflicting changes")
                    hint = _("commit or update --clean to discard changes")
                    raise error.Abort(msg, hint=hint)

        # Prompt and create actions. Most of this is in the resolve phase
        # already, but we can't handle .hgsubstate in filemerge or
        # subrepo.submerge yet so we have to keep prompting for it.
        if '.hgsubstate' in actionbyfile:
            f = '.hgsubstate'
            m, args, msg = actionbyfile[f]
            prompts = filemerge.partextras(labels)
            prompts['f'] = f
            if m == 'cd':
                if repo.ui.promptchoice(
                    _("local%(l)s changed %(f)s which other%(o)s deleted\n"
                      "use (c)hanged version or (d)elete?"
                      "$$ &Changed $$ &Delete") % prompts, 0):
                    actionbyfile[f] = ('r', None, "prompt delete")
                elif f in p1:
                    actionbyfile[f] = ('am', None, "prompt keep")
                else:
                    actionbyfile[f] = ('a', None, "prompt keep")
            elif m == 'dc':
                f1, f2, fa, move, anc = args
                flags = p2[f2].flags()
                if repo.ui.promptchoice(
                    _("other%(o)s changed %(f)s which local%(l)s deleted\n"
                      "use (c)hanged version or leave (d)eleted?"
                      "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
                    actionbyfile[f] = ('g', (flags, False), "prompt recreating")
                else:
                    del actionbyfile[f]

        # Convert to dictionary-of-lists format
        actions = dict((m, []) for m in 'a am f g cd dc r dm dg m e k'.split())
        for f, (m, args, msg) in actionbyfile.iteritems():
            if m not in actions:
                actions[m] = []
            actions[m].append((f, args, msg))

        if not util.fscasesensitive(repo.path):
            # check collision between files only in p2 for clean update
            if (not branchmerge and
                (force or not wc.dirty(missing=True, branch=False))):
                _checkcollision(repo, p2.manifest(), None)
            else:
                _checkcollision(repo, wc.manifest(), actions)

        # divergent renames
        for f, fl in sorted(diverge.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was renamed "
                           "multiple times to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        # rename and delete
        for f, fl in sorted(renamedelete.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was deleted "
                           "and renamed to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        ### apply phase
        if not branchmerge: # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
        if not partial:
            repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
            # note that we're in the middle of an update
            repo.vfs.write('updatestate', p2.hex())

        stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
        wc.flushall()

        if not partial:
            with repo.dirstate.parentchange():
                repo.setparents(fp1, fp2)
                recordupdates(repo, actions, branchmerge)
                # update completed, clear state
                util.unlink(repo.vfs.join('updatestate'))

            if not branchmerge:
                repo.dirstate.setbranch(p2.branch())

    # If we're updating to a location, clean up any stale temporary includes
    # (ex: this happens during hg rebase --abort).
    if not branchmerge:
        sparse.prunetemporaryincludes(repo)

    if not partial:
        repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
    return stats
1737
1737
def graft(repo, ctx, pctx, labels, keepparent=False):
    """Do a graft-like merge.

    This is a merge where the merge ancestor is chosen such that one
    or more changesets are grafted onto the current changeset. In
    addition to the merge, this fixes up the dirstate to include only
    a single parent (if keepparent is False) and tries to duplicate any
    renames/copies appropriately.

    ctx - changeset to rebase
    pctx - merge base, usually ctx.p1()
    labels - merge labels eg ['local', 'graft']
    keepparent - keep second parent if any

    """
    # If we're grafting a descendant onto an ancestor, be sure to pass
    # mergeancestor=True to update. This does two things: 1) allows the merge if
    # the destination is the same as the parent of the ctx (so we can use graft
    # to copy commits), and 2) informs update that the incoming changes are
    # newer than the destination so it doesn't prompt about "remote changed foo
    # which local deleted".
    isdescendant = repo.changelog.isancestor(repo['.'].node(), ctx.node())

    stats = update(repo, ctx.node(), True, True, pctx.node(),
                   mergeancestor=isdescendant, labels=labels)

    # Pick the second parent to record: nullid unless the caller asked to
    # keep it and ctx really is a merge with pctx on one side.
    secondparent = nullid
    ctxparents = ctx.parents()
    if keepparent and len(ctxparents) == 2 and pctx in ctxparents:
        ctxparents.remove(pctx)
        secondparent = ctxparents[0].node()

    with repo.dirstate.parentchange():
        repo.setparents(repo['.'].node(), secondparent)
        repo.dirstate.write(repo.currenttransaction())
        # fix up dirstate for copies and renames
        copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
    return stats
General Comments 0
You need to be logged in to leave comments. Login now