##// END OF EJS Templates
merge with i18n
Matt Mackall -
r27573:ea389970 merge 3.6.3 stable
parent child Browse files
Show More
@@ -1,1344 +1,1344 b''
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import shutil
12 import shutil
13 import struct
13 import struct
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 bin,
17 bin,
18 hex,
18 hex,
19 nullid,
19 nullid,
20 nullrev,
20 nullrev,
21 )
21 )
22 from . import (
22 from . import (
23 copies,
23 copies,
24 destutil,
24 destutil,
25 error,
25 error,
26 filemerge,
26 filemerge,
27 obsolete,
27 obsolete,
28 subrepo,
28 subrepo,
29 util,
29 util,
30 worker,
30 worker,
31 )
31 )
32
32
33 _pack = struct.pack
33 _pack = struct.pack
34 _unpack = struct.unpack
34 _unpack = struct.unpack
35
35
36 def _droponode(data):
36 def _droponode(data):
37 # used for compatibility for v1
37 # used for compatibility for v1
38 bits = data.split('\0')
38 bits = data.split('\0')
39 bits = bits[:-2] + bits[-1:]
39 bits = bits[:-2] + bits[-1:]
40 return '\0'.join(bits)
40 return '\0'.join(bits)
41
41
class mergestate(object):
    '''track 3-way merge state of individual files

    it is stored on disk when needed. Two file are used, one with an old
    format, one with a new format. Both contains similar data, but the new
    format can store new kinds of field.

    Current new format is a list of arbitrary record of the form:

    [type][length][content]

    Type is a single character, length is a 4 bytes integer, content is an
    arbitrary suites of bytes of length `length`.

    Type should be a letter. Capital letter are mandatory record, Mercurial
    should abort if they are unknown. lower case record can be safely ignored.

    Currently known record:

    L: the node of the "local" part of the merge (hexified version)
    O: the node of the "other" part of the merge (hexified version)
    F: a file to be merged entry
    D: a file that the external merge driver will merge internally
       (experimental)
    m: the external merge driver defined for this merge plus its run state
       (experimental)

    Merge driver run states (experimental):
    u: driver-resolved files unmarked -- needs to be run next time we're about
       to resolve or commit
    m: driver-resolved files marked -- only needs to be run before commit
    s: success/skipped -- does not need to be run any more
    '''
    # paths, relative to .hg, of the old- and new-format state files
    statepathv1 = 'merge/state'
    statepathv2 = 'merge/state2'

    def __init__(self, repo):
        """Load any existing merge state for `repo` from disk."""
        self._repo = repo
        self._dirty = False
        # eager read keeps _state/_local/_other always defined
        self._read()

    def reset(self, node=None, other=None):
        """Forget the current merge state and optionally start a new one.

        `node`/`other` are the (binary) nodes of the local and other sides
        of a new merge, when known. Saved pre-merge file copies under
        .hg/merge are deleted.
        """
        self._state = {}
        self._local = None
        self._other = None
        # drop the cached `otherctx` property so it is recomputed lazily
        if 'otherctx' in vars(self):
            del self.otherctx
        if node:
            self._local = node
            self._other = other
        self._readmergedriver = None
        if self.mergedriver:
            self._mdstate = 's'
        else:
            self._mdstate = 'u'
        # best-effort removal (second arg is ignore_errors) of saved copies
        shutil.rmtree(self._repo.join('merge'), True)
        self._dirty = False

    def _read(self):
        """Analyse each record content to restore a serialized state from disk

        This function process "record" entry produced by the de-serialization
        of on disk file.
        """
        self._state = {}
        self._local = None
        self._other = None
        if 'otherctx' in vars(self):
            del self.otherctx
        self._readmergedriver = None
        self._mdstate = 's'
        records = self._readrecords()
        for rtype, record in records:
            if rtype == 'L':
                self._local = bin(record)
            elif rtype == 'O':
                self._other = bin(record)
            elif rtype == 'm':
                # merge driver record: 'drivername\0state'
                bits = record.split('\0', 1)
                mdstate = bits[1]
                if len(mdstate) != 1 or mdstate not in 'ums':
                    # the merge driver should be idempotent, so just rerun it
                    mdstate = 'u'

                self._readmergedriver = bits[0]
                self._mdstate = mdstate
            elif rtype in 'FD':
                # file record: 'path\0state\0hash\0...' (see add()), keyed
                # by path
                bits = record.split('\0')
                self._state[bits[0]] = bits[1:]
            elif not rtype.islower():
                # unknown capital-letter records are mandatory: abort
                raise error.Abort(_('unsupported merge state record: %s')
                                  % rtype)
        self._dirty = False

    def _readrecords(self):
        """Read merge state from disk and return a list of record (TYPE, data)

        We read data from both v1 and v2 files and decide which one to use.

        V1 has been used by version prior to 2.9.1 and contains less data than
        v2. We read both versions and check if no data in v2 contradicts
        v1. If there is not contradiction we can safely assume that both v1
        and v2 were written at the same time and use the extract data in v2. If
        there is contradiction we ignore v2 content as we assume an old version
        of Mercurial has overwritten the mergestate file and left an old v2
        file around.

        returns list of record [(TYPE, data), ...]"""
        v1records = self._readrecordsv1()
        v2records = self._readrecordsv2()
        if self._v1v2match(v1records, v2records):
            return v2records
        else:
            # v1 file is newer than v2 file, use it
            # we have to infer the "other" changeset of the merge
            # we cannot do better than that with v1 of the format
            mctx = self._repo[None].parents()[-1]
            v1records.append(('O', mctx.hex()))
            # add place holder "other" file node information
            # nobody is using it yet so we do no need to fetch the data
            # if mctx was wrong `mctx[bits[-2]]` may fails.
            for idx, r in enumerate(v1records):
                if r[0] == 'F':
                    bits = r[1].split('\0')
                    bits.insert(-2, '')
                    v1records[idx] = (r[0], '\0'.join(bits))
            return v1records

    def _v1v2match(self, v1records, v2records):
        """Return True if every v1 record is reflected in the v2 records."""
        oldv2 = set() # old format version of v2 record
        for rec in v2records:
            if rec[0] == 'L':
                oldv2.add(rec)
            elif rec[0] == 'F':
                # drop the onode data (not contained in v1)
                oldv2.add(('F', _droponode(rec[1])))
        for rec in v1records:
            if rec not in oldv2:
                return False
        else:
            # for/else: loop completed without finding a contradiction
            return True

    def _readrecordsv1(self):
        """read on disk merge state for version 1 file

        returns list of record [(TYPE, data), ...]

        Note: the "F" data from this file are one entry short
        (no "other file node" entry)
        """
        records = []
        try:
            f = self._repo.vfs(self.statepathv1)
            for i, l in enumerate(f):
                # first line is the local node, the rest are file entries;
                # l[:-1] strips the trailing newline
                if i == 0:
                    records.append(('L', l[:-1]))
                else:
                    records.append(('F', l[:-1]))
            f.close()
        except IOError as err:
            # a missing state file just means "no merge in progress"
            if err.errno != errno.ENOENT:
                raise
        return records

    def _readrecordsv2(self):
        """read on disk merge state for version 2 file

        returns list of record [(TYPE, data), ...]
        """
        records = []
        try:
            f = self._repo.vfs(self.statepathv2)
            data = f.read()
            off = 0
            end = len(data)
            while off < end:
                # each record is [type: 1 byte][length: big-endian uint32]
                # [content: `length` bytes]
                rtype = data[off]
                off += 1
                length = _unpack('>I', data[off:(off + 4)])[0]
                off += 4
                record = data[off:(off + length)]
                off += length
                records.append((rtype, record))
            f.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return records

    @util.propertycache
    def mergedriver(self):
        # protect against the following:
        # - A configures a malicious merge driver in their hgrc, then
        #   pauses the merge
        # - A edits their hgrc to remove references to the merge driver
        # - A gives a copy of their entire repo, including .hg, to B
        # - B inspects .hgrc and finds it to be clean
        # - B then continues the merge and the malicious merge driver
        #   gets invoked
        configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
        if (self._readmergedriver is not None
            and self._readmergedriver != configmergedriver):
            raise error.ConfigError(
                _("merge driver changed since merge started"),
                hint=_("revert merge driver change or abort merge"))

        return configmergedriver

    @util.propertycache
    def otherctx(self):
        # cached; invalidated with `del self.otherctx` in reset()/_read()
        return self._repo[self._other]

    def active(self):
        """Whether mergestate is active.

        Returns True if there appears to be mergestate. This is a rough proxy
        for "is a merge in progress."
        """
        # Check local variables before looking at filesystem for performance
        # reasons.
        return bool(self._local) or bool(self._state) or \
            self._repo.vfs.exists(self.statepathv1) or \
            self._repo.vfs.exists(self.statepathv2)

    def commit(self):
        """Write current state on disk (if necessary)"""
        if self._dirty:
            records = []
            records.append(('L', hex(self._local)))
            records.append(('O', hex(self._other)))
            if self.mergedriver:
                records.append(('m', '\0'.join([
                    self.mergedriver, self._mdstate])))
            for d, v in self._state.iteritems():
                # driver-resolved files ('d') get their own record type
                if v[0] == 'd':
                    records.append(('D', '\0'.join([d] + v)))
                else:
                    records.append(('F', '\0'.join([d] + v)))
            self._writerecords(records)
            self._dirty = False

    def _writerecords(self, records):
        """Write current state on disk (both v1 and v2)"""
        self._writerecordsv1(records)
        self._writerecordsv2(records)

    def _writerecordsv1(self, records):
        """Write current state on disk in a version 1 file"""
        f = self._repo.vfs(self.statepathv1, 'w')
        irecords = iter(records)
        lrecords = irecords.next()
        # the 'L' (local node) record is expected to come first
        assert lrecords[0] == 'L'
        f.write(hex(self._local) + '\n')
        for rtype, data in irecords:
            if rtype == 'F':
                f.write('%s\n' % _droponode(data))
        f.close()

    def _writerecordsv2(self, records):
        """Write current state on disk in a version 2 file"""
        f = self._repo.vfs(self.statepathv2, 'w')
        for key, data in records:
            assert len(key) == 1
            # [type: 1 byte][length: big-endian uint32][content]
            format = '>sI%is' % len(data)
            f.write(_pack(format, key, len(data), data))
        f.close()

    def add(self, fcl, fco, fca, fd):
        """add a new (potentially?) conflicting file the merge state
        fcl: file context for local,
        fco: file context for remote,
        fca: file context for ancestors,
        fd: file path of the resulting merge.

        note: also write the local version to the `.hg/merge` directory.
        """
        hash = util.sha1(fcl.path()).hexdigest()
        self._repo.vfs.write('merge/' + hash, fcl.data())
        # state entry layout must match what _read() expects for 'F'/'D'
        # records: [state, local hash, local path, ancestor path,
        # ancestor node, other path, other node, local flags]
        self._state[fd] = ['u', hash, fcl.path(),
                           fca.path(), hex(fca.filenode()),
                           fco.path(), hex(fco.filenode()),
                           fcl.flags()]
        self._dirty = True

    def __contains__(self, dfile):
        return dfile in self._state

    def __getitem__(self, dfile):
        """Return the state character (e.g. 'u', 'r', 'd') for `dfile`."""
        return self._state[dfile][0]

    def __iter__(self):
        """Iterate over tracked file paths in sorted order."""
        return iter(sorted(self._state))

    def files(self):
        """Return the list of tracked file paths."""
        return self._state.keys()

    def mark(self, dfile, state):
        """Set the state character for `dfile` and mark the state dirty."""
        self._state[dfile][0] = state
        self._dirty = True

    def mdstate(self):
        """Return the merge driver run state ('u', 'm' or 's')."""
        return self._mdstate

    def unresolved(self):
        """Obtain the paths of unresolved files."""

        for f, entry in self._state.items():
            if entry[0] == 'u':
                yield f

    def driverresolved(self):
        """Obtain the paths of driver-resolved files."""

        for f, entry in self._state.items():
            if entry[0] == 'd':
                yield f

    def _resolve(self, preresolve, dfile, wctx, labels=None):
        """rerun merge process for file path `dfile`

        Returns a (complete, r) pair, where r is the file merge's return
        value (None meaning "no real conflict"; 0 meaning success -- see
        the handling below).
        """
        if self[dfile] in 'rd':
            # already resolved or driver-resolved: nothing to do
            return True, 0
        stateentry = self._state[dfile]
        state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
        octx = self._repo[self._other]
        fcd = wctx[dfile]
        fco = octx[ofile]
        fca = self._repo.filectx(afile, fileid=anode)
        # "premerge" x flags
        flo = fco.flags()
        fla = fca.flags()
        if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
            if fca.node() == nullid:
                if preresolve:
                    self._repo.ui.warn(
                        _('warning: cannot merge flags for %s\n') % afile)
            elif flags == fla:
                flags = flo
        if preresolve:
            # restore local
            f = self._repo.vfs('merge/' + hash)
            self._repo.wwrite(dfile, f.read(), flags)
            f.close()
            complete, r = filemerge.premerge(self._repo, self._local, lfile,
                                             fcd, fco, fca, labels=labels)
        else:
            complete, r = filemerge.filemerge(self._repo, self._local, lfile,
                                              fcd, fco, fca, labels=labels)
        if r is None:
            # no real conflict
            del self._state[dfile]
            self._dirty = True
        elif not r:
            self.mark(dfile, 'r')
        return complete, r

    def preresolve(self, dfile, wctx, labels=None):
        """Run the premerge step for `dfile` (see _resolve)."""
        return self._resolve(True, dfile, wctx, labels=labels)

    def resolve(self, dfile, wctx, labels=None):
        """rerun merge process for file path `dfile`"""
        return self._resolve(False, dfile, wctx, labels=labels)[1]
403
403
404 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
404 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
405 if f2 is None:
405 if f2 is None:
406 f2 = f
406 f2 = f
407 return (os.path.isfile(repo.wjoin(f))
407 return (repo.wvfs.isfileorlink(f)
408 and repo.wvfs.audit.check(f)
408 and repo.wvfs.audit.check(f)
409 and repo.dirstate.normalize(f) not in repo.dirstate
409 and repo.dirstate.normalize(f) not in repo.dirstate
410 and mctx[f2].cmp(wctx[f]))
410 and mctx[f2].cmp(wctx[f]))
411
411
def _checkunknownfiles(repo, wctx, mctx, force, actions):
    """Adjust or abort merge actions that collide with unknown files.

    Any 'c'/'dc'/'dg' action whose target would clobber a differing
    untracked file makes the whole update abort (unless `force`). The
    surviving 'c' and 'cm' actions are rewritten in place to concrete
    'g' (get) or 'm' (merge) actions.
    """
    conflicting = []
    if not force:
        for fname, (act, args, msg) in actions.iteritems():
            if act in ('c', 'dc') and _checkunknownfile(repo, wctx, mctx,
                                                        fname):
                conflicting.append(fname)
            elif act == 'dg' and _checkunknownfile(repo, wctx, mctx,
                                                   fname, args[0]):
                conflicting.append(fname)

    for fname in sorted(conflicting):
        repo.ui.warn(_("%s: untracked file differs\n") % fname)
    if conflicting:
        raise error.Abort(_("untracked files in working directory differ "
                            "from files in requested revision"))

    # rewriting an existing key during iteration is safe (no resize)
    for fname, (act, args, msg) in actions.iteritems():
        if act == 'c':
            actions[fname] = ('g', args, msg)
        elif act == 'cm':
            fl2, anc = args
            if _checkunknownfile(repo, wctx, mctx, fname):
                actions[fname] = ('m', (fname, fname, None, False, anc),
                                  "remote differs from untracked local")
            else:
                actions[fname] = ('g', (fl2,), "remote created")
445
445
446 def _forgetremoved(wctx, mctx, branchmerge):
446 def _forgetremoved(wctx, mctx, branchmerge):
447 """
447 """
448 Forget removed files
448 Forget removed files
449
449
450 If we're jumping between revisions (as opposed to merging), and if
450 If we're jumping between revisions (as opposed to merging), and if
451 neither the working directory nor the target rev has the file,
451 neither the working directory nor the target rev has the file,
452 then we need to remove it from the dirstate, to prevent the
452 then we need to remove it from the dirstate, to prevent the
453 dirstate from listing the file when it is no longer in the
453 dirstate from listing the file when it is no longer in the
454 manifest.
454 manifest.
455
455
456 If we're merging, and the other revision has removed a file
456 If we're merging, and the other revision has removed a file
457 that is not present in the working directory, we need to mark it
457 that is not present in the working directory, we need to mark it
458 as removed.
458 as removed.
459 """
459 """
460
460
461 actions = {}
461 actions = {}
462 m = 'f'
462 m = 'f'
463 if branchmerge:
463 if branchmerge:
464 m = 'r'
464 m = 'r'
465 for f in wctx.deleted():
465 for f in wctx.deleted():
466 if f not in mctx:
466 if f not in mctx:
467 actions[f] = m, None, "forget deleted"
467 actions[f] = m, None, "forget deleted"
468
468
469 if not branchmerge:
469 if not branchmerge:
470 for f in wctx.removed():
470 for f in wctx.removed():
471 if f not in mctx:
471 if f not in mctx:
472 actions[f] = 'f', None, "forget removed"
472 actions[f] = 'f', None, "forget removed"
473
473
474 return actions
474 return actions
475
475
def _checkcollision(repo, wmf, actions):
    """Abort if the merge result would contain case-folding collisions.

    `wmf` is the working-directory manifest; `actions`, when given, is the
    merge action dict used to build a provisional post-merge manifest.
    Raises error.Abort on a file/file or file/directory case collision.
    """
    # build provisional merged manifest up
    provisional = set(wmf)

    if actions:
        # k, dr, e and rd are no-op
        for act in ('a', 'f', 'g', 'cd', 'dc'):
            for fname, args, msg in actions[act]:
                provisional.add(fname)
        for fname, args, msg in actions['r']:
            provisional.discard(fname)
        for fname, args, msg in actions['dm']:
            # directory move: source disappears, destination appears
            src, flags = args
            provisional.discard(src)
            provisional.add(fname)
        for fname, args, msg in actions['dg']:
            provisional.add(fname)
        for fname, args, msg in actions['m']:
            f1, f2, fa, move, anc = args
            if move:
                provisional.discard(f1)
            provisional.add(fname)

    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for fname in sorted(provisional):
        fold = util.normcase(fname)
        if fold in foldmap:
            raise error.Abort(_("case-folding collision between %s and %s")
                              % (fname, foldmap[fold]))
        foldmap[fold] = fname

    # check case-folding of directories
    foldprefix = unfoldprefix = lastfull = ''
    for fold, fname in sorted(foldmap.items()):
        if fold.startswith(foldprefix) and not fname.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            raise error.Abort(_("case-folding collision between "
                                "%s and directory of %s") % (lastfull, fname))
        foldprefix = fold + '/'
        unfoldprefix = fname + '/'
        lastfull = fname
518
518
def driverpreprocess(repo, ms, wctx, labels=None):
    """Run the preprocess step of the merge driver, if any.

    No driver logic exists here yet -- this is an extension point.
    Returning True signals that processing should continue.
    """
    return True
524
524
def driverconclude(repo, ms, wctx, labels=None):
    """Run the conclude step of the merge driver, if any.

    No driver logic exists here yet -- this is an extension point.
    Returning True signals that processing should continue.
    """
    return True
530
530
def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
                  acceptremote, followcopies):
    """
    Merge p1 and p2 with ancestor pa and generate merge action list

    branchmerge and force are as passed in to update
    partial = function to filter file lists
    acceptremote = accept the incoming changes without prompting
    followcopies = detect renames/copies and merge accordingly

    Returns a tuple (actions, diverge, renamedelete) where actions maps
    filename -> (action type, args, human-readable reason).
    """

    copy, movewithdir, diverge, renamedelete = {}, {}, {}, {}

    # manifests fetched in order are going to be faster, so prime the caches
    [x.manifest() for x in
     sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]

    if followcopies:
        ret = copies.mergecopies(repo, wctx, p2, pa)
        copy, movewithdir, diverge, renamedelete = ret

    repo.ui.note(_("resolving manifests\n"))
    repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
                  % (bool(branchmerge), bool(force), bool(partial)))
    repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))

    m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
    copied = set(copy.values())
    copied.update(movewithdir.values())

    if '.hgsubstate' in m1:
        # check whether sub state is modified; a dirty subrepo makes
        # .hgsubstate count as locally modified so it gets merged
        for s in sorted(wctx.substate):
            if wctx.sub(s).dirty():
                m1['.hgsubstate'] += '+'
                break

    # Compare manifests
    diff = m1.diff(m2)

    actions = {}
    for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
        if partial and not partial(f):
            continue
        if n1 and n2: # file exists on both local and remote side
            if f not in ma:
                fa = copy.get(f, None)
                if fa is not None:
                    actions[f] = ('m', (f, f, fa, False, pa.node()),
                                  "both renamed from " + fa)
                else:
                    actions[f] = ('m', (f, f, None, False, pa.node()),
                                  "both created")
            else:
                a = ma[f]
                fla = ma.flags(f)
                # nol: no symlink flag on any of the three versions
                nol = 'l' not in fl1 + fl2 + fla
                if n2 == a and fl2 == fla:
                    actions[f] = ('k' , (), "remote unchanged")
                elif n1 == a and fl1 == fla: # local unchanged - use remote
                    if n1 == n2: # optimization: keep local content
                        actions[f] = ('e', (fl2,), "update permissions")
                    else:
                        actions[f] = ('g', (fl2,), "remote is newer")
                elif nol and n2 == a: # remote only changed 'x'
                    actions[f] = ('e', (fl2,), "update permissions")
                elif nol and n1 == a: # local only changed 'x'
                    actions[f] = ('g', (fl1,), "remote is newer")
                else: # both changed something
                    actions[f] = ('m', (f, f, f, False, pa.node()),
                                  "versions differ")
        elif n1: # file exists only on local side
            if f in copied:
                pass # we'll deal with it on m2 side
            elif f in movewithdir: # directory rename, move local
                f2 = movewithdir[f]
                if f2 in m2:
                    actions[f2] = ('m', (f, f2, None, True, pa.node()),
                                   "remote directory rename, both created")
                else:
                    actions[f2] = ('dm', (f, fl1),
                                   "remote directory rename - move from " + f)
            elif f in copy:
                f2 = copy[f]
                actions[f] = ('m', (f, f2, f2, False, pa.node()),
                              "local copied/moved from " + f2)
            elif f in ma: # clean, a different, no remote
                if n1 != ma[f]:
                    if acceptremote:
                        actions[f] = ('r', None, "remote delete")
                    else:
                        actions[f] = ('cd', None, "prompt changed/deleted")
                elif n1[20:] == 'a':
                    # This extra 'a' is added by working copy manifest to mark
                    # the file as locally added. We should forget it instead of
                    # deleting it.
                    actions[f] = ('f', None, "remote deleted")
                else:
                    actions[f] = ('r', None, "other deleted")
        elif n2: # file exists only on remote side
            if f in copied:
                pass # we'll deal with it on m1 side
            elif f in movewithdir:
                f2 = movewithdir[f]
                if f2 in m1:
                    actions[f2] = ('m', (f2, f, None, False, pa.node()),
                                   "local directory rename, both created")
                else:
                    actions[f2] = ('dg', (f, fl2),
                                   "local directory rename - get from " + f)
            elif f in copy:
                f2 = copy[f]
                if f2 in m2:
                    actions[f] = ('m', (f2, f, f2, False, pa.node()),
                                  "remote copied from " + f2)
                else:
                    actions[f] = ('m', (f2, f, f2, True, pa.node()),
                                  "remote moved from " + f2)
            elif f not in ma:
                # local unknown, remote created: the logic is described by the
                # following table:
                #
                # force  branchmerge  different  |  action
                #   n         *           *      |   create
                #   y         n           *      |   create
                #   y         y           n      |   create
                #   y         y           y      |   merge
                #
                # Checking whether the files are different is expensive, so we
                # don't do that when we can avoid it.
                if not force:
                    actions[f] = ('c', (fl2,), "remote created")
                elif not branchmerge:
                    actions[f] = ('c', (fl2,), "remote created")
                else:
                    actions[f] = ('cm', (fl2, pa.node()),
                                  "remote created, get or merge")
            elif n2 != ma[f]:
                if acceptremote:
                    actions[f] = ('c', (fl2,), "remote recreating")
                else:
                    actions[f] = ('dc', (fl2,), "prompt deleted/changed")

    return actions, diverge, renamedelete
674
674
675 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
675 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
676 """Resolves false conflicts where the nodeid changed but the content
676 """Resolves false conflicts where the nodeid changed but the content
677 remained the same."""
677 remained the same."""
678
678
679 for f, (m, args, msg) in actions.items():
679 for f, (m, args, msg) in actions.items():
680 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
680 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
681 # local did change but ended up with same content
681 # local did change but ended up with same content
682 actions[f] = 'r', None, "prompt same"
682 actions[f] = 'r', None, "prompt same"
683 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
683 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
684 # remote did change but ended up with same content
684 # remote did change but ended up with same content
685 del actions[f] # don't get = keep local deleted
685 del actions[f] # don't get = keep local deleted
686
686
def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force, partial,
                     acceptremote, followcopies):
    """Calculate the actions needed to merge mctx into wctx using ancestors

    With a single ancestor this is a plain manifestmerge.  With several
    ancestors (merge.preferancestor=*) each ancestor produces a "bid" per
    file and an auction picks the best action for each file.

    Returns (actions, diverge, renamedelete) as manifestmerge does.
    """

    if len(ancestors) == 1: # default
        actions, diverge, renamedelete = manifestmerge(
            repo, wctx, mctx, ancestors[0], branchmerge, force, partial,
            acceptremote, followcopies)
        _checkunknownfiles(repo, wctx, mctx, force, actions)

    else: # only when merge.preferancestor=* - the default
        repo.ui.note(
            _("note: merging %s and %s using bids from ancestors %s\n") %
            (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))

        # Call for bids
        fbids = {} # mapping filename to bids (action method to list of actions)
        diverge, renamedelete = None, None
        for ancestor in ancestors:
            repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
            actions, diverge1, renamedelete1 = manifestmerge(
                repo, wctx, mctx, ancestor, branchmerge, force, partial,
                acceptremote, followcopies)
            _checkunknownfiles(repo, wctx, mctx, force, actions)

            # Track the shortest set of warning on the theory that bid
            # merge will correctly incorporate more information
            if diverge is None or len(diverge1) < len(diverge):
                diverge = diverge1
            # NOTE(review): this condition is inverted relative to the
            # 'diverge' one above (it keeps the longer renamedelete set),
            # which contradicts the comment -- confirm against upstream
            # before changing; preserved as-is.
            if renamedelete is None or len(renamedelete) < len(renamedelete1):
                renamedelete = renamedelete1

            for f, a in sorted(actions.iteritems()):
                m, args, msg = a
                repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
                if f in fbids:
                    d = fbids[f]
                    if m in d:
                        d[m].append(a)
                    else:
                        d[m] = [a]
                else:
                    fbids[f] = {m: [a]}

        # Pick the best bid for each file
        repo.ui.note(_('\nauction for merging merge bids\n'))
        actions = {}
        for f, bids in sorted(fbids.items()):
            # bids is a mapping from action method to list of actions
            # Consensus?
            if len(bids) == 1: # all bids are the same kind of method
                m, l = bids.items()[0]
                if all(a == l[0] for a in l[1:]): # len(bids) is > 1
                    repo.ui.note(" %s: consensus for %s\n" % (f, m))
                    actions[f] = l[0]
                    continue
            # If keep is an option, just do it.
            if 'k' in bids:
                repo.ui.note(" %s: picking 'keep' action\n" % f)
                actions[f] = bids['k'][0]
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if 'g' in bids:
                ga0 = bids['g'][0]
                if all(a == ga0 for a in bids['g'][1:]):
                    repo.ui.note(" %s: picking 'get' action\n" % f)
                    actions[f] = ga0
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note(' %s -> %s\n' % (msg, m))
            # Pick random action. TODO: Instead, prompt user when resolving
            m, l = bids.items()[0]
            repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
                         (f, m))
            actions[f] = l[0]
            continue
        repo.ui.note(_('end of auction\n\n'))

    _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)

    if wctx.rev() is None:
        # working directory update: forget files removed on the other side
        fractions = _forgetremoved(wctx, mctx, branchmerge)
        actions.update(fractions)

    return actions, diverge, renamedelete
776
776
def batchremove(repo, actions):
    """apply removes to the working directory

    yields tuples (count, last filename) for progress updates, roughly
    every 100 files so the worker can report without excessive chatter
    """
    verbose = repo.ui.verbose
    # hoist hot attribute lookups out of the loop
    unlink = util.unlinkpath
    wjoin = repo.wjoin
    audit = repo.wvfs.audit
    i = 0
    for f, args, msg in actions:
        repo.ui.debug(" %s: %s -> r\n" % (f, msg))
        if verbose:
            repo.ui.note(_("removing %s\n") % f)
        audit(f)
        try:
            unlink(wjoin(f), ignoremissing=True)
        except OSError as inst:
            # best effort: warn but keep removing the rest
            repo.ui.warn(_("update failed to remove %s: %s!\n") %
                         (f, inst.strerror))
        if i == 100:
            yield i, f
            i = 0
        i += 1
    if i > 0:
        yield i, f
803
803
def batchget(repo, mctx, actions):
    """apply gets to the working directory

    mctx is the context to get from

    yields tuples (count, last filename) for progress updates, roughly
    every 100 files so the worker can report without excessive chatter
    """
    verbose = repo.ui.verbose
    # hoist hot attribute lookups out of the loop
    fctx = mctx.filectx
    wwrite = repo.wwrite
    i = 0
    for f, args, msg in actions:
        repo.ui.debug(" %s: %s -> g\n" % (f, msg))
        if verbose:
            repo.ui.note(_("getting %s\n") % f)
        # args[0] carries the file flags ('x', 'l', ...)
        wwrite(f, fctx(f).data(), args[0])
        if i == 100:
            yield i, f
            i = 0
        i += 1
    if i > 0:
        yield i, f
826
826
def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy

    Return a tuple of counts (updated, merged, removed, unresolved) that
    describes how many files were affected by the update.
    """

    updated, merged, removed, unresolved = 0, 0, 0, 0
    ms = mergestate(repo)
    ms.reset(wctx.p1().node(), mctx.node())
    moves = []
    # deterministic processing order within each action type
    for m, l in actions.items():
        l.sort()

    # prescan for merges
    for f, args, msg in actions['m']:
        f1, f2, fa, move, anc = args
        if f == '.hgsubstate': # merged internally
            continue
        repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
        fcl = wctx[f1]
        fco = mctx[f2]
        actx = repo[anc]
        if fa in actx:
            fca = actx[fa]
        else:
            # no common ancestor version: use an empty filectx
            fca = repo.filectx(f1, fileid=nullrev)
        ms.add(fcl, fco, fca, f)
        if f1 != f and move:
            moves.append(f1)

    audit = repo.wvfs.audit
    _updating = _('updating')
    _files = _('files')
    progress = repo.ui.progress

    # remove renamed files after safely stored
    for f in moves:
        if os.path.lexists(repo.wjoin(f)):
            repo.ui.debug("removing %s\n" % f)
            audit(f)
            util.unlinkpath(repo.wjoin(f))

    numupdates = sum(len(l) for m, l in actions.items() if m != 'k')

    if [a for a in actions['r'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite)

    # remove in parallel (must come first)
    z = 0
    prog = worker.worker(repo.ui, 0.001, batchremove, (repo,), actions['r'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    removed = len(actions['r'])

    # get in parallel
    prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx), actions['g'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    updated = len(actions['g'])

    if [a for a in actions['g'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite)

    # forget (manifest only, just log it) (must come first)
    for f, args, msg in actions['f']:
        repo.ui.debug(" %s: %s -> f\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add (manifest only, just log it)
    for f, args, msg in actions['a']:
        repo.ui.debug(" %s: %s -> a\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # keep (noop, just log it)
    for f, args, msg in actions['k']:
        repo.ui.debug(" %s: %s -> k\n" % (f, msg))
        # no progress

    # directory rename, move local
    for f, args, msg in actions['dm']:
        repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("moving %s to %s\n") % (f0, f))
        audit(f)
        repo.wwrite(f, wctx.filectx(f0).data(), flags)
        util.unlinkpath(repo.wjoin(f0))
        updated += 1

    # local directory rename, get
    for f, args, msg in actions['dg']:
        repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("getting %s to %s\n") % (f0, f))
        repo.wwrite(f, mctx.filectx(f0).data(), flags)
        updated += 1

    # exec
    for f, args, msg in actions['e']:
        repo.ui.debug(" %s: %s -> e\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        flags, = args
        audit(f)
        util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
        updated += 1

    mergeactions = actions['m']
    # the ordering is important here -- ms.mergedriver will raise if the merge
    # driver has changed, and we want to be able to bypass it when overwrite is
    # True
    usemergedriver = not overwrite and mergeactions and ms.mergedriver

    if usemergedriver:
        ms.commit()
        proceed = driverpreprocess(repo, ms, wctx, labels=labels)
        # the driver might leave some files unresolved
        unresolvedf = set(ms.unresolved())
        if not proceed:
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            return updated, merged, removed, max(len(unresolvedf), 1)
        # only merge the files the driver left unresolved
        newactions = []
        for f, args, msg in mergeactions:
            if f in unresolvedf:
                newactions.append((f, args, msg))
        mergeactions = newactions

    # premerge
    tocomplete = []
    for f, args, msg in actions['m']:
        repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        if f == '.hgsubstate': # subrepo states need updating
            subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
                             overwrite)
            continue
        audit(f)
        complete, r = ms.preresolve(f, wctx, labels=labels)
        if complete:
            if r is not None and r > 0:
                unresolved += 1
            else:
                if r is None:
                    updated += 1
                else:
                    merged += 1
        else:
            # needs a full resolve pass below; counts as extra progress
            numupdates += 1
            tocomplete.append((f, args, msg))

    # merge
    for f, args, msg in tocomplete:
        repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        r = ms.resolve(f, wctx, labels=labels)
        if r is not None and r > 0:
            unresolved += 1
        else:
            if r is None:
                updated += 1
            else:
                merged += 1

    ms.commit()

    if usemergedriver and not unresolved and ms.mdstate() != 's':
        if not driverconclude(repo, ms, wctx, labels=labels):
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            return updated, merged, removed, max(unresolved, 1)

    ms.commit()

    progress(_updating, None, total=numupdates, unit=_files)

    return updated, merged, removed, unresolved
1017
1017
1018 def recordupdates(repo, actions, branchmerge):
1018 def recordupdates(repo, actions, branchmerge):
1019 "record merge actions to the dirstate"
1019 "record merge actions to the dirstate"
1020 # remove (must come first)
1020 # remove (must come first)
1021 for f, args, msg in actions['r']:
1021 for f, args, msg in actions['r']:
1022 if branchmerge:
1022 if branchmerge:
1023 repo.dirstate.remove(f)
1023 repo.dirstate.remove(f)
1024 else:
1024 else:
1025 repo.dirstate.drop(f)
1025 repo.dirstate.drop(f)
1026
1026
1027 # forget (must come first)
1027 # forget (must come first)
1028 for f, args, msg in actions['f']:
1028 for f, args, msg in actions['f']:
1029 repo.dirstate.drop(f)
1029 repo.dirstate.drop(f)
1030
1030
1031 # re-add
1031 # re-add
1032 for f, args, msg in actions['a']:
1032 for f, args, msg in actions['a']:
1033 if not branchmerge:
1033 if not branchmerge:
1034 repo.dirstate.add(f)
1034 repo.dirstate.add(f)
1035
1035
1036 # exec change
1036 # exec change
1037 for f, args, msg in actions['e']:
1037 for f, args, msg in actions['e']:
1038 repo.dirstate.normallookup(f)
1038 repo.dirstate.normallookup(f)
1039
1039
1040 # keep
1040 # keep
1041 for f, args, msg in actions['k']:
1041 for f, args, msg in actions['k']:
1042 pass
1042 pass
1043
1043
1044 # get
1044 # get
1045 for f, args, msg in actions['g']:
1045 for f, args, msg in actions['g']:
1046 if branchmerge:
1046 if branchmerge:
1047 repo.dirstate.otherparent(f)
1047 repo.dirstate.otherparent(f)
1048 else:
1048 else:
1049 repo.dirstate.normal(f)
1049 repo.dirstate.normal(f)
1050
1050
1051 # merge
1051 # merge
1052 for f, args, msg in actions['m']:
1052 for f, args, msg in actions['m']:
1053 f1, f2, fa, move, anc = args
1053 f1, f2, fa, move, anc = args
1054 if branchmerge:
1054 if branchmerge:
1055 # We've done a branch merge, mark this file as merged
1055 # We've done a branch merge, mark this file as merged
1056 # so that we properly record the merger later
1056 # so that we properly record the merger later
1057 repo.dirstate.merge(f)
1057 repo.dirstate.merge(f)
1058 if f1 != f2: # copy/rename
1058 if f1 != f2: # copy/rename
1059 if move:
1059 if move:
1060 repo.dirstate.remove(f1)
1060 repo.dirstate.remove(f1)
1061 if f1 != f:
1061 if f1 != f:
1062 repo.dirstate.copy(f1, f)
1062 repo.dirstate.copy(f1, f)
1063 else:
1063 else:
1064 repo.dirstate.copy(f2, f)
1064 repo.dirstate.copy(f2, f)
1065 else:
1065 else:
1066 # We've update-merged a locally modified file, so
1066 # We've update-merged a locally modified file, so
1067 # we set the dirstate to emulate a normal checkout
1067 # we set the dirstate to emulate a normal checkout
1068 # of that file some time in the past. Thus our
1068 # of that file some time in the past. Thus our
1069 # merge will appear as a normal local file
1069 # merge will appear as a normal local file
1070 # modification.
1070 # modification.
1071 if f2 == f: # file not locally copied/moved
1071 if f2 == f: # file not locally copied/moved
1072 repo.dirstate.normallookup(f)
1072 repo.dirstate.normallookup(f)
1073 if move:
1073 if move:
1074 repo.dirstate.drop(f1)
1074 repo.dirstate.drop(f1)
1075
1075
1076 # directory rename, move local
1076 # directory rename, move local
1077 for f, args, msg in actions['dm']:
1077 for f, args, msg in actions['dm']:
1078 f0, flag = args
1078 f0, flag = args
1079 if branchmerge:
1079 if branchmerge:
1080 repo.dirstate.add(f)
1080 repo.dirstate.add(f)
1081 repo.dirstate.remove(f0)
1081 repo.dirstate.remove(f0)
1082 repo.dirstate.copy(f0, f)
1082 repo.dirstate.copy(f0, f)
1083 else:
1083 else:
1084 repo.dirstate.normal(f)
1084 repo.dirstate.normal(f)
1085 repo.dirstate.drop(f0)
1085 repo.dirstate.drop(f0)
1086
1086
1087 # directory rename, get
1087 # directory rename, get
1088 for f, args, msg in actions['dg']:
1088 for f, args, msg in actions['dg']:
1089 f0, flag = args
1089 f0, flag = args
1090 if branchmerge:
1090 if branchmerge:
1091 repo.dirstate.add(f)
1091 repo.dirstate.add(f)
1092 repo.dirstate.copy(f0, f)
1092 repo.dirstate.copy(f0, f)
1093 else:
1093 else:
1094 repo.dirstate.normal(f)
1094 repo.dirstate.normal(f)
1095
1095
1096 def update(repo, node, branchmerge, force, partial, ancestor=None,
1096 def update(repo, node, branchmerge, force, partial, ancestor=None,
1097 mergeancestor=False, labels=None):
1097 mergeancestor=False, labels=None):
1098 """
1098 """
1099 Perform a merge between the working directory and the given node
1099 Perform a merge between the working directory and the given node
1100
1100
1101 node = the node to update to, or None if unspecified
1101 node = the node to update to, or None if unspecified
1102 branchmerge = whether to merge between branches
1102 branchmerge = whether to merge between branches
1103 force = whether to force branch merging or file overwriting
1103 force = whether to force branch merging or file overwriting
1104 partial = a function to filter file lists (dirstate not updated)
1104 partial = a function to filter file lists (dirstate not updated)
1105 mergeancestor = whether it is merging with an ancestor. If true,
1105 mergeancestor = whether it is merging with an ancestor. If true,
1106 we should accept the incoming changes for any prompts that occur.
1106 we should accept the incoming changes for any prompts that occur.
1107 If false, merging with an ancestor (fast-forward) is only allowed
1107 If false, merging with an ancestor (fast-forward) is only allowed
1108 between different named branches. This flag is used by rebase extension
1108 between different named branches. This flag is used by rebase extension
1109 as a temporary fix and should be avoided in general.
1109 as a temporary fix and should be avoided in general.
1110
1110
1111 The table below shows all the behaviors of the update command
1111 The table below shows all the behaviors of the update command
1112 given the -c and -C or no options, whether the working directory
1112 given the -c and -C or no options, whether the working directory
1113 is dirty, whether a revision is specified, and the relationship of
1113 is dirty, whether a revision is specified, and the relationship of
1114 the parent rev to the target rev (linear, on the same named
1114 the parent rev to the target rev (linear, on the same named
1115 branch, or on another named branch).
1115 branch, or on another named branch).
1116
1116
1117 This logic is tested by test-update-branches.t.
1117 This logic is tested by test-update-branches.t.
1118
1118
1119 -c -C dirty rev | linear same cross
1119 -c -C dirty rev | linear same cross
1120 n n n n | ok (1) x
1120 n n n n | ok (1) x
1121 n n n y | ok ok ok
1121 n n n y | ok ok ok
1122 n n y n | merge (2) (2)
1122 n n y n | merge (2) (2)
1123 n n y y | merge (3) (3)
1123 n n y y | merge (3) (3)
1124 n y * * | discard discard discard
1124 n y * * | discard discard discard
1125 y n y * | (4) (4) (4)
1125 y n y * | (4) (4) (4)
1126 y n n * | ok ok ok
1126 y n n * | ok ok ok
1127 y y * * | (5) (5) (5)
1127 y y * * | (5) (5) (5)
1128
1128
1129 x = can't happen
1129 x = can't happen
1130 * = don't-care
1130 * = don't-care
1131 1 = abort: not a linear update (merge or update --check to force update)
1131 1 = abort: not a linear update (merge or update --check to force update)
1132 2 = abort: uncommitted changes (commit and merge, or update --clean to
1132 2 = abort: uncommitted changes (commit and merge, or update --clean to
1133 discard changes)
1133 discard changes)
1134 3 = abort: uncommitted changes (commit or update --clean to discard changes)
1134 3 = abort: uncommitted changes (commit or update --clean to discard changes)
1135 4 = abort: uncommitted changes (checked in commands.py)
1135 4 = abort: uncommitted changes (checked in commands.py)
1136 5 = incompatible options (checked in commands.py)
1136 5 = incompatible options (checked in commands.py)
1137
1137
1138 Return the same tuple as applyupdates().
1138 Return the same tuple as applyupdates().
1139 """
1139 """
1140
1140
1141 onode = node
1141 onode = node
1142 wlock = repo.wlock()
1142 wlock = repo.wlock()
1143 try:
1143 try:
1144 wc = repo[None]
1144 wc = repo[None]
1145 pl = wc.parents()
1145 pl = wc.parents()
1146 p1 = pl[0]
1146 p1 = pl[0]
1147 pas = [None]
1147 pas = [None]
1148 if ancestor is not None:
1148 if ancestor is not None:
1149 pas = [repo[ancestor]]
1149 pas = [repo[ancestor]]
1150
1150
1151 if node is None:
1151 if node is None:
1152 if (repo.ui.configbool('devel', 'all-warnings')
1152 if (repo.ui.configbool('devel', 'all-warnings')
1153 or repo.ui.configbool('devel', 'oldapi')):
1153 or repo.ui.configbool('devel', 'oldapi')):
1154 repo.ui.develwarn('update with no target')
1154 repo.ui.develwarn('update with no target')
1155 rev, _mark, _act = destutil.destupdate(repo)
1155 rev, _mark, _act = destutil.destupdate(repo)
1156 node = repo[rev].node()
1156 node = repo[rev].node()
1157
1157
1158 overwrite = force and not branchmerge
1158 overwrite = force and not branchmerge
1159
1159
1160 p2 = repo[node]
1160 p2 = repo[node]
1161 if pas[0] is None:
1161 if pas[0] is None:
1162 if repo.ui.configlist('merge', 'preferancestor', ['*']) == ['*']:
1162 if repo.ui.configlist('merge', 'preferancestor', ['*']) == ['*']:
1163 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1163 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1164 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1164 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1165 else:
1165 else:
1166 pas = [p1.ancestor(p2, warn=branchmerge)]
1166 pas = [p1.ancestor(p2, warn=branchmerge)]
1167
1167
1168 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1168 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1169
1169
1170 ### check phase
1170 ### check phase
1171 if not overwrite and len(pl) > 1:
1171 if not overwrite and len(pl) > 1:
1172 raise error.Abort(_("outstanding uncommitted merge"))
1172 raise error.Abort(_("outstanding uncommitted merge"))
1173 if branchmerge:
1173 if branchmerge:
1174 if pas == [p2]:
1174 if pas == [p2]:
1175 raise error.Abort(_("merging with a working directory ancestor"
1175 raise error.Abort(_("merging with a working directory ancestor"
1176 " has no effect"))
1176 " has no effect"))
1177 elif pas == [p1]:
1177 elif pas == [p1]:
1178 if not mergeancestor and p1.branch() == p2.branch():
1178 if not mergeancestor and p1.branch() == p2.branch():
1179 raise error.Abort(_("nothing to merge"),
1179 raise error.Abort(_("nothing to merge"),
1180 hint=_("use 'hg update' "
1180 hint=_("use 'hg update' "
1181 "or check 'hg heads'"))
1181 "or check 'hg heads'"))
1182 if not force and (wc.files() or wc.deleted()):
1182 if not force and (wc.files() or wc.deleted()):
1183 raise error.Abort(_("uncommitted changes"),
1183 raise error.Abort(_("uncommitted changes"),
1184 hint=_("use 'hg status' to list changes"))
1184 hint=_("use 'hg status' to list changes"))
1185 for s in sorted(wc.substate):
1185 for s in sorted(wc.substate):
1186 wc.sub(s).bailifchanged()
1186 wc.sub(s).bailifchanged()
1187
1187
1188 elif not overwrite:
1188 elif not overwrite:
1189 if p1 == p2: # no-op update
1189 if p1 == p2: # no-op update
1190 # call the hooks and exit early
1190 # call the hooks and exit early
1191 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1191 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1192 repo.hook('update', parent1=xp2, parent2='', error=0)
1192 repo.hook('update', parent1=xp2, parent2='', error=0)
1193 return 0, 0, 0, 0
1193 return 0, 0, 0, 0
1194
1194
1195 if pas not in ([p1], [p2]): # nonlinear
1195 if pas not in ([p1], [p2]): # nonlinear
1196 dirty = wc.dirty(missing=True)
1196 dirty = wc.dirty(missing=True)
1197 if dirty or onode is None:
1197 if dirty or onode is None:
1198 # Branching is a bit strange to ensure we do the minimal
1198 # Branching is a bit strange to ensure we do the minimal
1199 # amount of call to obsolete.background.
1199 # amount of call to obsolete.background.
1200 foreground = obsolete.foreground(repo, [p1.node()])
1200 foreground = obsolete.foreground(repo, [p1.node()])
1201 # note: the <node> variable contains a random identifier
1201 # note: the <node> variable contains a random identifier
1202 if repo[node].node() in foreground:
1202 if repo[node].node() in foreground:
1203 pas = [p1] # allow updating to successors
1203 pas = [p1] # allow updating to successors
1204 elif dirty:
1204 elif dirty:
1205 msg = _("uncommitted changes")
1205 msg = _("uncommitted changes")
1206 if onode is None:
1206 if onode is None:
1207 hint = _("commit and merge, or update --clean to"
1207 hint = _("commit and merge, or update --clean to"
1208 " discard changes")
1208 " discard changes")
1209 else:
1209 else:
1210 hint = _("commit or update --clean to discard"
1210 hint = _("commit or update --clean to discard"
1211 " changes")
1211 " changes")
1212 raise error.Abort(msg, hint=hint)
1212 raise error.Abort(msg, hint=hint)
1213 else: # node is none
1213 else: # node is none
1214 msg = _("not a linear update")
1214 msg = _("not a linear update")
1215 hint = _("merge or update --check to force update")
1215 hint = _("merge or update --check to force update")
1216 raise error.Abort(msg, hint=hint)
1216 raise error.Abort(msg, hint=hint)
1217 else:
1217 else:
1218 # Allow jumping branches if clean and specific rev given
1218 # Allow jumping branches if clean and specific rev given
1219 pas = [p1]
1219 pas = [p1]
1220
1220
1221 # deprecated config: merge.followcopies
1221 # deprecated config: merge.followcopies
1222 followcopies = False
1222 followcopies = False
1223 if overwrite:
1223 if overwrite:
1224 pas = [wc]
1224 pas = [wc]
1225 elif pas == [p2]: # backwards
1225 elif pas == [p2]: # backwards
1226 pas = [wc.p1()]
1226 pas = [wc.p1()]
1227 elif not branchmerge and not wc.dirty(missing=True):
1227 elif not branchmerge and not wc.dirty(missing=True):
1228 pass
1228 pass
1229 elif pas[0] and repo.ui.configbool('merge', 'followcopies', True):
1229 elif pas[0] and repo.ui.configbool('merge', 'followcopies', True):
1230 followcopies = True
1230 followcopies = True
1231
1231
1232 ### calculate phase
1232 ### calculate phase
1233 actionbyfile, diverge, renamedelete = calculateupdates(
1233 actionbyfile, diverge, renamedelete = calculateupdates(
1234 repo, wc, p2, pas, branchmerge, force, partial, mergeancestor,
1234 repo, wc, p2, pas, branchmerge, force, partial, mergeancestor,
1235 followcopies)
1235 followcopies)
1236 # Convert to dictionary-of-lists format
1236 # Convert to dictionary-of-lists format
1237 actions = dict((m, []) for m in 'a f g cd dc r dm dg m e k'.split())
1237 actions = dict((m, []) for m in 'a f g cd dc r dm dg m e k'.split())
1238 for f, (m, args, msg) in actionbyfile.iteritems():
1238 for f, (m, args, msg) in actionbyfile.iteritems():
1239 if m not in actions:
1239 if m not in actions:
1240 actions[m] = []
1240 actions[m] = []
1241 actions[m].append((f, args, msg))
1241 actions[m].append((f, args, msg))
1242
1242
1243 if not util.checkcase(repo.path):
1243 if not util.checkcase(repo.path):
1244 # check collision between files only in p2 for clean update
1244 # check collision between files only in p2 for clean update
1245 if (not branchmerge and
1245 if (not branchmerge and
1246 (force or not wc.dirty(missing=True, branch=False))):
1246 (force or not wc.dirty(missing=True, branch=False))):
1247 _checkcollision(repo, p2.manifest(), None)
1247 _checkcollision(repo, p2.manifest(), None)
1248 else:
1248 else:
1249 _checkcollision(repo, wc.manifest(), actions)
1249 _checkcollision(repo, wc.manifest(), actions)
1250
1250
1251 # Prompt and create actions. TODO: Move this towards resolve phase.
1251 # Prompt and create actions. TODO: Move this towards resolve phase.
1252 for f, args, msg in sorted(actions['cd']):
1252 for f, args, msg in sorted(actions['cd']):
1253 if repo.ui.promptchoice(
1253 if repo.ui.promptchoice(
1254 _("local changed %s which remote deleted\n"
1254 _("local changed %s which remote deleted\n"
1255 "use (c)hanged version or (d)elete?"
1255 "use (c)hanged version or (d)elete?"
1256 "$$ &Changed $$ &Delete") % f, 0):
1256 "$$ &Changed $$ &Delete") % f, 0):
1257 actions['r'].append((f, None, "prompt delete"))
1257 actions['r'].append((f, None, "prompt delete"))
1258 else:
1258 else:
1259 actions['a'].append((f, None, "prompt keep"))
1259 actions['a'].append((f, None, "prompt keep"))
1260 del actions['cd'][:]
1260 del actions['cd'][:]
1261
1261
1262 for f, args, msg in sorted(actions['dc']):
1262 for f, args, msg in sorted(actions['dc']):
1263 flags, = args
1263 flags, = args
1264 if repo.ui.promptchoice(
1264 if repo.ui.promptchoice(
1265 _("remote changed %s which local deleted\n"
1265 _("remote changed %s which local deleted\n"
1266 "use (c)hanged version or leave (d)eleted?"
1266 "use (c)hanged version or leave (d)eleted?"
1267 "$$ &Changed $$ &Deleted") % f, 0) == 0:
1267 "$$ &Changed $$ &Deleted") % f, 0) == 0:
1268 actions['g'].append((f, (flags,), "prompt recreating"))
1268 actions['g'].append((f, (flags,), "prompt recreating"))
1269 del actions['dc'][:]
1269 del actions['dc'][:]
1270
1270
1271 ### apply phase
1271 ### apply phase
1272 if not branchmerge: # just jump to the new rev
1272 if not branchmerge: # just jump to the new rev
1273 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1273 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1274 if not partial:
1274 if not partial:
1275 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1275 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1276 # note that we're in the middle of an update
1276 # note that we're in the middle of an update
1277 repo.vfs.write('updatestate', p2.hex())
1277 repo.vfs.write('updatestate', p2.hex())
1278
1278
1279 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1279 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1280
1280
1281 # divergent renames
1281 # divergent renames
1282 for f, fl in sorted(diverge.iteritems()):
1282 for f, fl in sorted(diverge.iteritems()):
1283 repo.ui.warn(_("note: possible conflict - %s was renamed "
1283 repo.ui.warn(_("note: possible conflict - %s was renamed "
1284 "multiple times to:\n") % f)
1284 "multiple times to:\n") % f)
1285 for nf in fl:
1285 for nf in fl:
1286 repo.ui.warn(" %s\n" % nf)
1286 repo.ui.warn(" %s\n" % nf)
1287
1287
1288 # rename and delete
1288 # rename and delete
1289 for f, fl in sorted(renamedelete.iteritems()):
1289 for f, fl in sorted(renamedelete.iteritems()):
1290 repo.ui.warn(_("note: possible conflict - %s was deleted "
1290 repo.ui.warn(_("note: possible conflict - %s was deleted "
1291 "and renamed to:\n") % f)
1291 "and renamed to:\n") % f)
1292 for nf in fl:
1292 for nf in fl:
1293 repo.ui.warn(" %s\n" % nf)
1293 repo.ui.warn(" %s\n" % nf)
1294
1294
1295 if not partial:
1295 if not partial:
1296 repo.dirstate.beginparentchange()
1296 repo.dirstate.beginparentchange()
1297 repo.setparents(fp1, fp2)
1297 repo.setparents(fp1, fp2)
1298 recordupdates(repo, actions, branchmerge)
1298 recordupdates(repo, actions, branchmerge)
1299 # update completed, clear state
1299 # update completed, clear state
1300 util.unlink(repo.join('updatestate'))
1300 util.unlink(repo.join('updatestate'))
1301
1301
1302 if not branchmerge:
1302 if not branchmerge:
1303 repo.dirstate.setbranch(p2.branch())
1303 repo.dirstate.setbranch(p2.branch())
1304 repo.dirstate.endparentchange()
1304 repo.dirstate.endparentchange()
1305 finally:
1305 finally:
1306 wlock.release()
1306 wlock.release()
1307
1307
1308 if not partial:
1308 if not partial:
1309 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
1309 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
1310 return stats
1310 return stats
1311
1311
1312 def graft(repo, ctx, pctx, labels):
1312 def graft(repo, ctx, pctx, labels):
1313 """Do a graft-like merge.
1313 """Do a graft-like merge.
1314
1314
1315 This is a merge where the merge ancestor is chosen such that one
1315 This is a merge where the merge ancestor is chosen such that one
1316 or more changesets are grafted onto the current changeset. In
1316 or more changesets are grafted onto the current changeset. In
1317 addition to the merge, this fixes up the dirstate to include only
1317 addition to the merge, this fixes up the dirstate to include only
1318 a single parent and tries to duplicate any renames/copies
1318 a single parent and tries to duplicate any renames/copies
1319 appropriately.
1319 appropriately.
1320
1320
1321 ctx - changeset to rebase
1321 ctx - changeset to rebase
1322 pctx - merge base, usually ctx.p1()
1322 pctx - merge base, usually ctx.p1()
1323 labels - merge labels eg ['local', 'graft']
1323 labels - merge labels eg ['local', 'graft']
1324
1324
1325 """
1325 """
1326 # If we're grafting a descendant onto an ancestor, be sure to pass
1326 # If we're grafting a descendant onto an ancestor, be sure to pass
1327 # mergeancestor=True to update. This does two things: 1) allows the merge if
1327 # mergeancestor=True to update. This does two things: 1) allows the merge if
1328 # the destination is the same as the parent of the ctx (so we can use graft
1328 # the destination is the same as the parent of the ctx (so we can use graft
1329 # to copy commits), and 2) informs update that the incoming changes are
1329 # to copy commits), and 2) informs update that the incoming changes are
1330 # newer than the destination so it doesn't prompt about "remote changed foo
1330 # newer than the destination so it doesn't prompt about "remote changed foo
1331 # which local deleted".
1331 # which local deleted".
1332 mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())
1332 mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())
1333
1333
1334 stats = update(repo, ctx.node(), True, True, False, pctx.node(),
1334 stats = update(repo, ctx.node(), True, True, False, pctx.node(),
1335 mergeancestor=mergeancestor, labels=labels)
1335 mergeancestor=mergeancestor, labels=labels)
1336
1336
1337 # drop the second merge parent
1337 # drop the second merge parent
1338 repo.dirstate.beginparentchange()
1338 repo.dirstate.beginparentchange()
1339 repo.setparents(repo['.'].node(), nullid)
1339 repo.setparents(repo['.'].node(), nullid)
1340 repo.dirstate.write(repo.currenttransaction())
1340 repo.dirstate.write(repo.currenttransaction())
1341 # fix up dirstate for copies and renames
1341 # fix up dirstate for copies and renames
1342 copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
1342 copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
1343 repo.dirstate.endparentchange()
1343 repo.dirstate.endparentchange()
1344 return stats
1344 return stats
@@ -1,1172 +1,1183 b''
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from mercurial.node import wdirrev
9 from mercurial.node import wdirrev
10 import util, error, osutil, revset, similar, encoding, phases
10 import util, error, osutil, revset, similar, encoding, phases
11 import pathutil
11 import pathutil
12 import match as matchmod
12 import match as matchmod
13 import os, errno, re, glob, tempfile, shutil, stat
13 import os, errno, re, glob, tempfile, shutil, stat
14
14
15 if os.name == 'nt':
15 if os.name == 'nt':
16 import scmwindows as scmplatform
16 import scmwindows as scmplatform
17 else:
17 else:
18 import scmposix as scmplatform
18 import scmposix as scmplatform
19
19
20 systemrcpath = scmplatform.systemrcpath
20 systemrcpath = scmplatform.systemrcpath
21 userrcpath = scmplatform.userrcpath
21 userrcpath = scmplatform.userrcpath
22
22
23 class status(tuple):
23 class status(tuple):
24 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
24 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
25 and 'ignored' properties are only relevant to the working copy.
25 and 'ignored' properties are only relevant to the working copy.
26 '''
26 '''
27
27
28 __slots__ = ()
28 __slots__ = ()
29
29
30 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
30 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
31 clean):
31 clean):
32 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
32 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
33 ignored, clean))
33 ignored, clean))
34
34
35 @property
35 @property
36 def modified(self):
36 def modified(self):
37 '''files that have been modified'''
37 '''files that have been modified'''
38 return self[0]
38 return self[0]
39
39
40 @property
40 @property
41 def added(self):
41 def added(self):
42 '''files that have been added'''
42 '''files that have been added'''
43 return self[1]
43 return self[1]
44
44
45 @property
45 @property
46 def removed(self):
46 def removed(self):
47 '''files that have been removed'''
47 '''files that have been removed'''
48 return self[2]
48 return self[2]
49
49
50 @property
50 @property
51 def deleted(self):
51 def deleted(self):
52 '''files that are in the dirstate, but have been deleted from the
52 '''files that are in the dirstate, but have been deleted from the
53 working copy (aka "missing")
53 working copy (aka "missing")
54 '''
54 '''
55 return self[3]
55 return self[3]
56
56
57 @property
57 @property
58 def unknown(self):
58 def unknown(self):
59 '''files not in the dirstate that are not ignored'''
59 '''files not in the dirstate that are not ignored'''
60 return self[4]
60 return self[4]
61
61
62 @property
62 @property
63 def ignored(self):
63 def ignored(self):
64 '''files not in the dirstate that are ignored (by _dirignore())'''
64 '''files not in the dirstate that are ignored (by _dirignore())'''
65 return self[5]
65 return self[5]
66
66
67 @property
67 @property
68 def clean(self):
68 def clean(self):
69 '''files that have not been modified'''
69 '''files that have not been modified'''
70 return self[6]
70 return self[6]
71
71
72 def __repr__(self, *args, **kwargs):
72 def __repr__(self, *args, **kwargs):
73 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
73 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
74 'unknown=%r, ignored=%r, clean=%r>') % self)
74 'unknown=%r, ignored=%r, clean=%r>') % self)
75
75
76 def itersubrepos(ctx1, ctx2):
76 def itersubrepos(ctx1, ctx2):
77 """find subrepos in ctx1 or ctx2"""
77 """find subrepos in ctx1 or ctx2"""
78 # Create a (subpath, ctx) mapping where we prefer subpaths from
78 # Create a (subpath, ctx) mapping where we prefer subpaths from
79 # ctx1. The subpaths from ctx2 are important when the .hgsub file
79 # ctx1. The subpaths from ctx2 are important when the .hgsub file
80 # has been modified (in ctx2) but not yet committed (in ctx1).
80 # has been modified (in ctx2) but not yet committed (in ctx1).
81 subpaths = dict.fromkeys(ctx2.substate, ctx2)
81 subpaths = dict.fromkeys(ctx2.substate, ctx2)
82 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
82 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
83
83
84 missing = set()
84 missing = set()
85
85
86 for subpath in ctx2.substate:
86 for subpath in ctx2.substate:
87 if subpath not in ctx1.substate:
87 if subpath not in ctx1.substate:
88 del subpaths[subpath]
88 del subpaths[subpath]
89 missing.add(subpath)
89 missing.add(subpath)
90
90
91 for subpath, ctx in sorted(subpaths.iteritems()):
91 for subpath, ctx in sorted(subpaths.iteritems()):
92 yield subpath, ctx.sub(subpath)
92 yield subpath, ctx.sub(subpath)
93
93
94 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
94 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
95 # status and diff will have an accurate result when it does
95 # status and diff will have an accurate result when it does
96 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
96 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
97 # against itself.
97 # against itself.
98 for subpath in missing:
98 for subpath in missing:
99 yield subpath, ctx2.nullsub(subpath, ctx1)
99 yield subpath, ctx2.nullsub(subpath, ctx1)
100
100
101 def nochangesfound(ui, repo, excluded=None):
101 def nochangesfound(ui, repo, excluded=None):
102 '''Report no changes for push/pull, excluded is None or a list of
102 '''Report no changes for push/pull, excluded is None or a list of
103 nodes excluded from the push/pull.
103 nodes excluded from the push/pull.
104 '''
104 '''
105 secretlist = []
105 secretlist = []
106 if excluded:
106 if excluded:
107 for n in excluded:
107 for n in excluded:
108 if n not in repo:
108 if n not in repo:
109 # discovery should not have included the filtered revision,
109 # discovery should not have included the filtered revision,
110 # we have to explicitly exclude it until discovery is cleanup.
110 # we have to explicitly exclude it until discovery is cleanup.
111 continue
111 continue
112 ctx = repo[n]
112 ctx = repo[n]
113 if ctx.phase() >= phases.secret and not ctx.extinct():
113 if ctx.phase() >= phases.secret and not ctx.extinct():
114 secretlist.append(n)
114 secretlist.append(n)
115
115
116 if secretlist:
116 if secretlist:
117 ui.status(_("no changes found (ignored %d secret changesets)\n")
117 ui.status(_("no changes found (ignored %d secret changesets)\n")
118 % len(secretlist))
118 % len(secretlist))
119 else:
119 else:
120 ui.status(_("no changes found\n"))
120 ui.status(_("no changes found\n"))
121
121
122 def checknewlabel(repo, lbl, kind):
122 def checknewlabel(repo, lbl, kind):
123 # Do not use the "kind" parameter in ui output.
123 # Do not use the "kind" parameter in ui output.
124 # It makes strings difficult to translate.
124 # It makes strings difficult to translate.
125 if lbl in ['tip', '.', 'null']:
125 if lbl in ['tip', '.', 'null']:
126 raise error.Abort(_("the name '%s' is reserved") % lbl)
126 raise error.Abort(_("the name '%s' is reserved") % lbl)
127 for c in (':', '\0', '\n', '\r'):
127 for c in (':', '\0', '\n', '\r'):
128 if c in lbl:
128 if c in lbl:
129 raise error.Abort(_("%r cannot be used in a name") % c)
129 raise error.Abort(_("%r cannot be used in a name") % c)
130 try:
130 try:
131 int(lbl)
131 int(lbl)
132 raise error.Abort(_("cannot use an integer as a name"))
132 raise error.Abort(_("cannot use an integer as a name"))
133 except ValueError:
133 except ValueError:
134 pass
134 pass
135
135
136 def checkfilename(f):
136 def checkfilename(f):
137 '''Check that the filename f is an acceptable filename for a tracked file'''
137 '''Check that the filename f is an acceptable filename for a tracked file'''
138 if '\r' in f or '\n' in f:
138 if '\r' in f or '\n' in f:
139 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
139 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
140
140
141 def checkportable(ui, f):
141 def checkportable(ui, f):
142 '''Check if filename f is portable and warn or abort depending on config'''
142 '''Check if filename f is portable and warn or abort depending on config'''
143 checkfilename(f)
143 checkfilename(f)
144 abort, warn = checkportabilityalert(ui)
144 abort, warn = checkportabilityalert(ui)
145 if abort or warn:
145 if abort or warn:
146 msg = util.checkwinfilename(f)
146 msg = util.checkwinfilename(f)
147 if msg:
147 if msg:
148 msg = "%s: %r" % (msg, f)
148 msg = "%s: %r" % (msg, f)
149 if abort:
149 if abort:
150 raise error.Abort(msg)
150 raise error.Abort(msg)
151 ui.warn(_("warning: %s\n") % msg)
151 ui.warn(_("warning: %s\n") % msg)
152
152
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames', 'warn')
    lowered = val.lower()
    boolval = util.parsebool(val)
    # on Windows a non-portable name is always fatal, regardless of config
    abort = os.name == 'nt' or lowered == 'abort'
    warn = boolval or lowered == 'warn'
    if boolval is None and not (warn or abort or lowered == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
165
165
class casecollisionauditor(object):
    '''Detect new filenames that collide case-insensitively with files
    already tracked in the dirstate, warning or aborting per ``abort``.'''
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        # if True, a detected collision raises instead of warning
        self._abort = abort
        # join/split on '\0' so the whole dirstate is lowercased in one
        # encoding.lower() call instead of one call per filename
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        # already audited this exact name: nothing to do
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        # a name already tracked verbatim is not a collision; only a
        # case-folded match from a *different* spelling is
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        # record the name so later calls see it (and don't re-warn)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
189
189
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if not revs:
        # no filtered revision at or below maxrev: no key to compute
        return None
    hasher = util.sha1()
    for rev in revs:
        hasher.update('%s;' % rev)
    return hasher.digest()
213
213
class abstractvfs(object):
    """Abstract base class; cannot be instantiated"""

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError as inst:
            # only "file not found" is tolerated; real I/O errors propagate
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def tryreadlines(self, path, mode='rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
        return []

    def open(self, path, mode="r", text=False, atomictemp=False,
             notindexed=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        # rebind 'open' to __call__ so the indirection cost is paid only
        # on the first call per instance
        self.open = self.__call__
        return self.__call__(path, mode, text, atomictemp, notindexed)

    def read(self, path):
        # return the entire file contents as bytes
        fp = self(path, 'rb')
        try:
            return fp.read()
        finally:
            fp.close()

    def readlines(self, path, mode='rb'):
        fp = self(path, mode=mode)
        try:
            return fp.readlines()
        finally:
            fp.close()

    def write(self, path, data):
        # (over)write the file with ``data`` in binary mode
        fp = self(path, 'wb')
        try:
            return fp.write(data)
        finally:
            fp.close()

    def writelines(self, path, data, mode='wb', notindexed=False):
        fp = self(path, mode=mode, notindexed=notindexed)
        try:
            return fp.writelines(data)
        finally:
            fp.close()

    def append(self, path, data):
        # append ``data`` at the end of the file, creating it if needed
        fp = self(path, 'ab')
        try:
            return fp.write(data)
        finally:
            fp.close()

    def basename(self, path):
        """return base element of a path (as os.path.basename would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.basename(path)

    def chmod(self, path, mode):
        return os.chmod(self.join(path), mode)

    def dirname(self, path):
        """return dirname element of a path (as os.path.dirname would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.dirname(path)

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def isfileorlink(self, path=None):
        '''return whether path is a regular file or a symlink

        Unlike isfile, this doesn't follow symlinks.'''
        try:
            st = self.lstat(path)
        except OSError:
            return False
        mode = st.st_mode
        return stat.S_ISREG(mode) or stat.S_ISLNK(mode)

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that path stay relative. This exists
        to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def split(self, path):
        """split top-most element of a path (as os.path.split would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.split(path)

    def lexists(self, path=None):
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                    dir=self.join(dir), text=text)
        dname, fname = util.split(name)
        # report the temp file name relative to the vfs root, not absolute
        if dir:
            return fd, os.path.join(dir, fname)
        else:
            return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        return osutil.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        return util.readlock(self.join(path))

    def rename(self, src, dst):
        return util.rename(self.join(src), self.join(dst))

    def readlink(self, path):
        return os.readlink(self.join(path))

    def removedirs(self, path=None):
        """Remove a leaf directory and all empty intermediate ones
        """
        return util.removedirs(self.join(path))

    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
        """Remove a directory tree recursively

        If ``forcibly``, this tries to remove READ-ONLY files, too.
        """
        if forcibly:
            def onerror(function, path, excinfo):
                # only retry failed os.remove calls; re-raise anything else
                if function is not os.remove:
                    raise
                # read-only files cannot be unlinked under Windows
                s = os.stat(path)
                if (s.st_mode & stat.S_IWRITE) != 0:
                    raise
                os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
                os.remove(path)
        else:
            onerror = None
        return shutil.rmtree(self.join(path),
                             ignore_errors=ignore_errors, onerror=onerror)

    def setflags(self, path, l, x):
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def unlinkpath(self, path=None, ignoremissing=False):
        return util.unlinkpath(self.join(path), ignoremissing)

    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, dirs, files) tuple for each directories under path

        ``dirpath`` is relative one from the root of this vfs. This
        uses ``os.sep`` as path separator, even you specify POSIX
        style ``path``.

        "The root of this vfs" is represented as empty ``dirpath``.
        """
        root = os.path.normpath(self.join(None))
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
            yield (dirpath[prefixlen:], dirs, files)
425
436
class vfs(abstractvfs):
    '''Operate files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    '''
    def __init__(self, base, audit=True, expandpath=False, realpath=False):
        if expandpath:
            base = util.expandpath(base)
        if realpath:
            base = os.path.realpath(base)
        self.base = base
        self._setmustaudit(audit)
        # mode applied to newly created files (see _fixfilemode)
        self.createmode = None
        # lazily decided: whether nlink counts from the OS can be trusted
        self._trustnlink = None

    def _getmustaudit(self):
        return self._audit

    def _setmustaudit(self, onoff):
        self._audit = onoff
        if onoff:
            self.audit = pathutil.pathauditor(self.base)
        else:
            # auditing disabled: every path passes
            self.audit = util.always

    mustaudit = property(_getmustaudit, _setmustaudit)

    @util.propertycache
    def _cansymlink(self):
        # probed once per instance: can the base filesystem hold symlinks?
        return util.checklink(self.base)

    @util.propertycache
    def _chmod(self):
        # probed once per instance: does the base filesystem honor exec bits?
        return util.checkexec(self.base)

    def _fixfilemode(self, name):
        if self.createmode is None or not self._chmod:
            return
        os.chmod(name, self.createmode & 0o666)

    def __call__(self, path, mode="r", text=False, atomictemp=False,
                 notindexed=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise error.Abort("%s: %r" % (r, path))
        self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        # nlink semantics: -1 unknown, 0 freshly created/truncated file
        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.ensuredirs(dirname, self.createmode, notindexed)
                    return util.atomictempfile(f, mode, self.createmode)
                try:
                    if 'w' in mode:
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        fd = util.posixfile(f)
                        nlink = util.nlinks(f)
                        if nlink < 1:
                            nlink = 2 # force mktempcopy (issue1922)
                        fd.close()
                except (OSError, IOError) as e:
                    if e.errno != errno.ENOENT:
                        raise
                    nlink = 0
                util.ensuredirs(dirname, self.createmode, notindexed)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    # break hardlinks (COW) before writing in place
                    if nlink > 1 or not self._trustnlink:
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)
        return fp

    def symlink(self, src, dst):
        self.audit(dst)
        linkname = self.join(dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        util.ensuredirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError as err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            # no symlink support: store the link target as file content
            self.write(dst, src)

    def join(self, path, *insidef):
        if path:
            return os.path.join(self.base, path, *insidef)
        else:
            return self.base

# historical alias kept for callers predating the vfs naming
opener = vfs
547
558
class auditvfs(object):
    '''Mixin that forwards the ``mustaudit`` property to a wrapped vfs.'''
    def __init__(self, vfs):
        # the wrapped vfs all operations ultimately delegate to
        self.vfs = vfs

    def _getmustaudit(self):
        return self.vfs.mustaudit

    def _setmustaudit(self, onoff):
        self.vfs.mustaudit = onoff

    mustaudit = property(_getmustaudit, _setmustaudit)
559
570
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        # callable applied to every path before delegating to the inner vfs
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path, *insidef):
        if path:
            # join relative pieces first, then filter the combined path
            return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
        else:
            return self.vfs.join(path)

# historical alias kept for callers predating the vfs naming
filteropener = filtervfs
577
588
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # only plain read modes may reach the wrapped vfs
        if mode not in ('r', 'rb'):
            raise error.Abort('this vfs is read only')
        return self.vfs(path, mode, *args, **kw)

    def join(self, path, *insidef):
        return self.vfs.join(path, *insidef)
591
602
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # only errors on the root path itself are fatal; errors deeper in
        # the walk are silently skipped by os.walk
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # record dirname's stat in dirlst; return True only if it was
            # not already seen (guards against symlink cycles)
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # without samestat we cannot detect cycles, so don't follow links
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # recurse through the link target ourselves, since
                        # os.walk does not follow symlinks
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
639
650
def osrcpath():
    '''return default os-specific hgrc search path'''
    path = []
    # bundled default.d/*.rc files come first, before system/user config
    defaultpath = os.path.join(util.datapath, 'default.d')
    if os.path.isdir(defaultpath):
        path.extend(os.path.join(defaultpath, f)
                    for f, kind in osutil.listdir(defaultpath)
                    if f.endswith('.rc'))
    path.extend(systemrcpath())
    path.extend(userrcpath())
    return [os.path.normpath(f) for f in path]
652
663
# lazily computed cache for rcpath(); None until the first call
_rcpath = None

def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is None:
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for p in os.environ['HGRCPATH'].split(os.pathsep):
                # empty entries are skipped, so HGRCPATH="" yields no files
                if not p:
                    continue
                p = util.expandpath(p)
                if os.path.isdir(p):
                    # a directory entry contributes all its *.rc files
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = osrcpath()
    return _rcpath
678
689
def intrev(rev):
    """Return integer for a given revision that can be used in comparison or
    arithmetic operation"""
    # the working directory has no ordinary revision number; substitute
    # the wdirrev sentinel so comparisons still work
    return wdirrev if rev is None else rev
685
696
def revsingle(repo, revspec, default='.'):
    # an empty spec means the default revision, but the integer revision 0
    # is falsy and must still be treated as a real spec
    if not revspec and revspec != 0:
        return repo[default]

    matched = revrange(repo, [revspec])
    if not matched:
        raise error.Abort(_('empty revision set'))
    return repo[matched.last()]
694
705
def _pairspec(revspec):
    '''Return True if revspec's top-level operator is a range operator.

    Used by revpair() to decide whether a spec that collapsed to a single
    revision must still be reported as a pair.
    '''
    tree = revset.parse(revspec)
    tree = revset.optimize(tree, True)[1] # fix up "x^:y" -> "(x^):y"
    return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
699
710
def revpair(repo, revs):
    '''Resolve a list of revision specs to a (first, second) node pair.

    second is None when revs is empty (first working-dir parent is used)
    or when the result collapses to a single revision not written as a
    range. Raises error.Abort for empty ranges.
    '''
    if not revs:
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    # pick endpoints cheaply when the set's ordering is known
    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    # first == second with several specs may mean one side was empty;
    # re-evaluate each spec individually to distinguish
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
729
740
def revrange(repo, revs):
    """Return the revisions matching a list of revision specifications.

    Integer specs are wrapped as rev(%d) so they are resolved as revision
    numbers rather than parsed as strings. The result is whatever the
    revset matcher produces for the repo (a set-like of revisions).
    """
    allspecs = []
    for spec in revs:
        if isinstance(spec, int):
            spec = revset.formatspec('rev(%d)', spec)
        allspecs.append(spec)
    m = revset.matchany(repo.ui, allspecs, repo)
    return m(repo)
739
750
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        # debug mode: always show both parents, padding with null
        return [parents[0], repo['null']]
    # linear history: suppress the parent when it is just the previous rev
    if parents[0].rev() >= intrev(ctx.rev()) - 1:
        return []
    return parents
755
766
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            # bare pattern (no "kind:" prefix): try to glob-expand it
            try:
                globbed = glob.glob(pat)
            except re.error:
                # invalid glob: fall back to the literal pattern
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        # keep kinded or non-matching patterns untouched
        ret.append(kindpat)
    return ret
774
785
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        # expand bare globs (no-op on posix, see expandpats)
        pats = expandpats(pats or [])

    def bad(f, msg):
        # default bad-file callback: warn via the repo's ui
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        # an always-matcher means the patterns imposed no restriction
        pats = []
    return m, pats
799
810
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    # thin wrapper around matchandpats(), discarding the pattern list
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
804
815
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always(repo.root, repo.getcwd())
808
819
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
812
823
def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
    '''Add new files and forget missing ones, recording renames (the
    implementation behind "hg addremove").

    Returns 1 if any explicitly requested file was rejected or a subrepo
    reported a failure, 0 otherwise. When dry_run is true nothing is
    actually recorded in the dirstate.
    '''
    if opts is None:
        opts = {}
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    def matchessubrepo(matcher, subpath):
        # a subrepo is affected when it is named exactly or when any
        # pattern file lies at/under its path
        if matcher.exact(subpath):
            return True
        for f in matcher.files():
            if f.startswith(subpath):
                return True
        return False

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        if opts.get('subrepos') or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                # subrepo recorded in .hgsub but not present: skip, don't fail
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        # only warn for files the user named explicitly
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    # report what will be added (unknown/forgotten) and removed (deleted)
    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    # an explicitly requested file that was rejected is a hard failure
    for f in rejected:
        if f in m.files():
            return 1
    return ret
876
887
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.

    Returns 1 if any of the named files was rejected by the matcher,
    0 otherwise.
    '''
    # rejected is appended to lazily by the badfn closure during the walk,
    # so defining it just after the matcher is safe
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0
905
916
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns (added, unknown, deleted, removed, forgotten) lists of paths.
    '''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    for abs, st in walkresults.iteritems():
        # dirstate state characters: '?' untracked, 'r' removed, 'a' added;
        # st is None when the file is absent from disk
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            # tracked but missing from disk
            deleted.append(abs)
        elif dstate == 'r' and st:
            # marked removed but present on disk again
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
934
945
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.

    Returns a dict mapping new name -> old name. Empty unless
    similarity > 0.
    '''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo, added, removed,
                                                   similarity):
            if (repo.ui.verbose or not matcher.exact(old)
                or not matcher.exact(new)):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (matcher.rel(old), matcher.rel(new),
                                score * 100))
            renames[new] = old
    return renames
949
960
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    # dirstate mutation requires the working-directory lock
    wlock = repo.wlock()
    try:
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)
    finally:
        wlock.release()
962
973
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # follow an existing copy record so chained copies point at the origin
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # source was only added, never committed: nothing to copy from
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
981
992
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.

    Returns the set of requirement strings. Raises error.RequirementError
    if the file looks corrupt (empty/non-alphanumeric entry) or lists a
    feature not in supported.
    '''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r not in supported:
            if not r or not r[0].isalnum():
                raise error.RequirementError(_(".hg/requires file is corrupt"))
            missings.append(r)
    # sort for a deterministic error message
    missings.sort()
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
1000
1011
def writerequires(opener, requirements):
    """Write the requirement names, sorted one per line, via opener."""
    reqfh = opener("requires", "w")
    reqfh.write(''.join("%s\n" % name for name in sorted(requirements)))
    reqfh.close()
1006
1017
class filecachesubentry(object):
    """Stat-based change tracker for a single file path.

    Records the file's stat info at construction (when stat is true) and
    later reports via changed() whether the file was modified on disk.
    """
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        # tri-state: True/False once determined, None = not known yet
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        # re-record the current stat info as the new baseline
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            # remember the new stat so repeated calls don't re-report
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        # returns None when the file does not exist (ENOENT);
        # any other stat failure propagates
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1061
1072
class filecacheentry(object):
    """Aggregate cache entry that tracks several paths at once."""

    def __init__(self, paths, stat=True):
        # one stat tracker per watched path
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(sub.changed() for sub in self._entries)

    def refresh(self):
        for sub in self._entries:
            sub.refresh()
1078
1089
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behavior as
    propertycache).

    '''
    def __init__(self, *paths):
        # relative paths (under .hg/) whose changes invalidate the value
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # decorator protocol: remember the wrapped function and its name
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # do we need to check if the file changed?
        # (instance __dict__ hit means the value was already validated)
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                # file changed on disk: recompute the cached object
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
1154
1165
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    '''Run cmd as a subprocess, exporting the inherited lock token in envvar.

    lock must currently be held; otherwise LockInheritanceContractViolation
    is raised. Returns the exit code from ui.system.
    '''
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    if environ is None:
        environ = {}
    # the lock stays "inherited" for the duration of the child process
    with lock.inherit() as locker:
        environ[envvar] = locker
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1164
1175
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    # the lock token is exported to the child via HG_WLOCK_LOCKER
    return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
                    **kwargs)
@@ -1,296 +1,318 b''
1 $ cat <<EOF > merge
1 $ cat <<EOF > merge
2 > import sys, os
2 > import sys, os
3 >
3 >
4 > try:
4 > try:
5 > import msvcrt
5 > import msvcrt
6 > msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
6 > msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
7 > msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
7 > msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
8 > except ImportError:
8 > except ImportError:
9 > pass
9 > pass
10 >
10 >
11 > print "merging for", os.path.basename(sys.argv[1])
11 > print "merging for", os.path.basename(sys.argv[1])
12 > EOF
12 > EOF
13 $ HGMERGE="python ../merge"; export HGMERGE
13 $ HGMERGE="python ../merge"; export HGMERGE
14
14
15 $ hg init t
15 $ hg init t
16 $ cd t
16 $ cd t
17 $ echo This is file a1 > a
17 $ echo This is file a1 > a
18 $ hg add a
18 $ hg add a
19 $ hg commit -m "commit #0"
19 $ hg commit -m "commit #0"
20 $ echo This is file b1 > b
20 $ echo This is file b1 > b
21 $ hg add b
21 $ hg add b
22 $ hg commit -m "commit #1"
22 $ hg commit -m "commit #1"
23
23
24 $ hg update 0
24 $ hg update 0
25 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
25 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
26
26
27 Test interrupted updates by exploiting our non-handling of directory collisions
27 Test interrupted updates by exploiting our non-handling of directory collisions
28
28
29 $ mkdir b
29 $ mkdir b
30 $ hg up
30 $ hg up
31 abort: *: '$TESTTMP/t/b' (glob)
31 abort: *: '$TESTTMP/t/b' (glob)
32 [255]
32 [255]
33 $ hg ci
33 $ hg ci
34 abort: last update was interrupted
34 abort: last update was interrupted
35 (use 'hg update' to get a consistent checkout)
35 (use 'hg update' to get a consistent checkout)
36 [255]
36 [255]
37 $ hg sum
37 $ hg sum
38 parent: 0:538afb845929
38 parent: 0:538afb845929
39 commit #0
39 commit #0
40 branch: default
40 branch: default
41 commit: (interrupted update)
41 commit: (interrupted update)
42 update: 1 new changesets (update)
42 update: 1 new changesets (update)
43 phases: 2 draft
43 phases: 2 draft
44 $ rmdir b
44 $ rmdir b
45 $ hg up
45 $ hg up
46 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
46 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
47 $ hg sum
47 $ hg sum
48 parent: 1:b8bb4a988f25 tip
48 parent: 1:b8bb4a988f25 tip
49 commit #1
49 commit #1
50 branch: default
50 branch: default
51 commit: (clean)
51 commit: (clean)
52 update: (current)
52 update: (current)
53 phases: 2 draft
53 phases: 2 draft
54
54
55 Prepare a basic merge
55 Prepare a basic merge
56
56
57 $ hg up 0
57 $ hg up 0
58 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
58 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
59 $ echo This is file c1 > c
59 $ echo This is file c1 > c
60 $ hg add c
60 $ hg add c
61 $ hg commit -m "commit #2"
61 $ hg commit -m "commit #2"
62 created new head
62 created new head
63 $ echo This is file b1 > b
63 $ echo This is file b1 > b
64 no merges expected
64 no merges expected
65 $ hg merge -P 1
65 $ hg merge -P 1
66 changeset: 1:b8bb4a988f25
66 changeset: 1:b8bb4a988f25
67 user: test
67 user: test
68 date: Thu Jan 01 00:00:00 1970 +0000
68 date: Thu Jan 01 00:00:00 1970 +0000
69 summary: commit #1
69 summary: commit #1
70
70
71 $ hg merge 1
71 $ hg merge 1
72 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
72 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
73 (branch merge, don't forget to commit)
73 (branch merge, don't forget to commit)
74 $ hg diff --nodates
74 $ hg diff --nodates
75 diff -r 49035e18a8e6 b
75 diff -r 49035e18a8e6 b
76 --- /dev/null
76 --- /dev/null
77 +++ b/b
77 +++ b/b
78 @@ -0,0 +1,1 @@
78 @@ -0,0 +1,1 @@
79 +This is file b1
79 +This is file b1
80 $ hg status
80 $ hg status
81 M b
81 M b
82 $ cd ..; rm -r t
82 $ cd ..; rm -r t
83
83
84 $ hg init t
84 $ hg init t
85 $ cd t
85 $ cd t
86 $ echo This is file a1 > a
86 $ echo This is file a1 > a
87 $ hg add a
87 $ hg add a
88 $ hg commit -m "commit #0"
88 $ hg commit -m "commit #0"
89 $ echo This is file b1 > b
89 $ echo This is file b1 > b
90 $ hg add b
90 $ hg add b
91 $ hg commit -m "commit #1"
91 $ hg commit -m "commit #1"
92
92
93 $ hg update 0
93 $ hg update 0
94 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
94 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
95 $ echo This is file c1 > c
95 $ echo This is file c1 > c
96 $ hg add c
96 $ hg add c
97 $ hg commit -m "commit #2"
97 $ hg commit -m "commit #2"
98 created new head
98 created new head
99 $ echo This is file b2 > b
99 $ echo This is file b2 > b
100 merge should fail
100 merge should fail
101 $ hg merge 1
101 $ hg merge 1
102 b: untracked file differs
102 b: untracked file differs
103 abort: untracked files in working directory differ from files in requested revision
103 abort: untracked files in working directory differ from files in requested revision
104 [255]
104 [255]
105
106 #if symlink
107 symlinks to directories should be treated as regular files (issue5027)
108 $ rm b
109 $ ln -s 'This is file b2' b
110 $ hg merge 1
111 b: untracked file differs
112 abort: untracked files in working directory differ from files in requested revision
113 [255]
114 symlinks shouldn't be followed
115 $ rm b
116 $ echo This is file b1 > .hg/b
117 $ ln -s .hg/b b
118 $ hg merge 1
119 b: untracked file differs
120 abort: untracked files in working directory differ from files in requested revision
121 [255]
122
123 $ rm b
124 $ echo This is file b2 > b
125 #endif
126
105 merge of b expected
127 merge of b expected
106 $ hg merge -f 1
128 $ hg merge -f 1
107 merging b
129 merging b
108 merging for b
130 merging for b
109 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
131 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
110 (branch merge, don't forget to commit)
132 (branch merge, don't forget to commit)
111 $ hg diff --nodates
133 $ hg diff --nodates
112 diff -r 49035e18a8e6 b
134 diff -r 49035e18a8e6 b
113 --- /dev/null
135 --- /dev/null
114 +++ b/b
136 +++ b/b
115 @@ -0,0 +1,1 @@
137 @@ -0,0 +1,1 @@
116 +This is file b2
138 +This is file b2
117 $ hg status
139 $ hg status
118 M b
140 M b
119 $ cd ..; rm -r t
141 $ cd ..; rm -r t
120
142
121 $ hg init t
143 $ hg init t
122 $ cd t
144 $ cd t
123 $ echo This is file a1 > a
145 $ echo This is file a1 > a
124 $ hg add a
146 $ hg add a
125 $ hg commit -m "commit #0"
147 $ hg commit -m "commit #0"
126 $ echo This is file b1 > b
148 $ echo This is file b1 > b
127 $ hg add b
149 $ hg add b
128 $ hg commit -m "commit #1"
150 $ hg commit -m "commit #1"
129 $ echo This is file b22 > b
151 $ echo This is file b22 > b
130 $ hg commit -m "commit #2"
152 $ hg commit -m "commit #2"
131 $ hg update 1
153 $ hg update 1
132 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
154 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
133 $ echo This is file c1 > c
155 $ echo This is file c1 > c
134 $ hg add c
156 $ hg add c
135 $ hg commit -m "commit #3"
157 $ hg commit -m "commit #3"
136 created new head
158 created new head
137
159
138 Contents of b should be "this is file b1"
160 Contents of b should be "this is file b1"
139 $ cat b
161 $ cat b
140 This is file b1
162 This is file b1
141
163
142 $ echo This is file b22 > b
164 $ echo This is file b22 > b
143 merge fails
165 merge fails
144 $ hg merge 2
166 $ hg merge 2
145 abort: uncommitted changes
167 abort: uncommitted changes
146 (use 'hg status' to list changes)
168 (use 'hg status' to list changes)
147 [255]
169 [255]
148 merge expected!
170 merge expected!
149 $ hg merge -f 2
171 $ hg merge -f 2
150 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
172 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
151 (branch merge, don't forget to commit)
173 (branch merge, don't forget to commit)
152 $ hg diff --nodates
174 $ hg diff --nodates
153 diff -r 85de557015a8 b
175 diff -r 85de557015a8 b
154 --- a/b
176 --- a/b
155 +++ b/b
177 +++ b/b
156 @@ -1,1 +1,1 @@
178 @@ -1,1 +1,1 @@
157 -This is file b1
179 -This is file b1
158 +This is file b22
180 +This is file b22
159 $ hg status
181 $ hg status
160 M b
182 M b
161 $ cd ..; rm -r t
183 $ cd ..; rm -r t
162
184
163 $ hg init t
185 $ hg init t
164 $ cd t
186 $ cd t
165 $ echo This is file a1 > a
187 $ echo This is file a1 > a
166 $ hg add a
188 $ hg add a
167 $ hg commit -m "commit #0"
189 $ hg commit -m "commit #0"
168 $ echo This is file b1 > b
190 $ echo This is file b1 > b
169 $ hg add b
191 $ hg add b
170 $ hg commit -m "commit #1"
192 $ hg commit -m "commit #1"
171 $ echo This is file b22 > b
193 $ echo This is file b22 > b
172 $ hg commit -m "commit #2"
194 $ hg commit -m "commit #2"
173 $ hg update 1
195 $ hg update 1
174 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
196 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
175 $ echo This is file c1 > c
197 $ echo This is file c1 > c
176 $ hg add c
198 $ hg add c
177 $ hg commit -m "commit #3"
199 $ hg commit -m "commit #3"
178 created new head
200 created new head
179 $ echo This is file b33 > b
201 $ echo This is file b33 > b
180 merge of b should fail
202 merge of b should fail
181 $ hg merge 2
203 $ hg merge 2
182 abort: uncommitted changes
204 abort: uncommitted changes
183 (use 'hg status' to list changes)
205 (use 'hg status' to list changes)
184 [255]
206 [255]
185 merge of b expected
207 merge of b expected
186 $ hg merge -f 2
208 $ hg merge -f 2
187 merging b
209 merging b
188 merging for b
210 merging for b
189 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
211 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
190 (branch merge, don't forget to commit)
212 (branch merge, don't forget to commit)
191 $ hg diff --nodates
213 $ hg diff --nodates
192 diff -r 85de557015a8 b
214 diff -r 85de557015a8 b
193 --- a/b
215 --- a/b
194 +++ b/b
216 +++ b/b
195 @@ -1,1 +1,1 @@
217 @@ -1,1 +1,1 @@
196 -This is file b1
218 -This is file b1
197 +This is file b33
219 +This is file b33
198 $ hg status
220 $ hg status
199 M b
221 M b
200
222
201 Test for issue2364
223 Test for issue2364
202
224
203 $ hg up -qC .
225 $ hg up -qC .
204 $ hg rm b
226 $ hg rm b
205 $ hg ci -md
227 $ hg ci -md
206 $ hg revert -r -2 b
228 $ hg revert -r -2 b
207 $ hg up -q -- -2
229 $ hg up -q -- -2
208
230
209 Test that updated files are treated as "modified", when
231 Test that updated files are treated as "modified", when
210 'merge.update()' is aborted before 'merge.recordupdates()' (= parents
232 'merge.update()' is aborted before 'merge.recordupdates()' (= parents
211 aren't changed), even if none of mode, size and timestamp of them
233 aren't changed), even if none of mode, size and timestamp of them
212 isn't changed on the filesystem (see also issue4583).
234 isn't changed on the filesystem (see also issue4583).
213
235
214 $ cat > $TESTTMP/abort.py <<EOF
236 $ cat > $TESTTMP/abort.py <<EOF
215 > # emulate aborting before "recordupdates()". in this case, files
237 > # emulate aborting before "recordupdates()". in this case, files
216 > # are changed without updating dirstate
238 > # are changed without updating dirstate
217 > from mercurial import extensions, merge, error
239 > from mercurial import extensions, merge, error
218 > def applyupdates(orig, *args, **kwargs):
240 > def applyupdates(orig, *args, **kwargs):
219 > orig(*args, **kwargs)
241 > orig(*args, **kwargs)
220 > raise error.Abort('intentional aborting')
242 > raise error.Abort('intentional aborting')
221 > def extsetup(ui):
243 > def extsetup(ui):
222 > extensions.wrapfunction(merge, "applyupdates", applyupdates)
244 > extensions.wrapfunction(merge, "applyupdates", applyupdates)
223 > EOF
245 > EOF
224
246
225 $ cat >> .hg/hgrc <<EOF
247 $ cat >> .hg/hgrc <<EOF
226 > [fakedirstatewritetime]
248 > [fakedirstatewritetime]
227 > # emulate invoking dirstate.write() via repo.status()
249 > # emulate invoking dirstate.write() via repo.status()
228 > # at 2000-01-01 00:00
250 > # at 2000-01-01 00:00
229 > fakenow = 200001010000
251 > fakenow = 200001010000
230 > EOF
252 > EOF
231
253
232 (file gotten from other revision)
254 (file gotten from other revision)
233
255
234 $ hg update -q -C 2
256 $ hg update -q -C 2
235 $ echo 'THIS IS FILE B5' > b
257 $ echo 'THIS IS FILE B5' > b
236 $ hg commit -m 'commit #5'
258 $ hg commit -m 'commit #5'
237
259
238 $ hg update -q -C 3
260 $ hg update -q -C 3
239 $ cat b
261 $ cat b
240 This is file b1
262 This is file b1
241 $ touch -t 200001010000 b
263 $ touch -t 200001010000 b
242 $ hg debugrebuildstate
264 $ hg debugrebuildstate
243
265
244 $ cat >> .hg/hgrc <<EOF
266 $ cat >> .hg/hgrc <<EOF
245 > [extensions]
267 > [extensions]
246 > fakedirstatewritetime = $TESTDIR/fakedirstatewritetime.py
268 > fakedirstatewritetime = $TESTDIR/fakedirstatewritetime.py
247 > abort = $TESTTMP/abort.py
269 > abort = $TESTTMP/abort.py
248 > EOF
270 > EOF
249 $ hg merge 5
271 $ hg merge 5
250 abort: intentional aborting
272 abort: intentional aborting
251 [255]
273 [255]
252 $ cat >> .hg/hgrc <<EOF
274 $ cat >> .hg/hgrc <<EOF
253 > [extensions]
275 > [extensions]
254 > fakedirstatewritetime = !
276 > fakedirstatewritetime = !
255 > abort = !
277 > abort = !
256 > EOF
278 > EOF
257
279
258 $ cat b
280 $ cat b
259 THIS IS FILE B5
281 THIS IS FILE B5
260 $ touch -t 200001010000 b
282 $ touch -t 200001010000 b
261 $ hg status -A b
283 $ hg status -A b
262 M b
284 M b
263
285
264 (file merged from other revision)
286 (file merged from other revision)
265
287
266 $ hg update -q -C 3
288 $ hg update -q -C 3
267 $ echo 'this is file b6' > b
289 $ echo 'this is file b6' > b
268 $ hg commit -m 'commit #6'
290 $ hg commit -m 'commit #6'
269 created new head
291 created new head
270
292
271 $ cat b
293 $ cat b
272 this is file b6
294 this is file b6
273 $ touch -t 200001010000 b
295 $ touch -t 200001010000 b
274 $ hg debugrebuildstate
296 $ hg debugrebuildstate
275
297
276 $ cat >> .hg/hgrc <<EOF
298 $ cat >> .hg/hgrc <<EOF
277 > [extensions]
299 > [extensions]
278 > fakedirstatewritetime = $TESTDIR/fakedirstatewritetime.py
300 > fakedirstatewritetime = $TESTDIR/fakedirstatewritetime.py
279 > abort = $TESTTMP/abort.py
301 > abort = $TESTTMP/abort.py
280 > EOF
302 > EOF
281 $ hg merge --tool internal:other 5
303 $ hg merge --tool internal:other 5
282 abort: intentional aborting
304 abort: intentional aborting
283 [255]
305 [255]
284 $ cat >> .hg/hgrc <<EOF
306 $ cat >> .hg/hgrc <<EOF
285 > [extensions]
307 > [extensions]
286 > fakedirstatewritetime = !
308 > fakedirstatewritetime = !
287 > abort = !
309 > abort = !
288 > EOF
310 > EOF
289
311
290 $ cat b
312 $ cat b
291 THIS IS FILE B5
313 THIS IS FILE B5
292 $ touch -t 200001010000 b
314 $ touch -t 200001010000 b
293 $ hg status -A b
315 $ hg status -A b
294 M b
316 M b
295
317
296 $ cd ..
318 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now