manifest: transpose pair of pairs from diff()...
Martin von Zweigbergk
r22966:ff93aa00 default
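Summary of the change before the file diffs: the commit alters the value layout of manifestdict.diff(). Previously each changed file mapped to ((n1, n2), (fl1, fl2)), grouping the two nodeids together and the two flag strings together; after this change it maps to ((n1, fl1), (n2, fl2)), so each side of the comparison carries its own (nodeid, flags) pair. A minimal, self-contained sketch of the two layouts (plain dicts stand in for manifests; the helper name and sample values are illustrative, not Mercurial APIs):

def diff_layouts(m1, m2, flags1, flags2):
    # m1/m2 map filename -> nodeid, flags1/flags2 map filename -> flag string
    old, new = {}, {}
    for fn in set(m1) | set(m2):
        n1, n2 = m1.get(fn), m2.get(fn)
        fl1, fl2 = flags1.get(fn, ''), flags2.get(fn, '')
        if n1 != n2 or fl1 != fl2:
            old[fn] = ((n1, n2), (fl1, fl2))   # layout before this commit
            new[fn] = ((n1, fl1), (n2, fl2))   # layout after this commit
    return old, new

old, new = diff_layouts({'a': 'n1', 'b': 'n2'}, {'b': 'n3', 'c': 'n4'},
                        {'b': 'x'}, {})
# callers now unpack one (nodeid, flags) pair per manifest:
for fn, ((n1, fl1), (n2, fl2)) in sorted(new.items()):
    print(fn, (n1, fl1), '->', (n2, fl2))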
@@ -1,268 +1,268 @@
1 # manifest.py - manifest revision class for mercurial
1 # manifest.py - manifest revision class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 import mdiff, parsers, error, revlog, util
9 import mdiff, parsers, error, revlog, util
10 import array, struct
10 import array, struct
11
11
12 class manifestdict(dict):
12 class manifestdict(dict):
13 def __init__(self, mapping=None, flags=None):
13 def __init__(self, mapping=None, flags=None):
14 if mapping is None:
14 if mapping is None:
15 mapping = {}
15 mapping = {}
16 if flags is None:
16 if flags is None:
17 flags = {}
17 flags = {}
18 dict.__init__(self, mapping)
18 dict.__init__(self, mapping)
19 self._flags = flags
19 self._flags = flags
20 def flags(self, f):
20 def flags(self, f):
21 return self._flags.get(f, "")
21 return self._flags.get(f, "")
22 def withflags(self):
22 def withflags(self):
23 return set(self._flags.keys())
23 return set(self._flags.keys())
24 def setflag(self, f, flags):
24 def setflag(self, f, flags):
25 """Set the flags (symlink, executable) for path f."""
25 """Set the flags (symlink, executable) for path f."""
26 self._flags[f] = flags
26 self._flags[f] = flags
27 def copy(self):
27 def copy(self):
28 return manifestdict(self, dict.copy(self._flags))
28 return manifestdict(self, dict.copy(self._flags))
29 def intersectfiles(self, files):
29 def intersectfiles(self, files):
30 '''make a new manifestdict with the intersection of self with files
30 '''make a new manifestdict with the intersection of self with files
31
31
32 The algorithm assumes that files is much smaller than self.'''
32 The algorithm assumes that files is much smaller than self.'''
33 ret = manifestdict()
33 ret = manifestdict()
34 for fn in files:
34 for fn in files:
35 if fn in self:
35 if fn in self:
36 ret[fn] = self[fn]
36 ret[fn] = self[fn]
37 flags = self._flags.get(fn, None)
37 flags = self._flags.get(fn, None)
38 if flags:
38 if flags:
39 ret._flags[fn] = flags
39 ret._flags[fn] = flags
40 return ret
40 return ret
41
41
42 def diff(self, m2):
42 def diff(self, m2):
43 '''Finds changes between the current manifest and m2. The result is
43 '''Finds changes between the current manifest and m2. The result is
44 returned as a dict with filename as key and values of the form
44 returned as a dict with filename as key and values of the form
45 ((n1,n2),(fl1,fl2)), where n1/n2 is the nodeid in the current/other
45 ((n1,fl1),(n2,fl2)), where n1/n2 is the nodeid in the current/other
46 manifest and fl1/fl2 is the flag in the current/other manifest. Where
46 manifest and fl1/fl2 is the flag in the current/other manifest. Where
47 the file does not exist, the nodeid will be None and the flags will be
47 the file does not exist, the nodeid will be None and the flags will be
48 the empty string.'''
48 the empty string.'''
49 diff = {}
49 diff = {}
50
50
51 for fn, n1 in self.iteritems():
51 for fn, n1 in self.iteritems():
52 fl1 = self._flags.get(fn, '')
52 fl1 = self._flags.get(fn, '')
53 n2 = m2.get(fn, None)
53 n2 = m2.get(fn, None)
54 fl2 = m2._flags.get(fn, '')
54 fl2 = m2._flags.get(fn, '')
55 if n2 is None:
55 if n2 is None:
56 fl2 = ''
56 fl2 = ''
57 if n1 != n2 or fl1 != fl2:
57 if n1 != n2 or fl1 != fl2:
58 diff[fn] = ((n1, n2), (fl1, fl2))
58 diff[fn] = ((n1, fl1), (n2, fl2))
59
59
60 for fn, n2 in m2.iteritems():
60 for fn, n2 in m2.iteritems():
61 if fn not in self:
61 if fn not in self:
62 fl2 = m2._flags.get(fn, '')
62 fl2 = m2._flags.get(fn, '')
63 diff[fn] = ((None, n2), ('', fl2))
63 diff[fn] = ((None, ''), (n2, fl2))
64
64
65 return diff
65 return diff
66
66
67 def text(self):
67 def text(self):
68 """Get the full data of this manifest as a bytestring."""
68 """Get the full data of this manifest as a bytestring."""
69 fl = sorted(self)
69 fl = sorted(self)
70 _checkforbidden(fl)
70 _checkforbidden(fl)
71
71
72 hex, flags = revlog.hex, self.flags
72 hex, flags = revlog.hex, self.flags
73 # if this is changed to support newlines in filenames,
73 # if this is changed to support newlines in filenames,
74 # be sure to check the templates/ dir again (especially *-raw.tmpl)
74 # be sure to check the templates/ dir again (especially *-raw.tmpl)
75 return ''.join("%s\0%s%s\n" % (f, hex(self[f]), flags(f)) for f in fl)
75 return ''.join("%s\0%s%s\n" % (f, hex(self[f]), flags(f)) for f in fl)
76
76
77 def fastdelta(self, base, changes):
77 def fastdelta(self, base, changes):
78 """Given a base manifest text as an array.array and a list of changes
78 """Given a base manifest text as an array.array and a list of changes
79 relative to that text, compute a delta that can be used by revlog.
79 relative to that text, compute a delta that can be used by revlog.
80 """
80 """
81 delta = []
81 delta = []
82 dstart = None
82 dstart = None
83 dend = None
83 dend = None
84 dline = [""]
84 dline = [""]
85 start = 0
85 start = 0
86 # zero copy representation of base as a buffer
86 # zero copy representation of base as a buffer
87 addbuf = util.buffer(base)
87 addbuf = util.buffer(base)
88
88
89 # start with a readonly loop that finds the offset of
89 # start with a readonly loop that finds the offset of
90 # each line and creates the deltas
90 # each line and creates the deltas
91 for f, todelete in changes:
91 for f, todelete in changes:
92 # bs will either be the index of the item or the insert point
92 # bs will either be the index of the item or the insert point
93 start, end = _msearch(addbuf, f, start)
93 start, end = _msearch(addbuf, f, start)
94 if not todelete:
94 if not todelete:
95 l = "%s\0%s%s\n" % (f, revlog.hex(self[f]), self.flags(f))
95 l = "%s\0%s%s\n" % (f, revlog.hex(self[f]), self.flags(f))
96 else:
96 else:
97 if start == end:
97 if start == end:
98 # item we want to delete was not found, error out
98 # item we want to delete was not found, error out
99 raise AssertionError(
99 raise AssertionError(
100 _("failed to remove %s from manifest") % f)
100 _("failed to remove %s from manifest") % f)
101 l = ""
101 l = ""
102 if dstart is not None and dstart <= start and dend >= start:
102 if dstart is not None and dstart <= start and dend >= start:
103 if dend < end:
103 if dend < end:
104 dend = end
104 dend = end
105 if l:
105 if l:
106 dline.append(l)
106 dline.append(l)
107 else:
107 else:
108 if dstart is not None:
108 if dstart is not None:
109 delta.append([dstart, dend, "".join(dline)])
109 delta.append([dstart, dend, "".join(dline)])
110 dstart = start
110 dstart = start
111 dend = end
111 dend = end
112 dline = [l]
112 dline = [l]
113
113
114 if dstart is not None:
114 if dstart is not None:
115 delta.append([dstart, dend, "".join(dline)])
115 delta.append([dstart, dend, "".join(dline)])
116 # apply the delta to the base, and get a delta for addrevision
116 # apply the delta to the base, and get a delta for addrevision
117 deltatext, arraytext = _addlistdelta(base, delta)
117 deltatext, arraytext = _addlistdelta(base, delta)
118 return arraytext, deltatext
118 return arraytext, deltatext
119
119
120 def _msearch(m, s, lo=0, hi=None):
120 def _msearch(m, s, lo=0, hi=None):
121 '''return a tuple (start, end) that says where to find s within m.
121 '''return a tuple (start, end) that says where to find s within m.
122
122
123 If the string is found m[start:end] are the line containing
123 If the string is found m[start:end] are the line containing
124 that string. If start == end the string was not found and
124 that string. If start == end the string was not found and
125 they indicate the proper sorted insertion point.
125 they indicate the proper sorted insertion point.
126
126
127 m should be a buffer or a string
127 m should be a buffer or a string
128 s is a string'''
128 s is a string'''
129 def advance(i, c):
129 def advance(i, c):
130 while i < lenm and m[i] != c:
130 while i < lenm and m[i] != c:
131 i += 1
131 i += 1
132 return i
132 return i
133 if not s:
133 if not s:
134 return (lo, lo)
134 return (lo, lo)
135 lenm = len(m)
135 lenm = len(m)
136 if not hi:
136 if not hi:
137 hi = lenm
137 hi = lenm
138 while lo < hi:
138 while lo < hi:
139 mid = (lo + hi) // 2
139 mid = (lo + hi) // 2
140 start = mid
140 start = mid
141 while start > 0 and m[start - 1] != '\n':
141 while start > 0 and m[start - 1] != '\n':
142 start -= 1
142 start -= 1
143 end = advance(start, '\0')
143 end = advance(start, '\0')
144 if m[start:end] < s:
144 if m[start:end] < s:
145 # we know that after the null there are 40 bytes of sha1
145 # we know that after the null there are 40 bytes of sha1
146 # this translates to the bisect lo = mid + 1
146 # this translates to the bisect lo = mid + 1
147 lo = advance(end + 40, '\n') + 1
147 lo = advance(end + 40, '\n') + 1
148 else:
148 else:
149 # this translates to the bisect hi = mid
149 # this translates to the bisect hi = mid
150 hi = start
150 hi = start
151 end = advance(lo, '\0')
151 end = advance(lo, '\0')
152 found = m[lo:end]
152 found = m[lo:end]
153 if s == found:
153 if s == found:
154 # we know that after the null there are 40 bytes of sha1
154 # we know that after the null there are 40 bytes of sha1
155 end = advance(end + 40, '\n')
155 end = advance(end + 40, '\n')
156 return (lo, end + 1)
156 return (lo, end + 1)
157 else:
157 else:
158 return (lo, lo)
158 return (lo, lo)
159
159
160 def _checkforbidden(l):
160 def _checkforbidden(l):
161 """Check filenames for illegal characters."""
161 """Check filenames for illegal characters."""
162 for f in l:
162 for f in l:
163 if '\n' in f or '\r' in f:
163 if '\n' in f or '\r' in f:
164 raise error.RevlogError(
164 raise error.RevlogError(
165 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
165 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
166
166
167
167
168 # apply the changes collected during the bisect loop to our addlist
168 # apply the changes collected during the bisect loop to our addlist
169 # return a delta suitable for addrevision
169 # return a delta suitable for addrevision
170 def _addlistdelta(addlist, x):
170 def _addlistdelta(addlist, x):
171 # for large addlist arrays, building a new array is cheaper
171 # for large addlist arrays, building a new array is cheaper
172 # than repeatedly modifying the existing one
172 # than repeatedly modifying the existing one
173 currentposition = 0
173 currentposition = 0
174 newaddlist = array.array('c')
174 newaddlist = array.array('c')
175
175
176 for start, end, content in x:
176 for start, end, content in x:
177 newaddlist += addlist[currentposition:start]
177 newaddlist += addlist[currentposition:start]
178 if content:
178 if content:
179 newaddlist += array.array('c', content)
179 newaddlist += array.array('c', content)
180
180
181 currentposition = end
181 currentposition = end
182
182
183 newaddlist += addlist[currentposition:]
183 newaddlist += addlist[currentposition:]
184
184
185 deltatext = "".join(struct.pack(">lll", start, end, len(content))
185 deltatext = "".join(struct.pack(">lll", start, end, len(content))
186 + content for start, end, content in x)
186 + content for start, end, content in x)
187 return deltatext, newaddlist
187 return deltatext, newaddlist
188
188
189 def _parse(lines):
189 def _parse(lines):
190 mfdict = manifestdict()
190 mfdict = manifestdict()
191 parsers.parse_manifest(mfdict, mfdict._flags, lines)
191 parsers.parse_manifest(mfdict, mfdict._flags, lines)
192 return mfdict
192 return mfdict
193
193
194 class manifest(revlog.revlog):
194 class manifest(revlog.revlog):
195 def __init__(self, opener):
195 def __init__(self, opener):
196 # we expect to deal with not more than four revs at a time,
196 # we expect to deal with not more than four revs at a time,
197 # during a commit --amend
197 # during a commit --amend
198 self._mancache = util.lrucachedict(4)
198 self._mancache = util.lrucachedict(4)
199 revlog.revlog.__init__(self, opener, "00manifest.i")
199 revlog.revlog.__init__(self, opener, "00manifest.i")
200
200
201 def readdelta(self, node):
201 def readdelta(self, node):
202 r = self.rev(node)
202 r = self.rev(node)
203 return _parse(mdiff.patchtext(self.revdiff(self.deltaparent(r), r)))
203 return _parse(mdiff.patchtext(self.revdiff(self.deltaparent(r), r)))
204
204
205 def readfast(self, node):
205 def readfast(self, node):
206 '''use the faster of readdelta or read'''
206 '''use the faster of readdelta or read'''
207 r = self.rev(node)
207 r = self.rev(node)
208 deltaparent = self.deltaparent(r)
208 deltaparent = self.deltaparent(r)
209 if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r):
209 if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r):
210 return self.readdelta(node)
210 return self.readdelta(node)
211 return self.read(node)
211 return self.read(node)
212
212
213 def read(self, node):
213 def read(self, node):
214 if node == revlog.nullid:
214 if node == revlog.nullid:
215 return manifestdict() # don't upset local cache
215 return manifestdict() # don't upset local cache
216 if node in self._mancache:
216 if node in self._mancache:
217 return self._mancache[node][0]
217 return self._mancache[node][0]
218 text = self.revision(node)
218 text = self.revision(node)
219 arraytext = array.array('c', text)
219 arraytext = array.array('c', text)
220 mapping = _parse(text)
220 mapping = _parse(text)
221 self._mancache[node] = (mapping, arraytext)
221 self._mancache[node] = (mapping, arraytext)
222 return mapping
222 return mapping
223
223
224 def find(self, node, f):
224 def find(self, node, f):
225 '''look up entry for a single file efficiently.
225 '''look up entry for a single file efficiently.
226 return (node, flags) pair if found, (None, None) if not.'''
226 return (node, flags) pair if found, (None, None) if not.'''
227 if node in self._mancache:
227 if node in self._mancache:
228 mapping = self._mancache[node][0]
228 mapping = self._mancache[node][0]
229 return mapping.get(f), mapping.flags(f)
229 return mapping.get(f), mapping.flags(f)
230 text = self.revision(node)
230 text = self.revision(node)
231 start, end = _msearch(text, f)
231 start, end = _msearch(text, f)
232 if start == end:
232 if start == end:
233 return None, None
233 return None, None
234 l = text[start:end]
234 l = text[start:end]
235 f, n = l.split('\0')
235 f, n = l.split('\0')
236 return revlog.bin(n[:40]), n[40:-1]
236 return revlog.bin(n[:40]), n[40:-1]
237
237
238 def add(self, map, transaction, link, p1, p2, added, removed):
238 def add(self, map, transaction, link, p1, p2, added, removed):
239 if p1 in self._mancache:
239 if p1 in self._mancache:
240 # If our first parent is in the manifest cache, we can
240 # If our first parent is in the manifest cache, we can
241 # compute a delta here using properties we know about the
241 # compute a delta here using properties we know about the
242 # manifest up-front, which may save time later for the
242 # manifest up-front, which may save time later for the
243 # revlog layer.
243 # revlog layer.
244
244
245 _checkforbidden(added)
245 _checkforbidden(added)
246 # combine the changed lists into one list for sorting
246 # combine the changed lists into one list for sorting
247 work = [(x, False) for x in added]
247 work = [(x, False) for x in added]
248 work.extend((x, True) for x in removed)
248 work.extend((x, True) for x in removed)
249 # this could use heapq.merge() (from Python 2.6+) or equivalent
249 # this could use heapq.merge() (from Python 2.6+) or equivalent
250 # since the lists are already sorted
250 # since the lists are already sorted
251 work.sort()
251 work.sort()
252
252
253 arraytext, deltatext = map.fastdelta(self._mancache[p1][1], work)
253 arraytext, deltatext = map.fastdelta(self._mancache[p1][1], work)
254 cachedelta = self.rev(p1), deltatext
254 cachedelta = self.rev(p1), deltatext
255 text = util.buffer(arraytext)
255 text = util.buffer(arraytext)
256 else:
256 else:
257 # The first parent manifest isn't already loaded, so we'll
257 # The first parent manifest isn't already loaded, so we'll
258 # just encode a fulltext of the manifest and pass that
258 # just encode a fulltext of the manifest and pass that
259 # through to the revlog layer, and let it handle the delta
259 # through to the revlog layer, and let it handle the delta
260 # process.
260 # process.
261 text = map.text()
261 text = map.text()
262 arraytext = array.array('c', text)
262 arraytext = array.array('c', text)
263 cachedelta = None
263 cachedelta = None
264
264
265 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
265 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
266 self._mancache[n] = (map, arraytext)
266 self._mancache[n] = (map, arraytext)
267
267
268 return n
268 return n
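Between the two file diffs: the second hunk updates the consumer in merge.py, manifestmerge(), whose loop over m1.diff(m2) now unpacks ((n1, fl1), (n2, fl2)) directly. With the transposed layout a missing entry on either side is uniformly the pair (None, ''), so a caller can classify entries without consulting the flags dict separately. A hedged sketch of that consumption pattern (not code from the patch; the sample nodeids are made up):

def classify(diffdict):
    """Split a diff()-style mapping into added / removed / modified names."""
    added, removed, modified = [], [], []
    for fn, ((n1, fl1), (n2, fl2)) in sorted(diffdict.items()):
        if n1 is None:
            added.append(fn)      # present only in the other manifest
        elif n2 is None:
            removed.append(fn)    # present only in the current manifest
        else:
            modified.append(fn)   # nodeid or flags differ between the two
    return added, removed, modified

sample = {
    'new.txt':  ((None, ''), ('aa' * 20, '')),
    'gone.txt': (('bb' * 20, ''), (None, '')),
    'mode.sh':  (('cc' * 20, ''), ('cc' * 20, 'x')),
}
print(classify(sample))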
@@ -1,1160 +1,1160 @@
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import struct
8 import struct
9
9
10 from node import nullid, nullrev, hex, bin
10 from node import nullid, nullrev, hex, bin
11 from i18n import _
11 from i18n import _
12 from mercurial import obsolete
12 from mercurial import obsolete
13 import error as errormod, util, filemerge, copies, subrepo, worker
13 import error as errormod, util, filemerge, copies, subrepo, worker
14 import errno, os, shutil
14 import errno, os, shutil
15
15
16 _pack = struct.pack
16 _pack = struct.pack
17 _unpack = struct.unpack
17 _unpack = struct.unpack
18
18
19 def _droponode(data):
19 def _droponode(data):
20 # used for compatibility for v1
20 # used for compatibility for v1
21 bits = data.split("\0")
21 bits = data.split("\0")
22 bits = bits[:-2] + bits[-1:]
22 bits = bits[:-2] + bits[-1:]
23 return "\0".join(bits)
23 return "\0".join(bits)
24
24
25 class mergestate(object):
25 class mergestate(object):
26 '''track 3-way merge state of individual files
26 '''track 3-way merge state of individual files
27
27
28 it is stored on disk when needed. Two file are used, one with an old
28 it is stored on disk when needed. Two file are used, one with an old
29 format, one with a new format. Both contains similar data, but the new
29 format, one with a new format. Both contains similar data, but the new
30 format can store new kind of field.
30 format can store new kind of field.
31
31
32 Current new format is a list of arbitrary record of the form:
32 Current new format is a list of arbitrary record of the form:
33
33
34 [type][length][content]
34 [type][length][content]
35
35
36 Type is a single character, length is a 4 bytes integer, content is an
36 Type is a single character, length is a 4 bytes integer, content is an
37 arbitrary suites of bytes of length `length`.
37 arbitrary suites of bytes of length `length`.
38
38
39 Type should be a letter. Capital letter are mandatory record, Mercurial
39 Type should be a letter. Capital letter are mandatory record, Mercurial
40 should abort if they are unknown. lower case record can be safely ignored.
40 should abort if they are unknown. lower case record can be safely ignored.
41
41
42 Currently known record:
42 Currently known record:
43
43
44 L: the node of the "local" part of the merge (hexified version)
44 L: the node of the "local" part of the merge (hexified version)
45 O: the node of the "other" part of the merge (hexified version)
45 O: the node of the "other" part of the merge (hexified version)
46 F: a file to be merged entry
46 F: a file to be merged entry
47 '''
47 '''
48 statepathv1 = "merge/state"
48 statepathv1 = "merge/state"
49 statepathv2 = "merge/state2"
49 statepathv2 = "merge/state2"
50
50
51 def __init__(self, repo):
51 def __init__(self, repo):
52 self._repo = repo
52 self._repo = repo
53 self._dirty = False
53 self._dirty = False
54 self._read()
54 self._read()
55
55
56 def reset(self, node=None, other=None):
56 def reset(self, node=None, other=None):
57 self._state = {}
57 self._state = {}
58 self._local = None
58 self._local = None
59 self._other = None
59 self._other = None
60 if node:
60 if node:
61 self._local = node
61 self._local = node
62 self._other = other
62 self._other = other
63 shutil.rmtree(self._repo.join("merge"), True)
63 shutil.rmtree(self._repo.join("merge"), True)
64 self._dirty = False
64 self._dirty = False
65
65
66 def _read(self):
66 def _read(self):
67 """Analyse each record content to restore a serialized state from disk
67 """Analyse each record content to restore a serialized state from disk
68
68
69 This function process "record" entry produced by the de-serialization
69 This function process "record" entry produced by the de-serialization
70 of on disk file.
70 of on disk file.
71 """
71 """
72 self._state = {}
72 self._state = {}
73 self._local = None
73 self._local = None
74 self._other = None
74 self._other = None
75 records = self._readrecords()
75 records = self._readrecords()
76 for rtype, record in records:
76 for rtype, record in records:
77 if rtype == 'L':
77 if rtype == 'L':
78 self._local = bin(record)
78 self._local = bin(record)
79 elif rtype == 'O':
79 elif rtype == 'O':
80 self._other = bin(record)
80 self._other = bin(record)
81 elif rtype == "F":
81 elif rtype == "F":
82 bits = record.split("\0")
82 bits = record.split("\0")
83 self._state[bits[0]] = bits[1:]
83 self._state[bits[0]] = bits[1:]
84 elif not rtype.islower():
84 elif not rtype.islower():
85 raise util.Abort(_('unsupported merge state record: %s')
85 raise util.Abort(_('unsupported merge state record: %s')
86 % rtype)
86 % rtype)
87 self._dirty = False
87 self._dirty = False
88
88
89 def _readrecords(self):
89 def _readrecords(self):
90 """Read merge state from disk and return a list of record (TYPE, data)
90 """Read merge state from disk and return a list of record (TYPE, data)
91
91
92 We read data from both v1 and v2 files and decide which one to use.
92 We read data from both v1 and v2 files and decide which one to use.
93
93
94 V1 has been used by version prior to 2.9.1 and contains less data than
94 V1 has been used by version prior to 2.9.1 and contains less data than
95 v2. We read both versions and check if no data in v2 contradicts
95 v2. We read both versions and check if no data in v2 contradicts
96 v1. If there is not contradiction we can safely assume that both v1
96 v1. If there is not contradiction we can safely assume that both v1
97 and v2 were written at the same time and use the extract data in v2. If
97 and v2 were written at the same time and use the extract data in v2. If
98 there is contradiction we ignore v2 content as we assume an old version
98 there is contradiction we ignore v2 content as we assume an old version
99 of Mercurial has overwritten the mergestate file and left an old v2
99 of Mercurial has overwritten the mergestate file and left an old v2
100 file around.
100 file around.
101
101
102 returns list of record [(TYPE, data), ...]"""
102 returns list of record [(TYPE, data), ...]"""
103 v1records = self._readrecordsv1()
103 v1records = self._readrecordsv1()
104 v2records = self._readrecordsv2()
104 v2records = self._readrecordsv2()
105 oldv2 = set() # old format version of v2 record
105 oldv2 = set() # old format version of v2 record
106 for rec in v2records:
106 for rec in v2records:
107 if rec[0] == 'L':
107 if rec[0] == 'L':
108 oldv2.add(rec)
108 oldv2.add(rec)
109 elif rec[0] == 'F':
109 elif rec[0] == 'F':
110 # drop the onode data (not contained in v1)
110 # drop the onode data (not contained in v1)
111 oldv2.add(('F', _droponode(rec[1])))
111 oldv2.add(('F', _droponode(rec[1])))
112 for rec in v1records:
112 for rec in v1records:
113 if rec not in oldv2:
113 if rec not in oldv2:
114 # v1 file is newer than v2 file, use it
114 # v1 file is newer than v2 file, use it
115 # we have to infer the "other" changeset of the merge
115 # we have to infer the "other" changeset of the merge
116 # we cannot do better than that with v1 of the format
116 # we cannot do better than that with v1 of the format
117 mctx = self._repo[None].parents()[-1]
117 mctx = self._repo[None].parents()[-1]
118 v1records.append(('O', mctx.hex()))
118 v1records.append(('O', mctx.hex()))
119 # add place holder "other" file node information
119 # add place holder "other" file node information
120 # nobody is using it yet so we do no need to fetch the data
120 # nobody is using it yet so we do no need to fetch the data
121 # if mctx was wrong `mctx[bits[-2]]` may fails.
121 # if mctx was wrong `mctx[bits[-2]]` may fails.
122 for idx, r in enumerate(v1records):
122 for idx, r in enumerate(v1records):
123 if r[0] == 'F':
123 if r[0] == 'F':
124 bits = r[1].split("\0")
124 bits = r[1].split("\0")
125 bits.insert(-2, '')
125 bits.insert(-2, '')
126 v1records[idx] = (r[0], "\0".join(bits))
126 v1records[idx] = (r[0], "\0".join(bits))
127 return v1records
127 return v1records
128 else:
128 else:
129 return v2records
129 return v2records
130
130
131 def _readrecordsv1(self):
131 def _readrecordsv1(self):
132 """read on disk merge state for version 1 file
132 """read on disk merge state for version 1 file
133
133
134 returns list of record [(TYPE, data), ...]
134 returns list of record [(TYPE, data), ...]
135
135
136 Note: the "F" data from this file are one entry short
136 Note: the "F" data from this file are one entry short
137 (no "other file node" entry)
137 (no "other file node" entry)
138 """
138 """
139 records = []
139 records = []
140 try:
140 try:
141 f = self._repo.opener(self.statepathv1)
141 f = self._repo.opener(self.statepathv1)
142 for i, l in enumerate(f):
142 for i, l in enumerate(f):
143 if i == 0:
143 if i == 0:
144 records.append(('L', l[:-1]))
144 records.append(('L', l[:-1]))
145 else:
145 else:
146 records.append(('F', l[:-1]))
146 records.append(('F', l[:-1]))
147 f.close()
147 f.close()
148 except IOError, err:
148 except IOError, err:
149 if err.errno != errno.ENOENT:
149 if err.errno != errno.ENOENT:
150 raise
150 raise
151 return records
151 return records
152
152
153 def _readrecordsv2(self):
153 def _readrecordsv2(self):
154 """read on disk merge state for version 2 file
154 """read on disk merge state for version 2 file
155
155
156 returns list of record [(TYPE, data), ...]
156 returns list of record [(TYPE, data), ...]
157 """
157 """
158 records = []
158 records = []
159 try:
159 try:
160 f = self._repo.opener(self.statepathv2)
160 f = self._repo.opener(self.statepathv2)
161 data = f.read()
161 data = f.read()
162 off = 0
162 off = 0
163 end = len(data)
163 end = len(data)
164 while off < end:
164 while off < end:
165 rtype = data[off]
165 rtype = data[off]
166 off += 1
166 off += 1
167 length = _unpack('>I', data[off:(off + 4)])[0]
167 length = _unpack('>I', data[off:(off + 4)])[0]
168 off += 4
168 off += 4
169 record = data[off:(off + length)]
169 record = data[off:(off + length)]
170 off += length
170 off += length
171 records.append((rtype, record))
171 records.append((rtype, record))
172 f.close()
172 f.close()
173 except IOError, err:
173 except IOError, err:
174 if err.errno != errno.ENOENT:
174 if err.errno != errno.ENOENT:
175 raise
175 raise
176 return records
176 return records
177
177
178 def active(self):
178 def active(self):
179 """Whether mergestate is active.
179 """Whether mergestate is active.
180
180
181 Returns True if there appears to be mergestate. This is a rough proxy
181 Returns True if there appears to be mergestate. This is a rough proxy
182 for "is a merge in progress."
182 for "is a merge in progress."
183 """
183 """
184 # Check local variables before looking at filesystem for performance
184 # Check local variables before looking at filesystem for performance
185 # reasons.
185 # reasons.
186 return bool(self._local) or bool(self._state) or \
186 return bool(self._local) or bool(self._state) or \
187 self._repo.opener.exists(self.statepathv1) or \
187 self._repo.opener.exists(self.statepathv1) or \
188 self._repo.opener.exists(self.statepathv2)
188 self._repo.opener.exists(self.statepathv2)
189
189
190 def commit(self):
190 def commit(self):
191 """Write current state on disk (if necessary)"""
191 """Write current state on disk (if necessary)"""
192 if self._dirty:
192 if self._dirty:
193 records = []
193 records = []
194 records.append(("L", hex(self._local)))
194 records.append(("L", hex(self._local)))
195 records.append(("O", hex(self._other)))
195 records.append(("O", hex(self._other)))
196 for d, v in self._state.iteritems():
196 for d, v in self._state.iteritems():
197 records.append(("F", "\0".join([d] + v)))
197 records.append(("F", "\0".join([d] + v)))
198 self._writerecords(records)
198 self._writerecords(records)
199 self._dirty = False
199 self._dirty = False
200
200
201 def _writerecords(self, records):
201 def _writerecords(self, records):
202 """Write current state on disk (both v1 and v2)"""
202 """Write current state on disk (both v1 and v2)"""
203 self._writerecordsv1(records)
203 self._writerecordsv1(records)
204 self._writerecordsv2(records)
204 self._writerecordsv2(records)
205
205
206 def _writerecordsv1(self, records):
206 def _writerecordsv1(self, records):
207 """Write current state on disk in a version 1 file"""
207 """Write current state on disk in a version 1 file"""
208 f = self._repo.opener(self.statepathv1, "w")
208 f = self._repo.opener(self.statepathv1, "w")
209 irecords = iter(records)
209 irecords = iter(records)
210 lrecords = irecords.next()
210 lrecords = irecords.next()
211 assert lrecords[0] == 'L'
211 assert lrecords[0] == 'L'
212 f.write(hex(self._local) + "\n")
212 f.write(hex(self._local) + "\n")
213 for rtype, data in irecords:
213 for rtype, data in irecords:
214 if rtype == "F":
214 if rtype == "F":
215 f.write("%s\n" % _droponode(data))
215 f.write("%s\n" % _droponode(data))
216 f.close()
216 f.close()
217
217
218 def _writerecordsv2(self, records):
218 def _writerecordsv2(self, records):
219 """Write current state on disk in a version 2 file"""
219 """Write current state on disk in a version 2 file"""
220 f = self._repo.opener(self.statepathv2, "w")
220 f = self._repo.opener(self.statepathv2, "w")
221 for key, data in records:
221 for key, data in records:
222 assert len(key) == 1
222 assert len(key) == 1
223 format = ">sI%is" % len(data)
223 format = ">sI%is" % len(data)
224 f.write(_pack(format, key, len(data), data))
224 f.write(_pack(format, key, len(data), data))
225 f.close()
225 f.close()
226
226
227 def add(self, fcl, fco, fca, fd):
227 def add(self, fcl, fco, fca, fd):
228 """add a new (potentially?) conflicting file the merge state
228 """add a new (potentially?) conflicting file the merge state
229 fcl: file context for local,
229 fcl: file context for local,
230 fco: file context for remote,
230 fco: file context for remote,
231 fca: file context for ancestors,
231 fca: file context for ancestors,
232 fd: file path of the resulting merge.
232 fd: file path of the resulting merge.
233
233
234 note: also write the local version to the `.hg/merge` directory.
234 note: also write the local version to the `.hg/merge` directory.
235 """
235 """
236 hash = util.sha1(fcl.path()).hexdigest()
236 hash = util.sha1(fcl.path()).hexdigest()
237 self._repo.opener.write("merge/" + hash, fcl.data())
237 self._repo.opener.write("merge/" + hash, fcl.data())
238 self._state[fd] = ['u', hash, fcl.path(),
238 self._state[fd] = ['u', hash, fcl.path(),
239 fca.path(), hex(fca.filenode()),
239 fca.path(), hex(fca.filenode()),
240 fco.path(), hex(fco.filenode()),
240 fco.path(), hex(fco.filenode()),
241 fcl.flags()]
241 fcl.flags()]
242 self._dirty = True
242 self._dirty = True
243
243
244 def __contains__(self, dfile):
244 def __contains__(self, dfile):
245 return dfile in self._state
245 return dfile in self._state
246
246
247 def __getitem__(self, dfile):
247 def __getitem__(self, dfile):
248 return self._state[dfile][0]
248 return self._state[dfile][0]
249
249
250 def __iter__(self):
250 def __iter__(self):
251 return iter(sorted(self._state))
251 return iter(sorted(self._state))
252
252
253 def files(self):
253 def files(self):
254 return self._state.keys()
254 return self._state.keys()
255
255
256 def mark(self, dfile, state):
256 def mark(self, dfile, state):
257 self._state[dfile][0] = state
257 self._state[dfile][0] = state
258 self._dirty = True
258 self._dirty = True
259
259
260 def unresolved(self):
260 def unresolved(self):
261 """Obtain the paths of unresolved files."""
261 """Obtain the paths of unresolved files."""
262
262
263 for f, entry in self._state.items():
263 for f, entry in self._state.items():
264 if entry[0] == 'u':
264 if entry[0] == 'u':
265 yield f
265 yield f
266
266
267 def resolve(self, dfile, wctx, labels=None):
267 def resolve(self, dfile, wctx, labels=None):
268 """rerun merge process for file path `dfile`"""
268 """rerun merge process for file path `dfile`"""
269 if self[dfile] == 'r':
269 if self[dfile] == 'r':
270 return 0
270 return 0
271 stateentry = self._state[dfile]
271 stateentry = self._state[dfile]
272 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
272 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
273 octx = self._repo[self._other]
273 octx = self._repo[self._other]
274 fcd = wctx[dfile]
274 fcd = wctx[dfile]
275 fco = octx[ofile]
275 fco = octx[ofile]
276 fca = self._repo.filectx(afile, fileid=anode)
276 fca = self._repo.filectx(afile, fileid=anode)
277 # "premerge" x flags
277 # "premerge" x flags
278 flo = fco.flags()
278 flo = fco.flags()
279 fla = fca.flags()
279 fla = fca.flags()
280 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
280 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
281 if fca.node() == nullid:
281 if fca.node() == nullid:
282 self._repo.ui.warn(_('warning: cannot merge flags for %s\n') %
282 self._repo.ui.warn(_('warning: cannot merge flags for %s\n') %
283 afile)
283 afile)
284 elif flags == fla:
284 elif flags == fla:
285 flags = flo
285 flags = flo
286 # restore local
286 # restore local
287 f = self._repo.opener("merge/" + hash)
287 f = self._repo.opener("merge/" + hash)
288 self._repo.wwrite(dfile, f.read(), flags)
288 self._repo.wwrite(dfile, f.read(), flags)
289 f.close()
289 f.close()
290 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca,
290 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca,
291 labels=labels)
291 labels=labels)
292 if r is None:
292 if r is None:
293 # no real conflict
293 # no real conflict
294 del self._state[dfile]
294 del self._state[dfile]
295 self._dirty = True
295 self._dirty = True
296 elif not r:
296 elif not r:
297 self.mark(dfile, 'r')
297 self.mark(dfile, 'r')
298 return r
298 return r
299
299
300 def _checkunknownfile(repo, wctx, mctx, f):
300 def _checkunknownfile(repo, wctx, mctx, f):
301 return (not repo.dirstate._ignore(f)
301 return (not repo.dirstate._ignore(f)
302 and os.path.isfile(repo.wjoin(f))
302 and os.path.isfile(repo.wjoin(f))
303 and repo.wopener.audit.check(f)
303 and repo.wopener.audit.check(f)
304 and repo.dirstate.normalize(f) not in repo.dirstate
304 and repo.dirstate.normalize(f) not in repo.dirstate
305 and mctx[f].cmp(wctx[f]))
305 and mctx[f].cmp(wctx[f]))
306
306
307 def _checkunknown(repo, wctx, mctx):
307 def _checkunknown(repo, wctx, mctx):
308 "check for collisions between unknown files and files in mctx"
308 "check for collisions between unknown files and files in mctx"
309
309
310 error = False
310 error = False
311 for f in mctx:
311 for f in mctx:
312 if f not in wctx and _checkunknownfile(repo, wctx, mctx, f):
312 if f not in wctx and _checkunknownfile(repo, wctx, mctx, f):
313 error = True
313 error = True
314 wctx._repo.ui.warn(_("%s: untracked file differs\n") % f)
314 wctx._repo.ui.warn(_("%s: untracked file differs\n") % f)
315 if error:
315 if error:
316 raise util.Abort(_("untracked files in working directory differ "
316 raise util.Abort(_("untracked files in working directory differ "
317 "from files in requested revision"))
317 "from files in requested revision"))
318
318
319 def _forgetremoved(wctx, mctx, branchmerge):
319 def _forgetremoved(wctx, mctx, branchmerge):
320 """
320 """
321 Forget removed files
321 Forget removed files
322
322
323 If we're jumping between revisions (as opposed to merging), and if
323 If we're jumping between revisions (as opposed to merging), and if
324 neither the working directory nor the target rev has the file,
324 neither the working directory nor the target rev has the file,
325 then we need to remove it from the dirstate, to prevent the
325 then we need to remove it from the dirstate, to prevent the
326 dirstate from listing the file when it is no longer in the
326 dirstate from listing the file when it is no longer in the
327 manifest.
327 manifest.
328
328
329 If we're merging, and the other revision has removed a file
329 If we're merging, and the other revision has removed a file
330 that is not present in the working directory, we need to mark it
330 that is not present in the working directory, we need to mark it
331 as removed.
331 as removed.
332 """
332 """
333
333
334 ractions = []
334 ractions = []
335 factions = xactions = []
335 factions = xactions = []
336 if branchmerge:
336 if branchmerge:
337 xactions = ractions
337 xactions = ractions
338 for f in wctx.deleted():
338 for f in wctx.deleted():
339 if f not in mctx:
339 if f not in mctx:
340 xactions.append((f, None, "forget deleted"))
340 xactions.append((f, None, "forget deleted"))
341
341
342 if not branchmerge:
342 if not branchmerge:
343 for f in wctx.removed():
343 for f in wctx.removed():
344 if f not in mctx:
344 if f not in mctx:
345 factions.append((f, None, "forget removed"))
345 factions.append((f, None, "forget removed"))
346
346
347 return ractions, factions
347 return ractions, factions
348
348
349 def _checkcollision(repo, wmf, actions):
349 def _checkcollision(repo, wmf, actions):
350 # build provisional merged manifest up
350 # build provisional merged manifest up
351 pmmf = set(wmf)
351 pmmf = set(wmf)
352
352
353 if actions:
353 if actions:
354 # k, dr, e and rd are no-op
354 # k, dr, e and rd are no-op
355 for m in 'a', 'f', 'g', 'cd', 'dc':
355 for m in 'a', 'f', 'g', 'cd', 'dc':
356 for f, args, msg in actions[m]:
356 for f, args, msg in actions[m]:
357 pmmf.add(f)
357 pmmf.add(f)
358 for f, args, msg in actions['r']:
358 for f, args, msg in actions['r']:
359 pmmf.discard(f)
359 pmmf.discard(f)
360 for f, args, msg in actions['dm']:
360 for f, args, msg in actions['dm']:
361 f2, flags = args
361 f2, flags = args
362 pmmf.discard(f2)
362 pmmf.discard(f2)
363 pmmf.add(f)
363 pmmf.add(f)
364 for f, args, msg in actions['dg']:
364 for f, args, msg in actions['dg']:
365 f2, flags = args
365 f2, flags = args
366 pmmf.add(f)
366 pmmf.add(f)
367 for f, args, msg in actions['m']:
367 for f, args, msg in actions['m']:
368 f1, f2, fa, move, anc = args
368 f1, f2, fa, move, anc = args
369 if move:
369 if move:
370 pmmf.discard(f1)
370 pmmf.discard(f1)
371 pmmf.add(f)
371 pmmf.add(f)
372
372
373 # check case-folding collision in provisional merged manifest
373 # check case-folding collision in provisional merged manifest
374 foldmap = {}
374 foldmap = {}
375 for f in sorted(pmmf):
375 for f in sorted(pmmf):
376 fold = util.normcase(f)
376 fold = util.normcase(f)
377 if fold in foldmap:
377 if fold in foldmap:
378 raise util.Abort(_("case-folding collision between %s and %s")
378 raise util.Abort(_("case-folding collision between %s and %s")
379 % (f, foldmap[fold]))
379 % (f, foldmap[fold]))
380 foldmap[fold] = f
380 foldmap[fold] = f
381
381
382 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
382 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
383 acceptremote, followcopies):
383 acceptremote, followcopies):
384 """
384 """
385 Merge p1 and p2 with ancestor pa and generate merge action list
385 Merge p1 and p2 with ancestor pa and generate merge action list
386
386
387 branchmerge and force are as passed in to update
387 branchmerge and force are as passed in to update
388 partial = function to filter file lists
388 partial = function to filter file lists
389 acceptremote = accept the incoming changes without prompting
389 acceptremote = accept the incoming changes without prompting
390 """
390 """
391
391
392 actions = dict((m, []) for m in 'a f g cd dc r dm dg m dr e rd k'.split())
392 actions = dict((m, []) for m in 'a f g cd dc r dm dg m dr e rd k'.split())
393 copy, movewithdir = {}, {}
393 copy, movewithdir = {}, {}
394
394
395 # manifests fetched in order are going to be faster, so prime the caches
395 # manifests fetched in order are going to be faster, so prime the caches
396 [x.manifest() for x in
396 [x.manifest() for x in
397 sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]
397 sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]
398
398
399 if followcopies:
399 if followcopies:
400 ret = copies.mergecopies(repo, wctx, p2, pa)
400 ret = copies.mergecopies(repo, wctx, p2, pa)
401 copy, movewithdir, diverge, renamedelete = ret
401 copy, movewithdir, diverge, renamedelete = ret
402 for of, fl in diverge.iteritems():
402 for of, fl in diverge.iteritems():
403 actions['dr'].append((of, (fl,), "divergent renames"))
403 actions['dr'].append((of, (fl,), "divergent renames"))
404 for of, fl in renamedelete.iteritems():
404 for of, fl in renamedelete.iteritems():
405 actions['rd'].append((of, (fl,), "rename and delete"))
405 actions['rd'].append((of, (fl,), "rename and delete"))
406
406
407 repo.ui.note(_("resolving manifests\n"))
407 repo.ui.note(_("resolving manifests\n"))
408 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
408 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
409 % (bool(branchmerge), bool(force), bool(partial)))
409 % (bool(branchmerge), bool(force), bool(partial)))
410 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
410 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
411
411
412 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
412 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
413 copied = set(copy.values())
413 copied = set(copy.values())
414 copied.update(movewithdir.values())
414 copied.update(movewithdir.values())
415
415
416 if '.hgsubstate' in m1:
416 if '.hgsubstate' in m1:
417 # check whether sub state is modified
417 # check whether sub state is modified
418 for s in sorted(wctx.substate):
418 for s in sorted(wctx.substate):
419 if wctx.sub(s).dirty():
419 if wctx.sub(s).dirty():
420 m1['.hgsubstate'] += "+"
420 m1['.hgsubstate'] += "+"
421 break
421 break
422
422
423 aborts = []
423 aborts = []
424 # Compare manifests
424 # Compare manifests
425 diff = m1.diff(m2)
425 diff = m1.diff(m2)
426
426
427 for f, ((n1, n2), (fl1, fl2)) in diff.iteritems():
427 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
428 if partial and not partial(f):
428 if partial and not partial(f):
429 continue
429 continue
430 if n1 and n2:
430 if n1 and n2:
431 fa = f
431 fa = f
432 a = ma.get(f, nullid)
432 a = ma.get(f, nullid)
433 if a == nullid:
433 if a == nullid:
434 fa = copy.get(f, f)
434 fa = copy.get(f, f)
435 # Note: f as default is wrong - we can't really make a 3-way
435 # Note: f as default is wrong - we can't really make a 3-way
436 # merge without an ancestor file.
436 # merge without an ancestor file.
437 fla = ma.flags(fa)
437 fla = ma.flags(fa)
438 nol = 'l' not in fl1 + fl2 + fla
438 nol = 'l' not in fl1 + fl2 + fla
439 if n2 == a and fl2 == fla:
439 if n2 == a and fl2 == fla:
440 actions['k'].append((f, (), "keep")) # remote unchanged
440 actions['k'].append((f, (), "keep")) # remote unchanged
441 elif n1 == a and fl1 == fla: # local unchanged - use remote
441 elif n1 == a and fl1 == fla: # local unchanged - use remote
442 if n1 == n2: # optimization: keep local content
442 if n1 == n2: # optimization: keep local content
443 actions['e'].append((f, (fl2,), "update permissions"))
443 actions['e'].append((f, (fl2,), "update permissions"))
444 else:
444 else:
445 actions['g'].append((f, (fl2,), "remote is newer"))
445 actions['g'].append((f, (fl2,), "remote is newer"))
446 elif nol and n2 == a: # remote only changed 'x'
446 elif nol and n2 == a: # remote only changed 'x'
447 actions['e'].append((f, (fl2,), "update permissions"))
447 actions['e'].append((f, (fl2,), "update permissions"))
448 elif nol and n1 == a: # local only changed 'x'
448 elif nol and n1 == a: # local only changed 'x'
449 actions['g'].append((f, (fl1,), "remote is newer"))
449 actions['g'].append((f, (fl1,), "remote is newer"))
450 else: # both changed something
450 else: # both changed something
451 actions['m'].append((f, (f, f, fa, False, pa.node()),
451 actions['m'].append((f, (f, f, fa, False, pa.node()),
452 "versions differ"))
452 "versions differ"))
453 elif f in copied: # files we'll deal with on m2 side
453 elif f in copied: # files we'll deal with on m2 side
454 pass
454 pass
455 elif n1 and f in movewithdir: # directory rename, move local
455 elif n1 and f in movewithdir: # directory rename, move local
456 f2 = movewithdir[f]
456 f2 = movewithdir[f]
457 actions['dm'].append((f2, (f, fl1),
457 actions['dm'].append((f2, (f, fl1),
458 "remote directory rename - move from " + f))
458 "remote directory rename - move from " + f))
459 elif n1 and f in copy:
459 elif n1 and f in copy:
460 f2 = copy[f]
460 f2 = copy[f]
461 actions['m'].append((f, (f, f2, f2, False, pa.node()),
461 actions['m'].append((f, (f, f2, f2, False, pa.node()),
462 "local copied/moved from " + f2))
462 "local copied/moved from " + f2))
463 elif n1 and f in ma: # clean, a different, no remote
463 elif n1 and f in ma: # clean, a different, no remote
464 if n1 != ma[f]:
464 if n1 != ma[f]:
465 if acceptremote:
465 if acceptremote:
466 actions['r'].append((f, None, "remote delete"))
466 actions['r'].append((f, None, "remote delete"))
467 else:
467 else:
468 actions['cd'].append((f, None, "prompt changed/deleted"))
468 actions['cd'].append((f, None, "prompt changed/deleted"))
469 elif n1[20:] == "a": # added, no remote
469 elif n1[20:] == "a": # added, no remote
470 actions['f'].append((f, None, "remote deleted"))
470 actions['f'].append((f, None, "remote deleted"))
471 else:
471 else:
472 actions['r'].append((f, None, "other deleted"))
472 actions['r'].append((f, None, "other deleted"))
473 elif n2 and f in movewithdir:
473 elif n2 and f in movewithdir:
474 f2 = movewithdir[f]
474 f2 = movewithdir[f]
475 actions['dg'].append((f2, (f, fl2),
475 actions['dg'].append((f2, (f, fl2),
476 "local directory rename - get from " + f))
476 "local directory rename - get from " + f))
477 elif n2 and f in copy:
477 elif n2 and f in copy:
478 f2 = copy[f]
478 f2 = copy[f]
479 if f2 in m2:
479 if f2 in m2:
480 actions['m'].append((f, (f2, f, f2, False, pa.node()),
480 actions['m'].append((f, (f2, f, f2, False, pa.node()),
481 "remote copied from " + f2))
481 "remote copied from " + f2))
482 else:
482 else:
483 actions['m'].append((f, (f2, f, f2, True, pa.node()),
483 actions['m'].append((f, (f2, f, f2, True, pa.node()),
484 "remote moved from " + f2))
484 "remote moved from " + f2))
485 elif n2 and f not in ma:
485 elif n2 and f not in ma:
486 # local unknown, remote created: the logic is described by the
486 # local unknown, remote created: the logic is described by the
487 # following table:
487 # following table:
488 #
488 #
489 # force branchmerge different | action
489 # force branchmerge different | action
490 # n * n | get
490 # n * n | get
491 # n * y | abort
491 # n * y | abort
492 # y n * | get
492 # y n * | get
493 # y y n | get
493 # y y n | get
494 # y y y | merge
494 # y y y | merge
495 #
495 #
496 # Checking whether the files are different is expensive, so we
496 # Checking whether the files are different is expensive, so we
497 # don't do that when we can avoid it.
497 # don't do that when we can avoid it.
498 if force and not branchmerge:
498 if force and not branchmerge:
499 actions['g'].append((f, (fl2,), "remote created"))
499 actions['g'].append((f, (fl2,), "remote created"))
500 else:
500 else:
501 different = _checkunknownfile(repo, wctx, p2, f)
501 different = _checkunknownfile(repo, wctx, p2, f)
502 if force and branchmerge and different:
502 if force and branchmerge and different:
503 # FIXME: This is wrong - f is not in ma ...
503 # FIXME: This is wrong - f is not in ma ...
504 actions['m'].append((f, (f, f, f, False, pa.node()),
504 actions['m'].append((f, (f, f, f, False, pa.node()),
505 "remote differs from untracked local"))
505 "remote differs from untracked local"))
506 elif not force and different:
506 elif not force and different:
507 aborts.append((f, "ud"))
507 aborts.append((f, "ud"))
508 else:
508 else:
509 actions['g'].append((f, (fl2,), "remote created"))
509 actions['g'].append((f, (fl2,), "remote created"))
510 elif n2 and n2 != ma[f]:
510 elif n2 and n2 != ma[f]:
511 different = _checkunknownfile(repo, wctx, p2, f)
511 different = _checkunknownfile(repo, wctx, p2, f)
512 if not force and different:
512 if not force and different:
513 aborts.append((f, "ud"))
513 aborts.append((f, "ud"))
514 else:
514 else:
515 # if different: old untracked f may be overwritten and lost
515 # if different: old untracked f may be overwritten and lost
516 if acceptremote:
516 if acceptremote:
517 actions['g'].append((f, (m2.flags(f),),
517 actions['g'].append((f, (m2.flags(f),),
518 "remote recreating"))
518 "remote recreating"))
519 else:
519 else:
520 actions['dc'].append((f, (m2.flags(f),),
520 actions['dc'].append((f, (m2.flags(f),),
521 "prompt deleted/changed"))
521 "prompt deleted/changed"))
522
522
523 for f, m in sorted(aborts):
523 for f, m in sorted(aborts):
524 if m == "ud":
524 if m == "ud":
525 repo.ui.warn(_("%s: untracked file differs\n") % f)
525 repo.ui.warn(_("%s: untracked file differs\n") % f)
526 else: assert False, m
526 else: assert False, m
527 if aborts:
527 if aborts:
528 raise util.Abort(_("untracked files in working directory differ "
528 raise util.Abort(_("untracked files in working directory differ "
529 "from files in requested revision"))
529 "from files in requested revision"))
530
530
531 if not util.checkcase(repo.path):
531 if not util.checkcase(repo.path):
532 # check collision between files only in p2 for clean update
532 # check collision between files only in p2 for clean update
533 if (not branchmerge and
533 if (not branchmerge and
534 (force or not wctx.dirty(missing=True, branch=False))):
534 (force or not wctx.dirty(missing=True, branch=False))):
535 _checkcollision(repo, m2, None)
535 _checkcollision(repo, m2, None)
536 else:
536 else:
537 _checkcollision(repo, m1, actions)
537 _checkcollision(repo, m1, actions)
538
538
539 return actions
539 return actions
540
540
541 def batchremove(repo, actions):
541 def batchremove(repo, actions):
542 """apply removes to the working directory
542 """apply removes to the working directory
543
543
544 yields tuples for progress updates
544 yields tuples for progress updates
545 """
545 """
546 verbose = repo.ui.verbose
546 verbose = repo.ui.verbose
547 unlink = util.unlinkpath
547 unlink = util.unlinkpath
548 wjoin = repo.wjoin
548 wjoin = repo.wjoin
549 audit = repo.wopener.audit
549 audit = repo.wopener.audit
550 i = 0
550 i = 0
551 for f, args, msg in actions:
551 for f, args, msg in actions:
552 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
552 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
553 if verbose:
553 if verbose:
554 repo.ui.note(_("removing %s\n") % f)
554 repo.ui.note(_("removing %s\n") % f)
555 audit(f)
555 audit(f)
556 try:
556 try:
557 unlink(wjoin(f), ignoremissing=True)
557 unlink(wjoin(f), ignoremissing=True)
558 except OSError, inst:
558 except OSError, inst:
559 repo.ui.warn(_("update failed to remove %s: %s!\n") %
559 repo.ui.warn(_("update failed to remove %s: %s!\n") %
560 (f, inst.strerror))
560 (f, inst.strerror))
561 if i == 100:
561 if i == 100:
562 yield i, f
562 yield i, f
563 i = 0
563 i = 0
564 i += 1
564 i += 1
565 if i > 0:
565 if i > 0:
566 yield i, f
566 yield i, f
567
567
568 def batchget(repo, mctx, actions):
568 def batchget(repo, mctx, actions):
569 """apply gets to the working directory
569 """apply gets to the working directory
570
570
571 mctx is the context to get from
571 mctx is the context to get from
572
572
573 yields tuples for progress updates
573 yields tuples for progress updates
574 """
574 """
575 verbose = repo.ui.verbose
575 verbose = repo.ui.verbose
576 fctx = mctx.filectx
576 fctx = mctx.filectx
577 wwrite = repo.wwrite
577 wwrite = repo.wwrite
578 i = 0
578 i = 0
579 for f, args, msg in actions:
579 for f, args, msg in actions:
580 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
580 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
581 if verbose:
581 if verbose:
582 repo.ui.note(_("getting %s\n") % f)
582 repo.ui.note(_("getting %s\n") % f)
583 wwrite(f, fctx(f).data(), args[0])
583 wwrite(f, fctx(f).data(), args[0])
584 if i == 100:
584 if i == 100:
585 yield i, f
585 yield i, f
586 i = 0
586 i = 0
587 i += 1
587 i += 1
588 if i > 0:
588 if i > 0:
589 yield i, f
589 yield i, f
590
590
def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy

    Return a tuple of counts (updated, merged, removed, unresolved) that
    describes how many files were affected by the update.
    """

    updated, merged, removed, unresolved = 0, 0, 0, 0
    ms = mergestate(repo)
    ms.reset(wctx.p1().node(), mctx.node())
    moves = []
    for m, l in actions.items():
        l.sort()

    # prescan for merges
    for f, args, msg in actions['m']:
        f1, f2, fa, move, anc = args
        if f == '.hgsubstate': # merged internally
            continue
        repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
        fcl = wctx[f1]
        fco = mctx[f2]
        actx = repo[anc]
        if fa in actx:
            fca = actx[fa]
        else:
            fca = repo.filectx(f1, fileid=nullrev)
        ms.add(fcl, fco, fca, f)
        if f1 != f and move:
            moves.append(f1)

    audit = repo.wopener.audit
    _updating = _('updating')
    _files = _('files')
    progress = repo.ui.progress

    # remove renamed files after safely stored
    for f in moves:
        if os.path.lexists(repo.wjoin(f)):
            repo.ui.debug("removing %s\n" % f)
            audit(f)
            util.unlinkpath(repo.wjoin(f))

    numupdates = sum(len(l) for m, l in actions.items() if m != 'k')

    if [a for a in actions['r'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite)

    # remove in parallel (must come first)
    z = 0
    prog = worker.worker(repo.ui, 0.001, batchremove, (repo,), actions['r'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    removed = len(actions['r'])

    # get in parallel
    prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx), actions['g'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    updated = len(actions['g'])

    if [a for a in actions['g'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite)

    # forget (manifest only, just log it) (must come first)
    for f, args, msg in actions['f']:
        repo.ui.debug(" %s: %s -> f\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add (manifest only, just log it)
    for f, args, msg in actions['a']:
        repo.ui.debug(" %s: %s -> a\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # keep (noop, just log it)
    for f, args, msg in actions['k']:
        repo.ui.debug(" %s: %s -> k\n" % (f, msg))
        # no progress

    # merge
    for f, args, msg in actions['m']:
        repo.ui.debug(" %s: %s -> m\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f1, f2, fa, move, anc = args
        if f == '.hgsubstate': # subrepo states need updating
            subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
                             overwrite)
            continue
        audit(f)
        r = ms.resolve(f, wctx, labels=labels)
        if r is not None and r > 0:
            unresolved += 1
        else:
            if r is None:
                updated += 1
            else:
                merged += 1

    # directory rename, move local
    for f, args, msg in actions['dm']:
        repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("moving %s to %s\n") % (f0, f))
        audit(f)
        repo.wwrite(f, wctx.filectx(f0).data(), flags)
        util.unlinkpath(repo.wjoin(f0))
        updated += 1

    # local directory rename, get
    for f, args, msg in actions['dg']:
        repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("getting %s to %s\n") % (f0, f))
        repo.wwrite(f, mctx.filectx(f0).data(), flags)
        updated += 1

    # divergent renames
    for f, args, msg in actions['dr']:
        repo.ui.debug(" %s: %s -> dr\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        fl, = args
        repo.ui.warn(_("note: possible conflict - %s was renamed "
                       "multiple times to:\n") % f)
        for nf in fl:
            repo.ui.warn(" %s\n" % nf)

    # rename and delete
    for f, args, msg in actions['rd']:
        repo.ui.debug(" %s: %s -> rd\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        fl, = args
        repo.ui.warn(_("note: possible conflict - %s was deleted "
                       "and renamed to:\n") % f)
        for nf in fl:
            repo.ui.warn(" %s\n" % nf)

    # exec
    for f, args, msg in actions['e']:
        repo.ui.debug(" %s: %s -> e\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        flags, = args
        audit(f)
        util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
        updated += 1

    ms.commit()
    progress(_updating, None, total=numupdates, unit=_files)

    return updated, merged, removed, unresolved

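# Illustrative sketch (not part of merge.py) of the 'actions' mapping that
# applyupdates() consumes: each key is a single-letter action code handled by
# the loops above ('r' remove, 'g' get, 'f' forget, 'a' re-add, 'k' keep,
# 'm' merge, 'dm'/'dg' directory renames, 'dr'/'rd' rename warnings, 'e' exec
# flag change), and each value is a list of (filename, args, message) tuples.
# File names, messages and the 'anc' node below are made up for illustration:
#
#   actions = {
#       'r': [('removed.txt', None, "other deleted")],
#       'g': [('added.txt', ('',), "remote created")],
#       'e': [('build.sh', ('x',), "update permissions")],
#       'm': [('shared.txt',
#              ('shared.txt', 'shared.txt', 'shared.txt', False, anc),
#              "versions differ")],
#   }
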
def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force, partial,
                     acceptremote, followcopies):
    "Calculate the actions needed to merge mctx into wctx using ancestors"

    if len(ancestors) == 1: # default
        actions = manifestmerge(repo, wctx, mctx, ancestors[0],
                                branchmerge, force,
                                partial, acceptremote, followcopies)

    else: # only when merge.preferancestor=* - the default
        repo.ui.note(
            _("note: merging %s and %s using bids from ancestors %s\n") %
            (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))

        # Call for bids
        fbids = {} # mapping filename to bids (action method to list of actions)
        for ancestor in ancestors:
            repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
            actions = manifestmerge(repo, wctx, mctx, ancestor,
                                    branchmerge, force,
                                    partial, acceptremote, followcopies)
            for m, l in sorted(actions.items()):
                for a in l:
                    f, args, msg = a
                    repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
                    if f in fbids:
                        d = fbids[f]
                        if m in d:
                            d[m].append(a)
                        else:
                            d[m] = [a]
                    else:
                        fbids[f] = {m: [a]}

        # Pick the best bid for each file
        repo.ui.note(_('\nauction for merging merge bids\n'))
        actions = dict((m, []) for m in actions.keys())
        for f, bids in sorted(fbids.items()):
            # bids is a mapping from action method to list of actions
            # Consensus?
            if len(bids) == 1: # all bids are the same kind of method
                m, l = bids.items()[0]
                if util.all(a == l[0] for a in l[1:]): # len(bids) is > 1
                    repo.ui.note(" %s: consensus for %s\n" % (f, m))
                    actions[m].append(l[0])
                    continue
            # If keep is an option, just do it.
            if "k" in bids:
                repo.ui.note(" %s: picking 'keep' action\n" % f)
                actions['k'].append(bids["k"][0])
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if "g" in bids:
                ga0 = bids["g"][0]
                if util.all(a == ga0 for a in bids["g"][1:]):
                    repo.ui.note(" %s: picking 'get' action\n" % f)
                    actions['g'].append(ga0)
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note(' %s -> %s\n' % (msg, m))
            # Pick random action. TODO: Instead, prompt user when resolving
            m, l = bids.items()[0]
            repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
                         (f, m))
            actions[m].append(l[0])
            continue
        repo.ui.note(_('end of auction\n\n'))

    # Prompt and create actions. TODO: Move this towards resolve phase.
    for f, args, msg in actions['cd']:
        if repo.ui.promptchoice(
            _("local changed %s which remote deleted\n"
              "use (c)hanged version or (d)elete?"
              "$$ &Changed $$ &Delete") % f, 0):
            actions['r'].append((f, None, "prompt delete"))
        else:
            actions['a'].append((f, None, "prompt keep"))
    del actions['cd'][:]

    for f, args, msg in actions['dc']:
        flags, = args
        if repo.ui.promptchoice(
            _("remote changed %s which local deleted\n"
              "use (c)hanged version or leave (d)eleted?"
              "$$ &Changed $$ &Deleted") % f, 0) == 0:
            actions['g'].append((f, (flags,), "prompt recreating"))
    del actions['dc'][:]

    if wctx.rev() is None:
        ractions, factions = _forgetremoved(wctx, mctx, branchmerge)
        actions['r'].extend(ractions)
        actions['f'].extend(factions)

    return actions

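# Illustrative sketch (not part of merge.py) of the bid-merge bookkeeping
# above: with several candidate ancestors, 'fbids' maps each filename to the
# actions ("bids") proposed per ancestor, keyed by action code. The auction
# then prefers consensus, then any 'k' (keep) bid, then unanimous 'g' (get)
# bids, and otherwise picks one arbitrarily. File names, messages and the
# exact tuple contents are made up for illustration:
#
#   fbids = {
#       'a.txt': {'g': [('a.txt', ('',), "remote is newer"),
#                       ('a.txt', ('',), "remote is newer")]},
#       'b.txt': {'k': [('b.txt', None, "keep")],
#                 'g': [('b.txt', ('',), "remote is newer")]},
#   }
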
def recordupdates(repo, actions, branchmerge):
    "record merge actions to the dirstate"
    # remove (must come first)
    for f, args, msg in actions['r']:
        if branchmerge:
            repo.dirstate.remove(f)
        else:
            repo.dirstate.drop(f)

    # forget (must come first)
    for f, args, msg in actions['f']:
        repo.dirstate.drop(f)

    # re-add
    for f, args, msg in actions['a']:
        if not branchmerge:
            repo.dirstate.add(f)

    # exec change
    for f, args, msg in actions['e']:
        repo.dirstate.normallookup(f)

    # keep
    for f, args, msg in actions['k']:
        pass

    # get
    for f, args, msg in actions['g']:
        if branchmerge:
            repo.dirstate.otherparent(f)
        else:
            repo.dirstate.normal(f)

    # merge
    for f, args, msg in actions['m']:
        f1, f2, fa, move, anc = args
        if branchmerge:
            # We've done a branch merge, mark this file as merged
            # so that we properly record the merger later
            repo.dirstate.merge(f)
            if f1 != f2: # copy/rename
                if move:
                    repo.dirstate.remove(f1)
                if f1 != f:
                    repo.dirstate.copy(f1, f)
                else:
                    repo.dirstate.copy(f2, f)
        else:
            # We've update-merged a locally modified file, so
            # we set the dirstate to emulate a normal checkout
            # of that file some time in the past. Thus our
            # merge will appear as a normal local file
            # modification.
            if f2 == f: # file not locally copied/moved
                repo.dirstate.normallookup(f)
            if move:
                repo.dirstate.drop(f1)

    # directory rename, move local
    for f, args, msg in actions['dm']:
        f0, flag = args
        if f0 not in repo.dirstate:
            # untracked file moved
            continue
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.remove(f0)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
            repo.dirstate.drop(f0)

    # directory rename, get
    for f, args, msg in actions['dg']:
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)

def update(repo, node, branchmerge, force, partial, ancestor=None,
           mergeancestor=False, labels=None):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to, or None if unspecified
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    partial = a function to filter file lists (dirstate not updated)
    mergeancestor = whether it is merging with an ancestor. If true,
      we should accept the incoming changes for any prompts that occur.
      If false, merging with an ancestor (fast-forward) is only allowed
      between different named branches. This flag is used by the rebase
      extension as a temporary fix and should be avoided in general.

    The table below shows all the behaviors of the update command
    given the -c and -C or no options, whether the working directory
    is dirty, whether a revision is specified, and the relationship of
    the parent rev to the target rev (linear, on the same named
    branch, or on another named branch).

    This logic is tested by test-update-branches.t.

    -c  -C  dirty  rev  |  linear   same   cross
     n   n    n     n   |    ok     (1)      x
     n   n    n     y   |    ok      ok     ok
     n   n    y     n   |  merge    (2)     (2)
     n   n    y     y   |  merge    (3)     (3)
     n   y    *     *   |   ---   discard   ---
     y   n    y     *   |   ---     (4)     ---
     y   n    n     *   |   ---      ok     ---
     y   y    *     *   |   ---     (5)     ---

    x = can't happen
    * = don't-care
    1 = abort: not a linear update (merge or update --check to force update)
    2 = abort: uncommitted changes (commit and merge, or update --clean to
               discard changes)
    3 = abort: uncommitted changes (commit or update --clean to discard changes)
    4 = abort: uncommitted changes (checked in commands.py)
    5 = incompatible options (checked in commands.py)

    Return the same tuple as applyupdates().
    """

    onode = node
    wlock = repo.wlock()
    try:
        wc = repo[None]
        pl = wc.parents()
        p1 = pl[0]
        pas = [None]
        if ancestor:
            pas = [repo[ancestor]]

        if node is None:
            # Here is where we should consider bookmarks, divergent bookmarks,
            # foreground changesets (successors), and tip of current branch;
            # but currently we are only checking the branch tips.
            try:
                node = repo.branchtip(wc.branch())
            except errormod.RepoLookupError:
                if wc.branch() == "default": # no default branch!
                    node = repo.lookup("tip") # update to tip
                else:
                    raise util.Abort(_("branch %s not found") % wc.branch())

            if p1.obsolete() and not p1.children():
                # allow updating to successors
                successors = obsolete.successorssets(repo, p1.node())

                # behavior of certain cases is as follows,
                #
                # divergent changesets: update to highest rev, similar to what
                #     is currently done when there are more than one head
                #     (i.e. 'tip')
                #
                # replaced changesets: same as divergent except we know there
                #     is no conflict
                #
                # pruned changeset: no update is done; though, we could
                #     consider updating to the first non-obsolete parent,
                #     similar to what is currently done for 'hg prune'

                if successors:
                    # flattening the list here handles both divergent (len > 1)
                    # and the usual case (len = 1)
                    successors = [n for sub in successors for n in sub]

                    # get the max revision for the given successors set,
                    # i.e. the 'tip' of a set
                    node = repo.revs("max(%ln)", successors).first()
                    pas = [p1]

        overwrite = force and not branchmerge

        p2 = repo[node]
        if pas[0] is None:
            if repo.ui.config("merge", "preferancestor", '*') == '*':
                cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
                pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
            else:
                pas = [p1.ancestor(p2, warn=branchmerge)]

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)

        ### check phase
        if not overwrite and len(pl) > 1:
            raise util.Abort(_("outstanding uncommitted merge"))
        if branchmerge:
            if pas == [p2]:
                raise util.Abort(_("merging with a working directory ancestor"
                                   " has no effect"))
            elif pas == [p1]:
                if not mergeancestor and p1.branch() == p2.branch():
                    raise util.Abort(_("nothing to merge"),
                                     hint=_("use 'hg update' "
                                            "or check 'hg heads'"))
            if not force and (wc.files() or wc.deleted()):
                raise util.Abort(_("uncommitted changes"),
                                 hint=_("use 'hg status' to list changes"))
            for s in sorted(wc.substate):
                if wc.sub(s).dirty():
                    raise util.Abort(_("uncommitted changes in "
                                       "subrepository '%s'") % s)

        elif not overwrite:
            if p1 == p2: # no-op update
                # call the hooks and exit early
                repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
                repo.hook('update', parent1=xp2, parent2='', error=0)
                return 0, 0, 0, 0

            if pas not in ([p1], [p2]): # nonlinear
                dirty = wc.dirty(missing=True)
                if dirty or onode is None:
                    # Branching is a bit strange to ensure we do the minimal
                    # amount of call to obsolete.background.
                    foreground = obsolete.foreground(repo, [p1.node()])
                    # note: the <node> variable contains a random identifier
                    if repo[node].node() in foreground:
                        pas = [p1] # allow updating to successors
                    elif dirty:
                        msg = _("uncommitted changes")
                        if onode is None:
                            hint = _("commit and merge, or update --clean to"
                                     " discard changes")
                        else:
                            hint = _("commit or update --clean to discard"
                                     " changes")
                        raise util.Abort(msg, hint=hint)
                    else: # node is none
                        msg = _("not a linear update")
                        hint = _("merge or update --check to force update")
                        raise util.Abort(msg, hint=hint)
                else:
                    # Allow jumping branches if clean and specific rev given
                    pas = [p1]

        followcopies = False
        if overwrite:
            pas = [wc]
        elif pas == [p2]: # backwards
            pas = [wc.p1()]
        elif not branchmerge and not wc.dirty(missing=True):
            pass
        elif pas[0] and repo.ui.configbool("merge", "followcopies", True):
            followcopies = True

        ### calculate phase
        actions = calculateupdates(repo, wc, p2, pas, branchmerge, force,
                                   partial, mergeancestor, followcopies)

        ### apply phase
        if not branchmerge: # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
        if not partial:
            repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
            # note that we're in the middle of an update
            repo.vfs.write('updatestate', p2.hex())

        stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)

        if not partial:
            repo.dirstate.beginparentchange()
            repo.setparents(fp1, fp2)
            recordupdates(repo, actions, branchmerge)
            # update completed, clear state
            util.unlink(repo.join('updatestate'))

            if not branchmerge:
                repo.dirstate.setbranch(p2.branch())
            repo.dirstate.endparentchange()
    finally:
        wlock.release()

    if not partial:
        repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
    return stats

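# Rough correspondence with the command-line table in the docstring above (a
# hedged sketch; the actual call sites live outside this file and may pass
# further arguments):
#
#   hg update [REV]          -> update(...) with branchmerge=False, force=False
#   hg update --clean [REV]  -> update(...) with branchmerge=False, force=True
#   hg merge [REV]           -> update(...) with branchmerge=True,  force=False
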
def graft(repo, ctx, pctx, labels):
    """Do a graft-like merge.

    This is a merge where the merge ancestor is chosen such that one
    or more changesets are grafted onto the current changeset. In
    addition to the merge, this fixes up the dirstate to include only
    a single parent and tries to duplicate any renames/copies
    appropriately.

    ctx - changeset to rebase
    pctx - merge base, usually ctx.p1()
    labels - merge labels eg ['local', 'graft']

    """

    stats = update(repo, ctx.node(), True, True, False, pctx.node(),
                   labels=labels)
    # drop the second merge parent
    repo.dirstate.beginparentchange()
    repo.setparents(repo['.'].node(), nullid)
    repo.dirstate.write()
    # fix up dirstate for copies and renames
    copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
    repo.dirstate.endparentchange()
    return stats
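
# A minimal usage sketch for graft() (hypothetical revision; mirrors the
# docstring above -- merge with the target changeset using its first parent
# as the merge base, then fold the result back onto a single parent):
#
#   ctx = repo['1234abcd']                    # changeset to graft (made up)
#   stats = graft(repo, ctx, ctx.p1(), ['local', 'graft'])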