##// END OF EJS Templates
manifest: repurpose flagsdiff() into (node-and-flag)diff()...
Martin von Zweigbergk -
r22964:2793ecb1 default
parent child Browse files
Show More
@@ -1,245 +1,252
1 # manifest.py - manifest revision class for mercurial
1 # manifest.py - manifest revision class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 import mdiff, parsers, error, revlog, util, dicthelpers
9 import mdiff, parsers, error, revlog, util, dicthelpers
10 import array, struct
10 import array, struct
11
11
12 class manifestdict(dict):
12 class manifestdict(dict):
13 def __init__(self, mapping=None, flags=None):
13 def __init__(self, mapping=None, flags=None):
14 if mapping is None:
14 if mapping is None:
15 mapping = {}
15 mapping = {}
16 if flags is None:
16 if flags is None:
17 flags = {}
17 flags = {}
18 dict.__init__(self, mapping)
18 dict.__init__(self, mapping)
19 self._flags = flags
19 self._flags = flags
20 def flags(self, f):
20 def flags(self, f):
21 return self._flags.get(f, "")
21 return self._flags.get(f, "")
22 def withflags(self):
22 def withflags(self):
23 return set(self._flags.keys())
23 return set(self._flags.keys())
24 def setflag(self, f, flags):
24 def setflag(self, f, flags):
25 """Set the flags (symlink, executable) for path f."""
25 """Set the flags (symlink, executable) for path f."""
26 self._flags[f] = flags
26 self._flags[f] = flags
27 def copy(self):
27 def copy(self):
28 return manifestdict(self, dict.copy(self._flags))
28 return manifestdict(self, dict.copy(self._flags))
29 def intersectfiles(self, files):
29 def intersectfiles(self, files):
30 '''make a new manifestdict with the intersection of self with files
30 '''make a new manifestdict with the intersection of self with files
31
31
32 The algorithm assumes that files is much smaller than self.'''
32 The algorithm assumes that files is much smaller than self.'''
33 ret = manifestdict()
33 ret = manifestdict()
34 for fn in files:
34 for fn in files:
35 if fn in self:
35 if fn in self:
36 ret[fn] = self[fn]
36 ret[fn] = self[fn]
37 flags = self._flags.get(fn, None)
37 flags = self._flags.get(fn, None)
38 if flags:
38 if flags:
39 ret._flags[fn] = flags
39 ret._flags[fn] = flags
40 return ret
40 return ret
41 def flagsdiff(self, d2):
41
42 return dicthelpers.diff(self._flags, d2._flags, "")
42 def diff(self, m2):
43 '''Finds changes between the current manifest and m2. The result is
44 returned as a dict with filename as key and values of the form
45 ((n1,n2),(fl1,fl2)), where n1/n2 is the nodeid in the current/other
46 manifest and fl1/fl2 is the flag in the current/other manifest.'''
47 flagsdiff = dicthelpers.diff(self._flags, m2._flags, "")
48 fdiff = dicthelpers.diff(self, m2)
49 return dicthelpers.join(fdiff, flagsdiff)
43
50
44 def text(self):
51 def text(self):
45 """Get the full data of this manifest as a bytestring."""
52 """Get the full data of this manifest as a bytestring."""
46 fl = sorted(self)
53 fl = sorted(self)
47 _checkforbidden(fl)
54 _checkforbidden(fl)
48
55
49 hex, flags = revlog.hex, self.flags
56 hex, flags = revlog.hex, self.flags
50 # if this is changed to support newlines in filenames,
57 # if this is changed to support newlines in filenames,
51 # be sure to check the templates/ dir again (especially *-raw.tmpl)
58 # be sure to check the templates/ dir again (especially *-raw.tmpl)
52 return ''.join("%s\0%s%s\n" % (f, hex(self[f]), flags(f)) for f in fl)
59 return ''.join("%s\0%s%s\n" % (f, hex(self[f]), flags(f)) for f in fl)
53
60
54 def fastdelta(self, base, changes):
61 def fastdelta(self, base, changes):
55 """Given a base manifest text as an array.array and a list of changes
62 """Given a base manifest text as an array.array and a list of changes
56 relative to that text, compute a delta that can be used by revlog.
63 relative to that text, compute a delta that can be used by revlog.
57 """
64 """
58 delta = []
65 delta = []
59 dstart = None
66 dstart = None
60 dend = None
67 dend = None
61 dline = [""]
68 dline = [""]
62 start = 0
69 start = 0
63 # zero copy representation of base as a buffer
70 # zero copy representation of base as a buffer
64 addbuf = util.buffer(base)
71 addbuf = util.buffer(base)
65
72
66 # start with a readonly loop that finds the offset of
73 # start with a readonly loop that finds the offset of
67 # each line and creates the deltas
74 # each line and creates the deltas
68 for f, todelete in changes:
75 for f, todelete in changes:
69 # bs will either be the index of the item or the insert point
76 # bs will either be the index of the item or the insert point
70 start, end = _msearch(addbuf, f, start)
77 start, end = _msearch(addbuf, f, start)
71 if not todelete:
78 if not todelete:
72 l = "%s\0%s%s\n" % (f, revlog.hex(self[f]), self.flags(f))
79 l = "%s\0%s%s\n" % (f, revlog.hex(self[f]), self.flags(f))
73 else:
80 else:
74 if start == end:
81 if start == end:
75 # item we want to delete was not found, error out
82 # item we want to delete was not found, error out
76 raise AssertionError(
83 raise AssertionError(
77 _("failed to remove %s from manifest") % f)
84 _("failed to remove %s from manifest") % f)
78 l = ""
85 l = ""
79 if dstart is not None and dstart <= start and dend >= start:
86 if dstart is not None and dstart <= start and dend >= start:
80 if dend < end:
87 if dend < end:
81 dend = end
88 dend = end
82 if l:
89 if l:
83 dline.append(l)
90 dline.append(l)
84 else:
91 else:
85 if dstart is not None:
92 if dstart is not None:
86 delta.append([dstart, dend, "".join(dline)])
93 delta.append([dstart, dend, "".join(dline)])
87 dstart = start
94 dstart = start
88 dend = end
95 dend = end
89 dline = [l]
96 dline = [l]
90
97
91 if dstart is not None:
98 if dstart is not None:
92 delta.append([dstart, dend, "".join(dline)])
99 delta.append([dstart, dend, "".join(dline)])
93 # apply the delta to the base, and get a delta for addrevision
100 # apply the delta to the base, and get a delta for addrevision
94 deltatext, arraytext = _addlistdelta(base, delta)
101 deltatext, arraytext = _addlistdelta(base, delta)
95 return arraytext, deltatext
102 return arraytext, deltatext
96
103
97 def _msearch(m, s, lo=0, hi=None):
104 def _msearch(m, s, lo=0, hi=None):
98 '''return a tuple (start, end) that says where to find s within m.
105 '''return a tuple (start, end) that says where to find s within m.
99
106
100 If the string is found m[start:end] are the line containing
107 If the string is found m[start:end] are the line containing
101 that string. If start == end the string was not found and
108 that string. If start == end the string was not found and
102 they indicate the proper sorted insertion point.
109 they indicate the proper sorted insertion point.
103
110
104 m should be a buffer or a string
111 m should be a buffer or a string
105 s is a string'''
112 s is a string'''
106 def advance(i, c):
113 def advance(i, c):
107 while i < lenm and m[i] != c:
114 while i < lenm and m[i] != c:
108 i += 1
115 i += 1
109 return i
116 return i
110 if not s:
117 if not s:
111 return (lo, lo)
118 return (lo, lo)
112 lenm = len(m)
119 lenm = len(m)
113 if not hi:
120 if not hi:
114 hi = lenm
121 hi = lenm
115 while lo < hi:
122 while lo < hi:
116 mid = (lo + hi) // 2
123 mid = (lo + hi) // 2
117 start = mid
124 start = mid
118 while start > 0 and m[start - 1] != '\n':
125 while start > 0 and m[start - 1] != '\n':
119 start -= 1
126 start -= 1
120 end = advance(start, '\0')
127 end = advance(start, '\0')
121 if m[start:end] < s:
128 if m[start:end] < s:
122 # we know that after the null there are 40 bytes of sha1
129 # we know that after the null there are 40 bytes of sha1
123 # this translates to the bisect lo = mid + 1
130 # this translates to the bisect lo = mid + 1
124 lo = advance(end + 40, '\n') + 1
131 lo = advance(end + 40, '\n') + 1
125 else:
132 else:
126 # this translates to the bisect hi = mid
133 # this translates to the bisect hi = mid
127 hi = start
134 hi = start
128 end = advance(lo, '\0')
135 end = advance(lo, '\0')
129 found = m[lo:end]
136 found = m[lo:end]
130 if s == found:
137 if s == found:
131 # we know that after the null there are 40 bytes of sha1
138 # we know that after the null there are 40 bytes of sha1
132 end = advance(end + 40, '\n')
139 end = advance(end + 40, '\n')
133 return (lo, end + 1)
140 return (lo, end + 1)
134 else:
141 else:
135 return (lo, lo)
142 return (lo, lo)
136
143
137 def _checkforbidden(l):
144 def _checkforbidden(l):
138 """Check filenames for illegal characters."""
145 """Check filenames for illegal characters."""
139 for f in l:
146 for f in l:
140 if '\n' in f or '\r' in f:
147 if '\n' in f or '\r' in f:
141 raise error.RevlogError(
148 raise error.RevlogError(
142 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
149 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
143
150
144
151
145 # apply the changes collected during the bisect loop to our addlist
152 # apply the changes collected during the bisect loop to our addlist
146 # return a delta suitable for addrevision
153 # return a delta suitable for addrevision
147 def _addlistdelta(addlist, x):
154 def _addlistdelta(addlist, x):
148 # for large addlist arrays, building a new array is cheaper
155 # for large addlist arrays, building a new array is cheaper
149 # than repeatedly modifying the existing one
156 # than repeatedly modifying the existing one
150 currentposition = 0
157 currentposition = 0
151 newaddlist = array.array('c')
158 newaddlist = array.array('c')
152
159
153 for start, end, content in x:
160 for start, end, content in x:
154 newaddlist += addlist[currentposition:start]
161 newaddlist += addlist[currentposition:start]
155 if content:
162 if content:
156 newaddlist += array.array('c', content)
163 newaddlist += array.array('c', content)
157
164
158 currentposition = end
165 currentposition = end
159
166
160 newaddlist += addlist[currentposition:]
167 newaddlist += addlist[currentposition:]
161
168
162 deltatext = "".join(struct.pack(">lll", start, end, len(content))
169 deltatext = "".join(struct.pack(">lll", start, end, len(content))
163 + content for start, end, content in x)
170 + content for start, end, content in x)
164 return deltatext, newaddlist
171 return deltatext, newaddlist
165
172
166 def _parse(lines):
173 def _parse(lines):
167 mfdict = manifestdict()
174 mfdict = manifestdict()
168 parsers.parse_manifest(mfdict, mfdict._flags, lines)
175 parsers.parse_manifest(mfdict, mfdict._flags, lines)
169 return mfdict
176 return mfdict
170
177
171 class manifest(revlog.revlog):
178 class manifest(revlog.revlog):
172 def __init__(self, opener):
179 def __init__(self, opener):
173 # we expect to deal with not more than four revs at a time,
180 # we expect to deal with not more than four revs at a time,
174 # during a commit --amend
181 # during a commit --amend
175 self._mancache = util.lrucachedict(4)
182 self._mancache = util.lrucachedict(4)
176 revlog.revlog.__init__(self, opener, "00manifest.i")
183 revlog.revlog.__init__(self, opener, "00manifest.i")
177
184
178 def readdelta(self, node):
185 def readdelta(self, node):
179 r = self.rev(node)
186 r = self.rev(node)
180 return _parse(mdiff.patchtext(self.revdiff(self.deltaparent(r), r)))
187 return _parse(mdiff.patchtext(self.revdiff(self.deltaparent(r), r)))
181
188
182 def readfast(self, node):
189 def readfast(self, node):
183 '''use the faster of readdelta or read'''
190 '''use the faster of readdelta or read'''
184 r = self.rev(node)
191 r = self.rev(node)
185 deltaparent = self.deltaparent(r)
192 deltaparent = self.deltaparent(r)
186 if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r):
193 if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r):
187 return self.readdelta(node)
194 return self.readdelta(node)
188 return self.read(node)
195 return self.read(node)
189
196
190 def read(self, node):
197 def read(self, node):
191 if node == revlog.nullid:
198 if node == revlog.nullid:
192 return manifestdict() # don't upset local cache
199 return manifestdict() # don't upset local cache
193 if node in self._mancache:
200 if node in self._mancache:
194 return self._mancache[node][0]
201 return self._mancache[node][0]
195 text = self.revision(node)
202 text = self.revision(node)
196 arraytext = array.array('c', text)
203 arraytext = array.array('c', text)
197 mapping = _parse(text)
204 mapping = _parse(text)
198 self._mancache[node] = (mapping, arraytext)
205 self._mancache[node] = (mapping, arraytext)
199 return mapping
206 return mapping
200
207
201 def find(self, node, f):
208 def find(self, node, f):
202 '''look up entry for a single file efficiently.
209 '''look up entry for a single file efficiently.
203 return (node, flags) pair if found, (None, None) if not.'''
210 return (node, flags) pair if found, (None, None) if not.'''
204 if node in self._mancache:
211 if node in self._mancache:
205 mapping = self._mancache[node][0]
212 mapping = self._mancache[node][0]
206 return mapping.get(f), mapping.flags(f)
213 return mapping.get(f), mapping.flags(f)
207 text = self.revision(node)
214 text = self.revision(node)
208 start, end = _msearch(text, f)
215 start, end = _msearch(text, f)
209 if start == end:
216 if start == end:
210 return None, None
217 return None, None
211 l = text[start:end]
218 l = text[start:end]
212 f, n = l.split('\0')
219 f, n = l.split('\0')
213 return revlog.bin(n[:40]), n[40:-1]
220 return revlog.bin(n[:40]), n[40:-1]
214
221
215 def add(self, map, transaction, link, p1, p2, added, removed):
222 def add(self, map, transaction, link, p1, p2, added, removed):
216 if p1 in self._mancache:
223 if p1 in self._mancache:
217 # If our first parent is in the manifest cache, we can
224 # If our first parent is in the manifest cache, we can
218 # compute a delta here using properties we know about the
225 # compute a delta here using properties we know about the
219 # manifest up-front, which may save time later for the
226 # manifest up-front, which may save time later for the
220 # revlog layer.
227 # revlog layer.
221
228
222 _checkforbidden(added)
229 _checkforbidden(added)
223 # combine the changed lists into one list for sorting
230 # combine the changed lists into one list for sorting
224 work = [(x, False) for x in added]
231 work = [(x, False) for x in added]
225 work.extend((x, True) for x in removed)
232 work.extend((x, True) for x in removed)
226 # this could use heapq.merge() (from Python 2.6+) or equivalent
233 # this could use heapq.merge() (from Python 2.6+) or equivalent
227 # since the lists are already sorted
234 # since the lists are already sorted
228 work.sort()
235 work.sort()
229
236
230 arraytext, deltatext = map.fastdelta(self._mancache[p1][1], work)
237 arraytext, deltatext = map.fastdelta(self._mancache[p1][1], work)
231 cachedelta = self.rev(p1), deltatext
238 cachedelta = self.rev(p1), deltatext
232 text = util.buffer(arraytext)
239 text = util.buffer(arraytext)
233 else:
240 else:
234 # The first parent manifest isn't already loaded, so we'll
241 # The first parent manifest isn't already loaded, so we'll
235 # just encode a fulltext of the manifest and pass that
242 # just encode a fulltext of the manifest and pass that
236 # through to the revlog layer, and let it handle the delta
243 # through to the revlog layer, and let it handle the delta
237 # process.
244 # process.
238 text = map.text()
245 text = map.text()
239 arraytext = array.array('c', text)
246 arraytext = array.array('c', text)
240 cachedelta = None
247 cachedelta = None
241
248
242 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
249 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
243 self._mancache[n] = (map, arraytext)
250 self._mancache[n] = (map, arraytext)
244
251
245 return n
252 return n
@@ -1,1177 +1,1175
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import struct
8 import struct
9
9
10 from node import nullid, nullrev, hex, bin
10 from node import nullid, nullrev, hex, bin
11 from i18n import _
11 from i18n import _
12 from mercurial import obsolete
12 from mercurial import obsolete
13 import error as errormod, util, filemerge, copies, subrepo, worker, dicthelpers
13 import error as errormod, util, filemerge, copies, subrepo, worker
14 import errno, os, shutil
14 import errno, os, shutil
15
15
16 _pack = struct.pack
16 _pack = struct.pack
17 _unpack = struct.unpack
17 _unpack = struct.unpack
18
18
19 def _droponode(data):
19 def _droponode(data):
20 # used for compatibility for v1
20 # used for compatibility for v1
21 bits = data.split("\0")
21 bits = data.split("\0")
22 bits = bits[:-2] + bits[-1:]
22 bits = bits[:-2] + bits[-1:]
23 return "\0".join(bits)
23 return "\0".join(bits)
24
24
25 class mergestate(object):
25 class mergestate(object):
26 '''track 3-way merge state of individual files
26 '''track 3-way merge state of individual files
27
27
28 it is stored on disk when needed. Two file are used, one with an old
28 it is stored on disk when needed. Two file are used, one with an old
29 format, one with a new format. Both contains similar data, but the new
29 format, one with a new format. Both contains similar data, but the new
30 format can store new kind of field.
30 format can store new kind of field.
31
31
32 Current new format is a list of arbitrary record of the form:
32 Current new format is a list of arbitrary record of the form:
33
33
34 [type][length][content]
34 [type][length][content]
35
35
36 Type is a single character, length is a 4 bytes integer, content is an
36 Type is a single character, length is a 4 bytes integer, content is an
37 arbitrary suites of bytes of length `length`.
37 arbitrary suites of bytes of length `length`.
38
38
39 Type should be a letter. Capital letter are mandatory record, Mercurial
39 Type should be a letter. Capital letter are mandatory record, Mercurial
40 should abort if they are unknown. lower case record can be safely ignored.
40 should abort if they are unknown. lower case record can be safely ignored.
41
41
42 Currently known record:
42 Currently known record:
43
43
44 L: the node of the "local" part of the merge (hexified version)
44 L: the node of the "local" part of the merge (hexified version)
45 O: the node of the "other" part of the merge (hexified version)
45 O: the node of the "other" part of the merge (hexified version)
46 F: a file to be merged entry
46 F: a file to be merged entry
47 '''
47 '''
48 statepathv1 = "merge/state"
48 statepathv1 = "merge/state"
49 statepathv2 = "merge/state2"
49 statepathv2 = "merge/state2"
50
50
51 def __init__(self, repo):
51 def __init__(self, repo):
52 self._repo = repo
52 self._repo = repo
53 self._dirty = False
53 self._dirty = False
54 self._read()
54 self._read()
55
55
56 def reset(self, node=None, other=None):
56 def reset(self, node=None, other=None):
57 self._state = {}
57 self._state = {}
58 self._local = None
58 self._local = None
59 self._other = None
59 self._other = None
60 if node:
60 if node:
61 self._local = node
61 self._local = node
62 self._other = other
62 self._other = other
63 shutil.rmtree(self._repo.join("merge"), True)
63 shutil.rmtree(self._repo.join("merge"), True)
64 self._dirty = False
64 self._dirty = False
65
65
66 def _read(self):
66 def _read(self):
67 """Analyse each record content to restore a serialized state from disk
67 """Analyse each record content to restore a serialized state from disk
68
68
69 This function process "record" entry produced by the de-serialization
69 This function process "record" entry produced by the de-serialization
70 of on disk file.
70 of on disk file.
71 """
71 """
72 self._state = {}
72 self._state = {}
73 self._local = None
73 self._local = None
74 self._other = None
74 self._other = None
75 records = self._readrecords()
75 records = self._readrecords()
76 for rtype, record in records:
76 for rtype, record in records:
77 if rtype == 'L':
77 if rtype == 'L':
78 self._local = bin(record)
78 self._local = bin(record)
79 elif rtype == 'O':
79 elif rtype == 'O':
80 self._other = bin(record)
80 self._other = bin(record)
81 elif rtype == "F":
81 elif rtype == "F":
82 bits = record.split("\0")
82 bits = record.split("\0")
83 self._state[bits[0]] = bits[1:]
83 self._state[bits[0]] = bits[1:]
84 elif not rtype.islower():
84 elif not rtype.islower():
85 raise util.Abort(_('unsupported merge state record: %s')
85 raise util.Abort(_('unsupported merge state record: %s')
86 % rtype)
86 % rtype)
87 self._dirty = False
87 self._dirty = False
88
88
89 def _readrecords(self):
89 def _readrecords(self):
90 """Read merge state from disk and return a list of record (TYPE, data)
90 """Read merge state from disk and return a list of record (TYPE, data)
91
91
92 We read data from both v1 and v2 files and decide which one to use.
92 We read data from both v1 and v2 files and decide which one to use.
93
93
94 V1 has been used by version prior to 2.9.1 and contains less data than
94 V1 has been used by version prior to 2.9.1 and contains less data than
95 v2. We read both versions and check if no data in v2 contradicts
95 v2. We read both versions and check if no data in v2 contradicts
96 v1. If there is not contradiction we can safely assume that both v1
96 v1. If there is not contradiction we can safely assume that both v1
97 and v2 were written at the same time and use the extract data in v2. If
97 and v2 were written at the same time and use the extract data in v2. If
98 there is contradiction we ignore v2 content as we assume an old version
98 there is contradiction we ignore v2 content as we assume an old version
99 of Mercurial has overwritten the mergestate file and left an old v2
99 of Mercurial has overwritten the mergestate file and left an old v2
100 file around.
100 file around.
101
101
102 returns list of record [(TYPE, data), ...]"""
102 returns list of record [(TYPE, data), ...]"""
103 v1records = self._readrecordsv1()
103 v1records = self._readrecordsv1()
104 v2records = self._readrecordsv2()
104 v2records = self._readrecordsv2()
105 oldv2 = set() # old format version of v2 record
105 oldv2 = set() # old format version of v2 record
106 for rec in v2records:
106 for rec in v2records:
107 if rec[0] == 'L':
107 if rec[0] == 'L':
108 oldv2.add(rec)
108 oldv2.add(rec)
109 elif rec[0] == 'F':
109 elif rec[0] == 'F':
110 # drop the onode data (not contained in v1)
110 # drop the onode data (not contained in v1)
111 oldv2.add(('F', _droponode(rec[1])))
111 oldv2.add(('F', _droponode(rec[1])))
112 for rec in v1records:
112 for rec in v1records:
113 if rec not in oldv2:
113 if rec not in oldv2:
114 # v1 file is newer than v2 file, use it
114 # v1 file is newer than v2 file, use it
115 # we have to infer the "other" changeset of the merge
115 # we have to infer the "other" changeset of the merge
116 # we cannot do better than that with v1 of the format
116 # we cannot do better than that with v1 of the format
117 mctx = self._repo[None].parents()[-1]
117 mctx = self._repo[None].parents()[-1]
118 v1records.append(('O', mctx.hex()))
118 v1records.append(('O', mctx.hex()))
119 # add place holder "other" file node information
119 # add place holder "other" file node information
120 # nobody is using it yet so we do no need to fetch the data
120 # nobody is using it yet so we do no need to fetch the data
121 # if mctx was wrong `mctx[bits[-2]]` may fails.
121 # if mctx was wrong `mctx[bits[-2]]` may fails.
122 for idx, r in enumerate(v1records):
122 for idx, r in enumerate(v1records):
123 if r[0] == 'F':
123 if r[0] == 'F':
124 bits = r[1].split("\0")
124 bits = r[1].split("\0")
125 bits.insert(-2, '')
125 bits.insert(-2, '')
126 v1records[idx] = (r[0], "\0".join(bits))
126 v1records[idx] = (r[0], "\0".join(bits))
127 return v1records
127 return v1records
128 else:
128 else:
129 return v2records
129 return v2records
130
130
131 def _readrecordsv1(self):
131 def _readrecordsv1(self):
132 """read on disk merge state for version 1 file
132 """read on disk merge state for version 1 file
133
133
134 returns list of record [(TYPE, data), ...]
134 returns list of record [(TYPE, data), ...]
135
135
136 Note: the "F" data from this file are one entry short
136 Note: the "F" data from this file are one entry short
137 (no "other file node" entry)
137 (no "other file node" entry)
138 """
138 """
139 records = []
139 records = []
140 try:
140 try:
141 f = self._repo.opener(self.statepathv1)
141 f = self._repo.opener(self.statepathv1)
142 for i, l in enumerate(f):
142 for i, l in enumerate(f):
143 if i == 0:
143 if i == 0:
144 records.append(('L', l[:-1]))
144 records.append(('L', l[:-1]))
145 else:
145 else:
146 records.append(('F', l[:-1]))
146 records.append(('F', l[:-1]))
147 f.close()
147 f.close()
148 except IOError, err:
148 except IOError, err:
149 if err.errno != errno.ENOENT:
149 if err.errno != errno.ENOENT:
150 raise
150 raise
151 return records
151 return records
152
152
153 def _readrecordsv2(self):
153 def _readrecordsv2(self):
154 """read on disk merge state for version 2 file
154 """read on disk merge state for version 2 file
155
155
156 returns list of record [(TYPE, data), ...]
156 returns list of record [(TYPE, data), ...]
157 """
157 """
158 records = []
158 records = []
159 try:
159 try:
160 f = self._repo.opener(self.statepathv2)
160 f = self._repo.opener(self.statepathv2)
161 data = f.read()
161 data = f.read()
162 off = 0
162 off = 0
163 end = len(data)
163 end = len(data)
164 while off < end:
164 while off < end:
165 rtype = data[off]
165 rtype = data[off]
166 off += 1
166 off += 1
167 length = _unpack('>I', data[off:(off + 4)])[0]
167 length = _unpack('>I', data[off:(off + 4)])[0]
168 off += 4
168 off += 4
169 record = data[off:(off + length)]
169 record = data[off:(off + length)]
170 off += length
170 off += length
171 records.append((rtype, record))
171 records.append((rtype, record))
172 f.close()
172 f.close()
173 except IOError, err:
173 except IOError, err:
174 if err.errno != errno.ENOENT:
174 if err.errno != errno.ENOENT:
175 raise
175 raise
176 return records
176 return records
177
177
178 def active(self):
178 def active(self):
179 """Whether mergestate is active.
179 """Whether mergestate is active.
180
180
181 Returns True if there appears to be mergestate. This is a rough proxy
181 Returns True if there appears to be mergestate. This is a rough proxy
182 for "is a merge in progress."
182 for "is a merge in progress."
183 """
183 """
184 # Check local variables before looking at filesystem for performance
184 # Check local variables before looking at filesystem for performance
185 # reasons.
185 # reasons.
186 return bool(self._local) or bool(self._state) or \
186 return bool(self._local) or bool(self._state) or \
187 self._repo.opener.exists(self.statepathv1) or \
187 self._repo.opener.exists(self.statepathv1) or \
188 self._repo.opener.exists(self.statepathv2)
188 self._repo.opener.exists(self.statepathv2)
189
189
190 def commit(self):
190 def commit(self):
191 """Write current state on disk (if necessary)"""
191 """Write current state on disk (if necessary)"""
192 if self._dirty:
192 if self._dirty:
193 records = []
193 records = []
194 records.append(("L", hex(self._local)))
194 records.append(("L", hex(self._local)))
195 records.append(("O", hex(self._other)))
195 records.append(("O", hex(self._other)))
196 for d, v in self._state.iteritems():
196 for d, v in self._state.iteritems():
197 records.append(("F", "\0".join([d] + v)))
197 records.append(("F", "\0".join([d] + v)))
198 self._writerecords(records)
198 self._writerecords(records)
199 self._dirty = False
199 self._dirty = False
200
200
201 def _writerecords(self, records):
201 def _writerecords(self, records):
202 """Write current state on disk (both v1 and v2)"""
202 """Write current state on disk (both v1 and v2)"""
203 self._writerecordsv1(records)
203 self._writerecordsv1(records)
204 self._writerecordsv2(records)
204 self._writerecordsv2(records)
205
205
206 def _writerecordsv1(self, records):
206 def _writerecordsv1(self, records):
207 """Write current state on disk in a version 1 file"""
207 """Write current state on disk in a version 1 file"""
208 f = self._repo.opener(self.statepathv1, "w")
208 f = self._repo.opener(self.statepathv1, "w")
209 irecords = iter(records)
209 irecords = iter(records)
210 lrecords = irecords.next()
210 lrecords = irecords.next()
211 assert lrecords[0] == 'L'
211 assert lrecords[0] == 'L'
212 f.write(hex(self._local) + "\n")
212 f.write(hex(self._local) + "\n")
213 for rtype, data in irecords:
213 for rtype, data in irecords:
214 if rtype == "F":
214 if rtype == "F":
215 f.write("%s\n" % _droponode(data))
215 f.write("%s\n" % _droponode(data))
216 f.close()
216 f.close()
217
217
218 def _writerecordsv2(self, records):
218 def _writerecordsv2(self, records):
219 """Write current state on disk in a version 2 file"""
219 """Write current state on disk in a version 2 file"""
220 f = self._repo.opener(self.statepathv2, "w")
220 f = self._repo.opener(self.statepathv2, "w")
221 for key, data in records:
221 for key, data in records:
222 assert len(key) == 1
222 assert len(key) == 1
223 format = ">sI%is" % len(data)
223 format = ">sI%is" % len(data)
224 f.write(_pack(format, key, len(data), data))
224 f.write(_pack(format, key, len(data), data))
225 f.close()
225 f.close()
226
226
227 def add(self, fcl, fco, fca, fd):
227 def add(self, fcl, fco, fca, fd):
228 """add a new (potentially?) conflicting file the merge state
228 """add a new (potentially?) conflicting file the merge state
229 fcl: file context for local,
229 fcl: file context for local,
230 fco: file context for remote,
230 fco: file context for remote,
231 fca: file context for ancestors,
231 fca: file context for ancestors,
232 fd: file path of the resulting merge.
232 fd: file path of the resulting merge.
233
233
234 note: also write the local version to the `.hg/merge` directory.
234 note: also write the local version to the `.hg/merge` directory.
235 """
235 """
236 hash = util.sha1(fcl.path()).hexdigest()
236 hash = util.sha1(fcl.path()).hexdigest()
237 self._repo.opener.write("merge/" + hash, fcl.data())
237 self._repo.opener.write("merge/" + hash, fcl.data())
238 self._state[fd] = ['u', hash, fcl.path(),
238 self._state[fd] = ['u', hash, fcl.path(),
239 fca.path(), hex(fca.filenode()),
239 fca.path(), hex(fca.filenode()),
240 fco.path(), hex(fco.filenode()),
240 fco.path(), hex(fco.filenode()),
241 fcl.flags()]
241 fcl.flags()]
242 self._dirty = True
242 self._dirty = True
243
243
244 def __contains__(self, dfile):
244 def __contains__(self, dfile):
245 return dfile in self._state
245 return dfile in self._state
246
246
247 def __getitem__(self, dfile):
247 def __getitem__(self, dfile):
248 return self._state[dfile][0]
248 return self._state[dfile][0]
249
249
250 def __iter__(self):
250 def __iter__(self):
251 return iter(sorted(self._state))
251 return iter(sorted(self._state))
252
252
253 def files(self):
253 def files(self):
254 return self._state.keys()
254 return self._state.keys()
255
255
256 def mark(self, dfile, state):
256 def mark(self, dfile, state):
257 self._state[dfile][0] = state
257 self._state[dfile][0] = state
258 self._dirty = True
258 self._dirty = True
259
259
260 def unresolved(self):
260 def unresolved(self):
261 """Obtain the paths of unresolved files."""
261 """Obtain the paths of unresolved files."""
262
262
263 for f, entry in self._state.items():
263 for f, entry in self._state.items():
264 if entry[0] == 'u':
264 if entry[0] == 'u':
265 yield f
265 yield f
266
266
267 def resolve(self, dfile, wctx, labels=None):
267 def resolve(self, dfile, wctx, labels=None):
268 """rerun merge process for file path `dfile`"""
268 """rerun merge process for file path `dfile`"""
269 if self[dfile] == 'r':
269 if self[dfile] == 'r':
270 return 0
270 return 0
271 stateentry = self._state[dfile]
271 stateentry = self._state[dfile]
272 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
272 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
273 octx = self._repo[self._other]
273 octx = self._repo[self._other]
274 fcd = wctx[dfile]
274 fcd = wctx[dfile]
275 fco = octx[ofile]
275 fco = octx[ofile]
276 fca = self._repo.filectx(afile, fileid=anode)
276 fca = self._repo.filectx(afile, fileid=anode)
277 # "premerge" x flags
277 # "premerge" x flags
278 flo = fco.flags()
278 flo = fco.flags()
279 fla = fca.flags()
279 fla = fca.flags()
280 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
280 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
281 if fca.node() == nullid:
281 if fca.node() == nullid:
282 self._repo.ui.warn(_('warning: cannot merge flags for %s\n') %
282 self._repo.ui.warn(_('warning: cannot merge flags for %s\n') %
283 afile)
283 afile)
284 elif flags == fla:
284 elif flags == fla:
285 flags = flo
285 flags = flo
286 # restore local
286 # restore local
287 f = self._repo.opener("merge/" + hash)
287 f = self._repo.opener("merge/" + hash)
288 self._repo.wwrite(dfile, f.read(), flags)
288 self._repo.wwrite(dfile, f.read(), flags)
289 f.close()
289 f.close()
290 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca,
290 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca,
291 labels=labels)
291 labels=labels)
292 if r is None:
292 if r is None:
293 # no real conflict
293 # no real conflict
294 del self._state[dfile]
294 del self._state[dfile]
295 self._dirty = True
295 self._dirty = True
296 elif not r:
296 elif not r:
297 self.mark(dfile, 'r')
297 self.mark(dfile, 'r')
298 return r
298 return r
299
299
300 def _checkunknownfile(repo, wctx, mctx, f):
300 def _checkunknownfile(repo, wctx, mctx, f):
301 return (not repo.dirstate._ignore(f)
301 return (not repo.dirstate._ignore(f)
302 and os.path.isfile(repo.wjoin(f))
302 and os.path.isfile(repo.wjoin(f))
303 and repo.wopener.audit.check(f)
303 and repo.wopener.audit.check(f)
304 and repo.dirstate.normalize(f) not in repo.dirstate
304 and repo.dirstate.normalize(f) not in repo.dirstate
305 and mctx[f].cmp(wctx[f]))
305 and mctx[f].cmp(wctx[f]))
306
306
307 def _checkunknown(repo, wctx, mctx):
307 def _checkunknown(repo, wctx, mctx):
308 "check for collisions between unknown files and files in mctx"
308 "check for collisions between unknown files and files in mctx"
309
309
310 error = False
310 error = False
311 for f in mctx:
311 for f in mctx:
312 if f not in wctx and _checkunknownfile(repo, wctx, mctx, f):
312 if f not in wctx and _checkunknownfile(repo, wctx, mctx, f):
313 error = True
313 error = True
314 wctx._repo.ui.warn(_("%s: untracked file differs\n") % f)
314 wctx._repo.ui.warn(_("%s: untracked file differs\n") % f)
315 if error:
315 if error:
316 raise util.Abort(_("untracked files in working directory differ "
316 raise util.Abort(_("untracked files in working directory differ "
317 "from files in requested revision"))
317 "from files in requested revision"))
318
318
319 def _forgetremoved(wctx, mctx, branchmerge):
319 def _forgetremoved(wctx, mctx, branchmerge):
320 """
320 """
321 Forget removed files
321 Forget removed files
322
322
323 If we're jumping between revisions (as opposed to merging), and if
323 If we're jumping between revisions (as opposed to merging), and if
324 neither the working directory nor the target rev has the file,
324 neither the working directory nor the target rev has the file,
325 then we need to remove it from the dirstate, to prevent the
325 then we need to remove it from the dirstate, to prevent the
326 dirstate from listing the file when it is no longer in the
326 dirstate from listing the file when it is no longer in the
327 manifest.
327 manifest.
328
328
329 If we're merging, and the other revision has removed a file
329 If we're merging, and the other revision has removed a file
330 that is not present in the working directory, we need to mark it
330 that is not present in the working directory, we need to mark it
331 as removed.
331 as removed.
332 """
332 """
333
333
334 ractions = []
334 ractions = []
335 factions = xactions = []
335 factions = xactions = []
336 if branchmerge:
336 if branchmerge:
337 xactions = ractions
337 xactions = ractions
338 for f in wctx.deleted():
338 for f in wctx.deleted():
339 if f not in mctx:
339 if f not in mctx:
340 xactions.append((f, None, "forget deleted"))
340 xactions.append((f, None, "forget deleted"))
341
341
342 if not branchmerge:
342 if not branchmerge:
343 for f in wctx.removed():
343 for f in wctx.removed():
344 if f not in mctx:
344 if f not in mctx:
345 factions.append((f, None, "forget removed"))
345 factions.append((f, None, "forget removed"))
346
346
347 return ractions, factions
347 return ractions, factions
348
348
349 def _checkcollision(repo, wmf, actions):
349 def _checkcollision(repo, wmf, actions):
350 # build provisional merged manifest up
350 # build provisional merged manifest up
351 pmmf = set(wmf)
351 pmmf = set(wmf)
352
352
353 if actions:
353 if actions:
354 # k, dr, e and rd are no-op
354 # k, dr, e and rd are no-op
355 for m in 'a', 'f', 'g', 'cd', 'dc':
355 for m in 'a', 'f', 'g', 'cd', 'dc':
356 for f, args, msg in actions[m]:
356 for f, args, msg in actions[m]:
357 pmmf.add(f)
357 pmmf.add(f)
358 for f, args, msg in actions['r']:
358 for f, args, msg in actions['r']:
359 pmmf.discard(f)
359 pmmf.discard(f)
360 for f, args, msg in actions['dm']:
360 for f, args, msg in actions['dm']:
361 f2, flags = args
361 f2, flags = args
362 pmmf.discard(f2)
362 pmmf.discard(f2)
363 pmmf.add(f)
363 pmmf.add(f)
364 for f, args, msg in actions['dg']:
364 for f, args, msg in actions['dg']:
365 f2, flags = args
365 f2, flags = args
366 pmmf.add(f)
366 pmmf.add(f)
367 for f, args, msg in actions['m']:
367 for f, args, msg in actions['m']:
368 f1, f2, fa, move, anc = args
368 f1, f2, fa, move, anc = args
369 if move:
369 if move:
370 pmmf.discard(f1)
370 pmmf.discard(f1)
371 pmmf.add(f)
371 pmmf.add(f)
372
372
373 # check case-folding collision in provisional merged manifest
373 # check case-folding collision in provisional merged manifest
374 foldmap = {}
374 foldmap = {}
375 for f in sorted(pmmf):
375 for f in sorted(pmmf):
376 fold = util.normcase(f)
376 fold = util.normcase(f)
377 if fold in foldmap:
377 if fold in foldmap:
378 raise util.Abort(_("case-folding collision between %s and %s")
378 raise util.Abort(_("case-folding collision between %s and %s")
379 % (f, foldmap[fold]))
379 % (f, foldmap[fold]))
380 foldmap[fold] = f
380 foldmap[fold] = f
381
381
382 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
382 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
383 acceptremote, followcopies):
383 acceptremote, followcopies):
384 """
384 """
385 Merge p1 and p2 with ancestor pa and generate merge action list
385 Merge p1 and p2 with ancestor pa and generate merge action list
386
386
387 branchmerge and force are as passed in to update
387 branchmerge and force are as passed in to update
388 partial = function to filter file lists
388 partial = function to filter file lists
389 acceptremote = accept the incoming changes without prompting
389 acceptremote = accept the incoming changes without prompting
390 """
390 """
391
391
392 actions = dict((m, []) for m in 'a f g cd dc r dm dg m dr e rd k'.split())
392 actions = dict((m, []) for m in 'a f g cd dc r dm dg m dr e rd k'.split())
393 copy, movewithdir = {}, {}
393 copy, movewithdir = {}, {}
394
394
395 # manifests fetched in order are going to be faster, so prime the caches
395 # manifests fetched in order are going to be faster, so prime the caches
396 [x.manifest() for x in
396 [x.manifest() for x in
397 sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]
397 sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]
398
398
399 if followcopies:
399 if followcopies:
400 ret = copies.mergecopies(repo, wctx, p2, pa)
400 ret = copies.mergecopies(repo, wctx, p2, pa)
401 copy, movewithdir, diverge, renamedelete = ret
401 copy, movewithdir, diverge, renamedelete = ret
402 for of, fl in diverge.iteritems():
402 for of, fl in diverge.iteritems():
403 actions['dr'].append((of, (fl,), "divergent renames"))
403 actions['dr'].append((of, (fl,), "divergent renames"))
404 for of, fl in renamedelete.iteritems():
404 for of, fl in renamedelete.iteritems():
405 actions['rd'].append((of, (fl,), "rename and delete"))
405 actions['rd'].append((of, (fl,), "rename and delete"))
406
406
407 repo.ui.note(_("resolving manifests\n"))
407 repo.ui.note(_("resolving manifests\n"))
408 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
408 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
409 % (bool(branchmerge), bool(force), bool(partial)))
409 % (bool(branchmerge), bool(force), bool(partial)))
410 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
410 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
411
411
412 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
412 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
413 copied = set(copy.values())
413 copied = set(copy.values())
414 copied.update(movewithdir.values())
414 copied.update(movewithdir.values())
415
415
416 if '.hgsubstate' in m1:
416 if '.hgsubstate' in m1:
417 # check whether sub state is modified
417 # check whether sub state is modified
418 for s in sorted(wctx.substate):
418 for s in sorted(wctx.substate):
419 if wctx.sub(s).dirty():
419 if wctx.sub(s).dirty():
420 m1['.hgsubstate'] += "+"
420 m1['.hgsubstate'] += "+"
421 break
421 break
422
422
423 aborts = []
423 aborts = []
424 # Compare manifests
424 # Compare manifests
425 fdiff = dicthelpers.diff(m1, m2)
425 diff = m1.diff(m2)
426 flagsdiff = m1.flagsdiff(m2)
427 diff12 = dicthelpers.join(fdiff, flagsdiff)
428
426
429 for f, (n12, fl12) in diff12.iteritems():
427 for f, (n12, fl12) in diff.iteritems():
430 if n12:
428 if n12:
431 n1, n2 = n12
429 n1, n2 = n12
432 else: # file contents didn't change, but flags did
430 else: # file contents didn't change, but flags did
433 n1 = n2 = m1.get(f, None)
431 n1 = n2 = m1.get(f, None)
434 if n1 is None:
432 if n1 is None:
435 # Since n1 == n2, the file isn't present in m2 either. This
433 # Since n1 == n2, the file isn't present in m2 either. This
436 # means that the file was removed or deleted locally and
434 # means that the file was removed or deleted locally and
437 # removed remotely, but that residual entries remain in flags.
435 # removed remotely, but that residual entries remain in flags.
438 # This can happen in manifests generated by workingctx.
436 # This can happen in manifests generated by workingctx.
439 continue
437 continue
440 if fl12:
438 if fl12:
441 fl1, fl2 = fl12
439 fl1, fl2 = fl12
442 else: # flags didn't change, file contents did
440 else: # flags didn't change, file contents did
443 fl1 = fl2 = m1.flags(f)
441 fl1 = fl2 = m1.flags(f)
444
442
445 if partial and not partial(f):
443 if partial and not partial(f):
446 continue
444 continue
447 if n1 and n2:
445 if n1 and n2:
448 fa = f
446 fa = f
449 a = ma.get(f, nullid)
447 a = ma.get(f, nullid)
450 if a == nullid:
448 if a == nullid:
451 fa = copy.get(f, f)
449 fa = copy.get(f, f)
452 # Note: f as default is wrong - we can't really make a 3-way
450 # Note: f as default is wrong - we can't really make a 3-way
453 # merge without an ancestor file.
451 # merge without an ancestor file.
454 fla = ma.flags(fa)
452 fla = ma.flags(fa)
455 nol = 'l' not in fl1 + fl2 + fla
453 nol = 'l' not in fl1 + fl2 + fla
456 if n2 == a and fl2 == fla:
454 if n2 == a and fl2 == fla:
457 actions['k'].append((f, (), "keep")) # remote unchanged
455 actions['k'].append((f, (), "keep")) # remote unchanged
458 elif n1 == a and fl1 == fla: # local unchanged - use remote
456 elif n1 == a and fl1 == fla: # local unchanged - use remote
459 if n1 == n2: # optimization: keep local content
457 if n1 == n2: # optimization: keep local content
460 actions['e'].append((f, (fl2,), "update permissions"))
458 actions['e'].append((f, (fl2,), "update permissions"))
461 else:
459 else:
462 actions['g'].append((f, (fl2,), "remote is newer"))
460 actions['g'].append((f, (fl2,), "remote is newer"))
463 elif nol and n2 == a: # remote only changed 'x'
461 elif nol and n2 == a: # remote only changed 'x'
464 actions['e'].append((f, (fl2,), "update permissions"))
462 actions['e'].append((f, (fl2,), "update permissions"))
465 elif nol and n1 == a: # local only changed 'x'
463 elif nol and n1 == a: # local only changed 'x'
466 actions['g'].append((f, (fl1,), "remote is newer"))
464 actions['g'].append((f, (fl1,), "remote is newer"))
467 else: # both changed something
465 else: # both changed something
468 actions['m'].append((f, (f, f, fa, False, pa.node()),
466 actions['m'].append((f, (f, f, fa, False, pa.node()),
469 "versions differ"))
467 "versions differ"))
470 elif f in copied: # files we'll deal with on m2 side
468 elif f in copied: # files we'll deal with on m2 side
471 pass
469 pass
472 elif n1 and f in movewithdir: # directory rename, move local
470 elif n1 and f in movewithdir: # directory rename, move local
473 f2 = movewithdir[f]
471 f2 = movewithdir[f]
474 actions['dm'].append((f2, (f, fl1),
472 actions['dm'].append((f2, (f, fl1),
475 "remote directory rename - move from " + f))
473 "remote directory rename - move from " + f))
476 elif n1 and f in copy:
474 elif n1 and f in copy:
477 f2 = copy[f]
475 f2 = copy[f]
478 actions['m'].append((f, (f, f2, f2, False, pa.node()),
476 actions['m'].append((f, (f, f2, f2, False, pa.node()),
479 "local copied/moved from " + f2))
477 "local copied/moved from " + f2))
480 elif n1 and f in ma: # clean, a different, no remote
478 elif n1 and f in ma: # clean, a different, no remote
481 if n1 != ma[f]:
479 if n1 != ma[f]:
482 if acceptremote:
480 if acceptremote:
483 actions['r'].append((f, None, "remote delete"))
481 actions['r'].append((f, None, "remote delete"))
484 else:
482 else:
485 actions['cd'].append((f, None, "prompt changed/deleted"))
483 actions['cd'].append((f, None, "prompt changed/deleted"))
486 elif n1[20:] == "a": # added, no remote
484 elif n1[20:] == "a": # added, no remote
487 actions['f'].append((f, None, "remote deleted"))
485 actions['f'].append((f, None, "remote deleted"))
488 else:
486 else:
489 actions['r'].append((f, None, "other deleted"))
487 actions['r'].append((f, None, "other deleted"))
490 elif n2 and f in movewithdir:
488 elif n2 and f in movewithdir:
491 f2 = movewithdir[f]
489 f2 = movewithdir[f]
492 actions['dg'].append((f2, (f, fl2),
490 actions['dg'].append((f2, (f, fl2),
493 "local directory rename - get from " + f))
491 "local directory rename - get from " + f))
494 elif n2 and f in copy:
492 elif n2 and f in copy:
495 f2 = copy[f]
493 f2 = copy[f]
496 if f2 in m2:
494 if f2 in m2:
497 actions['m'].append((f, (f2, f, f2, False, pa.node()),
495 actions['m'].append((f, (f2, f, f2, False, pa.node()),
498 "remote copied from " + f2))
496 "remote copied from " + f2))
499 else:
497 else:
500 actions['m'].append((f, (f2, f, f2, True, pa.node()),
498 actions['m'].append((f, (f2, f, f2, True, pa.node()),
501 "remote moved from " + f2))
499 "remote moved from " + f2))
502 elif n2 and f not in ma:
500 elif n2 and f not in ma:
503 # local unknown, remote created: the logic is described by the
501 # local unknown, remote created: the logic is described by the
504 # following table:
502 # following table:
505 #
503 #
506 # force branchmerge different | action
504 # force branchmerge different | action
507 # n * n | get
505 # n * n | get
508 # n * y | abort
506 # n * y | abort
509 # y n * | get
507 # y n * | get
510 # y y n | get
508 # y y n | get
511 # y y y | merge
509 # y y y | merge
512 #
510 #
513 # Checking whether the files are different is expensive, so we
511 # Checking whether the files are different is expensive, so we
514 # don't do that when we can avoid it.
512 # don't do that when we can avoid it.
515 if force and not branchmerge:
513 if force and not branchmerge:
516 actions['g'].append((f, (fl2,), "remote created"))
514 actions['g'].append((f, (fl2,), "remote created"))
517 else:
515 else:
518 different = _checkunknownfile(repo, wctx, p2, f)
516 different = _checkunknownfile(repo, wctx, p2, f)
519 if force and branchmerge and different:
517 if force and branchmerge and different:
520 # FIXME: This is wrong - f is not in ma ...
518 # FIXME: This is wrong - f is not in ma ...
521 actions['m'].append((f, (f, f, f, False, pa.node()),
519 actions['m'].append((f, (f, f, f, False, pa.node()),
522 "remote differs from untracked local"))
520 "remote differs from untracked local"))
523 elif not force and different:
521 elif not force and different:
524 aborts.append((f, "ud"))
522 aborts.append((f, "ud"))
525 else:
523 else:
526 actions['g'].append((f, (fl2,), "remote created"))
524 actions['g'].append((f, (fl2,), "remote created"))
527 elif n2 and n2 != ma[f]:
525 elif n2 and n2 != ma[f]:
528 different = _checkunknownfile(repo, wctx, p2, f)
526 different = _checkunknownfile(repo, wctx, p2, f)
529 if not force and different:
527 if not force and different:
530 aborts.append((f, "ud"))
528 aborts.append((f, "ud"))
531 else:
529 else:
532 # if different: old untracked f may be overwritten and lost
530 # if different: old untracked f may be overwritten and lost
533 if acceptremote:
531 if acceptremote:
534 actions['g'].append((f, (m2.flags(f),),
532 actions['g'].append((f, (m2.flags(f),),
535 "remote recreating"))
533 "remote recreating"))
536 else:
534 else:
537 actions['dc'].append((f, (m2.flags(f),),
535 actions['dc'].append((f, (m2.flags(f),),
538 "prompt deleted/changed"))
536 "prompt deleted/changed"))
539
537
540 for f, m in sorted(aborts):
538 for f, m in sorted(aborts):
541 if m == "ud":
539 if m == "ud":
542 repo.ui.warn(_("%s: untracked file differs\n") % f)
540 repo.ui.warn(_("%s: untracked file differs\n") % f)
543 else: assert False, m
541 else: assert False, m
544 if aborts:
542 if aborts:
545 raise util.Abort(_("untracked files in working directory differ "
543 raise util.Abort(_("untracked files in working directory differ "
546 "from files in requested revision"))
544 "from files in requested revision"))
547
545
548 if not util.checkcase(repo.path):
546 if not util.checkcase(repo.path):
549 # check collision between files only in p2 for clean update
547 # check collision between files only in p2 for clean update
550 if (not branchmerge and
548 if (not branchmerge and
551 (force or not wctx.dirty(missing=True, branch=False))):
549 (force or not wctx.dirty(missing=True, branch=False))):
552 _checkcollision(repo, m2, None)
550 _checkcollision(repo, m2, None)
553 else:
551 else:
554 _checkcollision(repo, m1, actions)
552 _checkcollision(repo, m1, actions)
555
553
556 return actions
554 return actions
557
555
558 def batchremove(repo, actions):
556 def batchremove(repo, actions):
559 """apply removes to the working directory
557 """apply removes to the working directory
560
558
561 yields tuples for progress updates
559 yields tuples for progress updates
562 """
560 """
563 verbose = repo.ui.verbose
561 verbose = repo.ui.verbose
564 unlink = util.unlinkpath
562 unlink = util.unlinkpath
565 wjoin = repo.wjoin
563 wjoin = repo.wjoin
566 audit = repo.wopener.audit
564 audit = repo.wopener.audit
567 i = 0
565 i = 0
568 for f, args, msg in actions:
566 for f, args, msg in actions:
569 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
567 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
570 if verbose:
568 if verbose:
571 repo.ui.note(_("removing %s\n") % f)
569 repo.ui.note(_("removing %s\n") % f)
572 audit(f)
570 audit(f)
573 try:
571 try:
574 unlink(wjoin(f), ignoremissing=True)
572 unlink(wjoin(f), ignoremissing=True)
575 except OSError, inst:
573 except OSError, inst:
576 repo.ui.warn(_("update failed to remove %s: %s!\n") %
574 repo.ui.warn(_("update failed to remove %s: %s!\n") %
577 (f, inst.strerror))
575 (f, inst.strerror))
578 if i == 100:
576 if i == 100:
579 yield i, f
577 yield i, f
580 i = 0
578 i = 0
581 i += 1
579 i += 1
582 if i > 0:
580 if i > 0:
583 yield i, f
581 yield i, f
584
582
585 def batchget(repo, mctx, actions):
583 def batchget(repo, mctx, actions):
586 """apply gets to the working directory
584 """apply gets to the working directory
587
585
588 mctx is the context to get from
586 mctx is the context to get from
589
587
590 yields tuples for progress updates
588 yields tuples for progress updates
591 """
589 """
592 verbose = repo.ui.verbose
590 verbose = repo.ui.verbose
593 fctx = mctx.filectx
591 fctx = mctx.filectx
594 wwrite = repo.wwrite
592 wwrite = repo.wwrite
595 i = 0
593 i = 0
596 for f, args, msg in actions:
594 for f, args, msg in actions:
597 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
595 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
598 if verbose:
596 if verbose:
599 repo.ui.note(_("getting %s\n") % f)
597 repo.ui.note(_("getting %s\n") % f)
600 wwrite(f, fctx(f).data(), args[0])
598 wwrite(f, fctx(f).data(), args[0])
601 if i == 100:
599 if i == 100:
602 yield i, f
600 yield i, f
603 i = 0
601 i = 0
604 i += 1
602 i += 1
605 if i > 0:
603 if i > 0:
606 yield i, f
604 yield i, f
607
605
608 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
606 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
609 """apply the merge action list to the working directory
607 """apply the merge action list to the working directory
610
608
611 wctx is the working copy context
609 wctx is the working copy context
612 mctx is the context to be merged into the working copy
610 mctx is the context to be merged into the working copy
613
611
614 Return a tuple of counts (updated, merged, removed, unresolved) that
612 Return a tuple of counts (updated, merged, removed, unresolved) that
615 describes how many files were affected by the update.
613 describes how many files were affected by the update.
616 """
614 """
617
615
618 updated, merged, removed, unresolved = 0, 0, 0, 0
616 updated, merged, removed, unresolved = 0, 0, 0, 0
619 ms = mergestate(repo)
617 ms = mergestate(repo)
620 ms.reset(wctx.p1().node(), mctx.node())
618 ms.reset(wctx.p1().node(), mctx.node())
621 moves = []
619 moves = []
622 for m, l in actions.items():
620 for m, l in actions.items():
623 l.sort()
621 l.sort()
624
622
625 # prescan for merges
623 # prescan for merges
626 for f, args, msg in actions['m']:
624 for f, args, msg in actions['m']:
627 f1, f2, fa, move, anc = args
625 f1, f2, fa, move, anc = args
628 if f == '.hgsubstate': # merged internally
626 if f == '.hgsubstate': # merged internally
629 continue
627 continue
630 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
628 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
631 fcl = wctx[f1]
629 fcl = wctx[f1]
632 fco = mctx[f2]
630 fco = mctx[f2]
633 actx = repo[anc]
631 actx = repo[anc]
634 if fa in actx:
632 if fa in actx:
635 fca = actx[fa]
633 fca = actx[fa]
636 else:
634 else:
637 fca = repo.filectx(f1, fileid=nullrev)
635 fca = repo.filectx(f1, fileid=nullrev)
638 ms.add(fcl, fco, fca, f)
636 ms.add(fcl, fco, fca, f)
639 if f1 != f and move:
637 if f1 != f and move:
640 moves.append(f1)
638 moves.append(f1)
641
639
642 audit = repo.wopener.audit
640 audit = repo.wopener.audit
643 _updating = _('updating')
641 _updating = _('updating')
644 _files = _('files')
642 _files = _('files')
645 progress = repo.ui.progress
643 progress = repo.ui.progress
646
644
647 # remove renamed files after safely stored
645 # remove renamed files after safely stored
648 for f in moves:
646 for f in moves:
649 if os.path.lexists(repo.wjoin(f)):
647 if os.path.lexists(repo.wjoin(f)):
650 repo.ui.debug("removing %s\n" % f)
648 repo.ui.debug("removing %s\n" % f)
651 audit(f)
649 audit(f)
652 util.unlinkpath(repo.wjoin(f))
650 util.unlinkpath(repo.wjoin(f))
653
651
654 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
652 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
655
653
656 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
654 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
657 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
655 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
658
656
659 # remove in parallel (must come first)
657 # remove in parallel (must come first)
660 z = 0
658 z = 0
661 prog = worker.worker(repo.ui, 0.001, batchremove, (repo,), actions['r'])
659 prog = worker.worker(repo.ui, 0.001, batchremove, (repo,), actions['r'])
662 for i, item in prog:
660 for i, item in prog:
663 z += i
661 z += i
664 progress(_updating, z, item=item, total=numupdates, unit=_files)
662 progress(_updating, z, item=item, total=numupdates, unit=_files)
665 removed = len(actions['r'])
663 removed = len(actions['r'])
666
664
667 # get in parallel
665 # get in parallel
668 prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx), actions['g'])
666 prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx), actions['g'])
669 for i, item in prog:
667 for i, item in prog:
670 z += i
668 z += i
671 progress(_updating, z, item=item, total=numupdates, unit=_files)
669 progress(_updating, z, item=item, total=numupdates, unit=_files)
672 updated = len(actions['g'])
670 updated = len(actions['g'])
673
671
674 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
672 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
675 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
673 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
676
674
677 # forget (manifest only, just log it) (must come first)
675 # forget (manifest only, just log it) (must come first)
678 for f, args, msg in actions['f']:
676 for f, args, msg in actions['f']:
679 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
677 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
680 z += 1
678 z += 1
681 progress(_updating, z, item=f, total=numupdates, unit=_files)
679 progress(_updating, z, item=f, total=numupdates, unit=_files)
682
680
683 # re-add (manifest only, just log it)
681 # re-add (manifest only, just log it)
684 for f, args, msg in actions['a']:
682 for f, args, msg in actions['a']:
685 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
683 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
686 z += 1
684 z += 1
687 progress(_updating, z, item=f, total=numupdates, unit=_files)
685 progress(_updating, z, item=f, total=numupdates, unit=_files)
688
686
689 # keep (noop, just log it)
687 # keep (noop, just log it)
690 for f, args, msg in actions['k']:
688 for f, args, msg in actions['k']:
691 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
689 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
692 # no progress
690 # no progress
693
691
694 # merge
692 # merge
695 for f, args, msg in actions['m']:
693 for f, args, msg in actions['m']:
696 repo.ui.debug(" %s: %s -> m\n" % (f, msg))
694 repo.ui.debug(" %s: %s -> m\n" % (f, msg))
697 z += 1
695 z += 1
698 progress(_updating, z, item=f, total=numupdates, unit=_files)
696 progress(_updating, z, item=f, total=numupdates, unit=_files)
699 f1, f2, fa, move, anc = args
697 f1, f2, fa, move, anc = args
700 if f == '.hgsubstate': # subrepo states need updating
698 if f == '.hgsubstate': # subrepo states need updating
701 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
699 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
702 overwrite)
700 overwrite)
703 continue
701 continue
704 audit(f)
702 audit(f)
705 r = ms.resolve(f, wctx, labels=labels)
703 r = ms.resolve(f, wctx, labels=labels)
706 if r is not None and r > 0:
704 if r is not None and r > 0:
707 unresolved += 1
705 unresolved += 1
708 else:
706 else:
709 if r is None:
707 if r is None:
710 updated += 1
708 updated += 1
711 else:
709 else:
712 merged += 1
710 merged += 1
713
711
714 # directory rename, move local
712 # directory rename, move local
715 for f, args, msg in actions['dm']:
713 for f, args, msg in actions['dm']:
716 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
714 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
717 z += 1
715 z += 1
718 progress(_updating, z, item=f, total=numupdates, unit=_files)
716 progress(_updating, z, item=f, total=numupdates, unit=_files)
719 f0, flags = args
717 f0, flags = args
720 repo.ui.note(_("moving %s to %s\n") % (f0, f))
718 repo.ui.note(_("moving %s to %s\n") % (f0, f))
721 audit(f)
719 audit(f)
722 repo.wwrite(f, wctx.filectx(f0).data(), flags)
720 repo.wwrite(f, wctx.filectx(f0).data(), flags)
723 util.unlinkpath(repo.wjoin(f0))
721 util.unlinkpath(repo.wjoin(f0))
724 updated += 1
722 updated += 1
725
723
726 # local directory rename, get
724 # local directory rename, get
727 for f, args, msg in actions['dg']:
725 for f, args, msg in actions['dg']:
728 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
726 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
729 z += 1
727 z += 1
730 progress(_updating, z, item=f, total=numupdates, unit=_files)
728 progress(_updating, z, item=f, total=numupdates, unit=_files)
731 f0, flags = args
729 f0, flags = args
732 repo.ui.note(_("getting %s to %s\n") % (f0, f))
730 repo.ui.note(_("getting %s to %s\n") % (f0, f))
733 repo.wwrite(f, mctx.filectx(f0).data(), flags)
731 repo.wwrite(f, mctx.filectx(f0).data(), flags)
734 updated += 1
732 updated += 1
735
733
736 # divergent renames
734 # divergent renames
737 for f, args, msg in actions['dr']:
735 for f, args, msg in actions['dr']:
738 repo.ui.debug(" %s: %s -> dr\n" % (f, msg))
736 repo.ui.debug(" %s: %s -> dr\n" % (f, msg))
739 z += 1
737 z += 1
740 progress(_updating, z, item=f, total=numupdates, unit=_files)
738 progress(_updating, z, item=f, total=numupdates, unit=_files)
741 fl, = args
739 fl, = args
742 repo.ui.warn(_("note: possible conflict - %s was renamed "
740 repo.ui.warn(_("note: possible conflict - %s was renamed "
743 "multiple times to:\n") % f)
741 "multiple times to:\n") % f)
744 for nf in fl:
742 for nf in fl:
745 repo.ui.warn(" %s\n" % nf)
743 repo.ui.warn(" %s\n" % nf)
746
744
747 # rename and delete
745 # rename and delete
748 for f, args, msg in actions['rd']:
746 for f, args, msg in actions['rd']:
749 repo.ui.debug(" %s: %s -> rd\n" % (f, msg))
747 repo.ui.debug(" %s: %s -> rd\n" % (f, msg))
750 z += 1
748 z += 1
751 progress(_updating, z, item=f, total=numupdates, unit=_files)
749 progress(_updating, z, item=f, total=numupdates, unit=_files)
752 fl, = args
750 fl, = args
753 repo.ui.warn(_("note: possible conflict - %s was deleted "
751 repo.ui.warn(_("note: possible conflict - %s was deleted "
754 "and renamed to:\n") % f)
752 "and renamed to:\n") % f)
755 for nf in fl:
753 for nf in fl:
756 repo.ui.warn(" %s\n" % nf)
754 repo.ui.warn(" %s\n" % nf)
757
755
758 # exec
756 # exec
759 for f, args, msg in actions['e']:
757 for f, args, msg in actions['e']:
760 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
758 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
761 z += 1
759 z += 1
762 progress(_updating, z, item=f, total=numupdates, unit=_files)
760 progress(_updating, z, item=f, total=numupdates, unit=_files)
763 flags, = args
761 flags, = args
764 audit(f)
762 audit(f)
765 util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
763 util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
766 updated += 1
764 updated += 1
767
765
768 ms.commit()
766 ms.commit()
769 progress(_updating, None, total=numupdates, unit=_files)
767 progress(_updating, None, total=numupdates, unit=_files)
770
768
771 return updated, merged, removed, unresolved
769 return updated, merged, removed, unresolved
772
770
def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force, partial,
                     acceptremote, followcopies):
    """Calculate the actions needed to merge mctx into wctx using ancestors.

    Returns a dict mapping an action type (e.g. 'g' get, 'r' remove,
    'm' merge, 'k' keep, ...) to a list of (filename, args, message)
    tuples, as produced by manifestmerge().

    When several ancestors are given (merge.preferancestor=*), each
    ancestor's manifestmerge() result is treated as a "bid" and an
    auction picks one winning action per file.
    """

    if len(ancestors) == 1: # default
        # Single ancestor: manifestmerge() result is used directly.
        actions = manifestmerge(repo, wctx, mctx, ancestors[0],
                                branchmerge, force,
                                partial, acceptremote, followcopies)

    else: # only when merge.preferancestor=* - the default
        repo.ui.note(
            _("note: merging %s and %s using bids from ancestors %s\n") %
            (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))

        # Call for bids
        fbids = {} # mapping filename to bids (action method to list af actions)
        for ancestor in ancestors:
            repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
            actions = manifestmerge(repo, wctx, mctx, ancestor,
                                    branchmerge, force,
                                    partial, acceptremote, followcopies)
            # Group every proposed action by filename, then by action type.
            for m, l in sorted(actions.items()):
                for a in l:
                    f, args, msg = a
                    repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
                    if f in fbids:
                        d = fbids[f]
                        if m in d:
                            d[m].append(a)
                        else:
                            d[m] = [a]
                    else:
                        fbids[f] = {m: [a]}

        # Pick the best bid for each file
        repo.ui.note(_('\nauction for merging merge bids\n'))
        # Re-initialize with empty lists, reusing the key set of the LAST
        # ancestor's 'actions' dict — presumably every manifestmerge() call
        # returns the same set of action-type keys; verify if that changes.
        actions = dict((m, []) for m in actions.keys())
        for f, bids in sorted(fbids.items()):
            # bids is a mapping from action method to list af actions
            # Consensus?
            if len(bids) == 1: # all bids are the same kind of method
                # NOTE: Python 2 dict — items()[0] grabs the single entry.
                m, l = bids.items()[0]
                if util.all(a == l[0] for a in l[1:]): # len(bids) is > 1
                    repo.ui.note(" %s: consensus for %s\n" % (f, m))
                    actions[m].append(l[0])
                    continue
            # If keep is an option, just do it.
            if "k" in bids:
                repo.ui.note(" %s: picking 'keep' action\n" % f)
                actions['k'].append(bids["k"][0])
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if "g" in bids:
                ga0 = bids["g"][0]
                if util.all(a == ga0 for a in bids["g"][1:]):
                    repo.ui.note(" %s: picking 'get' action\n" % f)
                    actions['g'].append(ga0)
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note(' %s -> %s\n' % (msg, m))
            # Pick random action. TODO: Instead, prompt user when resolving
            # (Python 2 dict order — effectively arbitrary.)
            m, l = bids.items()[0]
            repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
                         (f, m))
            actions[m].append(l[0])
            continue
        repo.ui.note(_('end of auction\n\n'))

    # Prompt and create actions. TODO: Move this towards resolve phase.
    # 'cd': file changed locally but deleted remotely — ask the user.
    for f, args, msg in actions['cd']:
        if repo.ui.promptchoice(
            _("local changed %s which remote deleted\n"
              "use (c)hanged version or (d)elete?"
              "$$ &Changed $$ &Delete") % f, 0):
            actions['r'].append((f, None, "prompt delete"))
        else:
            actions['a'].append((f, None, "prompt keep"))
    del actions['cd'][:]

    # 'dc': file deleted locally but changed remotely — ask the user.
    for f, args, msg in actions['dc']:
        flags, = args
        if repo.ui.promptchoice(
            _("remote changed %s which local deleted\n"
              "use (c)hanged version or leave (d)eleted?"
              "$$ &Changed $$ &Deleted") % f, 0) == 0:
            actions['g'].append((f, (flags,), "prompt recreating"))
    del actions['dc'][:]

    # Working directory target: also schedule removed/forgotten files.
    if wctx.rev() is None:
        ractions, factions = _forgetremoved(wctx, mctx, branchmerge)
        actions['r'].extend(ractions)
        actions['f'].extend(factions)

    return actions
871
869
def recordupdates(repo, actions, branchmerge):
    """Record the result of a completed merge in the dirstate.

    'actions' maps action type to lists of (filename, args, message)
    tuples; 'branchmerge' tells whether this was a real branch merge
    (two parents) or a plain update. Only dirstate bookkeeping happens
    here — the working directory has already been updated.
    """
    # removals — these must be recorded before anything else
    for fname, _args, _msg in actions['r']:
        if not branchmerge:
            repo.dirstate.drop(fname)
        else:
            repo.dirstate.remove(fname)

    # forgotten files — also before anything else
    for fname, _args, _msg in actions['f']:
        repo.dirstate.drop(fname)

    # re-added files (only tracked again when not branch merging)
    if not branchmerge:
        for fname, _args, _msg in actions['a']:
            repo.dirstate.add(fname)

    # flag (exec/symlink) changes
    for fname, _args, _msg in actions['e']:
        repo.dirstate.normallookup(fname)

    # 'k' (keep) actions need no dirstate adjustment

    # files taken from the other side
    for fname, _args, _msg in actions['g']:
        if branchmerge:
            repo.dirstate.otherparent(fname)
        else:
            repo.dirstate.normal(fname)

    # merged files
    for fname, margs, _msg in actions['m']:
        f1, f2, fa, move, anc = margs
        if not branchmerge:
            # We've update-merged a locally modified file, so
            # we set the dirstate to emulate a normal checkout
            # of that file some time in the past. Thus our
            # merge will appear as a normal local file
            # modification.
            if f2 == fname: # file not locally copied/moved
                repo.dirstate.normallookup(fname)
            if move:
                repo.dirstate.drop(f1)
        else:
            # We've done a branch merge, mark this file as merged
            # so that we properly record the merger later
            repo.dirstate.merge(fname)
            if f1 != f2: # copy/rename
                if move:
                    repo.dirstate.remove(f1)
                if f1 == fname:
                    repo.dirstate.copy(f2, fname)
                else:
                    repo.dirstate.copy(f1, fname)

    # directory rename, move local
    for fname, margs, _msg in actions['dm']:
        f0, flag = margs
        if f0 not in repo.dirstate:
            continue # untracked file moved — nothing to record
        if not branchmerge:
            repo.dirstate.normal(fname)
            repo.dirstate.drop(f0)
        else:
            repo.dirstate.add(fname)
            repo.dirstate.remove(f0)
            repo.dirstate.copy(f0, fname)

    # directory rename, get
    for fname, margs, _msg in actions['dg']:
        f0, flag = margs
        if not branchmerge:
            repo.dirstate.normal(fname)
            repo.dirstate.drop(f0)
        else:
            repo.dirstate.add(fname)
            repo.dirstate.copy(f0, fname)
952
950
def update(repo, node, branchmerge, force, partial, ancestor=None,
           mergeancestor=False, labels=None):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to, or None if unspecified
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    partial = a function to filter file lists (dirstate not updated)
    mergeancestor = whether it is merging with an ancestor. If true,
      we should accept the incoming changes for any prompts that occur.
      If false, merging with an ancestor (fast-forward) is only allowed
      between different named branches. This flag is used by rebase extension
      as a temporary fix and should be avoided in general.

    The table below shows all the behaviors of the update command
    given the -c and -C or no options, whether the working directory
    is dirty, whether a revision is specified, and the relationship of
    the parent rev to the target rev (linear, on the same named
    branch, or on another named branch).

    This logic is tested by test-update-branches.t.

    -c -C dirty rev | linear   same   cross
     n  n   n    n  |   ok     (1)      x
     n  n   n    y  |   ok      ok     ok
     n  n   y    n  |  merge   (2)    (2)
     n  n   y    y  |  merge   (3)    (3)
     n  y   *    *  |  --- discard ---
     y  n   y    *  |  ---   (4)   ---
     y  n   n    *  |  ---    ok   ---
     y  y   *    *  |  ---   (5)   ---

    x = can't happen
    * = don't-care
    1 = abort: not a linear update (merge or update --check to force update)
    2 = abort: uncommitted changes (commit and merge, or update --clean to
        discard changes)
    3 = abort: uncommitted changes (commit or update --clean to discard changes)
    4 = abort: uncommitted changes (checked in commands.py)
    5 = incompatible options (checked in commands.py)

    Return the same tuple as applyupdates().
    """

    onode = node  # remember whether the caller gave an explicit target
    wlock = repo.wlock()
    try:
        wc = repo[None]
        pl = wc.parents()
        p1 = pl[0]
        pas = [None]  # merge ancestor candidates; filled in below
        if ancestor:
            pas = [repo[ancestor]]

        if node is None:
            # Here is where we should consider bookmarks, divergent bookmarks,
            # foreground changesets (successors), and tip of current branch;
            # but currently we are only checking the branch tips.
            try:
                node = repo.branchtip(wc.branch())
            except errormod.RepoLookupError:
                if wc.branch() == "default": # no default branch!
                    node = repo.lookup("tip") # update to tip
                else:
                    raise util.Abort(_("branch %s not found") % wc.branch())

            if p1.obsolete() and not p1.children():
                # allow updating to successors
                successors = obsolete.successorssets(repo, p1.node())

                # behavior of certain cases is as follows,
                #
                # divergent changesets: update to highest rev, similar to what
                #     is currently done when there are more than one head
                #     (i.e. 'tip')
                #
                # replaced changesets: same as divergent except we know there
                # is no conflict
                #
                # pruned changeset: no update is done; though, we could
                #     consider updating to the first non-obsolete parent,
                #     similar to what is current done for 'hg prune'

                if successors:
                    # flatten the list here handles both divergent (len > 1)
                    # and the usual case (len = 1)
                    successors = [n for sub in successors for n in sub]

                    # get the max revision for the given successors set,
                    # i.e. the 'tip' of a set
                    node = repo.revs("max(%ln)", successors).first()
                    pas = [p1]

        overwrite = force and not branchmerge

        p2 = repo[node]
        if pas[0] is None:
            # No explicit ancestor given: compute one (or several when
            # merge.preferancestor=* asks for bids from all of them).
            if repo.ui.config("merge", "preferancestor", '*') == '*':
                cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
                pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
            else:
                pas = [p1.ancestor(p2, warn=branchmerge)]

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)

        ### check phase
        if not overwrite and len(pl) > 1:
            raise util.Abort(_("outstanding uncommitted merge"))
        if branchmerge:
            if pas == [p2]:
                raise util.Abort(_("merging with a working directory ancestor"
                                   " has no effect"))
            elif pas == [p1]:
                if not mergeancestor and p1.branch() == p2.branch():
                    raise util.Abort(_("nothing to merge"),
                                     hint=_("use 'hg update' "
                                            "or check 'hg heads'"))
            if not force and (wc.files() or wc.deleted()):
                raise util.Abort(_("uncommitted changes"),
                                 hint=_("use 'hg status' to list changes"))
            for s in sorted(wc.substate):
                if wc.sub(s).dirty():
                    raise util.Abort(_("uncommitted changes in "
                                       "subrepository '%s'") % s)

        elif not overwrite:
            if p1 == p2: # no-op update
                # call the hooks and exit early
                repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
                repo.hook('update', parent1=xp2, parent2='', error=0)
                return 0, 0, 0, 0

            if pas not in ([p1], [p2]): # nonlinear
                dirty = wc.dirty(missing=True)
                if dirty or onode is None:
                    # Branching is a bit strange to ensure we do the minimal
                    # amount of call to obsolete.background.
                    foreground = obsolete.foreground(repo, [p1.node()])
                    # note: the <node> variable contains a random identifier
                    if repo[node].node() in foreground:
                        pas = [p1] # allow updating to successors
                    elif dirty:
                        msg = _("uncommitted changes")
                        if onode is None:
                            hint = _("commit and merge, or update --clean to"
                                     " discard changes")
                        else:
                            hint = _("commit or update --clean to discard"
                                     " changes")
                        raise util.Abort(msg, hint=hint)
                    else: # node is none
                        msg = _("not a linear update")
                        hint = _("merge or update --check to force update")
                        raise util.Abort(msg, hint=hint)
                else:
                    # Allow jumping branches if clean and specific rev given
                    pas = [p1]

        followcopies = False
        if overwrite:
            pas = [wc]
        elif pas == [p2]: # backwards
            pas = [wc.p1()]
        elif not branchmerge and not wc.dirty(missing=True):
            pass
        elif pas[0] and repo.ui.configbool("merge", "followcopies", True):
            followcopies = True

        ### calculate phase
        actions = calculateupdates(repo, wc, p2, pas, branchmerge, force,
                                   partial, mergeancestor, followcopies)

        ### apply phase
        if not branchmerge: # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
        if not partial:
            repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
            # note that we're in the middle of an update
            repo.vfs.write('updatestate', p2.hex())

        stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)

        if not partial:
            repo.dirstate.beginparentchange()
            repo.setparents(fp1, fp2)
            recordupdates(repo, actions, branchmerge)
            # update completed, clear state
            util.unlink(repo.join('updatestate'))

            if not branchmerge:
                repo.dirstate.setbranch(p2.branch())
            repo.dirstate.endparentchange()
    finally:
        wlock.release()

    if not partial:
        # fire the 'update' hook outside the wlock
        repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
    return stats
1152
1150
def graft(repo, ctx, pctx, labels):
    """Do a graft-like merge.

    This is a merge where the merge ancestor is chosen such that one
    or more changesets are grafted onto the current changeset. In
    addition to the merge, this fixes up the dirstate to include only
    a single parent and tries to duplicate any renames/copies
    appropriately.

    ctx - changeset to rebase
    pctx - merge base, usually ctx.p1()
    labels - merge labels eg ['local', 'graft']

    Returns the same tuple as update()/applyupdates().
    """

    # Merge ctx into the working copy using pctx as the ancestor
    # (branchmerge=True, force=True, partial=False).
    stats = update(repo, ctx.node(), True, True, False, pctx.node(),
                   labels=labels)
    # drop the second merge parent
    repo.dirstate.beginparentchange()
    repo.setparents(repo['.'].node(), nullid)
    repo.dirstate.write()
    # fix up dirstate for copies and renames
    copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
    repo.dirstate.endparentchange()
    return stats
General Comments 0
You need to be logged in to leave comments. Login now