manifest: transpose pair of pairs from diff()...
Martin von Zweigbergk
r22966:ff93aa00 default
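This changeset transposes the per-file value returned by manifestdict.diff() from field-grouped pairs to one (node, flag) pair per manifest side. A minimal sketch of the shape change, using made-up file names and truncated node ids purely for illustration:

    # Hypothetical diff() result for two files.
    # Old layout: ((n1, n2), (fl1, fl2)) - nodes grouped together, flags grouped together
    old_style = {
        'foo.py': (('a3c9...', 'b7f2...'), ('', 'x')),
        'bar.py': (('d41d...', None), ('', '')),        # missing from m2
    }
    # New layout: ((n1, fl1), (n2, fl2)) - one (node, flag) pair per manifest
    new_style = {
        'foo.py': (('a3c9...', ''), ('b7f2...', 'x')),
        'bar.py': (('d41d...', ''), (None, '')),        # missing from m2
    }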
@@ -1,268 +1,268 @@
1 1 # manifest.py - manifest revision class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 import mdiff, parsers, error, revlog, util
10 10 import array, struct
11 11
12 12 class manifestdict(dict):
13 13 def __init__(self, mapping=None, flags=None):
14 14 if mapping is None:
15 15 mapping = {}
16 16 if flags is None:
17 17 flags = {}
18 18 dict.__init__(self, mapping)
19 19 self._flags = flags
20 20 def flags(self, f):
21 21 return self._flags.get(f, "")
22 22 def withflags(self):
23 23 return set(self._flags.keys())
24 24 def setflag(self, f, flags):
25 25 """Set the flags (symlink, executable) for path f."""
26 26 self._flags[f] = flags
27 27 def copy(self):
28 28 return manifestdict(self, dict.copy(self._flags))
29 29 def intersectfiles(self, files):
30 30 '''make a new manifestdict with the intersection of self with files
31 31
32 32 The algorithm assumes that files is much smaller than self.'''
33 33 ret = manifestdict()
34 34 for fn in files:
35 35 if fn in self:
36 36 ret[fn] = self[fn]
37 37 flags = self._flags.get(fn, None)
38 38 if flags:
39 39 ret._flags[fn] = flags
40 40 return ret
41 41
42 42 def diff(self, m2):
43 43 '''Finds changes between the current manifest and m2. The result is
44 44 returned as a dict with filename as key and values of the form
45 ((n1,n2),(fl1,fl2)), where n1/n2 is the nodeid in the current/other
45 ((n1,fl1),(n2,fl2)), where n1/n2 is the nodeid in the current/other
46 46 manifest and fl1/fl2 is the flag in the current/other manifest. Where
47 47 the file does not exist, the nodeid will be None and the flags will be
48 48 the empty string.'''
49 49 diff = {}
50 50
51 51 for fn, n1 in self.iteritems():
52 52 fl1 = self._flags.get(fn, '')
53 53 n2 = m2.get(fn, None)
54 54 fl2 = m2._flags.get(fn, '')
55 55 if n2 is None:
56 56 fl2 = ''
57 57 if n1 != n2 or fl1 != fl2:
58 diff[fn] = ((n1, n2), (fl1, fl2))
58 diff[fn] = ((n1, fl1), (n2, fl2))
59 59
60 60 for fn, n2 in m2.iteritems():
61 61 if fn not in self:
62 62 fl2 = m2._flags.get(fn, '')
63 diff[fn] = ((None, n2), ('', fl2))
63 diff[fn] = ((None, ''), (n2, fl2))
64 64
65 65 return diff
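For readers outside the Mercurial codebase, here is a standalone sketch of the same two-pass comparison over plain dicts (node ids in one dict, flags in another; not the real manifestdict class), producing the new ((n1, fl1), (n2, fl2)) layout:

    def diff_manifests(m1, flags1, m2, flags2):
        """Sketch of manifestdict.diff() over plain dicts.

        m1/m2 map filename -> node id; flags1/flags2 map filename -> flag
        string. A missing file is reported with node None and empty flags.
        """
        result = {}
        # pass 1: files present in m1 (changed in, or absent from, m2)
        for fn, n1 in m1.items():
            fl1 = flags1.get(fn, '')
            n2 = m2.get(fn)
            fl2 = flags2.get(fn, '') if n2 is not None else ''
            if n1 != n2 or fl1 != fl2:
                result[fn] = ((n1, fl1), (n2, fl2))
        # pass 2: files only present in m2
        for fn, n2 in m2.items():
            if fn not in m1:
                result[fn] = ((None, ''), (n2, flags2.get(fn, '')))
        return result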
66 66
67 67 def text(self):
68 68 """Get the full data of this manifest as a bytestring."""
69 69 fl = sorted(self)
70 70 _checkforbidden(fl)
71 71
72 72 hex, flags = revlog.hex, self.flags
73 73 # if this is changed to support newlines in filenames,
74 74 # be sure to check the templates/ dir again (especially *-raw.tmpl)
75 75 return ''.join("%s\0%s%s\n" % (f, hex(self[f]), flags(f)) for f in fl)
76 76
77 77 def fastdelta(self, base, changes):
78 78 """Given a base manifest text as an array.array and a list of changes
79 79 relative to that text, compute a delta that can be used by revlog.
80 80 """
81 81 delta = []
82 82 dstart = None
83 83 dend = None
84 84 dline = [""]
85 85 start = 0
86 86 # zero copy representation of base as a buffer
87 87 addbuf = util.buffer(base)
88 88
89 89 # start with a readonly loop that finds the offset of
90 90 # each line and creates the deltas
91 91 for f, todelete in changes:
92 92 # bs will either be the index of the item or the insert point
93 93 start, end = _msearch(addbuf, f, start)
94 94 if not todelete:
95 95 l = "%s\0%s%s\n" % (f, revlog.hex(self[f]), self.flags(f))
96 96 else:
97 97 if start == end:
98 98 # item we want to delete was not found, error out
99 99 raise AssertionError(
100 100 _("failed to remove %s from manifest") % f)
101 101 l = ""
102 102 if dstart is not None and dstart <= start and dend >= start:
103 103 if dend < end:
104 104 dend = end
105 105 if l:
106 106 dline.append(l)
107 107 else:
108 108 if dstart is not None:
109 109 delta.append([dstart, dend, "".join(dline)])
110 110 dstart = start
111 111 dend = end
112 112 dline = [l]
113 113
114 114 if dstart is not None:
115 115 delta.append([dstart, dend, "".join(dline)])
116 116 # apply the delta to the base, and get a delta for addrevision
117 117 deltatext, arraytext = _addlistdelta(base, delta)
118 118 return arraytext, deltatext
119 119
120 120 def _msearch(m, s, lo=0, hi=None):
121 121 '''return a tuple (start, end) that says where to find s within m.
122 122
123 123 If the string is found, m[start:end] is the line containing
124 124 that string. If start == end, the string was not found and
125 125 they indicate the proper sorted insertion point.
126 126
127 127 m should be a buffer or a string
128 128 s is a string'''
129 129 def advance(i, c):
130 130 while i < lenm and m[i] != c:
131 131 i += 1
132 132 return i
133 133 if not s:
134 134 return (lo, lo)
135 135 lenm = len(m)
136 136 if not hi:
137 137 hi = lenm
138 138 while lo < hi:
139 139 mid = (lo + hi) // 2
140 140 start = mid
141 141 while start > 0 and m[start - 1] != '\n':
142 142 start -= 1
143 143 end = advance(start, '\0')
144 144 if m[start:end] < s:
145 145 # we know that after the null there are 40 bytes of sha1
146 146 # this translates to the bisect lo = mid + 1
147 147 lo = advance(end + 40, '\n') + 1
148 148 else:
149 149 # this translates to the bisect hi = mid
150 150 hi = start
151 151 end = advance(lo, '\0')
152 152 found = m[lo:end]
153 153 if s == found:
154 154 # we know that after the null there are 40 bytes of sha1
155 155 end = advance(end + 40, '\n')
156 156 return (lo, end + 1)
157 157 else:
158 158 return (lo, lo)
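_msearch bisects directly over the raw manifest text, where each sorted line is "<file>\0<40-hex-node><flags>\n". A simplified standalone sketch of the same idea, working on a list of lines instead of byte offsets (illustrative only, not a drop-in replacement):

    import bisect

    def find_entry(lines, filename):
        """Binary-search sorted manifest lines for filename.

        Each line looks like '<file>\0<40-hex-node><flags>' and the list
        is sorted by filename. Returns the matching line or None.
        """
        names = [l.split('\0', 1)[0] for l in lines]
        i = bisect.bisect_left(names, filename)
        if i < len(names) and names[i] == filename:
            return lines[i]
        return None

    # example with two fabricated entries
    lines = sorted(['bar\0' + '1' * 40, 'foo\0' + '2' * 40 + 'x'])
    assert find_entry(lines, 'foo').endswith('x')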
159 159
160 160 def _checkforbidden(l):
161 161 """Check filenames for illegal characters."""
162 162 for f in l:
163 163 if '\n' in f or '\r' in f:
164 164 raise error.RevlogError(
165 165 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
166 166
167 167
168 168 # apply the changes collected during the bisect loop to our addlist
169 169 # return a delta suitable for addrevision
170 170 def _addlistdelta(addlist, x):
171 171 # for large addlist arrays, building a new array is cheaper
172 172 # than repeatedly modifying the existing one
173 173 currentposition = 0
174 174 newaddlist = array.array('c')
175 175
176 176 for start, end, content in x:
177 177 newaddlist += addlist[currentposition:start]
178 178 if content:
179 179 newaddlist += array.array('c', content)
180 180
181 181 currentposition = end
182 182
183 183 newaddlist += addlist[currentposition:]
184 184
185 185 deltatext = "".join(struct.pack(">lll", start, end, len(content))
186 186 + content for start, end, content in x)
187 187 return deltatext, newaddlist
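fastdelta and _addlistdelta represent a delta as (start, end, content) replacement spans over the base text, and deltatext packs each span as a '>lll' header (start, end, len(content)) followed by the content. A small standalone sketch of applying and packing such spans, using plain bytes instead of array.array and fabricated data:

    import struct

    def apply_spans(base, spans):
        """Apply sorted (start, end, content) replacement spans to bytes."""
        out, pos = [], 0
        for start, end, content in spans:
            out.append(base[pos:start])   # unchanged prefix
            out.append(content)           # replacement (empty = deletion)
            pos = end
        out.append(base[pos:])
        return b''.join(out)

    def pack_spans(spans):
        """Pack spans the way _addlistdelta builds deltatext."""
        return b''.join(struct.pack(">lll", s, e, len(c)) + c
                        for s, e, c in spans)

    base = b"bar\0" + b"1" * 40 + b"\n" + b"foo\0" + b"2" * 40 + b"\n"
    spans = [(0, 45, b"")]                # delete the 45-byte 'bar' line
    assert apply_spans(base, spans).startswith(b"foo\0")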
188 188
189 189 def _parse(lines):
190 190 mfdict = manifestdict()
191 191 parsers.parse_manifest(mfdict, mfdict._flags, lines)
192 192 return mfdict
193 193
194 194 class manifest(revlog.revlog):
195 195 def __init__(self, opener):
196 196 # we expect to deal with not more than four revs at a time,
197 197 # during a commit --amend
198 198 self._mancache = util.lrucachedict(4)
199 199 revlog.revlog.__init__(self, opener, "00manifest.i")
200 200
201 201 def readdelta(self, node):
202 202 r = self.rev(node)
203 203 return _parse(mdiff.patchtext(self.revdiff(self.deltaparent(r), r)))
204 204
205 205 def readfast(self, node):
206 206 '''use the faster of readdelta or read'''
207 207 r = self.rev(node)
208 208 deltaparent = self.deltaparent(r)
209 209 if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r):
210 210 return self.readdelta(node)
211 211 return self.read(node)
212 212
213 213 def read(self, node):
214 214 if node == revlog.nullid:
215 215 return manifestdict() # don't upset local cache
216 216 if node in self._mancache:
217 217 return self._mancache[node][0]
218 218 text = self.revision(node)
219 219 arraytext = array.array('c', text)
220 220 mapping = _parse(text)
221 221 self._mancache[node] = (mapping, arraytext)
222 222 return mapping
223 223
224 224 def find(self, node, f):
225 225 '''look up entry for a single file efficiently.
226 226 return (node, flags) pair if found, (None, None) if not.'''
227 227 if node in self._mancache:
228 228 mapping = self._mancache[node][0]
229 229 return mapping.get(f), mapping.flags(f)
230 230 text = self.revision(node)
231 231 start, end = _msearch(text, f)
232 232 if start == end:
233 233 return None, None
234 234 l = text[start:end]
235 235 f, n = l.split('\0')
236 236 return revlog.bin(n[:40]), n[40:-1]
237 237
238 238 def add(self, map, transaction, link, p1, p2, added, removed):
239 239 if p1 in self._mancache:
240 240 # If our first parent is in the manifest cache, we can
241 241 # compute a delta here using properties we know about the
242 242 # manifest up-front, which may save time later for the
243 243 # revlog layer.
244 244
245 245 _checkforbidden(added)
246 246 # combine the changed lists into one list for sorting
247 247 work = [(x, False) for x in added]
248 248 work.extend((x, True) for x in removed)
249 249 # this could use heapq.merge() (from Python 2.6+) or equivalent
250 250 # since the lists are already sorted
251 251 work.sort()
252 252
253 253 arraytext, deltatext = map.fastdelta(self._mancache[p1][1], work)
254 254 cachedelta = self.rev(p1), deltatext
255 255 text = util.buffer(arraytext)
256 256 else:
257 257 # The first parent manifest isn't already loaded, so we'll
258 258 # just encode a fulltext of the manifest and pass that
259 259 # through to the revlog layer, and let it handle the delta
260 260 # process.
261 261 text = map.text()
262 262 arraytext = array.array('c', text)
263 263 cachedelta = None
264 264
265 265 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
266 266 self._mancache[n] = (map, arraytext)
267 267
268 268 return n
@@ -1,1160 +1,1160 @@
1 1 # merge.py - directory-level update/merge handling for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import struct
9 9
10 10 from node import nullid, nullrev, hex, bin
11 11 from i18n import _
12 12 from mercurial import obsolete
13 13 import error as errormod, util, filemerge, copies, subrepo, worker
14 14 import errno, os, shutil
15 15
16 16 _pack = struct.pack
17 17 _unpack = struct.unpack
18 18
19 19 def _droponode(data):
20 20 # used for compatibility for v1
21 21 bits = data.split("\0")
22 22 bits = bits[:-2] + bits[-1:]
23 23 return "\0".join(bits)
24 24
25 25 class mergestate(object):
26 26 '''track 3-way merge state of individual files
27 27
28 28 it is stored on disk when needed. Two files are used: one with an old
29 29 format, one with a new format. Both contain similar data, but the new
30 30 format can store new kinds of fields.
31 31
32 32 The current new format is a list of arbitrary records of the form:
33 33
34 34 [type][length][content]
35 35
36 36 Type is a single character, length is a 4-byte integer, content is an
37 37 arbitrary suite of bytes of length `length`.
38 38
39 39 Type should be a letter. Capital letters are mandatory records; Mercurial
40 40 should abort if they are unknown. Lower-case records can be safely ignored.
41 41
42 42 Currently known records:
43 43
44 44 L: the node of the "local" part of the merge (hexified version)
45 45 O: the node of the "other" part of the merge (hexified version)
46 46 F: a file-to-be-merged entry
47 47 '''
48 48 statepathv1 = "merge/state"
49 49 statepathv2 = "merge/state2"
50 50
51 51 def __init__(self, repo):
52 52 self._repo = repo
53 53 self._dirty = False
54 54 self._read()
55 55
56 56 def reset(self, node=None, other=None):
57 57 self._state = {}
58 58 self._local = None
59 59 self._other = None
60 60 if node:
61 61 self._local = node
62 62 self._other = other
63 63 shutil.rmtree(self._repo.join("merge"), True)
64 64 self._dirty = False
65 65
66 66 def _read(self):
67 67 """Analyse each record's content to restore a serialized state from disk
68 68
69 69 This function processes "record" entries produced by the de-serialization
70 70 of the on-disk file.
71 71 """
72 72 self._state = {}
73 73 self._local = None
74 74 self._other = None
75 75 records = self._readrecords()
76 76 for rtype, record in records:
77 77 if rtype == 'L':
78 78 self._local = bin(record)
79 79 elif rtype == 'O':
80 80 self._other = bin(record)
81 81 elif rtype == "F":
82 82 bits = record.split("\0")
83 83 self._state[bits[0]] = bits[1:]
84 84 elif not rtype.islower():
85 85 raise util.Abort(_('unsupported merge state record: %s')
86 86 % rtype)
87 87 self._dirty = False
88 88
89 89 def _readrecords(self):
90 90 """Read merge state from disk and return a list of records (TYPE, data)
91 91
92 92 We read data from both v1 and v2 files and decide which one to use.
93 93
94 94 V1 has been used by versions prior to 2.9.1 and contains less data than
95 95 v2. We read both versions and check whether any data in v2 contradicts
96 96 v1. If there is no contradiction, we can safely assume that both v1
97 97 and v2 were written at the same time and use the extra data in v2. If
98 98 there is a contradiction, we ignore the v2 content, as we assume an old
99 99 version of Mercurial has overwritten the mergestate file and left an old
100 100 v2 file around.
101 101
102 102 returns a list of records [(TYPE, data), ...]"""
103 103 v1records = self._readrecordsv1()
104 104 v2records = self._readrecordsv2()
105 105 oldv2 = set() # old format version of v2 record
106 106 for rec in v2records:
107 107 if rec[0] == 'L':
108 108 oldv2.add(rec)
109 109 elif rec[0] == 'F':
110 110 # drop the onode data (not contained in v1)
111 111 oldv2.add(('F', _droponode(rec[1])))
112 112 for rec in v1records:
113 113 if rec not in oldv2:
114 114 # v1 file is newer than v2 file, use it
115 115 # we have to infer the "other" changeset of the merge
116 116 # we cannot do better than that with v1 of the format
117 117 mctx = self._repo[None].parents()[-1]
118 118 v1records.append(('O', mctx.hex()))
119 119 # add placeholder "other" file node information
120 120 # nobody is using it yet so we do not need to fetch the data
121 121 # if mctx was wrong, `mctx[bits[-2]]` may fail.
122 122 for idx, r in enumerate(v1records):
123 123 if r[0] == 'F':
124 124 bits = r[1].split("\0")
125 125 bits.insert(-2, '')
126 126 v1records[idx] = (r[0], "\0".join(bits))
127 127 return v1records
128 128 else:
129 129 return v2records
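The reconciliation above boils down to: downgrade every v2 record to its v1 shape and, if any v1 record is missing from that downgraded set, treat the v1 file as newer and let it win. A compact standalone restatement of that check over plain record tuples (it ignores the placeholder fix-ups applied to real v1 data above):

    def drop_onode(data):
        """v2 'F' records carry an extra 'other node' field; drop it to
        recover the v1 shape (mirrors _droponode)."""
        bits = data.split("\0")
        return "\0".join(bits[:-2] + bits[-1:])

    def pick_records(v1records, v2records):
        """Return the record list to trust: v2 only if it agrees with v1."""
        oldv2 = set()
        for rtype, data in v2records:
            if rtype == 'L':
                oldv2.add((rtype, data))
            elif rtype == 'F':
                oldv2.add(('F', drop_onode(data)))
        if all(rec in oldv2 for rec in v1records):
            return v2records      # consistent: use the richer v2 data
        return v1records          # v1 is newer: ignore the stale v2 file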
130 130
131 131 def _readrecordsv1(self):
132 132 """read the on-disk merge state for the version 1 file
133 133
134 134 returns a list of records [(TYPE, data), ...]
135 135
136 136 Note: the "F" data from this file are one entry short
137 137 (no "other file node" entry)
138 138 """
139 139 records = []
140 140 try:
141 141 f = self._repo.opener(self.statepathv1)
142 142 for i, l in enumerate(f):
143 143 if i == 0:
144 144 records.append(('L', l[:-1]))
145 145 else:
146 146 records.append(('F', l[:-1]))
147 147 f.close()
148 148 except IOError, err:
149 149 if err.errno != errno.ENOENT:
150 150 raise
151 151 return records
152 152
153 153 def _readrecordsv2(self):
154 154 """read the on-disk merge state for the version 2 file
155 155
156 156 returns a list of records [(TYPE, data), ...]
157 157 """
158 158 records = []
159 159 try:
160 160 f = self._repo.opener(self.statepathv2)
161 161 data = f.read()
162 162 off = 0
163 163 end = len(data)
164 164 while off < end:
165 165 rtype = data[off]
166 166 off += 1
167 167 length = _unpack('>I', data[off:(off + 4)])[0]
168 168 off += 4
169 169 record = data[off:(off + length)]
170 170 off += length
171 171 records.append((rtype, record))
172 172 f.close()
173 173 except IOError, err:
174 174 if err.errno != errno.ENOENT:
175 175 raise
176 176 return records
177 177
178 178 def active(self):
179 179 """Whether mergestate is active.
180 180
181 181 Returns True if there appears to be mergestate. This is a rough proxy
182 182 for "is a merge in progress."
183 183 """
184 184 # Check local variables before looking at filesystem for performance
185 185 # reasons.
186 186 return bool(self._local) or bool(self._state) or \
187 187 self._repo.opener.exists(self.statepathv1) or \
188 188 self._repo.opener.exists(self.statepathv2)
189 189
190 190 def commit(self):
191 191 """Write current state on disk (if necessary)"""
192 192 if self._dirty:
193 193 records = []
194 194 records.append(("L", hex(self._local)))
195 195 records.append(("O", hex(self._other)))
196 196 for d, v in self._state.iteritems():
197 197 records.append(("F", "\0".join([d] + v)))
198 198 self._writerecords(records)
199 199 self._dirty = False
200 200
201 201 def _writerecords(self, records):
202 202 """Write current state on disk (both v1 and v2)"""
203 203 self._writerecordsv1(records)
204 204 self._writerecordsv2(records)
205 205
206 206 def _writerecordsv1(self, records):
207 207 """Write current state on disk in a version 1 file"""
208 208 f = self._repo.opener(self.statepathv1, "w")
209 209 irecords = iter(records)
210 210 lrecords = irecords.next()
211 211 assert lrecords[0] == 'L'
212 212 f.write(hex(self._local) + "\n")
213 213 for rtype, data in irecords:
214 214 if rtype == "F":
215 215 f.write("%s\n" % _droponode(data))
216 216 f.close()
217 217
218 218 def _writerecordsv2(self, records):
219 219 """Write current state on disk in a version 2 file"""
220 220 f = self._repo.opener(self.statepathv2, "w")
221 221 for key, data in records:
222 222 assert len(key) == 1
223 223 format = ">sI%is" % len(data)
224 224 f.write(_pack(format, key, len(data), data))
225 225 f.close()
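The v2 state file is just a concatenation of [type][length][content] frames: a one-byte type, a big-endian 32-bit length, then the payload, matching the '>sI...' format used above. A round-trip sketch in standalone Python (bytes literals and fabricated node strings, not the real repo opener):

    import struct

    def pack_records(records):
        """Serialize (type, data) pairs as v2-style frames."""
        out = b''
        for rtype, data in records:
            out += struct.pack('>sI', rtype, len(data)) + data
        return out

    def unpack_records(blob):
        """Parse v2-style frames back into (type, data) pairs."""
        records, off = [], 0
        while off < len(blob):
            rtype = blob[off:off + 1]
            (length,) = struct.unpack('>I', blob[off + 1:off + 5])
            records.append((rtype, blob[off + 5:off + 5 + length]))
            off += 5 + length
        return records

    recs = [(b'L', b'0' * 40), (b'O', b'f' * 40)]
    assert unpack_records(pack_records(recs)) == recs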
226 226
227 227 def add(self, fcl, fco, fca, fd):
228 228 """add a new (potentially?) conflicting file to the merge state
229 229 fcl: file context for local,
230 230 fco: file context for remote,
231 231 fca: file context for ancestors,
232 232 fd: file path of the resulting merge.
233 233
234 234 note: also write the local version to the `.hg/merge` directory.
235 235 """
236 236 hash = util.sha1(fcl.path()).hexdigest()
237 237 self._repo.opener.write("merge/" + hash, fcl.data())
238 238 self._state[fd] = ['u', hash, fcl.path(),
239 239 fca.path(), hex(fca.filenode()),
240 240 fco.path(), hex(fco.filenode()),
241 241 fcl.flags()]
242 242 self._dirty = True
243 243
244 244 def __contains__(self, dfile):
245 245 return dfile in self._state
246 246
247 247 def __getitem__(self, dfile):
248 248 return self._state[dfile][0]
249 249
250 250 def __iter__(self):
251 251 return iter(sorted(self._state))
252 252
253 253 def files(self):
254 254 return self._state.keys()
255 255
256 256 def mark(self, dfile, state):
257 257 self._state[dfile][0] = state
258 258 self._dirty = True
259 259
260 260 def unresolved(self):
261 261 """Obtain the paths of unresolved files."""
262 262
263 263 for f, entry in self._state.items():
264 264 if entry[0] == 'u':
265 265 yield f
266 266
267 267 def resolve(self, dfile, wctx, labels=None):
268 268 """rerun merge process for file path `dfile`"""
269 269 if self[dfile] == 'r':
270 270 return 0
271 271 stateentry = self._state[dfile]
272 272 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
273 273 octx = self._repo[self._other]
274 274 fcd = wctx[dfile]
275 275 fco = octx[ofile]
276 276 fca = self._repo.filectx(afile, fileid=anode)
277 277 # "premerge" x flags
278 278 flo = fco.flags()
279 279 fla = fca.flags()
280 280 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
281 281 if fca.node() == nullid:
282 282 self._repo.ui.warn(_('warning: cannot merge flags for %s\n') %
283 283 afile)
284 284 elif flags == fla:
285 285 flags = flo
286 286 # restore local
287 287 f = self._repo.opener("merge/" + hash)
288 288 self._repo.wwrite(dfile, f.read(), flags)
289 289 f.close()
290 290 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca,
291 291 labels=labels)
292 292 if r is None:
293 293 # no real conflict
294 294 del self._state[dfile]
295 295 self._dirty = True
296 296 elif not r:
297 297 self.mark(dfile, 'r')
298 298 return r
299 299
300 300 def _checkunknownfile(repo, wctx, mctx, f):
301 301 return (not repo.dirstate._ignore(f)
302 302 and os.path.isfile(repo.wjoin(f))
303 303 and repo.wopener.audit.check(f)
304 304 and repo.dirstate.normalize(f) not in repo.dirstate
305 305 and mctx[f].cmp(wctx[f]))
306 306
307 307 def _checkunknown(repo, wctx, mctx):
308 308 "check for collisions between unknown files and files in mctx"
309 309
310 310 error = False
311 311 for f in mctx:
312 312 if f not in wctx and _checkunknownfile(repo, wctx, mctx, f):
313 313 error = True
314 314 wctx._repo.ui.warn(_("%s: untracked file differs\n") % f)
315 315 if error:
316 316 raise util.Abort(_("untracked files in working directory differ "
317 317 "from files in requested revision"))
318 318
319 319 def _forgetremoved(wctx, mctx, branchmerge):
320 320 """
321 321 Forget removed files
322 322
323 323 If we're jumping between revisions (as opposed to merging), and if
324 324 neither the working directory nor the target rev has the file,
325 325 then we need to remove it from the dirstate, to prevent the
326 326 dirstate from listing the file when it is no longer in the
327 327 manifest.
328 328
329 329 If we're merging, and the other revision has removed a file
330 330 that is not present in the working directory, we need to mark it
331 331 as removed.
332 332 """
333 333
334 334 ractions = []
335 335 factions = xactions = []
336 336 if branchmerge:
337 337 xactions = ractions
338 338 for f in wctx.deleted():
339 339 if f not in mctx:
340 340 xactions.append((f, None, "forget deleted"))
341 341
342 342 if not branchmerge:
343 343 for f in wctx.removed():
344 344 if f not in mctx:
345 345 factions.append((f, None, "forget removed"))
346 346
347 347 return ractions, factions
348 348
349 349 def _checkcollision(repo, wmf, actions):
350 350 # build up the provisional merged manifest
351 351 pmmf = set(wmf)
352 352
353 353 if actions:
354 354 # k, dr, e and rd are no-op
355 355 for m in 'a', 'f', 'g', 'cd', 'dc':
356 356 for f, args, msg in actions[m]:
357 357 pmmf.add(f)
358 358 for f, args, msg in actions['r']:
359 359 pmmf.discard(f)
360 360 for f, args, msg in actions['dm']:
361 361 f2, flags = args
362 362 pmmf.discard(f2)
363 363 pmmf.add(f)
364 364 for f, args, msg in actions['dg']:
365 365 f2, flags = args
366 366 pmmf.add(f)
367 367 for f, args, msg in actions['m']:
368 368 f1, f2, fa, move, anc = args
369 369 if move:
370 370 pmmf.discard(f1)
371 371 pmmf.add(f)
372 372
373 373 # check case-folding collision in provisional merged manifest
374 374 foldmap = {}
375 375 for f in sorted(pmmf):
376 376 fold = util.normcase(f)
377 377 if fold in foldmap:
378 378 raise util.Abort(_("case-folding collision between %s and %s")
379 379 % (f, foldmap[fold]))
380 380 foldmap[fold] = f
381 381
382 382 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
383 383 acceptremote, followcopies):
384 384 """
385 385 Merge p1 and p2 with ancestor pa and generate merge action list
386 386
387 387 branchmerge and force are as passed in to update
388 388 partial = function to filter file lists
389 389 acceptremote = accept the incoming changes without prompting
390 390 """
391 391
392 392 actions = dict((m, []) for m in 'a f g cd dc r dm dg m dr e rd k'.split())
393 393 copy, movewithdir = {}, {}
394 394
395 395 # manifests fetched in order are going to be faster, so prime the caches
396 396 [x.manifest() for x in
397 397 sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]
398 398
399 399 if followcopies:
400 400 ret = copies.mergecopies(repo, wctx, p2, pa)
401 401 copy, movewithdir, diverge, renamedelete = ret
402 402 for of, fl in diverge.iteritems():
403 403 actions['dr'].append((of, (fl,), "divergent renames"))
404 404 for of, fl in renamedelete.iteritems():
405 405 actions['rd'].append((of, (fl,), "rename and delete"))
406 406
407 407 repo.ui.note(_("resolving manifests\n"))
408 408 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
409 409 % (bool(branchmerge), bool(force), bool(partial)))
410 410 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
411 411
412 412 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
413 413 copied = set(copy.values())
414 414 copied.update(movewithdir.values())
415 415
416 416 if '.hgsubstate' in m1:
417 417 # check whether sub state is modified
418 418 for s in sorted(wctx.substate):
419 419 if wctx.sub(s).dirty():
420 420 m1['.hgsubstate'] += "+"
421 421 break
422 422
423 423 aborts = []
424 424 # Compare manifests
425 425 diff = m1.diff(m2)
426 426
427 for f, ((n1, n2), (fl1, fl2)) in diff.iteritems():
427 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
428 428 if partial and not partial(f):
429 429 continue
430 430 if n1 and n2:
431 431 fa = f
432 432 a = ma.get(f, nullid)
433 433 if a == nullid:
434 434 fa = copy.get(f, f)
435 435 # Note: f as default is wrong - we can't really make a 3-way
436 436 # merge without an ancestor file.
437 437 fla = ma.flags(fa)
438 438 nol = 'l' not in fl1 + fl2 + fla
439 439 if n2 == a and fl2 == fla:
440 440 actions['k'].append((f, (), "keep")) # remote unchanged
441 441 elif n1 == a and fl1 == fla: # local unchanged - use remote
442 442 if n1 == n2: # optimization: keep local content
443 443 actions['e'].append((f, (fl2,), "update permissions"))
444 444 else:
445 445 actions['g'].append((f, (fl2,), "remote is newer"))
446 446 elif nol and n2 == a: # remote only changed 'x'
447 447 actions['e'].append((f, (fl2,), "update permissions"))
448 448 elif nol and n1 == a: # local only changed 'x'
449 449 actions['g'].append((f, (fl1,), "remote is newer"))
450 450 else: # both changed something
451 451 actions['m'].append((f, (f, f, fa, False, pa.node()),
452 452 "versions differ"))
453 453 elif f in copied: # files we'll deal with on m2 side
454 454 pass
455 455 elif n1 and f in movewithdir: # directory rename, move local
456 456 f2 = movewithdir[f]
457 457 actions['dm'].append((f2, (f, fl1),
458 458 "remote directory rename - move from " + f))
459 459 elif n1 and f in copy:
460 460 f2 = copy[f]
461 461 actions['m'].append((f, (f, f2, f2, False, pa.node()),
462 462 "local copied/moved from " + f2))
463 463 elif n1 and f in ma: # clean, a different, no remote
464 464 if n1 != ma[f]:
465 465 if acceptremote:
466 466 actions['r'].append((f, None, "remote delete"))
467 467 else:
468 468 actions['cd'].append((f, None, "prompt changed/deleted"))
469 469 elif n1[20:] == "a": # added, no remote
470 470 actions['f'].append((f, None, "remote deleted"))
471 471 else:
472 472 actions['r'].append((f, None, "other deleted"))
473 473 elif n2 and f in movewithdir:
474 474 f2 = movewithdir[f]
475 475 actions['dg'].append((f2, (f, fl2),
476 476 "local directory rename - get from " + f))
477 477 elif n2 and f in copy:
478 478 f2 = copy[f]
479 479 if f2 in m2:
480 480 actions['m'].append((f, (f2, f, f2, False, pa.node()),
481 481 "remote copied from " + f2))
482 482 else:
483 483 actions['m'].append((f, (f2, f, f2, True, pa.node()),
484 484 "remote moved from " + f2))
485 485 elif n2 and f not in ma:
486 486 # local unknown, remote created: the logic is described by the
487 487 # following table:
488 488 #
489 489 # force branchmerge different | action
490 490 # n * n | get
491 491 # n * y | abort
492 492 # y n * | get
493 493 # y y n | get
494 494 # y y y | merge
495 495 #
496 496 # Checking whether the files are different is expensive, so we
497 497 # don't do that when we can avoid it.
498 498 if force and not branchmerge:
499 499 actions['g'].append((f, (fl2,), "remote created"))
500 500 else:
501 501 different = _checkunknownfile(repo, wctx, p2, f)
502 502 if force and branchmerge and different:
503 503 # FIXME: This is wrong - f is not in ma ...
504 504 actions['m'].append((f, (f, f, f, False, pa.node()),
505 505 "remote differs from untracked local"))
506 506 elif not force and different:
507 507 aborts.append((f, "ud"))
508 508 else:
509 509 actions['g'].append((f, (fl2,), "remote created"))
510 510 elif n2 and n2 != ma[f]:
511 511 different = _checkunknownfile(repo, wctx, p2, f)
512 512 if not force and different:
513 513 aborts.append((f, "ud"))
514 514 else:
515 515 # if different: old untracked f may be overwritten and lost
516 516 if acceptremote:
517 517 actions['g'].append((f, (m2.flags(f),),
518 518 "remote recreating"))
519 519 else:
520 520 actions['dc'].append((f, (m2.flags(f),),
521 521 "prompt deleted/changed"))
522 522
523 523 for f, m in sorted(aborts):
524 524 if m == "ud":
525 525 repo.ui.warn(_("%s: untracked file differs\n") % f)
526 526 else: assert False, m
527 527 if aborts:
528 528 raise util.Abort(_("untracked files in working directory differ "
529 529 "from files in requested revision"))
530 530
531 531 if not util.checkcase(repo.path):
532 532 # check collision between files only in p2 for clean update
533 533 if (not branchmerge and
534 534 (force or not wctx.dirty(missing=True, branch=False))):
535 535 _checkcollision(repo, m2, None)
536 536 else:
537 537 _checkcollision(repo, m1, actions)
538 538
539 539 return actions
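The force/branchmerge/different table in the "local unknown, remote created" branch above can be read as a small pure function. A hedged sketch (hypothetical helper name, returning the action letter manifestmerge would queue, or None for the abort case):

    def unknown_remote_created(force, branchmerge, different):
        """Mirror the decision table for files unknown locally but
        created remotely: 'g' = get, 'm' = merge, None = abort."""
        if force and not branchmerge:
            return 'g'                      # forced non-merge update: get
        if different:
            return 'm' if force else None   # contents differ: merge or abort
        return 'g'                          # same content (or untracked): get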
540 540
541 541 def batchremove(repo, actions):
542 542 """apply removes to the working directory
543 543
544 544 yields tuples for progress updates
545 545 """
546 546 verbose = repo.ui.verbose
547 547 unlink = util.unlinkpath
548 548 wjoin = repo.wjoin
549 549 audit = repo.wopener.audit
550 550 i = 0
551 551 for f, args, msg in actions:
552 552 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
553 553 if verbose:
554 554 repo.ui.note(_("removing %s\n") % f)
555 555 audit(f)
556 556 try:
557 557 unlink(wjoin(f), ignoremissing=True)
558 558 except OSError, inst:
559 559 repo.ui.warn(_("update failed to remove %s: %s!\n") %
560 560 (f, inst.strerror))
561 561 if i == 100:
562 562 yield i, f
563 563 i = 0
564 564 i += 1
565 565 if i > 0:
566 566 yield i, f
567 567
568 568 def batchget(repo, mctx, actions):
569 569 """apply gets to the working directory
570 570
571 571 mctx is the context to get from
572 572
573 573 yields tuples for progress updates
574 574 """
575 575 verbose = repo.ui.verbose
576 576 fctx = mctx.filectx
577 577 wwrite = repo.wwrite
578 578 i = 0
579 579 for f, args, msg in actions:
580 580 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
581 581 if verbose:
582 582 repo.ui.note(_("getting %s\n") % f)
583 583 wwrite(f, fctx(f).data(), args[0])
584 584 if i == 100:
585 585 yield i, f
586 586 i = 0
587 587 i += 1
588 588 if i > 0:
589 589 yield i, f
590 590
591 591 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
592 592 """apply the merge action list to the working directory
593 593
594 594 wctx is the working copy context
595 595 mctx is the context to be merged into the working copy
596 596
597 597 Return a tuple of counts (updated, merged, removed, unresolved) that
598 598 describes how many files were affected by the update.
599 599 """
600 600
601 601 updated, merged, removed, unresolved = 0, 0, 0, 0
602 602 ms = mergestate(repo)
603 603 ms.reset(wctx.p1().node(), mctx.node())
604 604 moves = []
605 605 for m, l in actions.items():
606 606 l.sort()
607 607
608 608 # prescan for merges
609 609 for f, args, msg in actions['m']:
610 610 f1, f2, fa, move, anc = args
611 611 if f == '.hgsubstate': # merged internally
612 612 continue
613 613 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
614 614 fcl = wctx[f1]
615 615 fco = mctx[f2]
616 616 actx = repo[anc]
617 617 if fa in actx:
618 618 fca = actx[fa]
619 619 else:
620 620 fca = repo.filectx(f1, fileid=nullrev)
621 621 ms.add(fcl, fco, fca, f)
622 622 if f1 != f and move:
623 623 moves.append(f1)
624 624
625 625 audit = repo.wopener.audit
626 626 _updating = _('updating')
627 627 _files = _('files')
628 628 progress = repo.ui.progress
629 629
630 630 # remove renamed files after safely stored
631 631 for f in moves:
632 632 if os.path.lexists(repo.wjoin(f)):
633 633 repo.ui.debug("removing %s\n" % f)
634 634 audit(f)
635 635 util.unlinkpath(repo.wjoin(f))
636 636
637 637 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
638 638
639 639 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
640 640 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
641 641
642 642 # remove in parallel (must come first)
643 643 z = 0
644 644 prog = worker.worker(repo.ui, 0.001, batchremove, (repo,), actions['r'])
645 645 for i, item in prog:
646 646 z += i
647 647 progress(_updating, z, item=item, total=numupdates, unit=_files)
648 648 removed = len(actions['r'])
649 649
650 650 # get in parallel
651 651 prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx), actions['g'])
652 652 for i, item in prog:
653 653 z += i
654 654 progress(_updating, z, item=item, total=numupdates, unit=_files)
655 655 updated = len(actions['g'])
656 656
657 657 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
658 658 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
659 659
660 660 # forget (manifest only, just log it) (must come first)
661 661 for f, args, msg in actions['f']:
662 662 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
663 663 z += 1
664 664 progress(_updating, z, item=f, total=numupdates, unit=_files)
665 665
666 666 # re-add (manifest only, just log it)
667 667 for f, args, msg in actions['a']:
668 668 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
669 669 z += 1
670 670 progress(_updating, z, item=f, total=numupdates, unit=_files)
671 671
672 672 # keep (noop, just log it)
673 673 for f, args, msg in actions['k']:
674 674 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
675 675 # no progress
676 676
677 677 # merge
678 678 for f, args, msg in actions['m']:
679 679 repo.ui.debug(" %s: %s -> m\n" % (f, msg))
680 680 z += 1
681 681 progress(_updating, z, item=f, total=numupdates, unit=_files)
682 682 f1, f2, fa, move, anc = args
683 683 if f == '.hgsubstate': # subrepo states need updating
684 684 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
685 685 overwrite)
686 686 continue
687 687 audit(f)
688 688 r = ms.resolve(f, wctx, labels=labels)
689 689 if r is not None and r > 0:
690 690 unresolved += 1
691 691 else:
692 692 if r is None:
693 693 updated += 1
694 694 else:
695 695 merged += 1
696 696
697 697 # directory rename, move local
698 698 for f, args, msg in actions['dm']:
699 699 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
700 700 z += 1
701 701 progress(_updating, z, item=f, total=numupdates, unit=_files)
702 702 f0, flags = args
703 703 repo.ui.note(_("moving %s to %s\n") % (f0, f))
704 704 audit(f)
705 705 repo.wwrite(f, wctx.filectx(f0).data(), flags)
706 706 util.unlinkpath(repo.wjoin(f0))
707 707 updated += 1
708 708
709 709 # local directory rename, get
710 710 for f, args, msg in actions['dg']:
711 711 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
712 712 z += 1
713 713 progress(_updating, z, item=f, total=numupdates, unit=_files)
714 714 f0, flags = args
715 715 repo.ui.note(_("getting %s to %s\n") % (f0, f))
716 716 repo.wwrite(f, mctx.filectx(f0).data(), flags)
717 717 updated += 1
718 718
719 719 # divergent renames
720 720 for f, args, msg in actions['dr']:
721 721 repo.ui.debug(" %s: %s -> dr\n" % (f, msg))
722 722 z += 1
723 723 progress(_updating, z, item=f, total=numupdates, unit=_files)
724 724 fl, = args
725 725 repo.ui.warn(_("note: possible conflict - %s was renamed "
726 726 "multiple times to:\n") % f)
727 727 for nf in fl:
728 728 repo.ui.warn(" %s\n" % nf)
729 729
730 730 # rename and delete
731 731 for f, args, msg in actions['rd']:
732 732 repo.ui.debug(" %s: %s -> rd\n" % (f, msg))
733 733 z += 1
734 734 progress(_updating, z, item=f, total=numupdates, unit=_files)
735 735 fl, = args
736 736 repo.ui.warn(_("note: possible conflict - %s was deleted "
737 737 "and renamed to:\n") % f)
738 738 for nf in fl:
739 739 repo.ui.warn(" %s\n" % nf)
740 740
741 741 # exec
742 742 for f, args, msg in actions['e']:
743 743 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
744 744 z += 1
745 745 progress(_updating, z, item=f, total=numupdates, unit=_files)
746 746 flags, = args
747 747 audit(f)
748 748 util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
749 749 updated += 1
750 750
751 751 ms.commit()
752 752 progress(_updating, None, total=numupdates, unit=_files)
753 753
754 754 return updated, merged, removed, unresolved
755 755
756 756 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force, partial,
757 757 acceptremote, followcopies):
758 758 "Calculate the actions needed to merge mctx into wctx using ancestors"
759 759
760 760 if len(ancestors) == 1: # default
761 761 actions = manifestmerge(repo, wctx, mctx, ancestors[0],
762 762 branchmerge, force,
763 763 partial, acceptremote, followcopies)
764 764
765 765 else: # only when merge.preferancestor=* - the default
766 766 repo.ui.note(
767 767 _("note: merging %s and %s using bids from ancestors %s\n") %
768 768 (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))
769 769
770 770 # Call for bids
771 771 fbids = {} # mapping filename to bids (action method to list of actions)
772 772 for ancestor in ancestors:
773 773 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
774 774 actions = manifestmerge(repo, wctx, mctx, ancestor,
775 775 branchmerge, force,
776 776 partial, acceptremote, followcopies)
777 777 for m, l in sorted(actions.items()):
778 778 for a in l:
779 779 f, args, msg = a
780 780 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
781 781 if f in fbids:
782 782 d = fbids[f]
783 783 if m in d:
784 784 d[m].append(a)
785 785 else:
786 786 d[m] = [a]
787 787 else:
788 788 fbids[f] = {m: [a]}
789 789
790 790 # Pick the best bid for each file
791 791 repo.ui.note(_('\nauction for merging merge bids\n'))
792 792 actions = dict((m, []) for m in actions.keys())
793 793 for f, bids in sorted(fbids.items()):
794 794 # bids is a mapping from action method to list of actions
795 795 # Consensus?
796 796 if len(bids) == 1: # all bids are the same kind of method
797 797 m, l = bids.items()[0]
798 798 if util.all(a == l[0] for a in l[1:]): # len(bids) is > 1
799 799 repo.ui.note(" %s: consensus for %s\n" % (f, m))
800 800 actions[m].append(l[0])
801 801 continue
802 802 # If keep is an option, just do it.
803 803 if "k" in bids:
804 804 repo.ui.note(" %s: picking 'keep' action\n" % f)
805 805 actions['k'].append(bids["k"][0])
806 806 continue
807 807 # If there are gets and they all agree [how could they not?], do it.
808 808 if "g" in bids:
809 809 ga0 = bids["g"][0]
810 810 if util.all(a == ga0 for a in bids["g"][1:]):
811 811 repo.ui.note(" %s: picking 'get' action\n" % f)
812 812 actions['g'].append(ga0)
813 813 continue
814 814 # TODO: Consider other simple actions such as mode changes
815 815 # Handle inefficient democrazy.
816 816 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
817 817 for m, l in sorted(bids.items()):
818 818 for _f, args, msg in l:
819 819 repo.ui.note(' %s -> %s\n' % (msg, m))
820 820 # Pick random action. TODO: Instead, prompt user when resolving
821 821 m, l = bids.items()[0]
822 822 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
823 823 (f, m))
824 824 actions[m].append(l[0])
825 825 continue
826 826 repo.ui.note(_('end of auction\n\n'))
827 827
828 828 # Prompt and create actions. TODO: Move this towards resolve phase.
829 829 for f, args, msg in actions['cd']:
830 830 if repo.ui.promptchoice(
831 831 _("local changed %s which remote deleted\n"
832 832 "use (c)hanged version or (d)elete?"
833 833 "$$ &Changed $$ &Delete") % f, 0):
834 834 actions['r'].append((f, None, "prompt delete"))
835 835 else:
836 836 actions['a'].append((f, None, "prompt keep"))
837 837 del actions['cd'][:]
838 838
839 839 for f, args, msg in actions['dc']:
840 840 flags, = args
841 841 if repo.ui.promptchoice(
842 842 _("remote changed %s which local deleted\n"
843 843 "use (c)hanged version or leave (d)eleted?"
844 844 "$$ &Changed $$ &Deleted") % f, 0) == 0:
845 845 actions['g'].append((f, (flags,), "prompt recreating"))
846 846 del actions['dc'][:]
847 847
848 848 if wctx.rev() is None:
849 849 ractions, factions = _forgetremoved(wctx, mctx, branchmerge)
850 850 actions['r'].extend(ractions)
851 851 actions['f'].extend(factions)
852 852
853 853 return actions
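The bid auction above resolves each file with a fixed preference order: unanimous consensus first, then any 'keep' bid, then an agreed 'get', and otherwise an arbitrary bid with a warning. A simplified standalone sketch of that priority (hypothetical helper over a dict of action letter to candidate actions; it skips the all-bids-identical check the real code performs):

    def pick_bid(bids):
        """bids: dict mapping action letter -> list of candidate actions.
        Returns (action_letter, action) in the auction's preference order."""
        if len(bids) == 1:                       # consensus on the method
            m, candidates = next(iter(bids.items()))
            return m, candidates[0]
        if 'k' in bids:                          # keeping is always safe
            return 'k', bids['k'][0]
        if 'g' in bids:                          # gets normally all agree
            return 'g', bids['g'][0]
        m, candidates = sorted(bids.items())[0]  # ambiguous: pick one bid
        return m, candidates[0]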
854 854
855 855 def recordupdates(repo, actions, branchmerge):
856 856 "record merge actions to the dirstate"
857 857 # remove (must come first)
858 858 for f, args, msg in actions['r']:
859 859 if branchmerge:
860 860 repo.dirstate.remove(f)
861 861 else:
862 862 repo.dirstate.drop(f)
863 863
864 864 # forget (must come first)
865 865 for f, args, msg in actions['f']:
866 866 repo.dirstate.drop(f)
867 867
868 868 # re-add
869 869 for f, args, msg in actions['a']:
870 870 if not branchmerge:
871 871 repo.dirstate.add(f)
872 872
873 873 # exec change
874 874 for f, args, msg in actions['e']:
875 875 repo.dirstate.normallookup(f)
876 876
877 877 # keep
878 878 for f, args, msg in actions['k']:
879 879 pass
880 880
881 881 # get
882 882 for f, args, msg in actions['g']:
883 883 if branchmerge:
884 884 repo.dirstate.otherparent(f)
885 885 else:
886 886 repo.dirstate.normal(f)
887 887
888 888 # merge
889 889 for f, args, msg in actions['m']:
890 890 f1, f2, fa, move, anc = args
891 891 if branchmerge:
892 892 # We've done a branch merge, mark this file as merged
893 893 # so that we properly record the merger later
894 894 repo.dirstate.merge(f)
895 895 if f1 != f2: # copy/rename
896 896 if move:
897 897 repo.dirstate.remove(f1)
898 898 if f1 != f:
899 899 repo.dirstate.copy(f1, f)
900 900 else:
901 901 repo.dirstate.copy(f2, f)
902 902 else:
903 903 # We've update-merged a locally modified file, so
904 904 # we set the dirstate to emulate a normal checkout
905 905 # of that file some time in the past. Thus our
906 906 # merge will appear as a normal local file
907 907 # modification.
908 908 if f2 == f: # file not locally copied/moved
909 909 repo.dirstate.normallookup(f)
910 910 if move:
911 911 repo.dirstate.drop(f1)
912 912
913 913 # directory rename, move local
914 914 for f, args, msg in actions['dm']:
915 915 f0, flag = args
916 916 if f0 not in repo.dirstate:
917 917 # untracked file moved
918 918 continue
919 919 if branchmerge:
920 920 repo.dirstate.add(f)
921 921 repo.dirstate.remove(f0)
922 922 repo.dirstate.copy(f0, f)
923 923 else:
924 924 repo.dirstate.normal(f)
925 925 repo.dirstate.drop(f0)
926 926
927 927 # directory rename, get
928 928 for f, args, msg in actions['dg']:
929 929 f0, flag = args
930 930 if branchmerge:
931 931 repo.dirstate.add(f)
932 932 repo.dirstate.copy(f0, f)
933 933 else:
934 934 repo.dirstate.normal(f)
935 935
936 936 def update(repo, node, branchmerge, force, partial, ancestor=None,
937 937 mergeancestor=False, labels=None):
938 938 """
939 939 Perform a merge between the working directory and the given node
940 940
941 941 node = the node to update to, or None if unspecified
942 942 branchmerge = whether to merge between branches
943 943 force = whether to force branch merging or file overwriting
944 944 partial = a function to filter file lists (dirstate not updated)
945 945 mergeancestor = whether it is merging with an ancestor. If true,
946 946 we should accept the incoming changes for any prompts that occur.
947 947 If false, merging with an ancestor (fast-forward) is only allowed
948 948 between different named branches. This flag is used by rebase extension
949 949 as a temporary fix and should be avoided in general.
950 950
951 951 The table below shows all the behaviors of the update command
952 952 given the -c and -C or no options, whether the working directory
953 953 is dirty, whether a revision is specified, and the relationship of
954 954 the parent rev to the target rev (linear, on the same named
955 955 branch, or on another named branch).
956 956
957 957 This logic is tested by test-update-branches.t.
958 958
959 959 -c -C dirty rev | linear same cross
960 960 n n n n | ok (1) x
961 961 n n n y | ok ok ok
962 962 n n y n | merge (2) (2)
963 963 n n y y | merge (3) (3)
964 964 n y * * | --- discard ---
965 965 y n y * | --- (4) ---
966 966 y n n * | --- ok ---
967 967 y y * * | --- (5) ---
968 968
969 969 x = can't happen
970 970 * = don't-care
971 971 1 = abort: not a linear update (merge or update --check to force update)
972 972 2 = abort: uncommitted changes (commit and merge, or update --clean to
973 973 discard changes)
974 974 3 = abort: uncommitted changes (commit or update --clean to discard changes)
975 975 4 = abort: uncommitted changes (checked in commands.py)
976 976 5 = incompatible options (checked in commands.py)
977 977
978 978 Return the same tuple as applyupdates().
979 979 """
980 980
981 981 onode = node
982 982 wlock = repo.wlock()
983 983 try:
984 984 wc = repo[None]
985 985 pl = wc.parents()
986 986 p1 = pl[0]
987 987 pas = [None]
988 988 if ancestor:
989 989 pas = [repo[ancestor]]
990 990
991 991 if node is None:
992 992 # Here is where we should consider bookmarks, divergent bookmarks,
993 993 # foreground changesets (successors), and tip of current branch;
994 994 # but currently we are only checking the branch tips.
995 995 try:
996 996 node = repo.branchtip(wc.branch())
997 997 except errormod.RepoLookupError:
998 998 if wc.branch() == "default": # no default branch!
999 999 node = repo.lookup("tip") # update to tip
1000 1000 else:
1001 1001 raise util.Abort(_("branch %s not found") % wc.branch())
1002 1002
1003 1003 if p1.obsolete() and not p1.children():
1004 1004 # allow updating to successors
1005 1005 successors = obsolete.successorssets(repo, p1.node())
1006 1006
1007 1007 # behavior of certain cases is as follows,
1008 1008 #
1009 1009 # divergent changesets: update to highest rev, similar to what
1010 1010 # is currently done when there are more than one head
1011 1011 # (i.e. 'tip')
1012 1012 #
1013 1013 # replaced changesets: same as divergent except we know there
1014 1014 # is no conflict
1015 1015 #
1016 1016 # pruned changeset: no update is done; though, we could
1017 1017 # consider updating to the first non-obsolete parent,
1018 1018 # similar to what is currently done for 'hg prune'
1019 1019
1020 1020 if successors:
1021 1021 # flatten the list here handles both divergent (len > 1)
1022 1022 # and the usual case (len = 1)
1023 1023 successors = [n for sub in successors for n in sub]
1024 1024
1025 1025 # get the max revision for the given successors set,
1026 1026 # i.e. the 'tip' of a set
1027 1027 node = repo.revs("max(%ln)", successors).first()
1028 1028 pas = [p1]
1029 1029
1030 1030 overwrite = force and not branchmerge
1031 1031
1032 1032 p2 = repo[node]
1033 1033 if pas[0] is None:
1034 1034 if repo.ui.config("merge", "preferancestor", '*') == '*':
1035 1035 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1036 1036 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1037 1037 else:
1038 1038 pas = [p1.ancestor(p2, warn=branchmerge)]
1039 1039
1040 1040 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1041 1041
1042 1042 ### check phase
1043 1043 if not overwrite and len(pl) > 1:
1044 1044 raise util.Abort(_("outstanding uncommitted merge"))
1045 1045 if branchmerge:
1046 1046 if pas == [p2]:
1047 1047 raise util.Abort(_("merging with a working directory ancestor"
1048 1048 " has no effect"))
1049 1049 elif pas == [p1]:
1050 1050 if not mergeancestor and p1.branch() == p2.branch():
1051 1051 raise util.Abort(_("nothing to merge"),
1052 1052 hint=_("use 'hg update' "
1053 1053 "or check 'hg heads'"))
1054 1054 if not force and (wc.files() or wc.deleted()):
1055 1055 raise util.Abort(_("uncommitted changes"),
1056 1056 hint=_("use 'hg status' to list changes"))
1057 1057 for s in sorted(wc.substate):
1058 1058 if wc.sub(s).dirty():
1059 1059 raise util.Abort(_("uncommitted changes in "
1060 1060 "subrepository '%s'") % s)
1061 1061
1062 1062 elif not overwrite:
1063 1063 if p1 == p2: # no-op update
1064 1064 # call the hooks and exit early
1065 1065 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1066 1066 repo.hook('update', parent1=xp2, parent2='', error=0)
1067 1067 return 0, 0, 0, 0
1068 1068
1069 1069 if pas not in ([p1], [p2]): # nonlinear
1070 1070 dirty = wc.dirty(missing=True)
1071 1071 if dirty or onode is None:
1072 1072 # Branching is a bit strange to ensure we do the minimal
1073 1073 # number of calls to obsolete.background.
1074 1074 foreground = obsolete.foreground(repo, [p1.node()])
1075 1075 # note: the <node> variable contains a random identifier
1076 1076 if repo[node].node() in foreground:
1077 1077 pas = [p1] # allow updating to successors
1078 1078 elif dirty:
1079 1079 msg = _("uncommitted changes")
1080 1080 if onode is None:
1081 1081 hint = _("commit and merge, or update --clean to"
1082 1082 " discard changes")
1083 1083 else:
1084 1084 hint = _("commit or update --clean to discard"
1085 1085 " changes")
1086 1086 raise util.Abort(msg, hint=hint)
1087 1087 else: # node is none
1088 1088 msg = _("not a linear update")
1089 1089 hint = _("merge or update --check to force update")
1090 1090 raise util.Abort(msg, hint=hint)
1091 1091 else:
1092 1092 # Allow jumping branches if clean and specific rev given
1093 1093 pas = [p1]
1094 1094
1095 1095 followcopies = False
1096 1096 if overwrite:
1097 1097 pas = [wc]
1098 1098 elif pas == [p2]: # backwards
1099 1099 pas = [wc.p1()]
1100 1100 elif not branchmerge and not wc.dirty(missing=True):
1101 1101 pass
1102 1102 elif pas[0] and repo.ui.configbool("merge", "followcopies", True):
1103 1103 followcopies = True
1104 1104
1105 1105 ### calculate phase
1106 1106 actions = calculateupdates(repo, wc, p2, pas, branchmerge, force,
1107 1107 partial, mergeancestor, followcopies)
1108 1108
1109 1109 ### apply phase
1110 1110 if not branchmerge: # just jump to the new rev
1111 1111 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1112 1112 if not partial:
1113 1113 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1114 1114 # note that we're in the middle of an update
1115 1115 repo.vfs.write('updatestate', p2.hex())
1116 1116
1117 1117 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1118 1118
1119 1119 if not partial:
1120 1120 repo.dirstate.beginparentchange()
1121 1121 repo.setparents(fp1, fp2)
1122 1122 recordupdates(repo, actions, branchmerge)
1123 1123 # update completed, clear state
1124 1124 util.unlink(repo.join('updatestate'))
1125 1125
1126 1126 if not branchmerge:
1127 1127 repo.dirstate.setbranch(p2.branch())
1128 1128 repo.dirstate.endparentchange()
1129 1129 finally:
1130 1130 wlock.release()
1131 1131
1132 1132 if not partial:
1133 1133 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
1134 1134 return stats
1135 1135
1136 1136 def graft(repo, ctx, pctx, labels):
1137 1137 """Do a graft-like merge.
1138 1138
1139 1139 This is a merge where the merge ancestor is chosen such that one
1140 1140 or more changesets are grafted onto the current changeset. In
1141 1141 addition to the merge, this fixes up the dirstate to include only
1142 1142 a single parent and tries to duplicate any renames/copies
1143 1143 appropriately.
1144 1144
1145 1145 ctx - changeset to rebase
1146 1146 pctx - merge base, usually ctx.p1()
1147 1147 labels - merge labels eg ['local', 'graft']
1148 1148
1149 1149 """
1150 1150
1151 1151 stats = update(repo, ctx.node(), True, True, False, pctx.node(),
1152 1152 labels=labels)
1153 1153 # drop the second merge parent
1154 1154 repo.dirstate.beginparentchange()
1155 1155 repo.setparents(repo['.'].node(), nullid)
1156 1156 repo.dirstate.write()
1157 1157 # fix up dirstate for copies and renames
1158 1158 copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
1159 1159 repo.dirstate.endparentchange()
1160 1160 return stats