merge: add labels parameter from merge.update to filemerge...
Durham Goode
r21524:47b97d9a default
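The changeset below touches file lines 455 and 457: the largefiles filemerge override gains an optional labels argument and forwards it to the wrapped function, matching the labels parameter that merge.update now passes down to filemerge. As a rough illustration of the forwarding pattern (a sketch only -- basefilemerge is a hypothetical stand-in for the real wrapped filemerge implementation, not the actual Mercurial API):

    # Sketch under assumed names: basefilemerge stands in for the function
    # being wrapped; the point is only the optional-keyword forwarding.
    def basefilemerge(repo, mynode, orig, fcd, fco, fca, labels=None):
        # labels names the two merge sides (e.g. ['working copy', 'merge rev'])
        # and is used when writing conflict markers.
        return 0

    def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca,
                          labels=None):
        # Accept the new optional keyword and pass it through unchanged, so a
        # caller that now supplies labels (merge.update, per the commit
        # message) keeps working with the override installed.
        return origfn(repo, mynode, orig, fcd, fco, fca, labels=labels)

    # The wrapper forwards whatever labels the caller provides:
    overridefilemerge(basefilemerge, repo=None, mynode=None, orig='foo',
                      fcd=None, fco=None, fca=None,
                      labels=['working copy', 'merge rev'])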
@@ -1,1174 +1,1174 @@
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10
10
11 import os
11 import os
12 import copy
12 import copy
13
13
14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
15 archival, merge, pathutil, revset
15 archival, merge, pathutil, revset
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17 from mercurial.node import hex
17 from mercurial.node import hex
18 from hgext import rebase
18 from hgext import rebase
19
19
20 import lfutil
20 import lfutil
21 import lfcommands
21 import lfcommands
22 import basestore
22 import basestore
23
23
24 # -- Utility functions: commonly/repeatedly needed functionality ---------------
24 # -- Utility functions: commonly/repeatedly needed functionality ---------------
25
25
26 def installnormalfilesmatchfn(manifest):
26 def installnormalfilesmatchfn(manifest):
27 '''installmatchfn with a matchfn that ignores all largefiles'''
27 '''installmatchfn with a matchfn that ignores all largefiles'''
28 def overridematch(ctx, pats=[], opts={}, globbed=False,
28 def overridematch(ctx, pats=[], opts={}, globbed=False,
29 default='relpath'):
29 default='relpath'):
30 match = oldmatch(ctx, pats, opts, globbed, default)
30 match = oldmatch(ctx, pats, opts, globbed, default)
31 m = copy.copy(match)
31 m = copy.copy(match)
32 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
32 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
33 manifest)
33 manifest)
34 m._files = filter(notlfile, m._files)
34 m._files = filter(notlfile, m._files)
35 m._fmap = set(m._files)
35 m._fmap = set(m._files)
36 m._always = False
36 m._always = False
37 origmatchfn = m.matchfn
37 origmatchfn = m.matchfn
38 m.matchfn = lambda f: notlfile(f) and origmatchfn(f) or None
38 m.matchfn = lambda f: notlfile(f) and origmatchfn(f) or None
39 return m
39 return m
40 oldmatch = installmatchfn(overridematch)
40 oldmatch = installmatchfn(overridematch)
41
41
42 def installmatchfn(f):
42 def installmatchfn(f):
43 '''monkey patch the scmutil module with a custom match function.
43 '''monkey patch the scmutil module with a custom match function.
44 Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
44 Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
45 oldmatch = scmutil.match
45 oldmatch = scmutil.match
46 setattr(f, 'oldmatch', oldmatch)
46 setattr(f, 'oldmatch', oldmatch)
47 scmutil.match = f
47 scmutil.match = f
48 return oldmatch
48 return oldmatch
49
49
50 def restorematchfn():
50 def restorematchfn():
51 '''restores scmutil.match to what it was before installmatchfn
51 '''restores scmutil.match to what it was before installmatchfn
52 was called. no-op if scmutil.match is its original function.
52 was called. no-op if scmutil.match is its original function.
53
53
54 Note that n calls to installmatchfn will require n calls to
54 Note that n calls to installmatchfn will require n calls to
55 restore matchfn to reverse'''
55 restore matchfn to reverse'''
56 scmutil.match = getattr(scmutil.match, 'oldmatch')
56 scmutil.match = getattr(scmutil.match, 'oldmatch')
57
57
58 def installmatchandpatsfn(f):
58 def installmatchandpatsfn(f):
59 oldmatchandpats = scmutil.matchandpats
59 oldmatchandpats = scmutil.matchandpats
60 setattr(f, 'oldmatchandpats', oldmatchandpats)
60 setattr(f, 'oldmatchandpats', oldmatchandpats)
61 scmutil.matchandpats = f
61 scmutil.matchandpats = f
62 return oldmatchandpats
62 return oldmatchandpats
63
63
64 def restorematchandpatsfn():
64 def restorematchandpatsfn():
65 '''restores scmutil.matchandpats to what it was before
65 '''restores scmutil.matchandpats to what it was before
66 installnormalfilesmatchandpatsfn was called. no-op if scmutil.matchandpats
66 installnormalfilesmatchandpatsfn was called. no-op if scmutil.matchandpats
67 is its original function.
67 is its original function.
68
68
69 Note that n calls to installnormalfilesmatchandpatsfn will require n calls
69 Note that n calls to installnormalfilesmatchandpatsfn will require n calls
70 to restore matchfn to reverse'''
70 to restore matchfn to reverse'''
71 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
71 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
72 scmutil.matchandpats)
72 scmutil.matchandpats)
73
73
74 def addlargefiles(ui, repo, *pats, **opts):
74 def addlargefiles(ui, repo, *pats, **opts):
75 large = opts.pop('large', None)
75 large = opts.pop('large', None)
76 lfsize = lfutil.getminsize(
76 lfsize = lfutil.getminsize(
77 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
77 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
78
78
79 lfmatcher = None
79 lfmatcher = None
80 if lfutil.islfilesrepo(repo):
80 if lfutil.islfilesrepo(repo):
81 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
81 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
82 if lfpats:
82 if lfpats:
83 lfmatcher = match_.match(repo.root, '', list(lfpats))
83 lfmatcher = match_.match(repo.root, '', list(lfpats))
84
84
85 lfnames = []
85 lfnames = []
86 m = scmutil.match(repo[None], pats, opts)
86 m = scmutil.match(repo[None], pats, opts)
87 m.bad = lambda x, y: None
87 m.bad = lambda x, y: None
88 wctx = repo[None]
88 wctx = repo[None]
89 for f in repo.walk(m):
89 for f in repo.walk(m):
90 exact = m.exact(f)
90 exact = m.exact(f)
91 lfile = lfutil.standin(f) in wctx
91 lfile = lfutil.standin(f) in wctx
92 nfile = f in wctx
92 nfile = f in wctx
93 exists = lfile or nfile
93 exists = lfile or nfile
94
94
95 # Don't warn the user when they attempt to add a normal tracked file.
95 # Don't warn the user when they attempt to add a normal tracked file.
96 # The normal add code will do that for us.
96 # The normal add code will do that for us.
97 if exact and exists:
97 if exact and exists:
98 if lfile:
98 if lfile:
99 ui.warn(_('%s already a largefile\n') % f)
99 ui.warn(_('%s already a largefile\n') % f)
100 continue
100 continue
101
101
102 if (exact or not exists) and not lfutil.isstandin(f):
102 if (exact or not exists) and not lfutil.isstandin(f):
103 wfile = repo.wjoin(f)
103 wfile = repo.wjoin(f)
104
104
105 # In case the file was removed previously, but not committed
105 # In case the file was removed previously, but not committed
106 # (issue3507)
106 # (issue3507)
107 if not os.path.exists(wfile):
107 if not os.path.exists(wfile):
108 continue
108 continue
109
109
110 abovemin = (lfsize and
110 abovemin = (lfsize and
111 os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
111 os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
112 if large or abovemin or (lfmatcher and lfmatcher(f)):
112 if large or abovemin or (lfmatcher and lfmatcher(f)):
113 lfnames.append(f)
113 lfnames.append(f)
114 if ui.verbose or not exact:
114 if ui.verbose or not exact:
115 ui.status(_('adding %s as a largefile\n') % m.rel(f))
115 ui.status(_('adding %s as a largefile\n') % m.rel(f))
116
116
117 bad = []
117 bad = []
118 standins = []
118 standins = []
119
119
120 # Need to lock, otherwise there could be a race condition between
120 # Need to lock, otherwise there could be a race condition between
121 # when standins are created and added to the repo.
121 # when standins are created and added to the repo.
122 wlock = repo.wlock()
122 wlock = repo.wlock()
123 try:
123 try:
124 if not opts.get('dry_run'):
124 if not opts.get('dry_run'):
125 lfdirstate = lfutil.openlfdirstate(ui, repo)
125 lfdirstate = lfutil.openlfdirstate(ui, repo)
126 for f in lfnames:
126 for f in lfnames:
127 standinname = lfutil.standin(f)
127 standinname = lfutil.standin(f)
128 lfutil.writestandin(repo, standinname, hash='',
128 lfutil.writestandin(repo, standinname, hash='',
129 executable=lfutil.getexecutable(repo.wjoin(f)))
129 executable=lfutil.getexecutable(repo.wjoin(f)))
130 standins.append(standinname)
130 standins.append(standinname)
131 if lfdirstate[f] == 'r':
131 if lfdirstate[f] == 'r':
132 lfdirstate.normallookup(f)
132 lfdirstate.normallookup(f)
133 else:
133 else:
134 lfdirstate.add(f)
134 lfdirstate.add(f)
135 lfdirstate.write()
135 lfdirstate.write()
136 bad += [lfutil.splitstandin(f)
136 bad += [lfutil.splitstandin(f)
137 for f in repo[None].add(standins)
137 for f in repo[None].add(standins)
138 if f in m.files()]
138 if f in m.files()]
139 finally:
139 finally:
140 wlock.release()
140 wlock.release()
141 return bad
141 return bad
142
142
143 def removelargefiles(ui, repo, *pats, **opts):
143 def removelargefiles(ui, repo, *pats, **opts):
144 after = opts.get('after')
144 after = opts.get('after')
145 if not pats and not after:
145 if not pats and not after:
146 raise util.Abort(_('no files specified'))
146 raise util.Abort(_('no files specified'))
147 m = scmutil.match(repo[None], pats, opts)
147 m = scmutil.match(repo[None], pats, opts)
148 try:
148 try:
149 repo.lfstatus = True
149 repo.lfstatus = True
150 s = repo.status(match=m, clean=True)
150 s = repo.status(match=m, clean=True)
151 finally:
151 finally:
152 repo.lfstatus = False
152 repo.lfstatus = False
153 manifest = repo[None].manifest()
153 manifest = repo[None].manifest()
154 modified, added, deleted, clean = [[f for f in list
154 modified, added, deleted, clean = [[f for f in list
155 if lfutil.standin(f) in manifest]
155 if lfutil.standin(f) in manifest]
156 for list in [s[0], s[1], s[3], s[6]]]
156 for list in [s[0], s[1], s[3], s[6]]]
157
157
158 def warn(files, msg):
158 def warn(files, msg):
159 for f in files:
159 for f in files:
160 ui.warn(msg % m.rel(f))
160 ui.warn(msg % m.rel(f))
161 return int(len(files) > 0)
161 return int(len(files) > 0)
162
162
163 result = 0
163 result = 0
164
164
165 if after:
165 if after:
166 remove, forget = deleted, []
166 remove, forget = deleted, []
167 result = warn(modified + added + clean,
167 result = warn(modified + added + clean,
168 _('not removing %s: file still exists\n'))
168 _('not removing %s: file still exists\n'))
169 else:
169 else:
170 remove, forget = deleted + clean, []
170 remove, forget = deleted + clean, []
171 result = warn(modified, _('not removing %s: file is modified (use -f'
171 result = warn(modified, _('not removing %s: file is modified (use -f'
172 ' to force removal)\n'))
172 ' to force removal)\n'))
173 result = warn(added, _('not removing %s: file has been marked for add'
173 result = warn(added, _('not removing %s: file has been marked for add'
174 ' (use forget to undo)\n')) or result
174 ' (use forget to undo)\n')) or result
175
175
176 for f in sorted(remove + forget):
176 for f in sorted(remove + forget):
177 if ui.verbose or not m.exact(f):
177 if ui.verbose or not m.exact(f):
178 ui.status(_('removing %s\n') % m.rel(f))
178 ui.status(_('removing %s\n') % m.rel(f))
179
179
180 # Need to lock because standin files are deleted then removed from the
180 # Need to lock because standin files are deleted then removed from the
181 # repository and we could race in-between.
181 # repository and we could race in-between.
182 wlock = repo.wlock()
182 wlock = repo.wlock()
183 try:
183 try:
184 lfdirstate = lfutil.openlfdirstate(ui, repo)
184 lfdirstate = lfutil.openlfdirstate(ui, repo)
185 for f in remove:
185 for f in remove:
186 if not after:
186 if not after:
187 # If this is being called by addremove, notify the user that we
187 # If this is being called by addremove, notify the user that we
188 # are removing the file.
188 # are removing the file.
189 if getattr(repo, "_isaddremove", False):
189 if getattr(repo, "_isaddremove", False):
190 ui.status(_('removing %s\n') % f)
190 ui.status(_('removing %s\n') % f)
191 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
191 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
192 lfdirstate.remove(f)
192 lfdirstate.remove(f)
193 lfdirstate.write()
193 lfdirstate.write()
194 forget = [lfutil.standin(f) for f in forget]
194 forget = [lfutil.standin(f) for f in forget]
195 remove = [lfutil.standin(f) for f in remove]
195 remove = [lfutil.standin(f) for f in remove]
196 repo[None].forget(forget)
196 repo[None].forget(forget)
197 # If this is being called by addremove, let the original addremove
197 # If this is being called by addremove, let the original addremove
198 # function handle this.
198 # function handle this.
199 if not getattr(repo, "_isaddremove", False):
199 if not getattr(repo, "_isaddremove", False):
200 for f in remove:
200 for f in remove:
201 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
201 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
202 repo[None].forget(remove)
202 repo[None].forget(remove)
203 finally:
203 finally:
204 wlock.release()
204 wlock.release()
205
205
206 return result
206 return result
207
207
208 # For overriding mercurial.hgweb.webcommands so that largefiles will
208 # For overriding mercurial.hgweb.webcommands so that largefiles will
209 # appear at their right place in the manifests.
209 # appear at their right place in the manifests.
210 def decodepath(orig, path):
210 def decodepath(orig, path):
211 return lfutil.splitstandin(path) or path
211 return lfutil.splitstandin(path) or path
212
212
213 # -- Wrappers: modify existing commands --------------------------------
213 # -- Wrappers: modify existing commands --------------------------------
214
214
215 # Add works by going through the files that the user wanted to add and
215 # Add works by going through the files that the user wanted to add and
216 # checking if they should be added as largefiles. Then it makes a new
216 # checking if they should be added as largefiles. Then it makes a new
217 # matcher which matches only the normal files and runs the original
217 # matcher which matches only the normal files and runs the original
218 # version of add.
218 # version of add.
219 def overrideadd(orig, ui, repo, *pats, **opts):
219 def overrideadd(orig, ui, repo, *pats, **opts):
220 normal = opts.pop('normal')
220 normal = opts.pop('normal')
221 if normal:
221 if normal:
222 if opts.get('large'):
222 if opts.get('large'):
223 raise util.Abort(_('--normal cannot be used with --large'))
223 raise util.Abort(_('--normal cannot be used with --large'))
224 return orig(ui, repo, *pats, **opts)
224 return orig(ui, repo, *pats, **opts)
225 bad = addlargefiles(ui, repo, *pats, **opts)
225 bad = addlargefiles(ui, repo, *pats, **opts)
226 installnormalfilesmatchfn(repo[None].manifest())
226 installnormalfilesmatchfn(repo[None].manifest())
227 result = orig(ui, repo, *pats, **opts)
227 result = orig(ui, repo, *pats, **opts)
228 restorematchfn()
228 restorematchfn()
229
229
230 return (result == 1 or bad) and 1 or 0
230 return (result == 1 or bad) and 1 or 0
231
231
232 def overrideremove(orig, ui, repo, *pats, **opts):
232 def overrideremove(orig, ui, repo, *pats, **opts):
233 installnormalfilesmatchfn(repo[None].manifest())
233 installnormalfilesmatchfn(repo[None].manifest())
234 result = orig(ui, repo, *pats, **opts)
234 result = orig(ui, repo, *pats, **opts)
235 restorematchfn()
235 restorematchfn()
236 return removelargefiles(ui, repo, *pats, **opts) or result
236 return removelargefiles(ui, repo, *pats, **opts) or result
237
237
238 def overridestatusfn(orig, repo, rev2, **opts):
238 def overridestatusfn(orig, repo, rev2, **opts):
239 try:
239 try:
240 repo._repo.lfstatus = True
240 repo._repo.lfstatus = True
241 return orig(repo, rev2, **opts)
241 return orig(repo, rev2, **opts)
242 finally:
242 finally:
243 repo._repo.lfstatus = False
243 repo._repo.lfstatus = False
244
244
245 def overridestatus(orig, ui, repo, *pats, **opts):
245 def overridestatus(orig, ui, repo, *pats, **opts):
246 try:
246 try:
247 repo.lfstatus = True
247 repo.lfstatus = True
248 return orig(ui, repo, *pats, **opts)
248 return orig(ui, repo, *pats, **opts)
249 finally:
249 finally:
250 repo.lfstatus = False
250 repo.lfstatus = False
251
251
252 def overridedirty(orig, repo, ignoreupdate=False):
252 def overridedirty(orig, repo, ignoreupdate=False):
253 try:
253 try:
254 repo._repo.lfstatus = True
254 repo._repo.lfstatus = True
255 return orig(repo, ignoreupdate)
255 return orig(repo, ignoreupdate)
256 finally:
256 finally:
257 repo._repo.lfstatus = False
257 repo._repo.lfstatus = False
258
258
259 def overridelog(orig, ui, repo, *pats, **opts):
259 def overridelog(orig, ui, repo, *pats, **opts):
260 def overridematchandpats(ctx, pats=[], opts={}, globbed=False,
260 def overridematchandpats(ctx, pats=[], opts={}, globbed=False,
261 default='relpath'):
261 default='relpath'):
262 """Matcher that merges root directory with .hglf, suitable for log.
262 """Matcher that merges root directory with .hglf, suitable for log.
263 It is still possible to match .hglf directly.
263 It is still possible to match .hglf directly.
264 For any listed files run log on the standin too.
264 For any listed files run log on the standin too.
265 matchfn tries both the given filename and with .hglf stripped.
265 matchfn tries both the given filename and with .hglf stripped.
266 """
266 """
267 matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default)
267 matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default)
268 m, p = copy.copy(matchandpats)
268 m, p = copy.copy(matchandpats)
269
269
270 pats = set(p)
270 pats = set(p)
271 # TODO: handling of patterns in both cases below
271 # TODO: handling of patterns in both cases below
272 if m._cwd:
272 if m._cwd:
273 if os.path.isabs(m._cwd):
273 if os.path.isabs(m._cwd):
274 # TODO: handle largefile magic when invoked from other cwd
274 # TODO: handle largefile magic when invoked from other cwd
275 return matchandpats
275 return matchandpats
276 back = (m._cwd.count('/') + 1) * '../'
276 back = (m._cwd.count('/') + 1) * '../'
277 pats.update(back + lfutil.standin(m._cwd + '/' + f) for f in p)
277 pats.update(back + lfutil.standin(m._cwd + '/' + f) for f in p)
278 else:
278 else:
279 pats.update(lfutil.standin(f) for f in p)
279 pats.update(lfutil.standin(f) for f in p)
280
280
281 for i in range(0, len(m._files)):
281 for i in range(0, len(m._files)):
282 standin = lfutil.standin(m._files[i])
282 standin = lfutil.standin(m._files[i])
283 if standin in repo[ctx.node()]:
283 if standin in repo[ctx.node()]:
284 m._files[i] = standin
284 m._files[i] = standin
285 elif m._files[i] not in repo[ctx.node()]:
285 elif m._files[i] not in repo[ctx.node()]:
286 m._files.append(standin)
286 m._files.append(standin)
287 pats.add(standin)
287 pats.add(standin)
288
288
289 m._fmap = set(m._files)
289 m._fmap = set(m._files)
290 m._always = False
290 m._always = False
291 origmatchfn = m.matchfn
291 origmatchfn = m.matchfn
292 def lfmatchfn(f):
292 def lfmatchfn(f):
293 lf = lfutil.splitstandin(f)
293 lf = lfutil.splitstandin(f)
294 if lf is not None and origmatchfn(lf):
294 if lf is not None and origmatchfn(lf):
295 return True
295 return True
296 r = origmatchfn(f)
296 r = origmatchfn(f)
297 return r
297 return r
298 m.matchfn = lfmatchfn
298 m.matchfn = lfmatchfn
299
299
300 return m, pats
300 return m, pats
301
301
302 oldmatchandpats = installmatchandpatsfn(overridematchandpats)
302 oldmatchandpats = installmatchandpatsfn(overridematchandpats)
303 try:
303 try:
304 repo.lfstatus = True
304 repo.lfstatus = True
305 return orig(ui, repo, *pats, **opts)
305 return orig(ui, repo, *pats, **opts)
306 finally:
306 finally:
307 repo.lfstatus = False
307 repo.lfstatus = False
308 restorematchandpatsfn()
308 restorematchandpatsfn()
309
309
310 def overrideverify(orig, ui, repo, *pats, **opts):
310 def overrideverify(orig, ui, repo, *pats, **opts):
311 large = opts.pop('large', False)
311 large = opts.pop('large', False)
312 all = opts.pop('lfa', False)
312 all = opts.pop('lfa', False)
313 contents = opts.pop('lfc', False)
313 contents = opts.pop('lfc', False)
314
314
315 result = orig(ui, repo, *pats, **opts)
315 result = orig(ui, repo, *pats, **opts)
316 if large or all or contents:
316 if large or all or contents:
317 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
317 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
318 return result
318 return result
319
319
320 def overridedebugstate(orig, ui, repo, *pats, **opts):
320 def overridedebugstate(orig, ui, repo, *pats, **opts):
321 large = opts.pop('large', False)
321 large = opts.pop('large', False)
322 if large:
322 if large:
323 class fakerepo(object):
323 class fakerepo(object):
324 dirstate = lfutil.openlfdirstate(ui, repo)
324 dirstate = lfutil.openlfdirstate(ui, repo)
325 orig(ui, fakerepo, *pats, **opts)
325 orig(ui, fakerepo, *pats, **opts)
326 else:
326 else:
327 orig(ui, repo, *pats, **opts)
327 orig(ui, repo, *pats, **opts)
328
328
329 # Override needs to refresh standins so that update's normal merge
329 # Override needs to refresh standins so that update's normal merge
330 # will go through properly. Then the other update hook (overriding repo.update)
330 # will go through properly. Then the other update hook (overriding repo.update)
331 # will get the new files. Filemerge is also overridden so that the merge
331 # will get the new files. Filemerge is also overridden so that the merge
332 # will merge standins correctly.
332 # will merge standins correctly.
333 def overrideupdate(orig, ui, repo, *pats, **opts):
333 def overrideupdate(orig, ui, repo, *pats, **opts):
334 # Need to lock between the standins getting updated and their
334 # Need to lock between the standins getting updated and their
335 # largefiles getting updated
335 # largefiles getting updated
336 wlock = repo.wlock()
336 wlock = repo.wlock()
337 try:
337 try:
338 lfdirstate = lfutil.openlfdirstate(ui, repo)
338 lfdirstate = lfutil.openlfdirstate(ui, repo)
339 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()),
339 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()),
340 [], False, False, False)
340 [], False, False, False)
341 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
341 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
342
342
343 if opts['check']:
343 if opts['check']:
344 mod = len(modified) > 0
344 mod = len(modified) > 0
345 for lfile in unsure:
345 for lfile in unsure:
346 standin = lfutil.standin(lfile)
346 standin = lfutil.standin(lfile)
347 if repo['.'][standin].data().strip() != \
347 if repo['.'][standin].data().strip() != \
348 lfutil.hashfile(repo.wjoin(lfile)):
348 lfutil.hashfile(repo.wjoin(lfile)):
349 mod = True
349 mod = True
350 else:
350 else:
351 lfdirstate.normal(lfile)
351 lfdirstate.normal(lfile)
352 lfdirstate.write()
352 lfdirstate.write()
353 if mod:
353 if mod:
354 raise util.Abort(_('uncommitted changes'))
354 raise util.Abort(_('uncommitted changes'))
355 # XXX handle removed differently
355 # XXX handle removed differently
356 if not opts['clean']:
356 if not opts['clean']:
357 for lfile in unsure + modified + added:
357 for lfile in unsure + modified + added:
358 lfutil.updatestandin(repo, lfutil.standin(lfile))
358 lfutil.updatestandin(repo, lfutil.standin(lfile))
359 return orig(ui, repo, *pats, **opts)
359 return orig(ui, repo, *pats, **opts)
360 finally:
360 finally:
361 wlock.release()
361 wlock.release()
362
362
363 # Before starting the manifest merge, merge.updates will call
363 # Before starting the manifest merge, merge.updates will call
364 # _checkunknown to check if there are any files in the merged-in
364 # _checkunknown to check if there are any files in the merged-in
365 # changeset that collide with unknown files in the working copy.
365 # changeset that collide with unknown files in the working copy.
366 #
366 #
367 # The largefiles are seen as unknown, so this prevents us from merging
367 # The largefiles are seen as unknown, so this prevents us from merging
368 # in a file 'foo' if we already have a largefile with the same name.
368 # in a file 'foo' if we already have a largefile with the same name.
369 #
369 #
370 # The overridden function filters the unknown files by removing any
370 # The overridden function filters the unknown files by removing any
371 # largefiles. This makes the merge proceed and we can then handle this
371 # largefiles. This makes the merge proceed and we can then handle this
372 # case further in the overridden manifestmerge function below.
372 # case further in the overridden manifestmerge function below.
373 def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
373 def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
374 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
374 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
375 return False
375 return False
376 return origfn(repo, wctx, mctx, f)
376 return origfn(repo, wctx, mctx, f)
377
377
378 # The manifest merge handles conflicts on the manifest level. We want
378 # The manifest merge handles conflicts on the manifest level. We want
379 # to handle changes in largefile-ness of files at this level too.
379 # to handle changes in largefile-ness of files at this level too.
380 #
380 #
381 # The strategy is to run the original manifestmerge and then process
381 # The strategy is to run the original manifestmerge and then process
382 # the action list it outputs. There are two cases we need to deal with:
382 # the action list it outputs. There are two cases we need to deal with:
383 #
383 #
384 # 1. Normal file in p1, largefile in p2. Here the largefile is
384 # 1. Normal file in p1, largefile in p2. Here the largefile is
385 # detected via its standin file, which will enter the working copy
385 # detected via its standin file, which will enter the working copy
386 # with a "get" action. It is not "merge" since the standin is all
386 # with a "get" action. It is not "merge" since the standin is all
387 # Mercurial is concerned with at this level -- the link to the
387 # Mercurial is concerned with at this level -- the link to the
388 # existing normal file is not relevant here.
388 # existing normal file is not relevant here.
389 #
389 #
390 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
390 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
391 # since the largefile will be present in the working copy and
391 # since the largefile will be present in the working copy and
392 # different from the normal file in p2. Mercurial therefore
392 # different from the normal file in p2. Mercurial therefore
393 # triggers a merge action.
393 # triggers a merge action.
394 #
394 #
395 # In both cases, we prompt the user and emit new actions to either
395 # In both cases, we prompt the user and emit new actions to either
396 # remove the standin (if the normal file was kept) or to remove the
396 # remove the standin (if the normal file was kept) or to remove the
397 # normal file and get the standin (if the largefile was kept). The
397 # normal file and get the standin (if the largefile was kept). The
398 # default prompt answer is to use the largefile version since it was
398 # default prompt answer is to use the largefile version since it was
399 # presumably changed on purpose.
399 # presumably changed on purpose.
400 #
400 #
401 # Finally, the merge.applyupdates function will then take care of
401 # Finally, the merge.applyupdates function will then take care of
402 # writing the files into the working copy and lfcommands.updatelfiles
402 # writing the files into the working copy and lfcommands.updatelfiles
403 # will update the largefiles.
403 # will update the largefiles.
404 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
404 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
405 partial, acceptremote, followcopies):
405 partial, acceptremote, followcopies):
406 overwrite = force and not branchmerge
406 overwrite = force and not branchmerge
407 actions = origfn(repo, p1, p2, pas, branchmerge, force, partial,
407 actions = origfn(repo, p1, p2, pas, branchmerge, force, partial,
408 acceptremote, followcopies)
408 acceptremote, followcopies)
409
409
410 if overwrite:
410 if overwrite:
411 return actions
411 return actions
412
412
413 removes = set(a[0] for a in actions if a[1] == 'r')
413 removes = set(a[0] for a in actions if a[1] == 'r')
414 processed = []
414 processed = []
415
415
416 for action in actions:
416 for action in actions:
417 f, m, args, msg = action
417 f, m, args, msg = action
418
418
419 splitstandin = f and lfutil.splitstandin(f)
419 splitstandin = f and lfutil.splitstandin(f)
420 if (m == "g" and splitstandin is not None and
420 if (m == "g" and splitstandin is not None and
421 splitstandin in p1 and splitstandin not in removes):
421 splitstandin in p1 and splitstandin not in removes):
422 # Case 1: normal file in the working copy, largefile in
422 # Case 1: normal file in the working copy, largefile in
423 # the second parent
423 # the second parent
424 lfile = splitstandin
424 lfile = splitstandin
425 standin = f
425 standin = f
426 msg = _('remote turned local normal file %s into a largefile\n'
426 msg = _('remote turned local normal file %s into a largefile\n'
427 'use (l)argefile or keep (n)ormal file?'
427 'use (l)argefile or keep (n)ormal file?'
428 '$$ &Largefile $$ &Normal file') % lfile
428 '$$ &Largefile $$ &Normal file') % lfile
429 if repo.ui.promptchoice(msg, 0) == 0:
429 if repo.ui.promptchoice(msg, 0) == 0:
430 processed.append((lfile, "r", None, msg))
430 processed.append((lfile, "r", None, msg))
431 processed.append((standin, "g", (p2.flags(standin),), msg))
431 processed.append((standin, "g", (p2.flags(standin),), msg))
432 else:
432 else:
433 processed.append((standin, "r", None, msg))
433 processed.append((standin, "r", None, msg))
434 elif (m == "g" and
434 elif (m == "g" and
435 lfutil.standin(f) in p1 and lfutil.standin(f) not in removes):
435 lfutil.standin(f) in p1 and lfutil.standin(f) not in removes):
436 # Case 2: largefile in the working copy, normal file in
436 # Case 2: largefile in the working copy, normal file in
437 # the second parent
437 # the second parent
438 standin = lfutil.standin(f)
438 standin = lfutil.standin(f)
439 lfile = f
439 lfile = f
440 msg = _('remote turned local largefile %s into a normal file\n'
440 msg = _('remote turned local largefile %s into a normal file\n'
441 'keep (l)argefile or use (n)ormal file?'
441 'keep (l)argefile or use (n)ormal file?'
442 '$$ &Largefile $$ &Normal file') % lfile
442 '$$ &Largefile $$ &Normal file') % lfile
443 if repo.ui.promptchoice(msg, 0) == 0:
443 if repo.ui.promptchoice(msg, 0) == 0:
444 processed.append((lfile, "r", None, msg))
444 processed.append((lfile, "r", None, msg))
445 else:
445 else:
446 processed.append((standin, "r", None, msg))
446 processed.append((standin, "r", None, msg))
447 processed.append((lfile, "g", (p2.flags(lfile),), msg))
447 processed.append((lfile, "g", (p2.flags(lfile),), msg))
448 else:
448 else:
449 processed.append(action)
449 processed.append(action)
450
450
451 return processed
451 return processed
452
452
453 # Override filemerge to prompt the user about how they wish to merge
453 # Override filemerge to prompt the user about how they wish to merge
454 # largefiles. This will handle identical edits without prompting the user.
454 # largefiles. This will handle identical edits without prompting the user.
455 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca):
455 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca, labels=None):
456 if not lfutil.isstandin(orig):
456 if not lfutil.isstandin(orig):
457 return origfn(repo, mynode, orig, fcd, fco, fca)
457 return origfn(repo, mynode, orig, fcd, fco, fca, labels=labels)
458
458
459 ahash = fca.data().strip().lower()
459 ahash = fca.data().strip().lower()
460 dhash = fcd.data().strip().lower()
460 dhash = fcd.data().strip().lower()
461 ohash = fco.data().strip().lower()
461 ohash = fco.data().strip().lower()
462 if (ohash != ahash and
462 if (ohash != ahash and
463 ohash != dhash and
463 ohash != dhash and
464 (dhash == ahash or
464 (dhash == ahash or
465 repo.ui.promptchoice(
465 repo.ui.promptchoice(
466 _('largefile %s has a merge conflict\nancestor was %s\n'
466 _('largefile %s has a merge conflict\nancestor was %s\n'
467 'keep (l)ocal %s or\ntake (o)ther %s?'
467 'keep (l)ocal %s or\ntake (o)ther %s?'
468 '$$ &Local $$ &Other') %
468 '$$ &Local $$ &Other') %
469 (lfutil.splitstandin(orig), ahash, dhash, ohash),
469 (lfutil.splitstandin(orig), ahash, dhash, ohash),
470 0) == 1)):
470 0) == 1)):
471 repo.wwrite(fcd.path(), fco.data(), fco.flags())
471 repo.wwrite(fcd.path(), fco.data(), fco.flags())
472 return 0
472 return 0
473
473
474 # Copy first changes the matchers to match standins instead of
474 # Copy first changes the matchers to match standins instead of
475 # largefiles. Then it overrides util.copyfile in that function it
475 # largefiles. Then it overrides util.copyfile in that function it
476 # checks if the destination largefile already exists. It also keeps a
476 # checks if the destination largefile already exists. It also keeps a
477 # list of copied files so that the largefiles can be copied and the
477 # list of copied files so that the largefiles can be copied and the
478 # dirstate updated.
478 # dirstate updated.
479 def overridecopy(orig, ui, repo, pats, opts, rename=False):
479 def overridecopy(orig, ui, repo, pats, opts, rename=False):
480 # doesn't remove largefile on rename
480 # doesn't remove largefile on rename
481 if len(pats) < 2:
481 if len(pats) < 2:
482 # this isn't legal, let the original function deal with it
482 # this isn't legal, let the original function deal with it
483 return orig(ui, repo, pats, opts, rename)
483 return orig(ui, repo, pats, opts, rename)
484
484
485 def makestandin(relpath):
485 def makestandin(relpath):
486 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
486 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
487 return os.path.join(repo.wjoin(lfutil.standin(path)))
487 return os.path.join(repo.wjoin(lfutil.standin(path)))
488
488
489 fullpats = scmutil.expandpats(pats)
489 fullpats = scmutil.expandpats(pats)
490 dest = fullpats[-1]
490 dest = fullpats[-1]
491
491
492 if os.path.isdir(dest):
492 if os.path.isdir(dest):
493 if not os.path.isdir(makestandin(dest)):
493 if not os.path.isdir(makestandin(dest)):
494 os.makedirs(makestandin(dest))
494 os.makedirs(makestandin(dest))
495 # This could copy both lfiles and normal files in one command,
495 # This could copy both lfiles and normal files in one command,
496 # but we don't want to do that. First replace their matcher to
496 # but we don't want to do that. First replace their matcher to
497 # only match normal files and run it, then replace it to just
497 # only match normal files and run it, then replace it to just
498 # match largefiles and run it again.
498 # match largefiles and run it again.
499 nonormalfiles = False
499 nonormalfiles = False
500 nolfiles = False
500 nolfiles = False
501 installnormalfilesmatchfn(repo[None].manifest())
501 installnormalfilesmatchfn(repo[None].manifest())
502 try:
502 try:
503 try:
503 try:
504 result = orig(ui, repo, pats, opts, rename)
504 result = orig(ui, repo, pats, opts, rename)
505 except util.Abort, e:
505 except util.Abort, e:
506 if str(e) != _('no files to copy'):
506 if str(e) != _('no files to copy'):
507 raise e
507 raise e
508 else:
508 else:
509 nonormalfiles = True
509 nonormalfiles = True
510 result = 0
510 result = 0
511 finally:
511 finally:
512 restorematchfn()
512 restorematchfn()
513
513
514 # The first rename can cause our current working directory to be removed.
514 # The first rename can cause our current working directory to be removed.
515 # In that case there is nothing left to copy/rename so just quit.
515 # In that case there is nothing left to copy/rename so just quit.
516 try:
516 try:
517 repo.getcwd()
517 repo.getcwd()
518 except OSError:
518 except OSError:
519 return result
519 return result
520
520
521 try:
521 try:
522 try:
522 try:
523 # When we call orig below it creates the standins but we don't add
523 # When we call orig below it creates the standins but we don't add
524 # them to the dir state until later so lock during that time.
524 # them to the dir state until later so lock during that time.
525 wlock = repo.wlock()
525 wlock = repo.wlock()
526
526
527 manifest = repo[None].manifest()
527 manifest = repo[None].manifest()
528 def overridematch(ctx, pats=[], opts={}, globbed=False,
528 def overridematch(ctx, pats=[], opts={}, globbed=False,
529 default='relpath'):
529 default='relpath'):
530 newpats = []
530 newpats = []
531 # The patterns were previously mangled to add the standin
531 # The patterns were previously mangled to add the standin
532 # directory; we need to remove that now
532 # directory; we need to remove that now
533 for pat in pats:
533 for pat in pats:
534 if match_.patkind(pat) is None and lfutil.shortname in pat:
534 if match_.patkind(pat) is None and lfutil.shortname in pat:
535 newpats.append(pat.replace(lfutil.shortname, ''))
535 newpats.append(pat.replace(lfutil.shortname, ''))
536 else:
536 else:
537 newpats.append(pat)
537 newpats.append(pat)
538 match = oldmatch(ctx, newpats, opts, globbed, default)
538 match = oldmatch(ctx, newpats, opts, globbed, default)
539 m = copy.copy(match)
539 m = copy.copy(match)
540 lfile = lambda f: lfutil.standin(f) in manifest
540 lfile = lambda f: lfutil.standin(f) in manifest
541 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
541 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
542 m._fmap = set(m._files)
542 m._fmap = set(m._files)
543 m._always = False
543 m._always = False
544 origmatchfn = m.matchfn
544 origmatchfn = m.matchfn
545 m.matchfn = lambda f: (lfutil.isstandin(f) and
545 m.matchfn = lambda f: (lfutil.isstandin(f) and
546 (f in manifest) and
546 (f in manifest) and
547 origmatchfn(lfutil.splitstandin(f)) or
547 origmatchfn(lfutil.splitstandin(f)) or
548 None)
548 None)
549 return m
549 return m
550 oldmatch = installmatchfn(overridematch)
550 oldmatch = installmatchfn(overridematch)
551 listpats = []
551 listpats = []
552 for pat in pats:
552 for pat in pats:
553 if match_.patkind(pat) is not None:
553 if match_.patkind(pat) is not None:
554 listpats.append(pat)
554 listpats.append(pat)
555 else:
555 else:
556 listpats.append(makestandin(pat))
556 listpats.append(makestandin(pat))
557
557
558 try:
558 try:
559 origcopyfile = util.copyfile
559 origcopyfile = util.copyfile
560 copiedfiles = []
560 copiedfiles = []
561 def overridecopyfile(src, dest):
561 def overridecopyfile(src, dest):
562 if (lfutil.shortname in src and
562 if (lfutil.shortname in src and
563 dest.startswith(repo.wjoin(lfutil.shortname))):
563 dest.startswith(repo.wjoin(lfutil.shortname))):
564 destlfile = dest.replace(lfutil.shortname, '')
564 destlfile = dest.replace(lfutil.shortname, '')
565 if not opts['force'] and os.path.exists(destlfile):
565 if not opts['force'] and os.path.exists(destlfile):
566 raise IOError('',
566 raise IOError('',
567 _('destination largefile already exists'))
567 _('destination largefile already exists'))
568 copiedfiles.append((src, dest))
568 copiedfiles.append((src, dest))
569 origcopyfile(src, dest)
569 origcopyfile(src, dest)
570
570
571 util.copyfile = overridecopyfile
571 util.copyfile = overridecopyfile
572 result += orig(ui, repo, listpats, opts, rename)
572 result += orig(ui, repo, listpats, opts, rename)
573 finally:
573 finally:
574 util.copyfile = origcopyfile
574 util.copyfile = origcopyfile
575
575
576 lfdirstate = lfutil.openlfdirstate(ui, repo)
576 lfdirstate = lfutil.openlfdirstate(ui, repo)
577 for (src, dest) in copiedfiles:
577 for (src, dest) in copiedfiles:
578 if (lfutil.shortname in src and
578 if (lfutil.shortname in src and
579 dest.startswith(repo.wjoin(lfutil.shortname))):
579 dest.startswith(repo.wjoin(lfutil.shortname))):
580 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
580 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
581 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
581 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
582 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
582 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
583 if not os.path.isdir(destlfiledir):
583 if not os.path.isdir(destlfiledir):
584 os.makedirs(destlfiledir)
584 os.makedirs(destlfiledir)
585 if rename:
585 if rename:
586 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
586 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
587
587
588 # The file is gone, but this deletes any empty parent
588 # The file is gone, but this deletes any empty parent
589 # directories as a side-effect.
589 # directories as a side-effect.
590 util.unlinkpath(repo.wjoin(srclfile), True)
590 util.unlinkpath(repo.wjoin(srclfile), True)
591 lfdirstate.remove(srclfile)
591 lfdirstate.remove(srclfile)
592 else:
592 else:
593 util.copyfile(repo.wjoin(srclfile),
593 util.copyfile(repo.wjoin(srclfile),
594 repo.wjoin(destlfile))
594 repo.wjoin(destlfile))
595
595
596 lfdirstate.add(destlfile)
596 lfdirstate.add(destlfile)
597 lfdirstate.write()
597 lfdirstate.write()
598 except util.Abort, e:
598 except util.Abort, e:
599 if str(e) != _('no files to copy'):
599 if str(e) != _('no files to copy'):
600 raise e
600 raise e
601 else:
601 else:
602 nolfiles = True
602 nolfiles = True
603 finally:
603 finally:
604 restorematchfn()
604 restorematchfn()
605 wlock.release()
605 wlock.release()
606
606
607 if nolfiles and nonormalfiles:
607 if nolfiles and nonormalfiles:
608 raise util.Abort(_('no files to copy'))
608 raise util.Abort(_('no files to copy'))
609
609
610 return result
610 return result
611
611
612 # When the user calls revert, we have to be careful to not revert any
612 # When the user calls revert, we have to be careful to not revert any
613 # changes to other largefiles accidentally. This means we have to keep
613 # changes to other largefiles accidentally. This means we have to keep
614 # track of the largefiles that are being reverted so we only pull down
614 # track of the largefiles that are being reverted so we only pull down
615 # the necessary largefiles.
615 # the necessary largefiles.
616 #
616 #
617 # Standins are only updated (to match the hash of largefiles) before
617 # Standins are only updated (to match the hash of largefiles) before
618 # commits. Update the standins then run the original revert, changing
618 # commits. Update the standins then run the original revert, changing
619 # the matcher to hit standins instead of largefiles. Based on the
619 # the matcher to hit standins instead of largefiles. Based on the
620 # resulting standins update the largefiles.
620 # resulting standins update the largefiles.
621 def overriderevert(orig, ui, repo, *pats, **opts):
621 def overriderevert(orig, ui, repo, *pats, **opts):
622 # Because we put the standins in a bad state (by updating them)
622 # Because we put the standins in a bad state (by updating them)
623 # and then return them to a correct state we need to lock to
623 # and then return them to a correct state we need to lock to
624 # prevent others from changing them in their incorrect state.
624 # prevent others from changing them in their incorrect state.
625 wlock = repo.wlock()
625 wlock = repo.wlock()
626 try:
626 try:
627 lfdirstate = lfutil.openlfdirstate(ui, repo)
627 lfdirstate = lfutil.openlfdirstate(ui, repo)
628 (modified, added, removed, missing, unknown, ignored, clean) = \
628 (modified, added, removed, missing, unknown, ignored, clean) = \
629 lfutil.lfdirstatestatus(lfdirstate, repo, repo['.'].rev())
629 lfutil.lfdirstatestatus(lfdirstate, repo, repo['.'].rev())
630 lfdirstate.write()
630 lfdirstate.write()
631 for lfile in modified:
631 for lfile in modified:
632 lfutil.updatestandin(repo, lfutil.standin(lfile))
632 lfutil.updatestandin(repo, lfutil.standin(lfile))
633 for lfile in missing:
633 for lfile in missing:
634 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
634 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
635 os.unlink(repo.wjoin(lfutil.standin(lfile)))
635 os.unlink(repo.wjoin(lfutil.standin(lfile)))
636
636
637 oldstandins = lfutil.getstandinsstate(repo)
637 oldstandins = lfutil.getstandinsstate(repo)
638
638
639 def overridematch(ctx, pats=[], opts={}, globbed=False,
639 def overridematch(ctx, pats=[], opts={}, globbed=False,
640 default='relpath'):
640 default='relpath'):
641 match = oldmatch(ctx, pats, opts, globbed, default)
641 match = oldmatch(ctx, pats, opts, globbed, default)
642 m = copy.copy(match)
642 m = copy.copy(match)
643 def tostandin(f):
643 def tostandin(f):
644 if lfutil.standin(f) in ctx:
644 if lfutil.standin(f) in ctx:
645 return lfutil.standin(f)
645 return lfutil.standin(f)
646 elif lfutil.standin(f) in repo[None]:
646 elif lfutil.standin(f) in repo[None]:
647 return None
647 return None
648 return f
648 return f
649 m._files = [tostandin(f) for f in m._files]
649 m._files = [tostandin(f) for f in m._files]
650 m._files = [f for f in m._files if f is not None]
650 m._files = [f for f in m._files if f is not None]
651 m._fmap = set(m._files)
651 m._fmap = set(m._files)
652 m._always = False
652 m._always = False
653 origmatchfn = m.matchfn
653 origmatchfn = m.matchfn
654 def matchfn(f):
654 def matchfn(f):
655 if lfutil.isstandin(f):
655 if lfutil.isstandin(f):
656 return (origmatchfn(lfutil.splitstandin(f)) and
656 return (origmatchfn(lfutil.splitstandin(f)) and
657 (f in repo[None] or f in ctx))
657 (f in repo[None] or f in ctx))
658 return origmatchfn(f)
658 return origmatchfn(f)
659 m.matchfn = matchfn
659 m.matchfn = matchfn
660 return m
660 return m
661 oldmatch = installmatchfn(overridematch)
661 oldmatch = installmatchfn(overridematch)
662 try:
662 try:
663 orig(ui, repo, *pats, **opts)
663 orig(ui, repo, *pats, **opts)
664 finally:
664 finally:
665 restorematchfn()
665 restorematchfn()
666
666
667 newstandins = lfutil.getstandinsstate(repo)
667 newstandins = lfutil.getstandinsstate(repo)
668 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
668 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
669 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False)
669 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False)
670
670
671 finally:
671 finally:
672 wlock.release()
672 wlock.release()
673
673
674 def hgupdaterepo(orig, repo, node, overwrite):
674 def hgupdaterepo(orig, repo, node, overwrite):
675 if not overwrite:
675 if not overwrite:
676 # Only call updatelfiles on the standins that have changed to save time
676 # Only call updatelfiles on the standins that have changed to save time
677 oldstandins = lfutil.getstandinsstate(repo)
677 oldstandins = lfutil.getstandinsstate(repo)
678
678
679 result = orig(repo, node, overwrite)
679 result = orig(repo, node, overwrite)
680
680
681 filelist = None
681 filelist = None
682 if not overwrite:
682 if not overwrite:
683 newstandins = lfutil.getstandinsstate(repo)
683 newstandins = lfutil.getstandinsstate(repo)
684 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
684 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
685 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist)
685 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist)
686 return result
686 return result
687
687
688 def hgmerge(orig, repo, node, force=None, remind=True):
688 def hgmerge(orig, repo, node, force=None, remind=True):
689 result = orig(repo, node, force, remind)
689 result = orig(repo, node, force, remind)
690 lfcommands.updatelfiles(repo.ui, repo)
690 lfcommands.updatelfiles(repo.ui, repo)
691 return result
691 return result
692
692
693 # When we rebase a repository with remotely changed largefiles, we need to
693 # When we rebase a repository with remotely changed largefiles, we need to
694 # take some extra care so that the largefiles are correctly updated in the
694 # take some extra care so that the largefiles are correctly updated in the
695 # working copy
695 # working copy
696 def overridepull(orig, ui, repo, source=None, **opts):
696 def overridepull(orig, ui, repo, source=None, **opts):
697 revsprepull = len(repo)
697 revsprepull = len(repo)
698 if not source:
698 if not source:
699 source = 'default'
699 source = 'default'
700 repo.lfpullsource = source
700 repo.lfpullsource = source
701 if opts.get('rebase', False):
701 if opts.get('rebase', False):
702 repo._isrebasing = True
702 repo._isrebasing = True
703 try:
703 try:
704 if opts.get('update'):
704 if opts.get('update'):
705 del opts['update']
705 del opts['update']
706 ui.debug('--update and --rebase are not compatible, ignoring '
706 ui.debug('--update and --rebase are not compatible, ignoring '
707 'the update flag\n')
707 'the update flag\n')
708 del opts['rebase']
708 del opts['rebase']
709 origpostincoming = commands.postincoming
709 origpostincoming = commands.postincoming
710 def _dummy(*args, **kwargs):
710 def _dummy(*args, **kwargs):
711 pass
711 pass
712 commands.postincoming = _dummy
712 commands.postincoming = _dummy
713 try:
713 try:
714 result = commands.pull(ui, repo, source, **opts)
714 result = commands.pull(ui, repo, source, **opts)
715 finally:
715 finally:
716 commands.postincoming = origpostincoming
716 commands.postincoming = origpostincoming
717 revspostpull = len(repo)
717 revspostpull = len(repo)
718 if revspostpull > revsprepull:
718 if revspostpull > revsprepull:
719 result = result or rebase.rebase(ui, repo)
719 result = result or rebase.rebase(ui, repo)
720 finally:
720 finally:
721 repo._isrebasing = False
721 repo._isrebasing = False
722 else:
722 else:
723 result = orig(ui, repo, source, **opts)
723 result = orig(ui, repo, source, **opts)
724 revspostpull = len(repo)
724 revspostpull = len(repo)
725 lfrevs = opts.get('lfrev', [])
725 lfrevs = opts.get('lfrev', [])
726 if opts.get('all_largefiles'):
726 if opts.get('all_largefiles'):
727 lfrevs.append('pulled()')
727 lfrevs.append('pulled()')
728 if lfrevs and revspostpull > revsprepull:
728 if lfrevs and revspostpull > revsprepull:
729 numcached = 0
729 numcached = 0
730 repo.firstpulled = revsprepull # for pulled() revset expression
730 repo.firstpulled = revsprepull # for pulled() revset expression
731 try:
731 try:
732 for rev in scmutil.revrange(repo, lfrevs):
732 for rev in scmutil.revrange(repo, lfrevs):
733 ui.note(_('pulling largefiles for revision %s\n') % rev)
733 ui.note(_('pulling largefiles for revision %s\n') % rev)
734 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
734 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
735 numcached += len(cached)
735 numcached += len(cached)
736 finally:
736 finally:
737 del repo.firstpulled
737 del repo.firstpulled
738 ui.status(_("%d largefiles cached\n") % numcached)
738 ui.status(_("%d largefiles cached\n") % numcached)
739 return result
739 return result
740
740
741 def pulledrevsetsymbol(repo, subset, x):
741 def pulledrevsetsymbol(repo, subset, x):
742 """``pulled()``
742 """``pulled()``
743 Changesets that just has been pulled.
743 Changesets that just has been pulled.
744
744
745 Only available with largefiles from pull --lfrev expressions.
745 Only available with largefiles from pull --lfrev expressions.
746
746
747 .. container:: verbose
747 .. container:: verbose
748
748
749 Some examples:
749 Some examples:
750
750
751 - pull largefiles for all new changesets::
751 - pull largefiles for all new changesets::
752
752
753 hg pull -lfrev "pulled()"
753 hg pull -lfrev "pulled()"
754
754
755 - pull largefiles for all new branch heads::
755 - pull largefiles for all new branch heads::
756
756
757 hg pull -lfrev "head(pulled()) and not closed()"
757 hg pull -lfrev "head(pulled()) and not closed()"
758
758
759 """
759 """
760
760
761 try:
761 try:
762 firstpulled = repo.firstpulled
762 firstpulled = repo.firstpulled
763 except AttributeError:
763 except AttributeError:
764 raise util.Abort(_("pulled() only available in --lfrev"))
764 raise util.Abort(_("pulled() only available in --lfrev"))
765 return revset.baseset([r for r in subset if r >= firstpulled])
765 return revset.baseset([r for r in subset if r >= firstpulled])
766
766
767 def overrideclone(orig, ui, source, dest=None, **opts):
767 def overrideclone(orig, ui, source, dest=None, **opts):
768 d = dest
768 d = dest
769 if d is None:
769 if d is None:
770 d = hg.defaultdest(source)
770 d = hg.defaultdest(source)
771 if opts.get('all_largefiles') and not hg.islocal(d):
771 if opts.get('all_largefiles') and not hg.islocal(d):
772 raise util.Abort(_(
772 raise util.Abort(_(
773 '--all-largefiles is incompatible with non-local destination %s') %
773 '--all-largefiles is incompatible with non-local destination %s') %
774 d)
774 d)
775
775
776 return orig(ui, source, dest, **opts)
776 return orig(ui, source, dest, **opts)
777
777
778 def hgclone(orig, ui, opts, *args, **kwargs):
778 def hgclone(orig, ui, opts, *args, **kwargs):
779 result = orig(ui, opts, *args, **kwargs)
779 result = orig(ui, opts, *args, **kwargs)
780
780
781 if result is not None:
781 if result is not None:
782 sourcerepo, destrepo = result
782 sourcerepo, destrepo = result
783 repo = destrepo.local()
783 repo = destrepo.local()
784
784
785 # Caching is implicitly limited to 'rev' option, since the dest repo was
785 # Caching is implicitly limited to 'rev' option, since the dest repo was
786 # truncated at that point. The user may expect a download count with
786 # truncated at that point. The user may expect a download count with
787 # this option, so attempt whether or not this is a largefile repo.
787 # this option, so attempt whether or not this is a largefile repo.
788 if opts.get('all_largefiles'):
788 if opts.get('all_largefiles'):
789 success, missing = lfcommands.downloadlfiles(ui, repo, None)
789 success, missing = lfcommands.downloadlfiles(ui, repo, None)
790
790
791 if missing != 0:
791 if missing != 0:
792 return None
792 return None
793
793
794 return result
794 return result
795
795
796 def overriderebase(orig, ui, repo, **opts):
796 def overriderebase(orig, ui, repo, **opts):
797 repo._isrebasing = True
797 repo._isrebasing = True
798 try:
798 try:
799 return orig(ui, repo, **opts)
799 return orig(ui, repo, **opts)
800 finally:
800 finally:
801 repo._isrebasing = False
801 repo._isrebasing = False
802
802
803 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
803 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
804 prefix=None, mtime=None, subrepos=None):
804 prefix=None, mtime=None, subrepos=None):
805 # No need to lock because we are only reading history and
805 # No need to lock because we are only reading history and
806 # largefile caches, neither of which are modified.
806 # largefile caches, neither of which are modified.
807 lfcommands.cachelfiles(repo.ui, repo, node)
807 lfcommands.cachelfiles(repo.ui, repo, node)
808
808
809 if kind not in archival.archivers:
809 if kind not in archival.archivers:
810 raise util.Abort(_("unknown archive type '%s'") % kind)
810 raise util.Abort(_("unknown archive type '%s'") % kind)
811
811
812 ctx = repo[node]
812 ctx = repo[node]
813
813
814 if kind == 'files':
814 if kind == 'files':
815 if prefix:
815 if prefix:
816 raise util.Abort(
816 raise util.Abort(
817 _('cannot give prefix when archiving to files'))
817 _('cannot give prefix when archiving to files'))
818 else:
818 else:
819 prefix = archival.tidyprefix(dest, kind, prefix)
819 prefix = archival.tidyprefix(dest, kind, prefix)
820
820
821 def write(name, mode, islink, getdata):
821 def write(name, mode, islink, getdata):
822 if matchfn and not matchfn(name):
822 if matchfn and not matchfn(name):
823 return
823 return
824 data = getdata()
824 data = getdata()
825 if decode:
825 if decode:
826 data = repo.wwritedata(name, data)
826 data = repo.wwritedata(name, data)
827 archiver.addfile(prefix + name, mode, islink, data)
827 archiver.addfile(prefix + name, mode, islink, data)
828
828
829 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
829 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
830
830
831 if repo.ui.configbool("ui", "archivemeta", True):
831 if repo.ui.configbool("ui", "archivemeta", True):
832 def metadata():
832 def metadata():
833 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
833 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
834 hex(repo.changelog.node(0)), hex(node), ctx.branch())
834 hex(repo.changelog.node(0)), hex(node), ctx.branch())
835
835
836 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
836 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
837 if repo.tagtype(t) == 'global')
837 if repo.tagtype(t) == 'global')
838 if not tags:
838 if not tags:
839 repo.ui.pushbuffer()
839 repo.ui.pushbuffer()
840 opts = {'template': '{latesttag}\n{latesttagdistance}',
840 opts = {'template': '{latesttag}\n{latesttagdistance}',
841 'style': '', 'patch': None, 'git': None}
841 'style': '', 'patch': None, 'git': None}
842 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
842 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
843 ltags, dist = repo.ui.popbuffer().split('\n')
843 ltags, dist = repo.ui.popbuffer().split('\n')
844 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
844 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
845 tags += 'latesttagdistance: %s\n' % dist
845 tags += 'latesttagdistance: %s\n' % dist
846
846
847 return base + tags
847 return base + tags
848
848
849 write('.hg_archival.txt', 0644, False, metadata)
849 write('.hg_archival.txt', 0644, False, metadata)
850
850
851 for f in ctx:
851 for f in ctx:
852 ff = ctx.flags(f)
852 ff = ctx.flags(f)
853 getdata = ctx[f].data
853 getdata = ctx[f].data
854 if lfutil.isstandin(f):
854 if lfutil.isstandin(f):
855 path = lfutil.findfile(repo, getdata().strip())
855 path = lfutil.findfile(repo, getdata().strip())
856 if path is None:
856 if path is None:
857 raise util.Abort(
857 raise util.Abort(
858 _('largefile %s not found in repo store or system cache')
858 _('largefile %s not found in repo store or system cache')
859 % lfutil.splitstandin(f))
859 % lfutil.splitstandin(f))
860 f = lfutil.splitstandin(f)
860 f = lfutil.splitstandin(f)
861
861
862 def getdatafn():
862 def getdatafn():
863 fd = None
863 fd = None
864 try:
864 try:
865 fd = open(path, 'rb')
865 fd = open(path, 'rb')
866 return fd.read()
866 return fd.read()
867 finally:
867 finally:
868 if fd:
868 if fd:
869 fd.close()
869 fd.close()
870
870
871 getdata = getdatafn
871 getdata = getdatafn
872 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
872 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
873
873
874 if subrepos:
874 if subrepos:
875 for subpath in sorted(ctx.substate):
875 for subpath in sorted(ctx.substate):
876 sub = ctx.sub(subpath)
876 sub = ctx.sub(subpath)
877 submatch = match_.narrowmatcher(subpath, matchfn)
877 submatch = match_.narrowmatcher(subpath, matchfn)
878 sub.archive(repo.ui, archiver, prefix, submatch)
878 sub.archive(repo.ui, archiver, prefix, submatch)
879
879
880 archiver.done()
880 archiver.done()
881
881
882 def hgsubrepoarchive(orig, repo, ui, archiver, prefix, match=None):
882 def hgsubrepoarchive(orig, repo, ui, archiver, prefix, match=None):
883 repo._get(repo._state + ('hg',))
883 repo._get(repo._state + ('hg',))
884 rev = repo._state[1]
884 rev = repo._state[1]
885 ctx = repo._repo[rev]
885 ctx = repo._repo[rev]
886
886
887 lfcommands.cachelfiles(ui, repo._repo, ctx.node())
887 lfcommands.cachelfiles(ui, repo._repo, ctx.node())
888
888
889 def write(name, mode, islink, getdata):
889 def write(name, mode, islink, getdata):
890 # At this point, the standin has been replaced with the largefile name,
890 # At this point, the standin has been replaced with the largefile name,
891 # so the normal matcher works here without the lfutil variants.
891 # so the normal matcher works here without the lfutil variants.
892 if match and not match(f):
892 if match and not match(f):
893 return
893 return
894 data = getdata()
894 data = getdata()
895
895
896 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
896 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
897
897
898 for f in ctx:
898 for f in ctx:
899 ff = ctx.flags(f)
899 ff = ctx.flags(f)
900 getdata = ctx[f].data
900 getdata = ctx[f].data
901 if lfutil.isstandin(f):
901 if lfutil.isstandin(f):
902 path = lfutil.findfile(repo._repo, getdata().strip())
902 path = lfutil.findfile(repo._repo, getdata().strip())
903 if path is None:
903 if path is None:
904 raise util.Abort(
904 raise util.Abort(
905 _('largefile %s not found in repo store or system cache')
905 _('largefile %s not found in repo store or system cache')
906 % lfutil.splitstandin(f))
906 % lfutil.splitstandin(f))
907 f = lfutil.splitstandin(f)
907 f = lfutil.splitstandin(f)
908
908
909 def getdatafn():
909 def getdatafn():
910 fd = None
910 fd = None
911 try:
911 try:
912 fd = open(os.path.join(prefix, path), 'rb')
912 fd = open(os.path.join(prefix, path), 'rb')
913 return fd.read()
913 return fd.read()
914 finally:
914 finally:
915 if fd:
915 if fd:
916 fd.close()
916 fd.close()
917
917
918 getdata = getdatafn
918 getdata = getdatafn
919
919
920 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
920 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
921
921
922 for subpath in sorted(ctx.substate):
922 for subpath in sorted(ctx.substate):
923 sub = ctx.sub(subpath)
923 sub = ctx.sub(subpath)
924 submatch = match_.narrowmatcher(subpath, match)
924 submatch = match_.narrowmatcher(subpath, match)
925 sub.archive(ui, archiver, os.path.join(prefix, repo._path) + '/',
925 sub.archive(ui, archiver, os.path.join(prefix, repo._path) + '/',
926 submatch)
926 submatch)
927
927
928 # If a largefile is modified, the change is not reflected in its
928 # If a largefile is modified, the change is not reflected in its
929 # standin until a commit. cmdutil.bailifchanged() raises an exception
929 # standin until a commit. cmdutil.bailifchanged() raises an exception
930 # if the repo has uncommitted changes. Wrap it to also check if
930 # if the repo has uncommitted changes. Wrap it to also check if
931 # largefiles were changed. This is used by bisect and backout.
931 # largefiles were changed. This is used by bisect and backout.
932 def overridebailifchanged(orig, repo):
932 def overridebailifchanged(orig, repo):
933 orig(repo)
933 orig(repo)
934 repo.lfstatus = True
934 repo.lfstatus = True
935 modified, added, removed, deleted = repo.status()[:4]
935 modified, added, removed, deleted = repo.status()[:4]
936 repo.lfstatus = False
936 repo.lfstatus = False
937 if modified or added or removed or deleted:
937 if modified or added or removed or deleted:
938 raise util.Abort(_('uncommitted changes'))
938 raise util.Abort(_('uncommitted changes'))
939
939
940 # Fetch doesn't use cmdutil.bailifchanged so override it to add the check
940 # Fetch doesn't use cmdutil.bailifchanged so override it to add the check
941 def overridefetch(orig, ui, repo, *pats, **opts):
941 def overridefetch(orig, ui, repo, *pats, **opts):
942 repo.lfstatus = True
942 repo.lfstatus = True
943 modified, added, removed, deleted = repo.status()[:4]
943 modified, added, removed, deleted = repo.status()[:4]
944 repo.lfstatus = False
944 repo.lfstatus = False
945 if modified or added or removed or deleted:
945 if modified or added or removed or deleted:
946 raise util.Abort(_('uncommitted changes'))
946 raise util.Abort(_('uncommitted changes'))
947 return orig(ui, repo, *pats, **opts)
947 return orig(ui, repo, *pats, **opts)
948
948
949 def overrideforget(orig, ui, repo, *pats, **opts):
949 def overrideforget(orig, ui, repo, *pats, **opts):
950 installnormalfilesmatchfn(repo[None].manifest())
950 installnormalfilesmatchfn(repo[None].manifest())
951 result = orig(ui, repo, *pats, **opts)
951 result = orig(ui, repo, *pats, **opts)
952 restorematchfn()
952 restorematchfn()
953 m = scmutil.match(repo[None], pats, opts)
953 m = scmutil.match(repo[None], pats, opts)
954
954
955 try:
955 try:
956 repo.lfstatus = True
956 repo.lfstatus = True
957 s = repo.status(match=m, clean=True)
957 s = repo.status(match=m, clean=True)
958 finally:
958 finally:
959 repo.lfstatus = False
959 repo.lfstatus = False
960 forget = sorted(s[0] + s[1] + s[3] + s[6])
960 forget = sorted(s[0] + s[1] + s[3] + s[6])
961 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
961 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
962
962
963 for f in forget:
963 for f in forget:
964 if lfutil.standin(f) not in repo.dirstate and not \
964 if lfutil.standin(f) not in repo.dirstate and not \
965 os.path.isdir(m.rel(lfutil.standin(f))):
965 os.path.isdir(m.rel(lfutil.standin(f))):
966 ui.warn(_('not removing %s: file is already untracked\n')
966 ui.warn(_('not removing %s: file is already untracked\n')
967 % m.rel(f))
967 % m.rel(f))
968 result = 1
968 result = 1
969
969
970 for f in forget:
970 for f in forget:
971 if ui.verbose or not m.exact(f):
971 if ui.verbose or not m.exact(f):
972 ui.status(_('removing %s\n') % m.rel(f))
972 ui.status(_('removing %s\n') % m.rel(f))
973
973
974 # Need to lock because standin files are deleted then removed from the
974 # Need to lock because standin files are deleted then removed from the
975 # repository and we could race in-between.
975 # repository and we could race in-between.
976 wlock = repo.wlock()
976 wlock = repo.wlock()
977 try:
977 try:
978 lfdirstate = lfutil.openlfdirstate(ui, repo)
978 lfdirstate = lfutil.openlfdirstate(ui, repo)
979 for f in forget:
979 for f in forget:
980 if lfdirstate[f] == 'a':
980 if lfdirstate[f] == 'a':
981 lfdirstate.drop(f)
981 lfdirstate.drop(f)
982 else:
982 else:
983 lfdirstate.remove(f)
983 lfdirstate.remove(f)
984 lfdirstate.write()
984 lfdirstate.write()
985 standins = [lfutil.standin(f) for f in forget]
985 standins = [lfutil.standin(f) for f in forget]
986 for f in standins:
986 for f in standins:
987 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
987 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
988 repo[None].forget(standins)
988 repo[None].forget(standins)
989 finally:
989 finally:
990 wlock.release()
990 wlock.release()
991
991
992 return result
992 return result
993
993
994 def outgoinghook(ui, repo, other, opts, missing):
994 def outgoinghook(ui, repo, other, opts, missing):
995 if opts.pop('large', None):
995 if opts.pop('large', None):
996 toupload = set()
996 toupload = set()
997 lfutil.getlfilestoupload(repo, missing,
997 lfutil.getlfilestoupload(repo, missing,
998 lambda fn, lfhash: toupload.add(fn))
998 lambda fn, lfhash: toupload.add(fn))
999 if not toupload:
999 if not toupload:
1000 ui.status(_('largefiles: no files to upload\n'))
1000 ui.status(_('largefiles: no files to upload\n'))
1001 else:
1001 else:
1002 ui.status(_('largefiles to upload:\n'))
1002 ui.status(_('largefiles to upload:\n'))
1003 for file in sorted(toupload):
1003 for file in sorted(toupload):
1004 ui.status(lfutil.splitstandin(file) + '\n')
1004 ui.status(lfutil.splitstandin(file) + '\n')
1005 ui.status('\n')
1005 ui.status('\n')
1006
1006
1007 def summaryremotehook(ui, repo, opts, changes):
1007 def summaryremotehook(ui, repo, opts, changes):
1008 largeopt = opts.get('large', False)
1008 largeopt = opts.get('large', False)
1009 if changes is None:
1009 if changes is None:
1010 if largeopt:
1010 if largeopt:
1011 return (False, True) # only outgoing check is needed
1011 return (False, True) # only outgoing check is needed
1012 else:
1012 else:
1013 return (False, False)
1013 return (False, False)
1014 elif largeopt:
1014 elif largeopt:
1015 url, branch, peer, outgoing = changes[1]
1015 url, branch, peer, outgoing = changes[1]
1016 if peer is None:
1016 if peer is None:
1017 # i18n: column positioning for "hg summary"
1017 # i18n: column positioning for "hg summary"
1018 ui.status(_('largefiles: (no remote repo)\n'))
1018 ui.status(_('largefiles: (no remote repo)\n'))
1019 return
1019 return
1020
1020
1021 toupload = set()
1021 toupload = set()
1022 lfutil.getlfilestoupload(repo, outgoing.missing,
1022 lfutil.getlfilestoupload(repo, outgoing.missing,
1023 lambda fn, lfhash: toupload.add(fn))
1023 lambda fn, lfhash: toupload.add(fn))
1024 if not toupload:
1024 if not toupload:
1025 # i18n: column positioning for "hg summary"
1025 # i18n: column positioning for "hg summary"
1026 ui.status(_('largefiles: (no files to upload)\n'))
1026 ui.status(_('largefiles: (no files to upload)\n'))
1027 else:
1027 else:
1028 # i18n: column positioning for "hg summary"
1028 # i18n: column positioning for "hg summary"
1029 ui.status(_('largefiles: %d to upload\n') % len(toupload))
1029 ui.status(_('largefiles: %d to upload\n') % len(toupload))
1030
1030
1031 def overridesummary(orig, ui, repo, *pats, **opts):
1031 def overridesummary(orig, ui, repo, *pats, **opts):
1032 try:
1032 try:
1033 repo.lfstatus = True
1033 repo.lfstatus = True
1034 orig(ui, repo, *pats, **opts)
1034 orig(ui, repo, *pats, **opts)
1035 finally:
1035 finally:
1036 repo.lfstatus = False
1036 repo.lfstatus = False
1037
1037
1038 def scmutiladdremove(orig, repo, pats=[], opts={}, dry_run=None,
1038 def scmutiladdremove(orig, repo, pats=[], opts={}, dry_run=None,
1039 similarity=None):
1039 similarity=None):
1040 if not lfutil.islfilesrepo(repo):
1040 if not lfutil.islfilesrepo(repo):
1041 return orig(repo, pats, opts, dry_run, similarity)
1041 return orig(repo, pats, opts, dry_run, similarity)
1042 # Get the list of missing largefiles so we can remove them
1042 # Get the list of missing largefiles so we can remove them
1043 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1043 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1044 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
1044 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
1045 False, False)
1045 False, False)
1046 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
1046 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
1047
1047
1048 # Call into the normal remove code, but let the original addremove handle
1048 # Call into the normal remove code, but let the original addremove handle
1049 # the removal of the standin. Monkey patching here makes sure
1049 # the removal of the standin. Monkey patching here makes sure
1050 # we don't remove the standin in the largefiles code, preventing a very
1050 # we don't remove the standin in the largefiles code, preventing a very
1051 # confused state later.
1051 # confused state later.
1052 if missing:
1052 if missing:
1053 m = [repo.wjoin(f) for f in missing]
1053 m = [repo.wjoin(f) for f in missing]
1054 repo._isaddremove = True
1054 repo._isaddremove = True
1055 removelargefiles(repo.ui, repo, *m, **opts)
1055 removelargefiles(repo.ui, repo, *m, **opts)
1056 repo._isaddremove = False
1056 repo._isaddremove = False
1057 # Call into the normal add code, and any files that *should* be added as
1057 # Call into the normal add code, and any files that *should* be added as
1058 # largefiles will be
1058 # largefiles will be
1059 addlargefiles(repo.ui, repo, *pats, **opts)
1059 addlargefiles(repo.ui, repo, *pats, **opts)
1060 # Now that we've handled largefiles, hand off to the original addremove
1060 # Now that we've handled largefiles, hand off to the original addremove
1061 # function to take care of the rest. Make sure it doesn't do anything with
1061 # function to take care of the rest. Make sure it doesn't do anything with
1062 # largefiles by installing a matcher that will ignore them.
1062 # largefiles by installing a matcher that will ignore them.
1063 installnormalfilesmatchfn(repo[None].manifest())
1063 installnormalfilesmatchfn(repo[None].manifest())
1064 result = orig(repo, pats, opts, dry_run, similarity)
1064 result = orig(repo, pats, opts, dry_run, similarity)
1065 restorematchfn()
1065 restorematchfn()
1066 return result
1066 return result
1067
1067
1068 # Calling purge with --all will cause the largefiles to be deleted.
1068 # Calling purge with --all will cause the largefiles to be deleted.
1069 # Override repo.status to prevent this from happening.
1069 # Override repo.status to prevent this from happening.
1070 def overridepurge(orig, ui, repo, *dirs, **opts):
1070 def overridepurge(orig, ui, repo, *dirs, **opts):
1071 # XXX largefile status is buggy when used on a repo proxy.
1071 # XXX largefile status is buggy when used on a repo proxy.
1072 # XXX this needs to be investigated.
1072 # XXX this needs to be investigated.
1073 repo = repo.unfiltered()
1073 repo = repo.unfiltered()
1074 oldstatus = repo.status
1074 oldstatus = repo.status
1075 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1075 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1076 clean=False, unknown=False, listsubrepos=False):
1076 clean=False, unknown=False, listsubrepos=False):
1077 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1077 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1078 listsubrepos)
1078 listsubrepos)
1079 lfdirstate = lfutil.openlfdirstate(ui, repo)
1079 lfdirstate = lfutil.openlfdirstate(ui, repo)
1080 modified, added, removed, deleted, unknown, ignored, clean = r
1080 modified, added, removed, deleted, unknown, ignored, clean = r
1081 unknown = [f for f in unknown if lfdirstate[f] == '?']
1081 unknown = [f for f in unknown if lfdirstate[f] == '?']
1082 ignored = [f for f in ignored if lfdirstate[f] == '?']
1082 ignored = [f for f in ignored if lfdirstate[f] == '?']
1083 return modified, added, removed, deleted, unknown, ignored, clean
1083 return modified, added, removed, deleted, unknown, ignored, clean
1084 repo.status = overridestatus
1084 repo.status = overridestatus
1085 orig(ui, repo, *dirs, **opts)
1085 orig(ui, repo, *dirs, **opts)
1086 repo.status = oldstatus
1086 repo.status = oldstatus
1087
1087
1088 def overriderollback(orig, ui, repo, **opts):
1088 def overriderollback(orig, ui, repo, **opts):
1089 result = orig(ui, repo, **opts)
1089 result = orig(ui, repo, **opts)
1090 merge.update(repo, node=None, branchmerge=False, force=True,
1090 merge.update(repo, node=None, branchmerge=False, force=True,
1091 partial=lfutil.isstandin)
1091 partial=lfutil.isstandin)
1092 wlock = repo.wlock()
1092 wlock = repo.wlock()
1093 try:
1093 try:
1094 lfdirstate = lfutil.openlfdirstate(ui, repo)
1094 lfdirstate = lfutil.openlfdirstate(ui, repo)
1095 lfiles = lfutil.listlfiles(repo)
1095 lfiles = lfutil.listlfiles(repo)
1096 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
1096 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
1097 for file in lfiles:
1097 for file in lfiles:
1098 if file in oldlfiles:
1098 if file in oldlfiles:
1099 lfdirstate.normallookup(file)
1099 lfdirstate.normallookup(file)
1100 else:
1100 else:
1101 lfdirstate.add(file)
1101 lfdirstate.add(file)
1102 lfdirstate.write()
1102 lfdirstate.write()
1103 finally:
1103 finally:
1104 wlock.release()
1104 wlock.release()
1105 return result
1105 return result
1106
1106
1107 def overridetransplant(orig, ui, repo, *revs, **opts):
1107 def overridetransplant(orig, ui, repo, *revs, **opts):
1108 try:
1108 try:
1109 oldstandins = lfutil.getstandinsstate(repo)
1109 oldstandins = lfutil.getstandinsstate(repo)
1110 repo._istransplanting = True
1110 repo._istransplanting = True
1111 result = orig(ui, repo, *revs, **opts)
1111 result = orig(ui, repo, *revs, **opts)
1112 newstandins = lfutil.getstandinsstate(repo)
1112 newstandins = lfutil.getstandinsstate(repo)
1113 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1113 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1114 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1114 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1115 printmessage=True)
1115 printmessage=True)
1116 finally:
1116 finally:
1117 repo._istransplanting = False
1117 repo._istransplanting = False
1118 return result
1118 return result
1119
1119
1120 def overridecat(orig, ui, repo, file1, *pats, **opts):
1120 def overridecat(orig, ui, repo, file1, *pats, **opts):
1121 ctx = scmutil.revsingle(repo, opts.get('rev'))
1121 ctx = scmutil.revsingle(repo, opts.get('rev'))
1122 err = 1
1122 err = 1
1123 notbad = set()
1123 notbad = set()
1124 m = scmutil.match(ctx, (file1,) + pats, opts)
1124 m = scmutil.match(ctx, (file1,) + pats, opts)
1125 origmatchfn = m.matchfn
1125 origmatchfn = m.matchfn
1126 def lfmatchfn(f):
1126 def lfmatchfn(f):
1127 if origmatchfn(f):
1127 if origmatchfn(f):
1128 return True
1128 return True
1129 lf = lfutil.splitstandin(f)
1129 lf = lfutil.splitstandin(f)
1130 if lf is None:
1130 if lf is None:
1131 return False
1131 return False
1132 notbad.add(lf)
1132 notbad.add(lf)
1133 return origmatchfn(lf)
1133 return origmatchfn(lf)
1134 m.matchfn = lfmatchfn
1134 m.matchfn = lfmatchfn
1135 origbadfn = m.bad
1135 origbadfn = m.bad
1136 def lfbadfn(f, msg):
1136 def lfbadfn(f, msg):
1137 if not f in notbad:
1137 if not f in notbad:
1138 origbadfn(f, msg)
1138 origbadfn(f, msg)
1139 m.bad = lfbadfn
1139 m.bad = lfbadfn
1140 for f in ctx.walk(m):
1140 for f in ctx.walk(m):
1141 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1141 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1142 pathname=f)
1142 pathname=f)
1143 lf = lfutil.splitstandin(f)
1143 lf = lfutil.splitstandin(f)
1144 if lf is None or origmatchfn(f):
1144 if lf is None or origmatchfn(f):
1145 # duplicating unreachable code from commands.cat
1145 # duplicating unreachable code from commands.cat
1146 data = ctx[f].data()
1146 data = ctx[f].data()
1147 if opts.get('decode'):
1147 if opts.get('decode'):
1148 data = repo.wwritedata(f, data)
1148 data = repo.wwritedata(f, data)
1149 fp.write(data)
1149 fp.write(data)
1150 else:
1150 else:
1151 hash = lfutil.readstandin(repo, lf, ctx.rev())
1151 hash = lfutil.readstandin(repo, lf, ctx.rev())
1152 if not lfutil.inusercache(repo.ui, hash):
1152 if not lfutil.inusercache(repo.ui, hash):
1153 store = basestore._openstore(repo)
1153 store = basestore._openstore(repo)
1154 success, missing = store.get([(lf, hash)])
1154 success, missing = store.get([(lf, hash)])
1155 if len(success) != 1:
1155 if len(success) != 1:
1156 raise util.Abort(
1156 raise util.Abort(
1157 _('largefile %s is not in cache and could not be '
1157 _('largefile %s is not in cache and could not be '
1158 'downloaded') % lf)
1158 'downloaded') % lf)
1159 path = lfutil.usercachepath(repo.ui, hash)
1159 path = lfutil.usercachepath(repo.ui, hash)
1160 fpin = open(path, "rb")
1160 fpin = open(path, "rb")
1161 for chunk in util.filechunkiter(fpin, 128 * 1024):
1161 for chunk in util.filechunkiter(fpin, 128 * 1024):
1162 fp.write(chunk)
1162 fp.write(chunk)
1163 fpin.close()
1163 fpin.close()
1164 fp.close()
1164 fp.close()
1165 err = 0
1165 err = 0
1166 return err
1166 return err
1167
1167
1168 def mercurialsinkbefore(orig, sink):
1168 def mercurialsinkbefore(orig, sink):
1169 sink.repo._isconverting = True
1169 sink.repo._isconverting = True
1170 orig(sink)
1170 orig(sink)
1171
1171
1172 def mercurialsinkafter(orig, sink):
1172 def mercurialsinkafter(orig, sink):
1173 sink.repo._isconverting = False
1173 sink.repo._isconverting = False
1174 orig(sink)
1174 orig(sink)
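Every override in the file above follows the same wrapper convention: the wrapped original is received as the first argument (orig), largefiles-specific state is toggled around it, and orig's result is returned. A minimal sketch of that shape with a hypothetical command name (the registration call shows how such overrides are typically wired up at extension setup time; it is not part of this file):

    def overridefoo(orig, ui, repo, *pats, **opts):
        # report largefiles in status output while the wrapped command runs
        repo.lfstatus = True
        try:
            return orig(ui, repo, *pats, **opts)
        finally:
            repo.lfstatus = False

    # hypothetical registration, normally done from the extension's setup code:
    # extensions.wrapcommand(commands.table, 'foo', overridefoo)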
@@ -1,433 +1,437 b''
1 # filemerge.py - file-level merge handling for Mercurial
1 # filemerge.py - file-level merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import short
8 from node import short
9 from i18n import _
9 from i18n import _
10 import util, simplemerge, match, error, templater, templatekw
10 import util, simplemerge, match, error, templater, templatekw
11 import os, tempfile, re, filecmp
11 import os, tempfile, re, filecmp
12
12
13 def _toolstr(ui, tool, part, default=""):
13 def _toolstr(ui, tool, part, default=""):
14 return ui.config("merge-tools", tool + "." + part, default)
14 return ui.config("merge-tools", tool + "." + part, default)
15
15
16 def _toolbool(ui, tool, part, default=False):
16 def _toolbool(ui, tool, part, default=False):
17 return ui.configbool("merge-tools", tool + "." + part, default)
17 return ui.configbool("merge-tools", tool + "." + part, default)
18
18
19 def _toollist(ui, tool, part, default=[]):
19 def _toollist(ui, tool, part, default=[]):
20 return ui.configlist("merge-tools", tool + "." + part, default)
20 return ui.configlist("merge-tools", tool + "." + part, default)
21
21
22 internals = {}
22 internals = {}
23
23
24 def internaltool(name, trymerge, onfailure=None):
24 def internaltool(name, trymerge, onfailure=None):
25 '''return a decorator for populating internal merge tool table'''
25 '''return a decorator for populating internal merge tool table'''
26 def decorator(func):
26 def decorator(func):
27 fullname = 'internal:' + name
27 fullname = 'internal:' + name
28 func.__doc__ = "``%s``\n" % fullname + func.__doc__.strip()
28 func.__doc__ = "``%s``\n" % fullname + func.__doc__.strip()
29 internals[fullname] = func
29 internals[fullname] = func
30 func.trymerge = trymerge
30 func.trymerge = trymerge
31 func.onfailure = onfailure
31 func.onfailure = onfailure
32 return func
32 return func
33 return decorator
33 return decorator
34
34
35 def _findtool(ui, tool):
35 def _findtool(ui, tool):
36 if tool in internals:
36 if tool in internals:
37 return tool
37 return tool
38 for kn in ("regkey", "regkeyalt"):
38 for kn in ("regkey", "regkeyalt"):
39 k = _toolstr(ui, tool, kn)
39 k = _toolstr(ui, tool, kn)
40 if not k:
40 if not k:
41 continue
41 continue
42 p = util.lookupreg(k, _toolstr(ui, tool, "regname"))
42 p = util.lookupreg(k, _toolstr(ui, tool, "regname"))
43 if p:
43 if p:
44 p = util.findexe(p + _toolstr(ui, tool, "regappend"))
44 p = util.findexe(p + _toolstr(ui, tool, "regappend"))
45 if p:
45 if p:
46 return p
46 return p
47 exe = _toolstr(ui, tool, "executable", tool)
47 exe = _toolstr(ui, tool, "executable", tool)
48 return util.findexe(util.expandpath(exe))
48 return util.findexe(util.expandpath(exe))
49
49
50 def _picktool(repo, ui, path, binary, symlink):
50 def _picktool(repo, ui, path, binary, symlink):
51 def check(tool, pat, symlink, binary):
51 def check(tool, pat, symlink, binary):
52 tmsg = tool
52 tmsg = tool
53 if pat:
53 if pat:
54 tmsg += " specified for " + pat
54 tmsg += " specified for " + pat
55 if not _findtool(ui, tool):
55 if not _findtool(ui, tool):
56 if pat: # explicitly requested tool deserves a warning
56 if pat: # explicitly requested tool deserves a warning
57 ui.warn(_("couldn't find merge tool %s\n") % tmsg)
57 ui.warn(_("couldn't find merge tool %s\n") % tmsg)
58 else: # configured but non-existing tools are more silent
58 else: # configured but non-existing tools are more silent
59 ui.note(_("couldn't find merge tool %s\n") % tmsg)
59 ui.note(_("couldn't find merge tool %s\n") % tmsg)
60 elif symlink and not _toolbool(ui, tool, "symlink"):
60 elif symlink and not _toolbool(ui, tool, "symlink"):
61 ui.warn(_("tool %s can't handle symlinks\n") % tmsg)
61 ui.warn(_("tool %s can't handle symlinks\n") % tmsg)
62 elif binary and not _toolbool(ui, tool, "binary"):
62 elif binary and not _toolbool(ui, tool, "binary"):
63 ui.warn(_("tool %s can't handle binary\n") % tmsg)
63 ui.warn(_("tool %s can't handle binary\n") % tmsg)
64 elif not util.gui() and _toolbool(ui, tool, "gui"):
64 elif not util.gui() and _toolbool(ui, tool, "gui"):
65 ui.warn(_("tool %s requires a GUI\n") % tmsg)
65 ui.warn(_("tool %s requires a GUI\n") % tmsg)
66 else:
66 else:
67 return True
67 return True
68 return False
68 return False
69
69
70 # forcemerge comes from command line arguments, highest priority
70 # forcemerge comes from command line arguments, highest priority
71 force = ui.config('ui', 'forcemerge')
71 force = ui.config('ui', 'forcemerge')
72 if force:
72 if force:
73 toolpath = _findtool(ui, force)
73 toolpath = _findtool(ui, force)
74 if toolpath:
74 if toolpath:
75 return (force, util.shellquote(toolpath))
75 return (force, util.shellquote(toolpath))
76 else:
76 else:
77 # mimic HGMERGE if the given tool is not found
77 # mimic HGMERGE if the given tool is not found
78 return (force, force)
78 return (force, force)
79
79
80 # HGMERGE takes next precedence
80 # HGMERGE takes next precedence
81 hgmerge = os.environ.get("HGMERGE")
81 hgmerge = os.environ.get("HGMERGE")
82 if hgmerge:
82 if hgmerge:
83 return (hgmerge, hgmerge)
83 return (hgmerge, hgmerge)
84
84
85 # then patterns
85 # then patterns
86 for pat, tool in ui.configitems("merge-patterns"):
86 for pat, tool in ui.configitems("merge-patterns"):
87 mf = match.match(repo.root, '', [pat])
87 mf = match.match(repo.root, '', [pat])
88 if mf(path) and check(tool, pat, symlink, False):
88 if mf(path) and check(tool, pat, symlink, False):
89 toolpath = _findtool(ui, tool)
89 toolpath = _findtool(ui, tool)
90 return (tool, util.shellquote(toolpath))
90 return (tool, util.shellquote(toolpath))
91
91
92 # then merge tools
92 # then merge tools
93 tools = {}
93 tools = {}
94 for k, v in ui.configitems("merge-tools"):
94 for k, v in ui.configitems("merge-tools"):
95 t = k.split('.')[0]
95 t = k.split('.')[0]
96 if t not in tools:
96 if t not in tools:
97 tools[t] = int(_toolstr(ui, t, "priority", "0"))
97 tools[t] = int(_toolstr(ui, t, "priority", "0"))
98 names = tools.keys()
98 names = tools.keys()
99 tools = sorted([(-p, t) for t, p in tools.items()])
99 tools = sorted([(-p, t) for t, p in tools.items()])
100 uimerge = ui.config("ui", "merge")
100 uimerge = ui.config("ui", "merge")
101 if uimerge:
101 if uimerge:
102 if uimerge not in names:
102 if uimerge not in names:
103 return (uimerge, uimerge)
103 return (uimerge, uimerge)
104 tools.insert(0, (None, uimerge)) # highest priority
104 tools.insert(0, (None, uimerge)) # highest priority
105 tools.append((None, "hgmerge")) # the old default, if found
105 tools.append((None, "hgmerge")) # the old default, if found
106 for p, t in tools:
106 for p, t in tools:
107 if check(t, None, symlink, binary):
107 if check(t, None, symlink, binary):
108 toolpath = _findtool(ui, t)
108 toolpath = _findtool(ui, t)
109 return (t, util.shellquote(toolpath))
109 return (t, util.shellquote(toolpath))
110
110
111 # internal merge or prompt as last resort
111 # internal merge or prompt as last resort
112 if symlink or binary:
112 if symlink or binary:
113 return "internal:prompt", None
113 return "internal:prompt", None
114 return "internal:merge", None
114 return "internal:merge", None
115
115
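# Summary of the selection order implemented above (descriptive note, not part
# of filemerge.py): ui.forcemerge wins, then $HGMERGE, then the first matching
# [merge-patterns] entry, then ui.merge together with the [merge-tools] entries
# ordered by their .priority (plus the legacy "hgmerge" tool), and finally
# internal:prompt for symlinks or binaries, or internal:merge otherwise.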
116 def _eoltype(data):
116 def _eoltype(data):
117 "Guess the EOL type of a file"
117 "Guess the EOL type of a file"
118 if '\0' in data: # binary
118 if '\0' in data: # binary
119 return None
119 return None
120 if '\r\n' in data: # Windows
120 if '\r\n' in data: # Windows
121 return '\r\n'
121 return '\r\n'
122 if '\r' in data: # Old Mac
122 if '\r' in data: # Old Mac
123 return '\r'
123 return '\r'
124 if '\n' in data: # UNIX
124 if '\n' in data: # UNIX
125 return '\n'
125 return '\n'
126 return None # unknown
126 return None # unknown
127
127
128 def _matcheol(file, origfile):
128 def _matcheol(file, origfile):
129 "Convert EOL markers in a file to match origfile"
129 "Convert EOL markers in a file to match origfile"
130 tostyle = _eoltype(util.readfile(origfile))
130 tostyle = _eoltype(util.readfile(origfile))
131 if tostyle:
131 if tostyle:
132 data = util.readfile(file)
132 data = util.readfile(file)
133 style = _eoltype(data)
133 style = _eoltype(data)
134 if style:
134 if style:
135 newdata = data.replace(style, tostyle)
135 newdata = data.replace(style, tostyle)
136 if newdata != data:
136 if newdata != data:
137 util.writefile(file, newdata)
137 util.writefile(file, newdata)
138
138
139 @internaltool('prompt', False)
139 @internaltool('prompt', False)
140 def _iprompt(repo, mynode, orig, fcd, fco, fca, toolconf):
140 def _iprompt(repo, mynode, orig, fcd, fco, fca, toolconf):
141 """Asks the user which of the local or the other version to keep as
141 """Asks the user which of the local or the other version to keep as
142 the merged version."""
142 the merged version."""
143 ui = repo.ui
143 ui = repo.ui
144 fd = fcd.path()
144 fd = fcd.path()
145
145
146 if ui.promptchoice(_(" no tool found to merge %s\n"
146 if ui.promptchoice(_(" no tool found to merge %s\n"
147 "keep (l)ocal or take (o)ther?"
147 "keep (l)ocal or take (o)ther?"
148 "$$ &Local $$ &Other") % fd, 0):
148 "$$ &Local $$ &Other") % fd, 0):
149 return _iother(repo, mynode, orig, fcd, fco, fca, toolconf)
149 return _iother(repo, mynode, orig, fcd, fco, fca, toolconf)
150 else:
150 else:
151 return _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf)
151 return _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf)
152
152
153 @internaltool('local', False)
153 @internaltool('local', False)
154 def _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf):
154 def _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf):
155 """Uses the local version of files as the merged version."""
155 """Uses the local version of files as the merged version."""
156 return 0
156 return 0
157
157
158 @internaltool('other', False)
158 @internaltool('other', False)
159 def _iother(repo, mynode, orig, fcd, fco, fca, toolconf):
159 def _iother(repo, mynode, orig, fcd, fco, fca, toolconf):
160 """Uses the other version of files as the merged version."""
160 """Uses the other version of files as the merged version."""
161 repo.wwrite(fcd.path(), fco.data(), fco.flags())
161 repo.wwrite(fcd.path(), fco.data(), fco.flags())
162 return 0
162 return 0
163
163
164 @internaltool('fail', False)
164 @internaltool('fail', False)
165 def _ifail(repo, mynode, orig, fcd, fco, fca, toolconf):
165 def _ifail(repo, mynode, orig, fcd, fco, fca, toolconf):
166 """
166 """
167 Rather than attempting to merge files that were modified on both
167 Rather than attempting to merge files that were modified on both
168 branches, it marks them as unresolved. The resolve command must be
168 branches, it marks them as unresolved. The resolve command must be
169 used to resolve these conflicts."""
169 used to resolve these conflicts."""
170 return 1
170 return 1
171
171
172 def _premerge(repo, toolconf, files, labels=None):
172 def _premerge(repo, toolconf, files, labels=None):
173 tool, toolpath, binary, symlink = toolconf
173 tool, toolpath, binary, symlink = toolconf
174 if symlink:
174 if symlink:
175 return 1
175 return 1
176 a, b, c, back = files
176 a, b, c, back = files
177
177
178 ui = repo.ui
178 ui = repo.ui
179
179
180 # do we attempt to simplemerge first?
180 # do we attempt to simplemerge first?
181 try:
181 try:
182 premerge = _toolbool(ui, tool, "premerge", not binary)
182 premerge = _toolbool(ui, tool, "premerge", not binary)
183 except error.ConfigError:
183 except error.ConfigError:
184 premerge = _toolstr(ui, tool, "premerge").lower()
184 premerge = _toolstr(ui, tool, "premerge").lower()
185 valid = 'keep'.split()
185 valid = 'keep'.split()
186 if premerge not in valid:
186 if premerge not in valid:
187 _valid = ', '.join(["'" + v + "'" for v in valid])
187 _valid = ', '.join(["'" + v + "'" for v in valid])
188 raise error.ConfigError(_("%s.premerge not valid "
188 raise error.ConfigError(_("%s.premerge not valid "
189 "('%s' is neither boolean nor %s)") %
189 "('%s' is neither boolean nor %s)") %
190 (tool, premerge, _valid))
190 (tool, premerge, _valid))
191
191
192 if premerge:
192 if premerge:
193 r = simplemerge.simplemerge(ui, a, b, c, quiet=True, label=labels)
193 r = simplemerge.simplemerge(ui, a, b, c, quiet=True, label=labels)
194 if not r:
194 if not r:
195 ui.debug(" premerge successful\n")
195 ui.debug(" premerge successful\n")
196 return 0
196 return 0
197 if premerge != 'keep':
197 if premerge != 'keep':
198 util.copyfile(back, a) # restore from backup and try again
198 util.copyfile(back, a) # restore from backup and try again
199 return 1 # continue merging
199 return 1 # continue merging
200
200
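# Descriptive note on the premerge handling above (not part of filemerge.py):
# merge-tools.<tool>.premerge is first read as a boolean; the only accepted
# string value at this point is 'keep'. With a hypothetical setting such as
# mytool.premerge = keep, a failed simplemerge attempt does not restore the
# backup copy, so the real merge tool runs on the file with the premerge
# conflict markers still in place.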
201 @internaltool('merge', True,
201 @internaltool('merge', True,
202 _("merging %s incomplete! "
202 _("merging %s incomplete! "
203 "(edit conflicts, then use 'hg resolve --mark')\n"))
203 "(edit conflicts, then use 'hg resolve --mark')\n"))
204 def _imerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
204 def _imerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
205 """
205 """
206 Uses the internal non-interactive simple merge algorithm for merging
206 Uses the internal non-interactive simple merge algorithm for merging
207 files. It will fail if there are any conflicts and leave markers in
207 files. It will fail if there are any conflicts and leave markers in
208 the partially merged file."""
208 the partially merged file."""
209 tool, toolpath, binary, symlink = toolconf
209 tool, toolpath, binary, symlink = toolconf
210 if symlink:
210 if symlink:
211 repo.ui.warn(_('warning: internal:merge cannot merge symlinks '
211 repo.ui.warn(_('warning: internal:merge cannot merge symlinks '
212 'for %s\n') % fcd.path())
212 'for %s\n') % fcd.path())
213 return False, 1
213 return False, 1
214 r = _premerge(repo, toolconf, files, labels=labels)
214 r = _premerge(repo, toolconf, files, labels=labels)
215 if r:
215 if r:
216 a, b, c, back = files
216 a, b, c, back = files
217
217
218 ui = repo.ui
218 ui = repo.ui
219
219
220 r = simplemerge.simplemerge(ui, a, b, c, label=labels)
220 r = simplemerge.simplemerge(ui, a, b, c, label=labels)
221 return True, r
221 return True, r
222 return False, 0
222 return False, 0
223
223
224 @internaltool('dump', True)
224 @internaltool('dump', True)
225 def _idump(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
225 def _idump(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
226 """
226 """
227 Creates three versions of the files to merge, containing the
227 Creates three versions of the files to merge, containing the
228 contents of local, other and base. These files can then be used to
228 contents of local, other and base. These files can then be used to
229 perform a merge manually. If the file to be merged is named
229 perform a merge manually. If the file to be merged is named
230 ``a.txt``, these files will accordingly be named ``a.txt.local``,
230 ``a.txt``, these files will accordingly be named ``a.txt.local``,
231 ``a.txt.other`` and ``a.txt.base`` and they will be placed in the
231 ``a.txt.other`` and ``a.txt.base`` and they will be placed in the
232 same directory as ``a.txt``."""
232 same directory as ``a.txt``."""
233 r = _premerge(repo, toolconf, files, labels=labels)
233 r = _premerge(repo, toolconf, files, labels=labels)
234 if r:
234 if r:
235 a, b, c, back = files
235 a, b, c, back = files
236
236
237 fd = fcd.path()
237 fd = fcd.path()
238
238
239 util.copyfile(a, a + ".local")
239 util.copyfile(a, a + ".local")
240 repo.wwrite(fd + ".other", fco.data(), fco.flags())
240 repo.wwrite(fd + ".other", fco.data(), fco.flags())
241 repo.wwrite(fd + ".base", fca.data(), fca.flags())
241 repo.wwrite(fd + ".base", fca.data(), fca.flags())
242 return False, r
242 return False, r
243
243
244 def _xmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
244 def _xmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
245 r = _premerge(repo, toolconf, files, labels=labels)
245 r = _premerge(repo, toolconf, files, labels=labels)
246 if r:
246 if r:
247 tool, toolpath, binary, symlink = toolconf
247 tool, toolpath, binary, symlink = toolconf
248 a, b, c, back = files
248 a, b, c, back = files
249 out = ""
249 out = ""
250 env = {'HG_FILE': fcd.path(),
250 env = {'HG_FILE': fcd.path(),
251 'HG_MY_NODE': short(mynode),
251 'HG_MY_NODE': short(mynode),
252 'HG_OTHER_NODE': str(fco.changectx()),
252 'HG_OTHER_NODE': str(fco.changectx()),
253 'HG_BASE_NODE': str(fca.changectx()),
253 'HG_BASE_NODE': str(fca.changectx()),
254 'HG_MY_ISLINK': 'l' in fcd.flags(),
254 'HG_MY_ISLINK': 'l' in fcd.flags(),
255 'HG_OTHER_ISLINK': 'l' in fco.flags(),
255 'HG_OTHER_ISLINK': 'l' in fco.flags(),
256 'HG_BASE_ISLINK': 'l' in fca.flags(),
256 'HG_BASE_ISLINK': 'l' in fca.flags(),
257 }
257 }
258
258
259 ui = repo.ui
259 ui = repo.ui
260
260
261 args = _toolstr(ui, tool, "args", '$local $base $other')
261 args = _toolstr(ui, tool, "args", '$local $base $other')
262 if "$output" in args:
262 if "$output" in args:
263 out, a = a, back # read input from backup, write to original
263 out, a = a, back # read input from backup, write to original
264 replace = {'local': a, 'base': b, 'other': c, 'output': out}
264 replace = {'local': a, 'base': b, 'other': c, 'output': out}
265 args = util.interpolate(r'\$', replace, args,
265 args = util.interpolate(r'\$', replace, args,
266 lambda s: util.shellquote(util.localpath(s)))
266 lambda s: util.shellquote(util.localpath(s)))
267 r = util.system(toolpath + ' ' + args, cwd=repo.root, environ=env,
267 r = util.system(toolpath + ' ' + args, cwd=repo.root, environ=env,
268 out=ui.fout)
268 out=ui.fout)
269 return True, r
269 return True, r
270 return False, 0
270 return False, 0
271
271
272 def _formatconflictmarker(repo, ctx, template, label, pad):
272 def _formatconflictmarker(repo, ctx, template, label, pad):
273 """Applies the given template to the ctx, prefixed by the label.
273 """Applies the given template to the ctx, prefixed by the label.
274
274
275 Pad is the minimum width of the label prefix, so that multiple markers
275 Pad is the minimum width of the label prefix, so that multiple markers
276 can have aligned templated parts.
276 can have aligned templated parts.
277 """
277 """
278 if ctx.node() is None:
278 if ctx.node() is None:
279 ctx = ctx.p1()
279 ctx = ctx.p1()
280
280
281 props = templatekw.keywords.copy()
281 props = templatekw.keywords.copy()
282 props['templ'] = template
282 props['templ'] = template
283 props['ctx'] = ctx
283 props['ctx'] = ctx
284 props['repo'] = repo
284 props['repo'] = repo
285 templateresult = template('conflictmarker', **props)
285 templateresult = template('conflictmarker', **props)
286
286
287 label = ('%s:' % label).ljust(pad + 1)
287 label = ('%s:' % label).ljust(pad + 1)
288 mark = '%s %s' % (label, templater.stringify(templateresult))
288 mark = '%s %s' % (label, templater.stringify(templateresult))
289
289
290 # The <<< marks add 8 to the length, and '...' adds three, so max
290 # The <<< marks add 8 to the length, and '...' adds three, so max
291 # length of the actual marker is 69.
291 # length of the actual marker is 69.
292 maxlength = 80 - 8 - 3
292 maxlength = 80 - 8 - 3
293 if len(mark) > maxlength:
293 if len(mark) > maxlength:
294 mark = mark[:maxlength] + '...'
294 mark = mark[:maxlength] + '...'
295 return mark
295 return mark
296
296
297 _defaultconflictmarker = ('{node|short} ' +
297 _defaultconflictmarker = ('{node|short} ' +
298 '{ifeq(tags, "tip", "", "{tags} ")}' +
298 '{ifeq(tags, "tip", "", "{tags} ")}' +
299 '{if(bookmarks, "{bookmarks} ")}' +
299 '{if(bookmarks, "{bookmarks} ")}' +
300 '{ifeq(branch, "default", "", "{branch} ")}' +
300 '{ifeq(branch, "default", "", "{branch} ")}' +
301 '- {author|user}: "{desc|firstline}"')
301 '- {author|user}: "{desc|firstline}"')
302
302
303 _defaultconflictlabels = ['local', 'other']
304
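# Illustration of the marker formatting done by _formatconflictmarker() above
# and _formatlabels() below (hypothetical output, not part of filemerge.py):
# with the default template and the default labels ['local', 'other'], a
# rendered conflict marker line takes roughly the form
#
#     <<<<<<< local: 1f0dee641bb7 - alice: "first line of the description"
#
# the label is padded so that 'local:' and 'other:' line up, and the rendered
# text is truncated to 69 characters so the whole marker stays within 80
# columns.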
303 def _formatlabels(repo, fcd, fco, labels):
305 def _formatlabels(repo, fcd, fco, labels):
304 """Formats the given labels using the conflict marker template.
306 """Formats the given labels using the conflict marker template.
305
307
306 Returns a list of formatted labels.
308 Returns a list of formatted labels.
307 """
309 """
308 cd = fcd.changectx()
310 cd = fcd.changectx()
309 co = fco.changectx()
311 co = fco.changectx()
310
312
311 ui = repo.ui
313 ui = repo.ui
312 template = ui.config('ui', 'mergemarkertemplate', _defaultconflictmarker)
314 template = ui.config('ui', 'mergemarkertemplate', _defaultconflictmarker)
313 template = templater.parsestring(template, quoted=False)
315 template = templater.parsestring(template, quoted=False)
314 tmpl = templater.templater(None, cache={ 'conflictmarker' : template })
316 tmpl = templater.templater(None, cache={ 'conflictmarker' : template })
315
317
316 pad = max(len(labels[0]), len(labels[1]))
318 pad = max(len(labels[0]), len(labels[1]))
317
319
318 return [_formatconflictmarker(repo, cd, tmpl, labels[0], pad),
320 return [_formatconflictmarker(repo, cd, tmpl, labels[0], pad),
319 _formatconflictmarker(repo, co, tmpl, labels[1], pad)]
321 _formatconflictmarker(repo, co, tmpl, labels[1], pad)]
320
322
321 def filemerge(repo, mynode, orig, fcd, fco, fca):
323 def filemerge(repo, mynode, orig, fcd, fco, fca, labels=None):
322 """perform a 3-way merge in the working directory
324 """perform a 3-way merge in the working directory
323
325
324 mynode = parent node before merge
326 mynode = parent node before merge
325 orig = original local filename before merge
327 orig = original local filename before merge
326 fco = other file context
328 fco = other file context
327 fca = ancestor file context
329 fca = ancestor file context
328 fcd = local file context for current/destination file
330 fcd = local file context for current/destination file
329 """
331 """
330
332
331 def temp(prefix, ctx):
333 def temp(prefix, ctx):
332 pre = "%s~%s." % (os.path.basename(ctx.path()), prefix)
334 pre = "%s~%s." % (os.path.basename(ctx.path()), prefix)
333 (fd, name) = tempfile.mkstemp(prefix=pre)
335 (fd, name) = tempfile.mkstemp(prefix=pre)
334 data = repo.wwritedata(ctx.path(), ctx.data())
336 data = repo.wwritedata(ctx.path(), ctx.data())
335 f = os.fdopen(fd, "wb")
337 f = os.fdopen(fd, "wb")
336 f.write(data)
338 f.write(data)
337 f.close()
339 f.close()
338 return name
340 return name
339
341
340 if not fco.cmp(fcd): # files identical?
342 if not fco.cmp(fcd): # files identical?
341 return None
343 return None
342
344
343 ui = repo.ui
345 ui = repo.ui
344 fd = fcd.path()
346 fd = fcd.path()
345 binary = fcd.isbinary() or fco.isbinary() or fca.isbinary()
347 binary = fcd.isbinary() or fco.isbinary() or fca.isbinary()
346 symlink = 'l' in fcd.flags() + fco.flags()
348 symlink = 'l' in fcd.flags() + fco.flags()
347 tool, toolpath = _picktool(repo, ui, fd, binary, symlink)
349 tool, toolpath = _picktool(repo, ui, fd, binary, symlink)
348 ui.debug("picked tool '%s' for %s (binary %s symlink %s)\n" %
350 ui.debug("picked tool '%s' for %s (binary %s symlink %s)\n" %
349 (tool, fd, binary, symlink))
351 (tool, fd, binary, symlink))
350
352
351 if tool in internals:
353 if tool in internals:
352 func = internals[tool]
354 func = internals[tool]
353 trymerge = func.trymerge
355 trymerge = func.trymerge
354 onfailure = func.onfailure
356 onfailure = func.onfailure
355 else:
357 else:
356 func = _xmerge
358 func = _xmerge
357 trymerge = True
359 trymerge = True
358 onfailure = _("merging %s failed!\n")
360 onfailure = _("merging %s failed!\n")
359
361
360 toolconf = tool, toolpath, binary, symlink
362 toolconf = tool, toolpath, binary, symlink
361
363
362 if not trymerge:
364 if not trymerge:
363 return func(repo, mynode, orig, fcd, fco, fca, toolconf)
365 return func(repo, mynode, orig, fcd, fco, fca, toolconf)
364
366
365 a = repo.wjoin(fd)
367 a = repo.wjoin(fd)
366 b = temp("base", fca)
368 b = temp("base", fca)
367 c = temp("other", fco)
369 c = temp("other", fco)
368 back = a + ".orig"
370 back = a + ".orig"
369 util.copyfile(a, back)
371 util.copyfile(a, back)
370
372
371 if orig != fco.path():
373 if orig != fco.path():
372 ui.status(_("merging %s and %s to %s\n") % (orig, fco.path(), fd))
374 ui.status(_("merging %s and %s to %s\n") % (orig, fco.path(), fd))
373 else:
375 else:
374 ui.status(_("merging %s\n") % fd)
376 ui.status(_("merging %s\n") % fd)
375
377
376 ui.debug("my %s other %s ancestor %s\n" % (fcd, fco, fca))
378 ui.debug("my %s other %s ancestor %s\n" % (fcd, fco, fca))
377
379
378 markerstyle = ui.config('ui', 'mergemarkers', 'detailed')
380 markerstyle = ui.config('ui', 'mergemarkers', 'detailed')
379 labels = ['local', 'other']
380 if markerstyle == 'basic':
381 if markerstyle == 'basic':
381 formattedlabels = labels
382 formattedlabels = _defaultconflictlabels
382 else:
383 else:
384 if not labels:
385 labels = _defaultconflictlabels
386
383 formattedlabels = _formatlabels(repo, fcd, fco, labels)
387 formattedlabels = _formatlabels(repo, fcd, fco, labels)
384
388
385 needcheck, r = func(repo, mynode, orig, fcd, fco, fca, toolconf,
389 needcheck, r = func(repo, mynode, orig, fcd, fco, fca, toolconf,
386 (a, b, c, back), labels=formattedlabels)
390 (a, b, c, back), labels=formattedlabels)
387 if not needcheck:
391 if not needcheck:
388 if r:
392 if r:
389 if onfailure:
393 if onfailure:
390 ui.warn(onfailure % fd)
394 ui.warn(onfailure % fd)
391 else:
395 else:
392 util.unlink(back)
396 util.unlink(back)
393
397
394 util.unlink(b)
398 util.unlink(b)
395 util.unlink(c)
399 util.unlink(c)
396 return r
400 return r
397
401
398 if not r and (_toolbool(ui, tool, "checkconflicts") or
402 if not r and (_toolbool(ui, tool, "checkconflicts") or
399 'conflicts' in _toollist(ui, tool, "check")):
403 'conflicts' in _toollist(ui, tool, "check")):
400 if re.search("^(<<<<<<< .*|=======|>>>>>>> .*)$", fcd.data(),
404 if re.search("^(<<<<<<< .*|=======|>>>>>>> .*)$", fcd.data(),
401 re.MULTILINE):
405 re.MULTILINE):
402 r = 1
406 r = 1
403
407
404 checked = False
408 checked = False
405 if 'prompt' in _toollist(ui, tool, "check"):
409 if 'prompt' in _toollist(ui, tool, "check"):
406 checked = True
410 checked = True
407 if ui.promptchoice(_("was merge of '%s' successful (yn)?"
411 if ui.promptchoice(_("was merge of '%s' successful (yn)?"
408 "$$ &Yes $$ &No") % fd, 1):
412 "$$ &Yes $$ &No") % fd, 1):
409 r = 1
413 r = 1
410
414
411 if not r and not checked and (_toolbool(ui, tool, "checkchanged") or
415 if not r and not checked and (_toolbool(ui, tool, "checkchanged") or
412 'changed' in _toollist(ui, tool, "check")):
416 'changed' in _toollist(ui, tool, "check")):
413 if filecmp.cmp(a, back):
417 if filecmp.cmp(a, back):
414 if ui.promptchoice(_(" output file %s appears unchanged\n"
418 if ui.promptchoice(_(" output file %s appears unchanged\n"
415 "was merge successful (yn)?"
419 "was merge successful (yn)?"
416 "$$ &Yes $$ &No") % fd, 1):
420 "$$ &Yes $$ &No") % fd, 1):
417 r = 1
421 r = 1
418
422
419 if _toolbool(ui, tool, "fixeol"):
423 if _toolbool(ui, tool, "fixeol"):
420 _matcheol(a, back)
424 _matcheol(a, back)
421
425
422 if r:
426 if r:
423 if onfailure:
427 if onfailure:
424 ui.warn(onfailure % fd)
428 ui.warn(onfailure % fd)
425 else:
429 else:
426 util.unlink(back)
430 util.unlink(back)
427
431
428 util.unlink(b)
432 util.unlink(b)
429 util.unlink(c)
433 util.unlink(c)
430 return r
434 return r
431
435
432 # tell hggettext to extract docstrings from these functions:
436 # tell hggettext to extract docstrings from these functions:
433 i18nfunctions = internals.values()
437 i18nfunctions = internals.values()
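The net effect of the filemerge.py hunk above is that filemerge() now accepts an optional labels list and falls back to _defaultconflictlabels (['local', 'other']) when no labels are passed; with ui.mergemarkers at its default of 'detailed', the labels are expanded via _formatlabels() before reaching the premerge and merge steps. A minimal sketch of the call shape from a caller's side (the contexts and label strings are hypothetical placeholders, not code from this changeset):

    # file contexts for one conflicting file f
    fcd = wctx[f]                 # local / destination version
    fco = mctx[f]                 # other version
    fca = ancestorctx[f]          # common ancestor version

    r = filemerge.filemerge(repo, wctx.p1().node(), f, fcd, fco, fca,
                            labels=['local', 'other'])
    # r is None when the two sides are identical, 0 on a clean merge,
    # and non-zero when conflicts remain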
@@ -1,1188 +1,1189 b''
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import struct
8 import struct
9
9
10 from node import nullid, nullrev, hex, bin
10 from node import nullid, nullrev, hex, bin
11 from i18n import _
11 from i18n import _
12 from mercurial import obsolete
12 from mercurial import obsolete
13 import error, util, filemerge, copies, subrepo, worker, dicthelpers
13 import error, util, filemerge, copies, subrepo, worker, dicthelpers
14 import errno, os, shutil
14 import errno, os, shutil
15
15
16 _pack = struct.pack
16 _pack = struct.pack
17 _unpack = struct.unpack
17 _unpack = struct.unpack
18
18
19 def _droponode(data):
19 def _droponode(data):
20 # used for compatibility for v1
20 # used for compatibility for v1
21 bits = data.split("\0")
21 bits = data.split("\0")
22 bits = bits[:-2] + bits[-1:]
22 bits = bits[:-2] + bits[-1:]
23 return "\0".join(bits)
23 return "\0".join(bits)
24
24
25 class mergestate(object):
25 class mergestate(object):
26 '''track 3-way merge state of individual files
26 '''track 3-way merge state of individual files
27
27
28 it is stored on disk when needed. Two files are used, one with an old
28 it is stored on disk when needed. Two files are used, one with an old
29 format, one with a new format. Both contain similar data, but the new
29 format, one with a new format. Both contain similar data, but the new
30 format can store new kinds of fields.
30 format can store new kinds of fields.
31
31
32 The current new format is a list of arbitrary records of the form:
32 The current new format is a list of arbitrary records of the form:
33
33
34 [type][length][content]
34 [type][length][content]
35
35
36 Type is a single character, length is a 4-byte integer, content is an
36 Type is a single character, length is a 4-byte integer, content is an
37 arbitrary sequence of bytes of length `length`.
37 arbitrary sequence of bytes of length `length`.
38
38
39 Type should be a letter. Capital letters are mandatory records; Mercurial
39 Type should be a letter. Capital letters are mandatory records; Mercurial
40 should abort if they are unknown. Lower case records can be safely ignored.
40 should abort if they are unknown. Lower case records can be safely ignored.
41
41
42 Currently known records:
42 Currently known records:
43
43
44 L: the node of the "local" part of the merge (hexified version)
44 L: the node of the "local" part of the merge (hexified version)
45 O: the node of the "other" part of the merge (hexified version)
45 O: the node of the "other" part of the merge (hexified version)
46 F: an entry for a file to be merged
46 F: an entry for a file to be merged
47 '''
47 '''
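# Illustrative sketch of the [type][length][content] framing described in the
# docstring above (a hypothetical helper, not necessarily the writer Mercurial
# actually uses; the struct module is already imported at the top of this file):
#
#     def _packrecord(rtype, data):
#         # 1-byte type, 4-byte big-endian length, then the raw content
#         return _pack('>sI', rtype, len(data)) + data
#
#     rec = _packrecord('L', '0123456789abcdef' * 2 + '01234567')  # 40 hex chars
#     assert _unpack('>sI', rec[:5]) == ('L', 40)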
48 statepathv1 = "merge/state"
48 statepathv1 = "merge/state"
49 statepathv2 = "merge/state2"
49 statepathv2 = "merge/state2"
50
50
51 def __init__(self, repo):
51 def __init__(self, repo):
52 self._repo = repo
52 self._repo = repo
53 self._dirty = False
53 self._dirty = False
54 self._read()
54 self._read()
55
55
56 def reset(self, node=None, other=None):
56 def reset(self, node=None, other=None):
57 self._state = {}
57 self._state = {}
58 self._local = None
58 self._local = None
59 self._other = None
59 self._other = None
60 if node:
60 if node:
61 self._local = node
61 self._local = node
62 self._other = other
62 self._other = other
63 shutil.rmtree(self._repo.join("merge"), True)
63 shutil.rmtree(self._repo.join("merge"), True)
64 self._dirty = False
64 self._dirty = False
65
65
66 def _read(self):
66 def _read(self):
67 """Analyse each record content to restore a serialized state from disk
67 """Analyse each record content to restore a serialized state from disk
68
68
69 This function process "record" entry produced by the de-serialization
69 This function process "record" entry produced by the de-serialization
70 of on disk file.
70 of on disk file.
71 """
71 """
72 self._state = {}
72 self._state = {}
73 self._local = None
73 self._local = None
74 self._other = None
74 self._other = None
75 records = self._readrecords()
75 records = self._readrecords()
76 for rtype, record in records:
76 for rtype, record in records:
77 if rtype == 'L':
77 if rtype == 'L':
78 self._local = bin(record)
78 self._local = bin(record)
79 elif rtype == 'O':
79 elif rtype == 'O':
80 self._other = bin(record)
80 self._other = bin(record)
81 elif rtype == "F":
81 elif rtype == "F":
82 bits = record.split("\0")
82 bits = record.split("\0")
83 self._state[bits[0]] = bits[1:]
83 self._state[bits[0]] = bits[1:]
84 elif not rtype.islower():
84 elif not rtype.islower():
85 raise util.Abort(_('unsupported merge state record: %s')
85 raise util.Abort(_('unsupported merge state record: %s')
86 % rtype)
86 % rtype)
87 self._dirty = False
87 self._dirty = False
88
88
89 def _readrecords(self):
89 def _readrecords(self):
90 """Read merge state from disk and return a list of record (TYPE, data)
90 """Read merge state from disk and return a list of record (TYPE, data)
91
91
92 We read data from both v1 and v2 files and decide which one to use.
92 We read data from both v1 and v2 files and decide which one to use.
93
93
94 V1 has been used by version prior to 2.9.1 and contains less data than
94 V1 has been used by version prior to 2.9.1 and contains less data than
95 v2. We read both versions and check if no data in v2 contradicts
95 v2. We read both versions and check if no data in v2 contradicts
96 v1. If there is not contradiction we can safely assume that both v1
96 v1. If there is not contradiction we can safely assume that both v1
97 and v2 were written at the same time and use the extract data in v2. If
97 and v2 were written at the same time and use the extract data in v2. If
98 there is contradiction we ignore v2 content as we assume an old version
98 there is contradiction we ignore v2 content as we assume an old version
99 of Mercurial has overwritten the mergestate file and left an old v2
99 of Mercurial has overwritten the mergestate file and left an old v2
100 file around.
100 file around.
101
101
102 returns list of record [(TYPE, data), ...]"""
102 returns list of record [(TYPE, data), ...]"""
103 v1records = self._readrecordsv1()
103 v1records = self._readrecordsv1()
104 v2records = self._readrecordsv2()
104 v2records = self._readrecordsv2()
105 oldv2 = set() # old format version of v2 record
105 oldv2 = set() # old format version of v2 record
106 for rec in v2records:
106 for rec in v2records:
107 if rec[0] == 'L':
107 if rec[0] == 'L':
108 oldv2.add(rec)
108 oldv2.add(rec)
109 elif rec[0] == 'F':
109 elif rec[0] == 'F':
110 # drop the onode data (not contained in v1)
110 # drop the onode data (not contained in v1)
111 oldv2.add(('F', _droponode(rec[1])))
111 oldv2.add(('F', _droponode(rec[1])))
112 for rec in v1records:
112 for rec in v1records:
113 if rec not in oldv2:
113 if rec not in oldv2:
114 # v1 file is newer than v2 file, use it
114 # v1 file is newer than v2 file, use it
115 # we have to infer the "other" changeset of the merge
115 # we have to infer the "other" changeset of the merge
116 # we cannot do better than that with v1 of the format
116 # we cannot do better than that with v1 of the format
117 mctx = self._repo[None].parents()[-1]
117 mctx = self._repo[None].parents()[-1]
118 v1records.append(('O', mctx.hex()))
118 v1records.append(('O', mctx.hex()))
119 # add placeholder "other" file node information
119 # add placeholder "other" file node information
120 # nobody is using it yet so we do not need to fetch the data
120 # nobody is using it yet so we do not need to fetch the data
121 # if mctx was wrong, `mctx[bits[-2]]` may fail.
121 # if mctx was wrong, `mctx[bits[-2]]` may fail.
122 for idx, r in enumerate(v1records):
122 for idx, r in enumerate(v1records):
123 if r[0] == 'F':
123 if r[0] == 'F':
124 bits = r[1].split("\0")
124 bits = r[1].split("\0")
125 bits.insert(-2, '')
125 bits.insert(-2, '')
126 v1records[idx] = (r[0], "\0".join(bits))
126 v1records[idx] = (r[0], "\0".join(bits))
127 return v1records
127 return v1records
128 else:
128 else:
129 return v2records
129 return v2records
130
130
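Restated as a tiny standalone check (an editor's sketch with made-up records, not code from this module): v2 is trusted only when every v1 record reappears in the reduced v2 set built above.

    def v2lookstrusty(v1records, oldv2):
        # oldv2 is v2 reduced to what v1 can express: 'L' records plus 'F'
        # records with the "other node" field dropped, as in the loop above
        return all(rec in oldv2 for rec in v1records)

    v1 = [('L', 'a' * 40), ('F', 'foo.c\0u\0<remaining v1 fields>')]
    assert v2lookstrusty(v1, set(v1) | set([('O', 'b' * 40)]))
    assert not v2lookstrusty([('F', 'bar.c\0u\0<other fields>')], set(v1))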
131 def _readrecordsv1(self):
131 def _readrecordsv1(self):
132 """read on disk merge state for version 1 file
132 """read on disk merge state for version 1 file
133
133
134 returns list of record [(TYPE, data), ...]
134 returns list of record [(TYPE, data), ...]
135
135
136 Note: the "F" data from this file are one entry short
136 Note: the "F" data from this file are one entry short
137 (no "other file node" entry)
137 (no "other file node" entry)
138 """
138 """
139 records = []
139 records = []
140 try:
140 try:
141 f = self._repo.opener(self.statepathv1)
141 f = self._repo.opener(self.statepathv1)
142 for i, l in enumerate(f):
142 for i, l in enumerate(f):
143 if i == 0:
143 if i == 0:
144 records.append(('L', l[:-1]))
144 records.append(('L', l[:-1]))
145 else:
145 else:
146 records.append(('F', l[:-1]))
146 records.append(('F', l[:-1]))
147 f.close()
147 f.close()
148 except IOError, err:
148 except IOError, err:
149 if err.errno != errno.ENOENT:
149 if err.errno != errno.ENOENT:
150 raise
150 raise
151 return records
151 return records
152
152
153 def _readrecordsv2(self):
153 def _readrecordsv2(self):
154 """read on disk merge state for version 2 file
154 """read on disk merge state for version 2 file
155
155
156 returns list of record [(TYPE, data), ...]
156 returns list of record [(TYPE, data), ...]
157 """
157 """
158 records = []
158 records = []
159 try:
159 try:
160 f = self._repo.opener(self.statepathv2)
160 f = self._repo.opener(self.statepathv2)
161 data = f.read()
161 data = f.read()
162 off = 0
162 off = 0
163 end = len(data)
163 end = len(data)
164 while off < end:
164 while off < end:
165 rtype = data[off]
165 rtype = data[off]
166 off += 1
166 off += 1
167 length = _unpack('>I', data[off:(off + 4)])[0]
167 length = _unpack('>I', data[off:(off + 4)])[0]
168 off += 4
168 off += 4
169 record = data[off:(off + length)]
169 record = data[off:(off + length)]
170 off += length
170 off += length
171 records.append((rtype, record))
171 records.append((rtype, record))
172 f.close()
172 f.close()
173 except IOError, err:
173 except IOError, err:
174 if err.errno != errno.ENOENT:
174 if err.errno != errno.ENOENT:
175 raise
175 raise
176 return records
176 return records
177
177
178 def active(self):
178 def active(self):
179 """Whether mergestate is active.
179 """Whether mergestate is active.
180
180
181 Returns True if there appears to be mergestate. This is a rough proxy
181 Returns True if there appears to be mergestate. This is a rough proxy
182 for "is a merge in progress."
182 for "is a merge in progress."
183 """
183 """
184 # Check local variables before looking at filesystem for performance
184 # Check local variables before looking at filesystem for performance
185 # reasons.
185 # reasons.
186 return bool(self._local) or bool(self._state) or \
186 return bool(self._local) or bool(self._state) or \
187 self._repo.opener.exists(self.statepathv1) or \
187 self._repo.opener.exists(self.statepathv1) or \
188 self._repo.opener.exists(self.statepathv2)
188 self._repo.opener.exists(self.statepathv2)
189
189
190 def commit(self):
190 def commit(self):
191 """Write current state on disk (if necessary)"""
191 """Write current state on disk (if necessary)"""
192 if self._dirty:
192 if self._dirty:
193 records = []
193 records = []
194 records.append(("L", hex(self._local)))
194 records.append(("L", hex(self._local)))
195 records.append(("O", hex(self._other)))
195 records.append(("O", hex(self._other)))
196 for d, v in self._state.iteritems():
196 for d, v in self._state.iteritems():
197 records.append(("F", "\0".join([d] + v)))
197 records.append(("F", "\0".join([d] + v)))
198 self._writerecords(records)
198 self._writerecords(records)
199 self._dirty = False
199 self._dirty = False
200
200
201 def _writerecords(self, records):
201 def _writerecords(self, records):
202 """Write current state on disk (both v1 and v2)"""
202 """Write current state on disk (both v1 and v2)"""
203 self._writerecordsv1(records)
203 self._writerecordsv1(records)
204 self._writerecordsv2(records)
204 self._writerecordsv2(records)
205
205
206 def _writerecordsv1(self, records):
206 def _writerecordsv1(self, records):
207 """Write current state on disk in a version 1 file"""
207 """Write current state on disk in a version 1 file"""
208 f = self._repo.opener(self.statepathv1, "w")
208 f = self._repo.opener(self.statepathv1, "w")
209 irecords = iter(records)
209 irecords = iter(records)
210 lrecords = irecords.next()
210 lrecords = irecords.next()
211 assert lrecords[0] == 'L'
211 assert lrecords[0] == 'L'
212 f.write(hex(self._local) + "\n")
212 f.write(hex(self._local) + "\n")
213 for rtype, data in irecords:
213 for rtype, data in irecords:
214 if rtype == "F":
214 if rtype == "F":
215 f.write("%s\n" % _droponode(data))
215 f.write("%s\n" % _droponode(data))
216 f.close()
216 f.close()
217
217
218 def _writerecordsv2(self, records):
218 def _writerecordsv2(self, records):
219 """Write current state on disk in a version 2 file"""
219 """Write current state on disk in a version 2 file"""
220 f = self._repo.opener(self.statepathv2, "w")
220 f = self._repo.opener(self.statepathv2, "w")
221 for key, data in records:
221 for key, data in records:
222 assert len(key) == 1
222 assert len(key) == 1
223 format = ">sI%is" % len(data)
223 format = ">sI%is" % len(data)
224 f.write(_pack(format, key, len(data), data))
224 f.write(_pack(format, key, len(data), data))
225 f.close()
225 f.close()
226
226
227 def add(self, fcl, fco, fca, fd):
227 def add(self, fcl, fco, fca, fd):
228 """add a new (potentially?) conflicting file the merge state
228 """add a new (potentially?) conflicting file the merge state
229 fcl: file context for local,
229 fcl: file context for local,
230 fco: file context for remote,
230 fco: file context for remote,
231 fca: file context for ancestors,
231 fca: file context for ancestors,
232 fd: file path of the resulting merge.
232 fd: file path of the resulting merge.
233
233
234 note: also write the local version to the `.hg/merge` directory.
234 note: also write the local version to the `.hg/merge` directory.
235 """
235 """
236 hash = util.sha1(fcl.path()).hexdigest()
236 hash = util.sha1(fcl.path()).hexdigest()
237 self._repo.opener.write("merge/" + hash, fcl.data())
237 self._repo.opener.write("merge/" + hash, fcl.data())
238 self._state[fd] = ['u', hash, fcl.path(),
238 self._state[fd] = ['u', hash, fcl.path(),
239 fca.path(), hex(fca.filenode()),
239 fca.path(), hex(fca.filenode()),
240 fco.path(), hex(fco.filenode()),
240 fco.path(), hex(fco.filenode()),
241 fcl.flags()]
241 fcl.flags()]
242 self._dirty = True
242 self._dirty = True
243
243
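Putting add() and commit() together, the "F" record that eventually reaches disk for a file is simply the destination path joined with the eight state fields stored here, separated by NUL bytes. A self-contained illustration with placeholder nodes (editor's sketch; util.sha1 is assumed to behave like hashlib.sha1):

    import hashlib

    dfile = 'foo.c'
    entry = ['u',                              # state: unresolved
             hashlib.sha1(dfile).hexdigest(),  # names the pristine copy in .hg/merge/
             dfile,                            # local path
             dfile, '0' * 40,                  # ancestor path and ancestor file node
             dfile, '1' * 40,                  # other path and other file node
             'x']                              # flags
    record = ('F', '\0'.join([dfile] + entry)) # what commit() serializes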
244 def __contains__(self, dfile):
244 def __contains__(self, dfile):
245 return dfile in self._state
245 return dfile in self._state
246
246
247 def __getitem__(self, dfile):
247 def __getitem__(self, dfile):
248 return self._state[dfile][0]
248 return self._state[dfile][0]
249
249
250 def __iter__(self):
250 def __iter__(self):
251 return iter(sorted(self._state))
251 return iter(sorted(self._state))
252
252
253 def files(self):
253 def files(self):
254 return self._state.keys()
254 return self._state.keys()
255
255
256 def mark(self, dfile, state):
256 def mark(self, dfile, state):
257 self._state[dfile][0] = state
257 self._state[dfile][0] = state
258 self._dirty = True
258 self._dirty = True
259
259
260 def unresolved(self):
260 def unresolved(self):
261 """Obtain the paths of unresolved files."""
261 """Obtain the paths of unresolved files."""
262
262
263 for f, entry in self._state.items():
263 for f, entry in self._state.items():
264 if entry[0] == 'u':
264 if entry[0] == 'u':
265 yield f
265 yield f
266
266
267 def resolve(self, dfile, wctx):
267 def resolve(self, dfile, wctx, labels=None):
268 """rerun merge process for file path `dfile`"""
268 """rerun merge process for file path `dfile`"""
269 if self[dfile] == 'r':
269 if self[dfile] == 'r':
270 return 0
270 return 0
271 stateentry = self._state[dfile]
271 stateentry = self._state[dfile]
272 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
272 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
273 octx = self._repo[self._other]
273 octx = self._repo[self._other]
274 fcd = wctx[dfile]
274 fcd = wctx[dfile]
275 fco = octx[ofile]
275 fco = octx[ofile]
276 fca = self._repo.filectx(afile, fileid=anode)
276 fca = self._repo.filectx(afile, fileid=anode)
277 # "premerge" x flags
277 # "premerge" x flags
278 flo = fco.flags()
278 flo = fco.flags()
279 fla = fca.flags()
279 fla = fca.flags()
280 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
280 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
281 if fca.node() == nullid:
281 if fca.node() == nullid:
282 self._repo.ui.warn(_('warning: cannot merge flags for %s\n') %
282 self._repo.ui.warn(_('warning: cannot merge flags for %s\n') %
283 afile)
283 afile)
284 elif flags == fla:
284 elif flags == fla:
285 flags = flo
285 flags = flo
286 # restore local
286 # restore local
287 f = self._repo.opener("merge/" + hash)
287 f = self._repo.opener("merge/" + hash)
288 self._repo.wwrite(dfile, f.read(), flags)
288 self._repo.wwrite(dfile, f.read(), flags)
289 f.close()
289 f.close()
290 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca)
290 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca,
291 labels=labels)
291 if r is None:
292 if r is None:
292 # no real conflict
293 # no real conflict
293 del self._state[dfile]
294 del self._state[dfile]
294 self._dirty = True
295 self._dirty = True
295 elif not r:
296 elif not r:
296 self.mark(dfile, 'r')
297 self.mark(dfile, 'r')
297 return r
298 return r
298
299
299 def _checkunknownfile(repo, wctx, mctx, f):
300 def _checkunknownfile(repo, wctx, mctx, f):
300 return (not repo.dirstate._ignore(f)
301 return (not repo.dirstate._ignore(f)
301 and os.path.isfile(repo.wjoin(f))
302 and os.path.isfile(repo.wjoin(f))
302 and repo.wopener.audit.check(f)
303 and repo.wopener.audit.check(f)
303 and repo.dirstate.normalize(f) not in repo.dirstate
304 and repo.dirstate.normalize(f) not in repo.dirstate
304 and mctx[f].cmp(wctx[f]))
305 and mctx[f].cmp(wctx[f]))
305
306
306 def _checkunknown(repo, wctx, mctx):
307 def _checkunknown(repo, wctx, mctx):
307 "check for collisions between unknown files and files in mctx"
308 "check for collisions between unknown files and files in mctx"
308
309
309 error = False
310 error = False
310 for f in mctx:
311 for f in mctx:
311 if f not in wctx and _checkunknownfile(repo, wctx, mctx, f):
312 if f not in wctx and _checkunknownfile(repo, wctx, mctx, f):
312 error = True
313 error = True
313 wctx._repo.ui.warn(_("%s: untracked file differs\n") % f)
314 wctx._repo.ui.warn(_("%s: untracked file differs\n") % f)
314 if error:
315 if error:
315 raise util.Abort(_("untracked files in working directory differ "
316 raise util.Abort(_("untracked files in working directory differ "
316 "from files in requested revision"))
317 "from files in requested revision"))
317
318
318 def _forgetremoved(wctx, mctx, branchmerge):
319 def _forgetremoved(wctx, mctx, branchmerge):
319 """
320 """
320 Forget removed files
321 Forget removed files
321
322
322 If we're jumping between revisions (as opposed to merging), and if
323 If we're jumping between revisions (as opposed to merging), and if
323 neither the working directory nor the target rev has the file,
324 neither the working directory nor the target rev has the file,
324 then we need to remove it from the dirstate, to prevent the
325 then we need to remove it from the dirstate, to prevent the
325 dirstate from listing the file when it is no longer in the
326 dirstate from listing the file when it is no longer in the
326 manifest.
327 manifest.
327
328
328 If we're merging, and the other revision has removed a file
329 If we're merging, and the other revision has removed a file
329 that is not present in the working directory, we need to mark it
330 that is not present in the working directory, we need to mark it
330 as removed.
331 as removed.
331 """
332 """
332
333
333 actions = []
334 actions = []
334 state = branchmerge and 'r' or 'f'
335 state = branchmerge and 'r' or 'f'
335 for f in wctx.deleted():
336 for f in wctx.deleted():
336 if f not in mctx:
337 if f not in mctx:
337 actions.append((f, state, None, "forget deleted"))
338 actions.append((f, state, None, "forget deleted"))
338
339
339 if not branchmerge:
340 if not branchmerge:
340 for f in wctx.removed():
341 for f in wctx.removed():
341 if f not in mctx:
342 if f not in mctx:
342 actions.append((f, "f", None, "forget removed"))
343 actions.append((f, "f", None, "forget removed"))
343
344
344 return actions
345 return actions
345
346
346 def _checkcollision(repo, wmf, actions):
347 def _checkcollision(repo, wmf, actions):
347 # build provisional merged manifest up
348 # build provisional merged manifest up
348 pmmf = set(wmf)
349 pmmf = set(wmf)
349
350
350 def addop(f, args):
351 def addop(f, args):
351 pmmf.add(f)
352 pmmf.add(f)
352 def removeop(f, args):
353 def removeop(f, args):
353 pmmf.discard(f)
354 pmmf.discard(f)
354 def nop(f, args):
355 def nop(f, args):
355 pass
356 pass
356
357
357 def renamemoveop(f, args):
358 def renamemoveop(f, args):
358 f2, flags = args
359 f2, flags = args
359 pmmf.discard(f2)
360 pmmf.discard(f2)
360 pmmf.add(f)
361 pmmf.add(f)
361 def renamegetop(f, args):
362 def renamegetop(f, args):
362 f2, flags = args
363 f2, flags = args
363 pmmf.add(f)
364 pmmf.add(f)
364 def mergeop(f, args):
365 def mergeop(f, args):
365 f1, f2, fa, move, anc = args
366 f1, f2, fa, move, anc = args
366 if move:
367 if move:
367 pmmf.discard(f1)
368 pmmf.discard(f1)
368 pmmf.add(f)
369 pmmf.add(f)
369
370
370 opmap = {
371 opmap = {
371 "a": addop,
372 "a": addop,
372 "dm": renamemoveop,
373 "dm": renamemoveop,
373 "dg": renamegetop,
374 "dg": renamegetop,
374 "dr": nop,
375 "dr": nop,
375 "e": nop,
376 "e": nop,
376 "k": nop,
377 "k": nop,
377 "f": addop, # untracked file should be kept in working directory
378 "f": addop, # untracked file should be kept in working directory
378 "g": addop,
379 "g": addop,
379 "m": mergeop,
380 "m": mergeop,
380 "r": removeop,
381 "r": removeop,
381 "rd": nop,
382 "rd": nop,
382 "cd": addop,
383 "cd": addop,
383 "dc": addop,
384 "dc": addop,
384 }
385 }
385 for f, m, args, msg in actions:
386 for f, m, args, msg in actions:
386 op = opmap.get(m)
387 op = opmap.get(m)
387 assert op, m
388 assert op, m
388 op(f, args)
389 op(f, args)
389
390
390 # check case-folding collision in provisional merged manifest
391 # check case-folding collision in provisional merged manifest
391 foldmap = {}
392 foldmap = {}
392 for f in sorted(pmmf):
393 for f in sorted(pmmf):
393 fold = util.normcase(f)
394 fold = util.normcase(f)
394 if fold in foldmap:
395 if fold in foldmap:
395 raise util.Abort(_("case-folding collision between %s and %s")
396 raise util.Abort(_("case-folding collision between %s and %s")
396 % (f, foldmap[fold]))
397 % (f, foldmap[fold]))
397 foldmap[fold] = f
398 foldmap[fold] = f
398
399
399 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
400 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
400 acceptremote, followcopies):
401 acceptremote, followcopies):
401 """
402 """
402 Merge p1 and p2 with ancestor pa and generate merge action list
403 Merge p1 and p2 with ancestor pa and generate merge action list
403
404
404 branchmerge and force are as passed in to update
405 branchmerge and force are as passed in to update
405 partial = function to filter file lists
406 partial = function to filter file lists
406 acceptremote = accept the incoming changes without prompting
407 acceptremote = accept the incoming changes without prompting
407 """
408 """
408
409
409 actions, copy, movewithdir = [], {}, {}
410 actions, copy, movewithdir = [], {}, {}
410
411
411 # manifests fetched in order are going to be faster, so prime the caches
412 # manifests fetched in order are going to be faster, so prime the caches
412 [x.manifest() for x in
413 [x.manifest() for x in
413 sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]
414 sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]
414
415
415 if followcopies:
416 if followcopies:
416 ret = copies.mergecopies(repo, wctx, p2, pa)
417 ret = copies.mergecopies(repo, wctx, p2, pa)
417 copy, movewithdir, diverge, renamedelete = ret
418 copy, movewithdir, diverge, renamedelete = ret
418 for of, fl in diverge.iteritems():
419 for of, fl in diverge.iteritems():
419 actions.append((of, "dr", (fl,), "divergent renames"))
420 actions.append((of, "dr", (fl,), "divergent renames"))
420 for of, fl in renamedelete.iteritems():
421 for of, fl in renamedelete.iteritems():
421 actions.append((of, "rd", (fl,), "rename and delete"))
422 actions.append((of, "rd", (fl,), "rename and delete"))
422
423
423 repo.ui.note(_("resolving manifests\n"))
424 repo.ui.note(_("resolving manifests\n"))
424 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
425 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
425 % (bool(branchmerge), bool(force), bool(partial)))
426 % (bool(branchmerge), bool(force), bool(partial)))
426 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
427 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
427
428
428 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
429 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
429 copied = set(copy.values())
430 copied = set(copy.values())
430 copied.update(movewithdir.values())
431 copied.update(movewithdir.values())
431
432
432 if '.hgsubstate' in m1:
433 if '.hgsubstate' in m1:
433 # check whether sub state is modified
434 # check whether sub state is modified
434 for s in sorted(wctx.substate):
435 for s in sorted(wctx.substate):
435 if wctx.sub(s).dirty():
436 if wctx.sub(s).dirty():
436 m1['.hgsubstate'] += "+"
437 m1['.hgsubstate'] += "+"
437 break
438 break
438
439
439 aborts = []
440 aborts = []
440 # Compare manifests
441 # Compare manifests
441 fdiff = dicthelpers.diff(m1, m2)
442 fdiff = dicthelpers.diff(m1, m2)
442 flagsdiff = m1.flagsdiff(m2)
443 flagsdiff = m1.flagsdiff(m2)
443 diff12 = dicthelpers.join(fdiff, flagsdiff)
444 diff12 = dicthelpers.join(fdiff, flagsdiff)
444
445
445 for f, (n12, fl12) in diff12.iteritems():
446 for f, (n12, fl12) in diff12.iteritems():
446 if n12:
447 if n12:
447 n1, n2 = n12
448 n1, n2 = n12
448 else: # file contents didn't change, but flags did
449 else: # file contents didn't change, but flags did
449 n1 = n2 = m1.get(f, None)
450 n1 = n2 = m1.get(f, None)
450 if n1 is None:
451 if n1 is None:
451 # Since n1 == n2, the file isn't present in m2 either. This
452 # Since n1 == n2, the file isn't present in m2 either. This
452 # means that the file was removed or deleted locally and
453 # means that the file was removed or deleted locally and
453 # removed remotely, but that residual entries remain in flags.
454 # removed remotely, but that residual entries remain in flags.
454 # This can happen in manifests generated by workingctx.
455 # This can happen in manifests generated by workingctx.
455 continue
456 continue
456 if fl12:
457 if fl12:
457 fl1, fl2 = fl12
458 fl1, fl2 = fl12
458 else: # flags didn't change, file contents did
459 else: # flags didn't change, file contents did
459 fl1 = fl2 = m1.flags(f)
460 fl1 = fl2 = m1.flags(f)
460
461
461 if partial and not partial(f):
462 if partial and not partial(f):
462 continue
463 continue
463 if n1 and n2:
464 if n1 and n2:
464 fa = f
465 fa = f
465 a = ma.get(f, nullid)
466 a = ma.get(f, nullid)
466 if a == nullid:
467 if a == nullid:
467 fa = copy.get(f, f)
468 fa = copy.get(f, f)
468 # Note: f as default is wrong - we can't really make a 3-way
469 # Note: f as default is wrong - we can't really make a 3-way
469 # merge without an ancestor file.
470 # merge without an ancestor file.
470 fla = ma.flags(fa)
471 fla = ma.flags(fa)
471 nol = 'l' not in fl1 + fl2 + fla
472 nol = 'l' not in fl1 + fl2 + fla
472 if n2 == a and fl2 == fla:
473 if n2 == a and fl2 == fla:
473 actions.append((f, "k", (), "keep")) # remote unchanged
474 actions.append((f, "k", (), "keep")) # remote unchanged
474 elif n1 == a and fl1 == fla: # local unchanged - use remote
475 elif n1 == a and fl1 == fla: # local unchanged - use remote
475 if n1 == n2: # optimization: keep local content
476 if n1 == n2: # optimization: keep local content
476 actions.append((f, "e", (fl2,), "update permissions"))
477 actions.append((f, "e", (fl2,), "update permissions"))
477 else:
478 else:
478 actions.append((f, "g", (fl2,), "remote is newer"))
479 actions.append((f, "g", (fl2,), "remote is newer"))
479 elif nol and n2 == a: # remote only changed 'x'
480 elif nol and n2 == a: # remote only changed 'x'
480 actions.append((f, "e", (fl2,), "update permissions"))
481 actions.append((f, "e", (fl2,), "update permissions"))
481 elif nol and n1 == a: # local only changed 'x'
482 elif nol and n1 == a: # local only changed 'x'
482 actions.append((f, "g", (fl1,), "remote is newer"))
483 actions.append((f, "g", (fl1,), "remote is newer"))
483 else: # both changed something
484 else: # both changed something
484 actions.append((f, "m", (f, f, fa, False, pa.node()),
485 actions.append((f, "m", (f, f, fa, False, pa.node()),
485 "versions differ"))
486 "versions differ"))
486 elif f in copied: # files we'll deal with on m2 side
487 elif f in copied: # files we'll deal with on m2 side
487 pass
488 pass
488 elif n1 and f in movewithdir: # directory rename, move local
489 elif n1 and f in movewithdir: # directory rename, move local
489 f2 = movewithdir[f]
490 f2 = movewithdir[f]
490 actions.append((f2, "dm", (f, fl1),
491 actions.append((f2, "dm", (f, fl1),
491 "remote directory rename - move from " + f))
492 "remote directory rename - move from " + f))
492 elif n1 and f in copy:
493 elif n1 and f in copy:
493 f2 = copy[f]
494 f2 = copy[f]
494 actions.append((f, "m", (f, f2, f2, False, pa.node()),
495 actions.append((f, "m", (f, f2, f2, False, pa.node()),
495 "local copied/moved from " + f2))
496 "local copied/moved from " + f2))
496 elif n1 and f in ma: # clean, a different, no remote
497 elif n1 and f in ma: # clean, a different, no remote
497 if n1 != ma[f]:
498 if n1 != ma[f]:
498 if acceptremote:
499 if acceptremote:
499 actions.append((f, "r", None, "remote delete"))
500 actions.append((f, "r", None, "remote delete"))
500 else:
501 else:
501 actions.append((f, "cd", None, "prompt changed/deleted"))
502 actions.append((f, "cd", None, "prompt changed/deleted"))
502 elif n1[20:] == "a": # added, no remote
503 elif n1[20:] == "a": # added, no remote
503 actions.append((f, "f", None, "remote deleted"))
504 actions.append((f, "f", None, "remote deleted"))
504 else:
505 else:
505 actions.append((f, "r", None, "other deleted"))
506 actions.append((f, "r", None, "other deleted"))
506 elif n2 and f in movewithdir:
507 elif n2 and f in movewithdir:
507 f2 = movewithdir[f]
508 f2 = movewithdir[f]
508 actions.append((f2, "dg", (f, fl2),
509 actions.append((f2, "dg", (f, fl2),
509 "local directory rename - get from " + f))
510 "local directory rename - get from " + f))
510 elif n2 and f in copy:
511 elif n2 and f in copy:
511 f2 = copy[f]
512 f2 = copy[f]
512 if f2 in m2:
513 if f2 in m2:
513 actions.append((f, "m", (f2, f, f2, False, pa.node()),
514 actions.append((f, "m", (f2, f, f2, False, pa.node()),
514 "remote copied from " + f2))
515 "remote copied from " + f2))
515 else:
516 else:
516 actions.append((f, "m", (f2, f, f2, True, pa.node()),
517 actions.append((f, "m", (f2, f, f2, True, pa.node()),
517 "remote moved from " + f2))
518 "remote moved from " + f2))
518 elif n2 and f not in ma:
519 elif n2 and f not in ma:
519 # local unknown, remote created: the logic is described by the
520 # local unknown, remote created: the logic is described by the
520 # following table:
521 # following table:
521 #
522 #
522 # force branchmerge different | action
523 # force branchmerge different | action
523 # n * n | get
524 # n * n | get
524 # n * y | abort
525 # n * y | abort
525 # y n * | get
526 # y n * | get
526 # y y n | get
527 # y y n | get
527 # y y y | merge
528 # y y y | merge
528 #
529 #
529 # Checking whether the files are different is expensive, so we
530 # Checking whether the files are different is expensive, so we
530 # don't do that when we can avoid it.
531 # don't do that when we can avoid it.
531 if force and not branchmerge:
532 if force and not branchmerge:
532 actions.append((f, "g", (fl2,), "remote created"))
533 actions.append((f, "g", (fl2,), "remote created"))
533 else:
534 else:
534 different = _checkunknownfile(repo, wctx, p2, f)
535 different = _checkunknownfile(repo, wctx, p2, f)
535 if force and branchmerge and different:
536 if force and branchmerge and different:
536 # FIXME: This is wrong - f is not in ma ...
537 # FIXME: This is wrong - f is not in ma ...
537 actions.append((f, "m", (f, f, f, False, pa.node()),
538 actions.append((f, "m", (f, f, f, False, pa.node()),
538 "remote differs from untracked local"))
539 "remote differs from untracked local"))
539 elif not force and different:
540 elif not force and different:
540 aborts.append((f, "ud"))
541 aborts.append((f, "ud"))
541 else:
542 else:
542 actions.append((f, "g", (fl2,), "remote created"))
543 actions.append((f, "g", (fl2,), "remote created"))
543 elif n2 and n2 != ma[f]:
544 elif n2 and n2 != ma[f]:
544 different = _checkunknownfile(repo, wctx, p2, f)
545 different = _checkunknownfile(repo, wctx, p2, f)
545 if not force and different:
546 if not force and different:
546 aborts.append((f, "ud"))
547 aborts.append((f, "ud"))
547 else:
548 else:
548 # if different: old untracked f may be overwritten and lost
549 # if different: old untracked f may be overwritten and lost
549 if acceptremote:
550 if acceptremote:
550 actions.append((f, "g", (m2.flags(f),),
551 actions.append((f, "g", (m2.flags(f),),
551 "remote recreating"))
552 "remote recreating"))
552 else:
553 else:
553 actions.append((f, "dc", (m2.flags(f),),
554 actions.append((f, "dc", (m2.flags(f),),
554 "prompt deleted/changed"))
555 "prompt deleted/changed"))
555
556
556 for f, m in sorted(aborts):
557 for f, m in sorted(aborts):
557 if m == "ud":
558 if m == "ud":
558 repo.ui.warn(_("%s: untracked file differs\n") % f)
559 repo.ui.warn(_("%s: untracked file differs\n") % f)
559 else: assert False, m
560 else: assert False, m
560 if aborts:
561 if aborts:
561 raise util.Abort(_("untracked files in working directory differ "
562 raise util.Abort(_("untracked files in working directory differ "
562 "from files in requested revision"))
563 "from files in requested revision"))
563
564
564 if not util.checkcase(repo.path):
565 if not util.checkcase(repo.path):
565 # check collision between files only in p2 for clean update
566 # check collision between files only in p2 for clean update
566 if (not branchmerge and
567 if (not branchmerge and
567 (force or not wctx.dirty(missing=True, branch=False))):
568 (force or not wctx.dirty(missing=True, branch=False))):
568 _checkcollision(repo, m2, [])
569 _checkcollision(repo, m2, [])
569 else:
570 else:
570 _checkcollision(repo, m1, actions)
571 _checkcollision(repo, m1, actions)
571
572
572 return actions
573 return actions
573
574
574 actionpriority = dict((m, p) for p, m in enumerate(
575 actionpriority = dict((m, p) for p, m in enumerate(
575 ['r', 'f', 'g', 'a', 'k', 'm', 'dm', 'dg', 'dr', 'cd', 'dc', 'rd', 'e']))
576 ['r', 'f', 'g', 'a', 'k', 'm', 'dm', 'dg', 'dr', 'cd', 'dc', 'rd', 'e']))
576
577
577 def actionkey(a):
578 def actionkey(a):
578 return actionpriority[a[1]], a
579 return actionpriority[a[1]], a
579
580
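A small worked example of the priority table above (editor's sketch; the action tuples are invented): sorting with actionkey puts removes ahead of gets, with merges after both, which is the ordering applyupdates() relies on when it processes removes first.

    acts = [('b.txt', 'm', ('b.txt', 'b.txt', 'b.txt', False, None), 'versions differ'),
            ('a.txt', 'g', ('',), 'remote is newer'),
            ('c.txt', 'r', None, 'other deleted')]
    acts.sort(key=actionkey)
    assert [m for f, m, args, msg in acts] == ['r', 'g', 'm']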
580 def batchremove(repo, actions):
581 def batchremove(repo, actions):
581 """apply removes to the working directory
582 """apply removes to the working directory
582
583
583 yields tuples for progress updates
584 yields tuples for progress updates
584 """
585 """
585 verbose = repo.ui.verbose
586 verbose = repo.ui.verbose
586 unlink = util.unlinkpath
587 unlink = util.unlinkpath
587 wjoin = repo.wjoin
588 wjoin = repo.wjoin
588 audit = repo.wopener.audit
589 audit = repo.wopener.audit
589 i = 0
590 i = 0
590 for f, m, args, msg in actions:
591 for f, m, args, msg in actions:
591 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
592 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
592 if True:
593 if True:
593 if verbose:
594 if verbose:
594 repo.ui.note(_("removing %s\n") % f)
595 repo.ui.note(_("removing %s\n") % f)
595 audit(f)
596 audit(f)
596 try:
597 try:
597 unlink(wjoin(f), ignoremissing=True)
598 unlink(wjoin(f), ignoremissing=True)
598 except OSError, inst:
599 except OSError, inst:
599 repo.ui.warn(_("update failed to remove %s: %s!\n") %
600 repo.ui.warn(_("update failed to remove %s: %s!\n") %
600 (f, inst.strerror))
601 (f, inst.strerror))
601 if i == 100:
602 if i == 100:
602 yield i, f
603 yield i, f
603 i = 0
604 i = 0
604 i += 1
605 i += 1
605 if i > 0:
606 if i > 0:
606 yield i, f
607 yield i, f
607
608
608 def batchget(repo, mctx, actions):
609 def batchget(repo, mctx, actions):
609 """apply gets to the working directory
610 """apply gets to the working directory
610
611
611 mctx is the context to get from
612 mctx is the context to get from
612
613
613 yields tuples for progress updates
614 yields tuples for progress updates
614 """
615 """
615 verbose = repo.ui.verbose
616 verbose = repo.ui.verbose
616 fctx = mctx.filectx
617 fctx = mctx.filectx
617 wwrite = repo.wwrite
618 wwrite = repo.wwrite
618 i = 0
619 i = 0
619 for f, m, args, msg in actions:
620 for f, m, args, msg in actions:
620 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
621 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
621 if True:
622 if True:
622 if verbose:
623 if verbose:
623 repo.ui.note(_("getting %s\n") % f)
624 repo.ui.note(_("getting %s\n") % f)
624 wwrite(f, fctx(f).data(), args[0])
625 wwrite(f, fctx(f).data(), args[0])
625 if i == 100:
626 if i == 100:
626 yield i, f
627 yield i, f
627 i = 0
628 i = 0
628 i += 1
629 i += 1
629 if i > 0:
630 if i > 0:
630 yield i, f
631 yield i, f
631
632
632 def applyupdates(repo, actions, wctx, mctx, overwrite):
633 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
633 """apply the merge action list to the working directory
634 """apply the merge action list to the working directory
634
635
635 wctx is the working copy context
636 wctx is the working copy context
636 mctx is the context to be merged into the working copy
637 mctx is the context to be merged into the working copy
637
638
638 Return a tuple of counts (updated, merged, removed, unresolved) that
639 Return a tuple of counts (updated, merged, removed, unresolved) that
639 describes how many files were affected by the update.
640 describes how many files were affected by the update.
640 """
641 """
641
642
642 updated, merged, removed, unresolved = 0, 0, 0, 0
643 updated, merged, removed, unresolved = 0, 0, 0, 0
643 ms = mergestate(repo)
644 ms = mergestate(repo)
644 ms.reset(wctx.p1().node(), mctx.node())
645 ms.reset(wctx.p1().node(), mctx.node())
645 moves = []
646 moves = []
646 actions.sort(key=actionkey)
647 actions.sort(key=actionkey)
647
648
648 # prescan for merges
649 # prescan for merges
649 for a in actions:
650 for a in actions:
650 f, m, args, msg = a
651 f, m, args, msg = a
651 if m == "m": # merge
652 if m == "m": # merge
652 f1, f2, fa, move, anc = args
653 f1, f2, fa, move, anc = args
653 if f == '.hgsubstate': # merged internally
654 if f == '.hgsubstate': # merged internally
654 continue
655 continue
655 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
656 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
656 fcl = wctx[f1]
657 fcl = wctx[f1]
657 fco = mctx[f2]
658 fco = mctx[f2]
658 actx = repo[anc]
659 actx = repo[anc]
659 if fa in actx:
660 if fa in actx:
660 fca = actx[fa]
661 fca = actx[fa]
661 else:
662 else:
662 fca = repo.filectx(f1, fileid=nullrev)
663 fca = repo.filectx(f1, fileid=nullrev)
663 ms.add(fcl, fco, fca, f)
664 ms.add(fcl, fco, fca, f)
664 if f1 != f and move:
665 if f1 != f and move:
665 moves.append(f1)
666 moves.append(f1)
666
667
667 audit = repo.wopener.audit
668 audit = repo.wopener.audit
668 _updating = _('updating')
669 _updating = _('updating')
669 _files = _('files')
670 _files = _('files')
670 progress = repo.ui.progress
671 progress = repo.ui.progress
671
672
672 # remove renamed files after safely stored
673 # remove renamed files after safely stored
673 for f in moves:
674 for f in moves:
674 if os.path.lexists(repo.wjoin(f)):
675 if os.path.lexists(repo.wjoin(f)):
675 repo.ui.debug("removing %s\n" % f)
676 repo.ui.debug("removing %s\n" % f)
676 audit(f)
677 audit(f)
677 util.unlinkpath(repo.wjoin(f))
678 util.unlinkpath(repo.wjoin(f))
678
679
679 numupdates = len([a for a in actions if a[1] != 'k'])
680 numupdates = len([a for a in actions if a[1] != 'k'])
680 workeractions = [a for a in actions if a[1] in 'gr']
681 workeractions = [a for a in actions if a[1] in 'gr']
681 updateactions = [a for a in workeractions if a[1] == 'g']
682 updateactions = [a for a in workeractions if a[1] == 'g']
682 updated = len(updateactions)
683 updated = len(updateactions)
683 removeactions = [a for a in workeractions if a[1] == 'r']
684 removeactions = [a for a in workeractions if a[1] == 'r']
684 removed = len(removeactions)
685 removed = len(removeactions)
685 actions = [a for a in actions if a[1] not in 'gr']
686 actions = [a for a in actions if a[1] not in 'gr']
686
687
687 hgsub = [a[1] for a in workeractions if a[0] == '.hgsubstate']
688 hgsub = [a[1] for a in workeractions if a[0] == '.hgsubstate']
688 if hgsub and hgsub[0] == 'r':
689 if hgsub and hgsub[0] == 'r':
689 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
690 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
690
691
691 # remove in parallel (must come first)
692 # remove in parallel (must come first)
692 z = 0
693 z = 0
693 prog = worker.worker(repo.ui, 0.001, batchremove, (repo,), removeactions)
694 prog = worker.worker(repo.ui, 0.001, batchremove, (repo,), removeactions)
694 for i, item in prog:
695 for i, item in prog:
695 z += i
696 z += i
696 progress(_updating, z, item=item, total=numupdates, unit=_files)
697 progress(_updating, z, item=item, total=numupdates, unit=_files)
697
698
698 # get in parallel
699 # get in parallel
699 prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx), updateactions)
700 prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx), updateactions)
700 for i, item in prog:
701 for i, item in prog:
701 z += i
702 z += i
702 progress(_updating, z, item=item, total=numupdates, unit=_files)
703 progress(_updating, z, item=item, total=numupdates, unit=_files)
703
704
704 if hgsub and hgsub[0] == 'g':
705 if hgsub and hgsub[0] == 'g':
705 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
706 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
706
707
707 for f, m, args, msg in actions:
708 for f, m, args, msg in actions:
708
709
709 # forget (manifest only, just log it) (must come first)
710 # forget (manifest only, just log it) (must come first)
710 if m == "f":
711 if m == "f":
711 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
712 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
712 z += 1
713 z += 1
713 progress(_updating, z, item=f, total=numupdates, unit=_files)
714 progress(_updating, z, item=f, total=numupdates, unit=_files)
714
715
715 # re-add (manifest only, just log it)
716 # re-add (manifest only, just log it)
716 elif m == "a":
717 elif m == "a":
717 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
718 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
718 z += 1
719 z += 1
719 progress(_updating, z, item=f, total=numupdates, unit=_files)
720 progress(_updating, z, item=f, total=numupdates, unit=_files)
720
721
721 # keep (noop, just log it)
722 # keep (noop, just log it)
722 elif m == "k":
723 elif m == "k":
723 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
724 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
724 # no progress
725 # no progress
725
726
726 # merge
727 # merge
727 elif m == "m":
728 elif m == "m":
728 repo.ui.debug(" %s: %s -> m\n" % (f, msg))
729 repo.ui.debug(" %s: %s -> m\n" % (f, msg))
729 z += 1
730 z += 1
730 progress(_updating, z, item=f, total=numupdates, unit=_files)
731 progress(_updating, z, item=f, total=numupdates, unit=_files)
731 f1, f2, fa, move, anc = args
732 f1, f2, fa, move, anc = args
732 if f == '.hgsubstate': # subrepo states need updating
733 if f == '.hgsubstate': # subrepo states need updating
733 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
734 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
734 overwrite)
735 overwrite)
735 continue
736 continue
736 audit(f)
737 audit(f)
737 r = ms.resolve(f, wctx)
738 r = ms.resolve(f, wctx, labels=labels)
738 if r is not None and r > 0:
739 if r is not None and r > 0:
739 unresolved += 1
740 unresolved += 1
740 else:
741 else:
741 if r is None:
742 if r is None:
742 updated += 1
743 updated += 1
743 else:
744 else:
744 merged += 1
745 merged += 1
745
746
746 # directory rename, move local
747 # directory rename, move local
747 elif m == "dm":
748 elif m == "dm":
748 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
749 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
749 z += 1
750 z += 1
750 progress(_updating, z, item=f, total=numupdates, unit=_files)
751 progress(_updating, z, item=f, total=numupdates, unit=_files)
751 f0, flags = args
752 f0, flags = args
752 repo.ui.note(_("moving %s to %s\n") % (f0, f))
753 repo.ui.note(_("moving %s to %s\n") % (f0, f))
753 audit(f)
754 audit(f)
754 repo.wwrite(f, wctx.filectx(f0).data(), flags)
755 repo.wwrite(f, wctx.filectx(f0).data(), flags)
755 util.unlinkpath(repo.wjoin(f0))
756 util.unlinkpath(repo.wjoin(f0))
756 updated += 1
757 updated += 1
757
758
758 # local directory rename, get
759 # local directory rename, get
759 elif m == "dg":
760 elif m == "dg":
760 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
761 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
761 z += 1
762 z += 1
762 progress(_updating, z, item=f, total=numupdates, unit=_files)
763 progress(_updating, z, item=f, total=numupdates, unit=_files)
763 f0, flags = args
764 f0, flags = args
764 repo.ui.note(_("getting %s to %s\n") % (f0, f))
765 repo.ui.note(_("getting %s to %s\n") % (f0, f))
765 repo.wwrite(f, mctx.filectx(f0).data(), flags)
766 repo.wwrite(f, mctx.filectx(f0).data(), flags)
766 updated += 1
767 updated += 1
767
768
768 # divergent renames
769 # divergent renames
769 elif m == "dr":
770 elif m == "dr":
770 repo.ui.debug(" %s: %s -> dr\n" % (f, msg))
771 repo.ui.debug(" %s: %s -> dr\n" % (f, msg))
771 z += 1
772 z += 1
772 progress(_updating, z, item=f, total=numupdates, unit=_files)
773 progress(_updating, z, item=f, total=numupdates, unit=_files)
773 fl, = args
774 fl, = args
774 repo.ui.warn(_("note: possible conflict - %s was renamed "
775 repo.ui.warn(_("note: possible conflict - %s was renamed "
775 "multiple times to:\n") % f)
776 "multiple times to:\n") % f)
776 for nf in fl:
777 for nf in fl:
777 repo.ui.warn(" %s\n" % nf)
778 repo.ui.warn(" %s\n" % nf)
778
779
779 # rename and delete
780 # rename and delete
780 elif m == "rd":
781 elif m == "rd":
781 repo.ui.debug(" %s: %s -> rd\n" % (f, msg))
782 repo.ui.debug(" %s: %s -> rd\n" % (f, msg))
782 z += 1
783 z += 1
783 progress(_updating, z, item=f, total=numupdates, unit=_files)
784 progress(_updating, z, item=f, total=numupdates, unit=_files)
784 fl, = args
785 fl, = args
785 repo.ui.warn(_("note: possible conflict - %s was deleted "
786 repo.ui.warn(_("note: possible conflict - %s was deleted "
786 "and renamed to:\n") % f)
787 "and renamed to:\n") % f)
787 for nf in fl:
788 for nf in fl:
788 repo.ui.warn(" %s\n" % nf)
789 repo.ui.warn(" %s\n" % nf)
789
790
790 # exec
791 # exec
791 elif m == "e":
792 elif m == "e":
792 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
793 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
793 z += 1
794 z += 1
794 progress(_updating, z, item=f, total=numupdates, unit=_files)
795 progress(_updating, z, item=f, total=numupdates, unit=_files)
795 flags, = args
796 flags, = args
796 audit(f)
797 audit(f)
797 util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
798 util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
798 updated += 1
799 updated += 1
799
800
800 ms.commit()
801 ms.commit()
801 progress(_updating, None, total=numupdates, unit=_files)
802 progress(_updating, None, total=numupdates, unit=_files)
802
803
803 return updated, merged, removed, unresolved
804 return updated, merged, removed, unresolved
804
805
805 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force, partial,
806 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force, partial,
806 acceptremote, followcopies):
807 acceptremote, followcopies):
807 "Calculate the actions needed to merge mctx into wctx using ancestors"
808 "Calculate the actions needed to merge mctx into wctx using ancestors"
808
809
809 if len(ancestors) == 1: # default
810 if len(ancestors) == 1: # default
810 actions = manifestmerge(repo, wctx, mctx, ancestors[0],
811 actions = manifestmerge(repo, wctx, mctx, ancestors[0],
811 branchmerge, force,
812 branchmerge, force,
812 partial, acceptremote, followcopies)
813 partial, acceptremote, followcopies)
813
814
814 else: # only when merge.preferancestor=* - experimentalish code
815 else: # only when merge.preferancestor=* - experimentalish code
815 repo.ui.status(
816 repo.ui.status(
816 _("note: merging %s and %s using bids from ancestors %s\n") %
817 _("note: merging %s and %s using bids from ancestors %s\n") %
817 (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))
818 (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))
818
819
819 # Call for bids
820 # Call for bids
820 fbids = {} # mapping filename to list of action bids
821 fbids = {} # mapping filename to list of action bids
821 for ancestor in ancestors:
822 for ancestor in ancestors:
822 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
823 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
823 actions = manifestmerge(repo, wctx, mctx, ancestor,
824 actions = manifestmerge(repo, wctx, mctx, ancestor,
824 branchmerge, force,
825 branchmerge, force,
825 partial, acceptremote, followcopies)
826 partial, acceptremote, followcopies)
826 for a in sorted(actions, key=lambda a: (a[1], a)):
827 for a in sorted(actions, key=lambda a: (a[1], a)):
827 f, m, args, msg = a
828 f, m, args, msg = a
828 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
829 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
829 if f in fbids:
830 if f in fbids:
830 fbids[f].append(a)
831 fbids[f].append(a)
831 else:
832 else:
832 fbids[f] = [a]
833 fbids[f] = [a]
833
834
834 # Pick the best bid for each file
835 # Pick the best bid for each file
835 repo.ui.note(_('\nauction for merging merge bids\n'))
836 repo.ui.note(_('\nauction for merging merge bids\n'))
836 actions = []
837 actions = []
837 for f, bidsl in sorted(fbids.items()):
838 for f, bidsl in sorted(fbids.items()):
838 # Consensus?
839 # Consensus?
839 a0 = bidsl[0]
840 a0 = bidsl[0]
840 if util.all(a == a0 for a in bidsl[1:]): # len(bidsl) is > 1
841 if util.all(a == a0 for a in bidsl[1:]): # len(bidsl) is > 1
841 repo.ui.note(" %s: consensus for %s\n" % (f, a0[1]))
842 repo.ui.note(" %s: consensus for %s\n" % (f, a0[1]))
842 actions.append(a0)
843 actions.append(a0)
843 continue
844 continue
844 # Group bids by kind of action
845 # Group bids by kind of action
845 bids = {}
846 bids = {}
846 for a in bidsl:
847 for a in bidsl:
847 m = a[1]
848 m = a[1]
848 if m in bids:
849 if m in bids:
849 bids[m].append(a)
850 bids[m].append(a)
850 else:
851 else:
851 bids[m] = [a]
852 bids[m] = [a]
852 # If keep is an option, just do it.
853 # If keep is an option, just do it.
853 if "k" in bids:
854 if "k" in bids:
854 repo.ui.note(" %s: picking 'keep' action\n" % f)
855 repo.ui.note(" %s: picking 'keep' action\n" % f)
855 actions.append(bids["k"][0])
856 actions.append(bids["k"][0])
856 continue
857 continue
857 # If all gets agree [how could they not?], just do it.
858 # If all gets agree [how could they not?], just do it.
858 if "g" in bids:
859 if "g" in bids:
859 ga0 = bids["g"][0]
860 ga0 = bids["g"][0]
860 if util.all(a == ga0 for a in bids["g"][1:]):
861 if util.all(a == ga0 for a in bids["g"][1:]):
861 repo.ui.note(" %s: picking 'get' action\n" % f)
862 repo.ui.note(" %s: picking 'get' action\n" % f)
862 actions.append(ga0)
863 actions.append(ga0)
863 continue
864 continue
864 # TODO: Consider other simple actions such as mode changes
865 # TODO: Consider other simple actions such as mode changes
865 # Handle inefficient democrazy.
866 # Handle inefficient democrazy.
866 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
867 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
867 for _f, m, args, msg in bidsl:
868 for _f, m, args, msg in bidsl:
868 repo.ui.note(' %s -> %s\n' % (msg, m))
869 repo.ui.note(' %s -> %s\n' % (msg, m))
869 # Pick random action. TODO: Instead, prompt user when resolving
870 # Pick random action. TODO: Instead, prompt user when resolving
870 a0 = bidsl[0]
871 a0 = bidsl[0]
871 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
872 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
872 (f, a0[1]))
873 (f, a0[1]))
873 actions.append(a0)
874 actions.append(a0)
874 continue
875 continue
875 repo.ui.note(_('end of auction\n\n'))
876 repo.ui.note(_('end of auction\n\n'))
876
877
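The bid-merge branch above only runs when more than one ancestor is passed in, which the comment ties to the experimental merge.preferancestor setting. If that reading is right, a user opts in through normal configuration, for example:

    [merge]
    preferancestor = *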
877 # Filter out prompts.
878 # Filter out prompts.
878 newactions, prompts = [], []
879 newactions, prompts = [], []
879 for a in actions:
880 for a in actions:
880 if a[1] in ("cd", "dc"):
881 if a[1] in ("cd", "dc"):
881 prompts.append(a)
882 prompts.append(a)
882 else:
883 else:
883 newactions.append(a)
884 newactions.append(a)
884 # Prompt and create actions. TODO: Move this towards resolve phase.
885 # Prompt and create actions. TODO: Move this towards resolve phase.
885 for f, m, args, msg in sorted(prompts):
886 for f, m, args, msg in sorted(prompts):
886 if m == "cd":
887 if m == "cd":
887 if repo.ui.promptchoice(
888 if repo.ui.promptchoice(
888 _("local changed %s which remote deleted\n"
889 _("local changed %s which remote deleted\n"
889 "use (c)hanged version or (d)elete?"
890 "use (c)hanged version or (d)elete?"
890 "$$ &Changed $$ &Delete") % f, 0):
891 "$$ &Changed $$ &Delete") % f, 0):
891 newactions.append((f, "r", None, "prompt delete"))
892 newactions.append((f, "r", None, "prompt delete"))
892 else:
893 else:
893 newactions.append((f, "a", None, "prompt keep"))
894 newactions.append((f, "a", None, "prompt keep"))
894 elif m == "dc":
895 elif m == "dc":
895 flags, = args
896 flags, = args
896 if repo.ui.promptchoice(
897 if repo.ui.promptchoice(
897 _("remote changed %s which local deleted\n"
898 _("remote changed %s which local deleted\n"
898 "use (c)hanged version or leave (d)eleted?"
899 "use (c)hanged version or leave (d)eleted?"
899 "$$ &Changed $$ &Deleted") % f, 0) == 0:
900 "$$ &Changed $$ &Deleted") % f, 0) == 0:
900 newactions.append((f, "g", (flags,), "prompt recreating"))
901 newactions.append((f, "g", (flags,), "prompt recreating"))
901 else: assert False, m
902 else: assert False, m
902
903
903 if wctx.rev() is None:
904 if wctx.rev() is None:
904 newactions += _forgetremoved(wctx, mctx, branchmerge)
905 newactions += _forgetremoved(wctx, mctx, branchmerge)
905
906
906 return newactions
907 return newactions
907
908
908 def recordupdates(repo, actions, branchmerge):
909 def recordupdates(repo, actions, branchmerge):
909 "record merge actions to the dirstate"
910 "record merge actions to the dirstate"
910
911
911 for f, m, args, msg in actions:
912 for f, m, args, msg in actions:
912
913
913 # remove (must come first)
914 # remove (must come first)
914 if m == "r": # remove
915 if m == "r": # remove
915 if branchmerge:
916 if branchmerge:
916 repo.dirstate.remove(f)
917 repo.dirstate.remove(f)
917 else:
918 else:
918 repo.dirstate.drop(f)
919 repo.dirstate.drop(f)
919
920
920 # forget (must come first)
921 # forget (must come first)
921 elif m == "f":
922 elif m == "f":
922 repo.dirstate.drop(f)
923 repo.dirstate.drop(f)
923
924
924 # re-add
925 # re-add
925 elif m == "a":
926 elif m == "a":
926 if not branchmerge:
927 if not branchmerge:
927 repo.dirstate.add(f)
928 repo.dirstate.add(f)
928
929
929 # exec change
930 # exec change
930 elif m == "e":
931 elif m == "e":
931 repo.dirstate.normallookup(f)
932 repo.dirstate.normallookup(f)
932
933
933 # keep
934 # keep
934 elif m == "k":
935 elif m == "k":
935 pass
936 pass
936
937
937 # get
938 # get
938 elif m == "g":
939 elif m == "g":
939 if branchmerge:
940 if branchmerge:
940 repo.dirstate.otherparent(f)
941 repo.dirstate.otherparent(f)
941 else:
942 else:
942 repo.dirstate.normal(f)
943 repo.dirstate.normal(f)
943
944
944 # merge
945 # merge
945 elif m == "m":
946 elif m == "m":
946 f1, f2, fa, move, anc = args
947 f1, f2, fa, move, anc = args
947 if branchmerge:
948 if branchmerge:
948 # We've done a branch merge, mark this file as merged
949 # We've done a branch merge, mark this file as merged
949 # so that we properly record the merger later
950 # so that we properly record the merger later
950 repo.dirstate.merge(f)
951 repo.dirstate.merge(f)
951 if f1 != f2: # copy/rename
952 if f1 != f2: # copy/rename
952 if move:
953 if move:
953 repo.dirstate.remove(f1)
954 repo.dirstate.remove(f1)
954 if f1 != f:
955 if f1 != f:
955 repo.dirstate.copy(f1, f)
956 repo.dirstate.copy(f1, f)
956 else:
957 else:
957 repo.dirstate.copy(f2, f)
958 repo.dirstate.copy(f2, f)
958 else:
959 else:
959 # We've update-merged a locally modified file, so
960 # We've update-merged a locally modified file, so
960 # we set the dirstate to emulate a normal checkout
961 # we set the dirstate to emulate a normal checkout
961 # of that file some time in the past. Thus our
962 # of that file some time in the past. Thus our
962 # merge will appear as a normal local file
963 # merge will appear as a normal local file
963 # modification.
964 # modification.
964 if f2 == f: # file not locally copied/moved
965 if f2 == f: # file not locally copied/moved
965 repo.dirstate.normallookup(f)
966 repo.dirstate.normallookup(f)
966 if move:
967 if move:
967 repo.dirstate.drop(f1)
968 repo.dirstate.drop(f1)
968
969
969 # directory rename, move local
970 # directory rename, move local
970 elif m == "dm":
971 elif m == "dm":
971 f0, flag = args
972 f0, flag = args
972 if f0 not in repo.dirstate:
973 if f0 not in repo.dirstate:
973 # untracked file moved
974 # untracked file moved
974 continue
975 continue
975 if branchmerge:
976 if branchmerge:
976 repo.dirstate.add(f)
977 repo.dirstate.add(f)
977 repo.dirstate.remove(f0)
978 repo.dirstate.remove(f0)
978 repo.dirstate.copy(f0, f)
979 repo.dirstate.copy(f0, f)
979 else:
980 else:
980 repo.dirstate.normal(f)
981 repo.dirstate.normal(f)
981 repo.dirstate.drop(f0)
982 repo.dirstate.drop(f0)
982
983
983 # directory rename, get
984 # directory rename, get
984 elif m == "dg":
985 elif m == "dg":
985 f0, flag = args
986 f0, flag = args
986 if branchmerge:
987 if branchmerge:
987 repo.dirstate.add(f)
988 repo.dirstate.add(f)
988 repo.dirstate.copy(f0, f)
989 repo.dirstate.copy(f0, f)
989 else:
990 else:
990 repo.dirstate.normal(f)
991 repo.dirstate.normal(f)
991
992
992 def update(repo, node, branchmerge, force, partial, ancestor=None,
993 def update(repo, node, branchmerge, force, partial, ancestor=None,
993 mergeancestor=False):
994 mergeancestor=False, labels=None):
994 """
995 """
995 Perform a merge between the working directory and the given node
996 Perform a merge between the working directory and the given node
996
997
997 node = the node to update to, or None if unspecified
998 node = the node to update to, or None if unspecified
998 branchmerge = whether to merge between branches
999 branchmerge = whether to merge between branches
999 force = whether to force branch merging or file overwriting
1000 force = whether to force branch merging or file overwriting
1000 partial = a function to filter file lists (dirstate not updated)
1001 partial = a function to filter file lists (dirstate not updated)
1001 mergeancestor = whether it is merging with an ancestor. If true,
1002 mergeancestor = whether it is merging with an ancestor. If true,
1002 we should accept the incoming changes for any prompts that occur.
1003 we should accept the incoming changes for any prompts that occur.
1003 If false, merging with an ancestor (fast-forward) is only allowed
1004 If false, merging with an ancestor (fast-forward) is only allowed
1004 between different named branches. This flag is used by the rebase extension
1005 between different named branches. This flag is used by the rebase extension
1005 as a temporary fix and should be avoided in general.
1006 as a temporary fix and should be avoided in general.
1006
1007
1007 The table below shows all the behaviors of the update command
1008 The table below shows all the behaviors of the update command
1008 given the -c and -C or no options, whether the working directory
1009 given the -c and -C or no options, whether the working directory
1009 is dirty, whether a revision is specified, and the relationship of
1010 is dirty, whether a revision is specified, and the relationship of
1010 the parent rev to the target rev (linear, on the same named
1011 the parent rev to the target rev (linear, on the same named
1011 branch, or on another named branch).
1012 branch, or on another named branch).
1012
1013
1013 This logic is tested by test-update-branches.t.
1014 This logic is tested by test-update-branches.t.
1014
1015
1015 -c -C dirty rev | linear same cross
1016 -c -C dirty rev | linear same cross
1016 n n n n | ok (1) x
1017 n n n n | ok (1) x
1017 n n n y | ok ok ok
1018 n n n y | ok ok ok
1018 n n y n | merge (2) (2)
1019 n n y n | merge (2) (2)
1019 n n y y | merge (3) (3)
1020 n n y y | merge (3) (3)
1020 n y * * | --- discard ---
1021 n y * * | --- discard ---
1021 y n y * | --- (4) ---
1022 y n y * | --- (4) ---
1022 y n n * | --- ok ---
1023 y n n * | --- ok ---
1023 y y * * | --- (5) ---
1024 y y * * | --- (5) ---
1024
1025
1025 x = can't happen
1026 x = can't happen
1026 * = don't-care
1027 * = don't-care
1027 1 = abort: not a linear update (merge or update --check to force update)
1028 1 = abort: not a linear update (merge or update --check to force update)
1028 2 = abort: uncommitted changes (commit and merge, or update --clean to
1029 2 = abort: uncommitted changes (commit and merge, or update --clean to
1029 discard changes)
1030 discard changes)
1030 3 = abort: uncommitted changes (commit or update --clean to discard changes)
1031 3 = abort: uncommitted changes (commit or update --clean to discard changes)
1031 4 = abort: uncommitted changes (checked in commands.py)
1032 4 = abort: uncommitted changes (checked in commands.py)
1032 5 = incompatible options (checked in commands.py)
1033 5 = incompatible options (checked in commands.py)
1033
1034
1034 Return the same tuple as applyupdates().
1035 Return the same tuple as applyupdates().
1035 """
1036 """
1036
1037
1037 onode = node
1038 onode = node
1038 wlock = repo.wlock()
1039 wlock = repo.wlock()
1039 try:
1040 try:
1040 wc = repo[None]
1041 wc = repo[None]
1041 pl = wc.parents()
1042 pl = wc.parents()
1042 p1 = pl[0]
1043 p1 = pl[0]
1043 pas = [None]
1044 pas = [None]
1044 if ancestor:
1045 if ancestor:
1045 pas = [repo[ancestor]]
1046 pas = [repo[ancestor]]
1046
1047
1047 if node is None:
1048 if node is None:
1048 # Here is where we should consider bookmarks, divergent bookmarks,
1049 # Here is where we should consider bookmarks, divergent bookmarks,
1049 # foreground changesets (successors), and tip of current branch;
1050 # foreground changesets (successors), and tip of current branch;
1050 # but currently we are only checking the branch tips.
1051 # but currently we are only checking the branch tips.
1051 try:
1052 try:
1052 node = repo.branchtip(wc.branch())
1053 node = repo.branchtip(wc.branch())
1053 except error.RepoLookupError:
1054 except error.RepoLookupError:
1054 if wc.branch() == "default": # no default branch!
1055 if wc.branch() == "default": # no default branch!
1055 node = repo.lookup("tip") # update to tip
1056 node = repo.lookup("tip") # update to tip
1056 else:
1057 else:
1057 raise util.Abort(_("branch %s not found") % wc.branch())
1058 raise util.Abort(_("branch %s not found") % wc.branch())
1058
1059
1059 if p1.obsolete() and not p1.children():
1060 if p1.obsolete() and not p1.children():
1060 # allow updating to successors
1061 # allow updating to successors
1061 successors = obsolete.successorssets(repo, p1.node())
1062 successors = obsolete.successorssets(repo, p1.node())
1062
1063
1063 # behavior of certain cases is as follows,
1064 # behavior of certain cases is as follows,
1064 #
1065 #
1065 # divergent changesets: update to highest rev, similar to what
1066 # divergent changesets: update to highest rev, similar to what
1066 # is currently done when there are more than one head
1067 # is currently done when there are more than one head
1067 # (i.e. 'tip')
1068 # (i.e. 'tip')
1068 #
1069 #
1069 # replaced changesets: same as divergent except we know there
1070 # replaced changesets: same as divergent except we know there
1070 # is no conflict
1071 # is no conflict
1071 #
1072 #
1072 # pruned changeset: no update is done; though, we could
1073 # pruned changeset: no update is done; though, we could
1073 # consider updating to the first non-obsolete parent,
1074 # consider updating to the first non-obsolete parent,
1074 # similar to what is currently done for 'hg prune'
1075 # similar to what is currently done for 'hg prune'
1075
1076
1076 if successors:
1077 if successors:
1077 # flattening the list here handles both divergent (len > 1)
1078 # flattening the list here handles both divergent (len > 1)
1078 # and the usual case (len = 1)
1079 # and the usual case (len = 1)
1079 successors = [n for sub in successors for n in sub]
1080 successors = [n for sub in successors for n in sub]
1080
1081
1081 # get the max revision for the given successors set,
1082 # get the max revision for the given successors set,
1082 # i.e. the 'tip' of a set
1083 # i.e. the 'tip' of a set
1083 node = repo.revs("max(%ln)", successors)[0]
1084 node = repo.revs("max(%ln)", successors)[0]
1084 pas = [p1]
1085 pas = [p1]
1085
1086
1086 overwrite = force and not branchmerge
1087 overwrite = force and not branchmerge
1087
1088
1088 p2 = repo[node]
1089 p2 = repo[node]
1089 if pas[0] is None:
1090 if pas[0] is None:
1090 if repo.ui.config("merge", "preferancestor") == '*':
1091 if repo.ui.config("merge", "preferancestor") == '*':
1091 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1092 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1092 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1093 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1093 else:
1094 else:
1094 pas = [p1.ancestor(p2, warn=True)]
1095 pas = [p1.ancestor(p2, warn=True)]
1095
1096
1096 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1097 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1097
1098
1098 ### check phase
1099 ### check phase
1099 if not overwrite and len(pl) > 1:
1100 if not overwrite and len(pl) > 1:
1100 raise util.Abort(_("outstanding uncommitted merges"))
1101 raise util.Abort(_("outstanding uncommitted merges"))
1101 if branchmerge:
1102 if branchmerge:
1102 if pas == [p2]:
1103 if pas == [p2]:
1103 raise util.Abort(_("merging with a working directory ancestor"
1104 raise util.Abort(_("merging with a working directory ancestor"
1104 " has no effect"))
1105 " has no effect"))
1105 elif pas == [p1]:
1106 elif pas == [p1]:
1106 if not mergeancestor and p1.branch() == p2.branch():
1107 if not mergeancestor and p1.branch() == p2.branch():
1107 raise util.Abort(_("nothing to merge"),
1108 raise util.Abort(_("nothing to merge"),
1108 hint=_("use 'hg update' "
1109 hint=_("use 'hg update' "
1109 "or check 'hg heads'"))
1110 "or check 'hg heads'"))
1110 if not force and (wc.files() or wc.deleted()):
1111 if not force and (wc.files() or wc.deleted()):
1111 raise util.Abort(_("uncommitted changes"),
1112 raise util.Abort(_("uncommitted changes"),
1112 hint=_("use 'hg status' to list changes"))
1113 hint=_("use 'hg status' to list changes"))
1113 for s in sorted(wc.substate):
1114 for s in sorted(wc.substate):
1114 if wc.sub(s).dirty():
1115 if wc.sub(s).dirty():
1115 raise util.Abort(_("uncommitted changes in "
1116 raise util.Abort(_("uncommitted changes in "
1116 "subrepository '%s'") % s)
1117 "subrepository '%s'") % s)
1117
1118
1118 elif not overwrite:
1119 elif not overwrite:
1119 if p1 == p2: # no-op update
1120 if p1 == p2: # no-op update
1120 # call the hooks and exit early
1121 # call the hooks and exit early
1121 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1122 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1122 repo.hook('update', parent1=xp2, parent2='', error=0)
1123 repo.hook('update', parent1=xp2, parent2='', error=0)
1123 return 0, 0, 0, 0
1124 return 0, 0, 0, 0
1124
1125
1125 if pas not in ([p1], [p2]): # nonlinear
1126 if pas not in ([p1], [p2]): # nonlinear
1126 dirty = wc.dirty(missing=True)
1127 dirty = wc.dirty(missing=True)
1127 if dirty or onode is None:
1128 if dirty or onode is None:
1128 # Branching is a bit strange to ensure we do the minimal
1129 # Branching is a bit strange to ensure we do the minimal
1129 # number of calls to obsolete.foreground.
1130 # number of calls to obsolete.foreground.
1130 foreground = obsolete.foreground(repo, [p1.node()])
1131 foreground = obsolete.foreground(repo, [p1.node()])
1131 # note: the <node> variable contains a random identifier
1132 # note: the <node> variable contains a random identifier
1132 if repo[node].node() in foreground:
1133 if repo[node].node() in foreground:
1133 pas = [p1] # allow updating to successors
1134 pas = [p1] # allow updating to successors
1134 elif dirty:
1135 elif dirty:
1135 msg = _("uncommitted changes")
1136 msg = _("uncommitted changes")
1136 if onode is None:
1137 if onode is None:
1137 hint = _("commit and merge, or update --clean to"
1138 hint = _("commit and merge, or update --clean to"
1138 " discard changes")
1139 " discard changes")
1139 else:
1140 else:
1140 hint = _("commit or update --clean to discard"
1141 hint = _("commit or update --clean to discard"
1141 " changes")
1142 " changes")
1142 raise util.Abort(msg, hint=hint)
1143 raise util.Abort(msg, hint=hint)
1143 else: # node is none
1144 else: # node is none
1144 msg = _("not a linear update")
1145 msg = _("not a linear update")
1145 hint = _("merge or update --check to force update")
1146 hint = _("merge or update --check to force update")
1146 raise util.Abort(msg, hint=hint)
1147 raise util.Abort(msg, hint=hint)
1147 else:
1148 else:
1148 # Allow jumping branches if clean and specific rev given
1149 # Allow jumping branches if clean and specific rev given
1149 pas = [p1]
1150 pas = [p1]
1150
1151
1151 followcopies = False
1152 followcopies = False
1152 if overwrite:
1153 if overwrite:
1153 pas = [wc]
1154 pas = [wc]
1154 elif pas == [p2]: # backwards
1155 elif pas == [p2]: # backwards
1155 pas = [wc.p1()]
1156 pas = [wc.p1()]
1156 elif not branchmerge and not wc.dirty(missing=True):
1157 elif not branchmerge and not wc.dirty(missing=True):
1157 pass
1158 pass
1158 elif pas[0] and repo.ui.configbool("merge", "followcopies", True):
1159 elif pas[0] and repo.ui.configbool("merge", "followcopies", True):
1159 followcopies = True
1160 followcopies = True
1160
1161
1161 ### calculate phase
1162 ### calculate phase
1162 actions = calculateupdates(repo, wc, p2, pas, branchmerge, force,
1163 actions = calculateupdates(repo, wc, p2, pas, branchmerge, force,
1163 partial, mergeancestor, followcopies)
1164 partial, mergeancestor, followcopies)
1164
1165
1165 ### apply phase
1166 ### apply phase
1166 if not branchmerge: # just jump to the new rev
1167 if not branchmerge: # just jump to the new rev
1167 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1168 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1168 if not partial:
1169 if not partial:
1169 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1170 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1170 # note that we're in the middle of an update
1171 # note that we're in the middle of an update
1171 repo.vfs.write('updatestate', p2.hex())
1172 repo.vfs.write('updatestate', p2.hex())
1172
1173
1173 stats = applyupdates(repo, actions, wc, p2, overwrite)
1174 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1174
1175
1175 if not partial:
1176 if not partial:
1176 repo.setparents(fp1, fp2)
1177 repo.setparents(fp1, fp2)
1177 recordupdates(repo, actions, branchmerge)
1178 recordupdates(repo, actions, branchmerge)
1178 # update completed, clear state
1179 # update completed, clear state
1179 util.unlink(repo.join('updatestate'))
1180 util.unlink(repo.join('updatestate'))
1180
1181
1181 if not branchmerge:
1182 if not branchmerge:
1182 repo.dirstate.setbranch(p2.branch())
1183 repo.dirstate.setbranch(p2.branch())
1183 finally:
1184 finally:
1184 wlock.release()
1185 wlock.release()
1185
1186
1186 if not partial:
1187 if not partial:
1187 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
1188 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
1188 return stats
1189 return stats
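The visible change in this hunk is the new labels keyword: update() now accepts labels=None and passes it straight through to applyupdates() via labels=labels. Below is a minimal caller-side sketch, assuming a repo object and a target node are already in hand; update_to_merge_rev is a hypothetical helper and the two label strings are only an example of how the local and other sides might be annotated.

from mercurial import merge

def update_to_merge_rev(repo, node):
    # Hypothetical wrapper: merge the working directory with `node`,
    # labelling the two sides of any resulting conflict markers.
    # The arguments follow the update() signature shown in this diff.
    return merge.update(repo, node,
                        branchmerge=True,   # merging, not a plain update
                        force=False,
                        partial=None,       # no file filtering
                        labels=['working copy', 'merge rev'])

The expectation, not shown in this hunk, is that the labels travel onward to the file-merge step so that conflict markers can name each side, e.g. "<<<<<<< working copy" rather than a bare "<<<<<<< local".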