##// END OF EJS Templates
filemerge: return whether the file was deleted...
Siddharth Agarwal -
r27034:86ede9ed default
parent child Browse files
Show More
@@ -1,1430 +1,1430
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10
10
11 import os
11 import os
12 import copy
12 import copy
13
13
14 from mercurial import hg, util, cmdutil, scmutil, match as match_, \
14 from mercurial import hg, util, cmdutil, scmutil, match as match_, \
15 archival, pathutil, revset, error
15 archival, pathutil, revset, error
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17
17
18 import lfutil
18 import lfutil
19 import lfcommands
19 import lfcommands
20 import basestore
20 import basestore
21
21
22 # -- Utility functions: commonly/repeatedly needed functionality ---------------
22 # -- Utility functions: commonly/repeatedly needed functionality ---------------
23
23
def composelargefilematcher(match, manifest):
    '''create a matcher that matches only the largefiles in the original
    matcher'''
    m = copy.copy(match)
    islargefile = lambda f: lfutil.standin(f) in manifest
    # Narrow the explicit file list to largefiles only, and keep the
    # derived root set in sync with it.
    m._files = [f for f in m._files if islargefile(f)]
    m._fileroots = set(m._files)
    # The narrowed matcher can no longer be an "always" matcher.
    m._always = False
    origmatchfn = m.matchfn
    m.matchfn = lambda f: islargefile(f) and origmatchfn(f)
    return m
35
35
def composenormalfilematcher(match, manifest, exclude=None):
    '''create a matcher that rejects standins and largefiles from the
    original matcher; ``exclude`` optionally names extra files to reject'''
    excluded = set()
    if exclude is not None:
        excluded.update(exclude)

    m = copy.copy(match)

    def notlfile(f):
        # Reject standin paths, files tracked as largefiles, and anything
        # explicitly excluded by the caller.
        if lfutil.isstandin(f):
            return False
        if lfutil.standin(f) in manifest:
            return False
        return f not in excluded

    m._files = [f for f in m._files if notlfile(f)]
    m._fileroots = set(m._files)
    m._always = False
    origmatchfn = m.matchfn
    m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
    return m
50
50
def installnormalfilesmatchfn(manifest):
    '''installmatchfn with a matchfn that ignores all largefiles'''
    # NOTE: overridematch's signature must mirror scmutil.match, since it
    # is installed in its place and called by core code.
    def overridematch(ctx, pats=(), opts=None, globbed=False,
                      default='relpath', badfn=None):
        if opts is None:
            opts = {}
        # Build the regular matcher first, then strip largefiles from it.
        plain = oldmatch(ctx, pats, opts, globbed, default, badfn=badfn)
        return composenormalfilematcher(plain, manifest)
    oldmatch = installmatchfn(overridematch)
60
60
def installmatchfn(f):
    '''monkey patch the scmutil module with a custom match function.
    Warning: it is monkey patching the _module_ on runtime! Not thread safe!

    The replaced function is stashed on ``f.oldmatch`` so that
    restorematchfn() can undo the patch later.'''
    previous = scmutil.match
    f.oldmatch = previous
    scmutil.match = f
    return previous
68
68
def restorematchfn():
    '''restores scmutil.match to what it was before installmatchfn
    was called. no-op if scmutil.match is its original function.

    Note that n calls to installmatchfn will require n calls to
    restore the original matchfn.'''
    # Supply the current function as the getattr default: without it,
    # calling this when no override is installed raises AttributeError
    # instead of being the documented no-op.  This mirrors
    # restorematchandpatsfn(), which already passes a default.
    scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
76
76
def installmatchandpatsfn(f):
    # Same monkey-patching dance as installmatchfn(), but for
    # scmutil.matchandpats: stash the replaced function on the override so
    # restorematchandpatsfn() can find it later.
    previous = scmutil.matchandpats
    f.oldmatchandpats = previous
    scmutil.matchandpats = f
    return previous
82
82
def restorematchandpatsfn():
    '''restores scmutil.matchandpats to what it was before
    installmatchandpatsfn was called. No-op if scmutil.matchandpats
    is its original function.

    Note that n calls to installmatchandpatsfn will require n calls
    to restore the original matchfn.'''
    current = scmutil.matchandpats
    # Fall back to the current function when no override is installed.
    scmutil.matchandpats = getattr(current, 'oldmatchandpats', current)
92
92
def addlargefiles(ui, repo, isaddremove, matcher, **opts):
    """Add working-directory files as largefiles.

    Walks the files selected by ``matcher`` and decides which should become
    largefiles (--large given, size above the configured minimum, or name
    matching the configured largefiles patterns), then creates and adds the
    standin files for them.

    Returns a ``(added, bad)`` pair of largefile name lists.
    """
    large = opts.get('large')
    # Minimum size (in MiB) above which a file is auto-added as a largefile.
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.get('lfsize'))

    # Optional pattern matcher from the [largefiles] patterns config.
    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
        if lfpats:
            lfmatcher = match_.match(repo.root, '', list(lfpats))

    lfnames = []
    m = matcher

    wctx = repo[None]
    # badmatch with a no-op callback: suppress "no such file" warnings here;
    # the normal add path will report them.
    for f in repo.walk(match_.badmatch(m, lambda x, y: None)):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx    # already tracked as largefile
        nfile = f in wctx                    # already tracked as normal file
        exists = lfile or nfile

        # addremove in core gets fancy with the name, add doesn't
        if isaddremove:
            name = m.uipath(f)
        else:
            name = m.rel(f)

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_('%s already a largefile\n') % name)
            continue

        if (exact or not exists) and not lfutil.isstandin(f):
            # In case the file was removed previously, but not committed
            # (issue3507)
            if not repo.wvfs.exists(f):
                continue

            abovemin = (lfsize and
                        repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_('adding %s as a largefile\n') % name)

    bad = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    wlock = repo.wlock()
    try:
        if not opts.get('dry_run'):
            standins = []
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                # Empty hash: the real content hash is filled in at commit.
                lfutil.writestandin(repo, standinname, hash='',
                    executable=lfutil.getexecutable(repo.wjoin(f)))
                standins.append(standinname)
                # 'r' means marked for removal: re-adding an uncommitted
                # removal must restore it rather than double-add.
                if lfdirstate[f] == 'r':
                    lfdirstate.normallookup(f)
                else:
                    lfdirstate.add(f)
            lfdirstate.write()
            # Files whose standin failed to be added, reported back under
            # their largefile names (only those the user named explicitly).
            bad += [lfutil.splitstandin(f)
                    for f in repo[None].add(standins)
                    if f in m.files()]

        added = [f for f in lfnames if f not in bad]
    finally:
        wlock.release()
    return added, bad
167
167
def removelargefiles(ui, repo, isaddremove, matcher, **opts):
    """Remove largefiles selected by ``matcher`` from the repository.

    With ``after`` (remove --after) only already-deleted files are dropped;
    otherwise clean files are unlinked too.  Warns (and sets a nonzero
    result) for files that cannot be removed.  Returns an int exit status.
    """
    after = opts.get('after')
    m = composelargefilematcher(matcher, repo[None].manifest())
    try:
        # lfstatus makes repo.status() report largefiles themselves rather
        # than their standins.
        repo.lfstatus = True
        s = repo.status(match=m, clean=not isaddremove)
    finally:
        repo.lfstatus = False
    manifest = repo[None].manifest()
    # Keep only files that really are largefiles (have a standin) in each
    # status bucket.
    modified, added, deleted, clean = [[f for f in list
                                        if lfutil.standin(f) in manifest]
                                       for list in (s.modified, s.added,
                                                    s.deleted, s.clean)]

    def warn(files, msg):
        # Emit one warning per file; return 1 if anything was warned about.
        for f in files:
            ui.warn(msg % m.rel(f))
        return int(len(files) > 0)

    result = 0

    if after:
        remove = deleted
        result = warn(modified + added + clean,
                      _('not removing %s: file still exists\n'))
    else:
        remove = deleted + clean
        result = warn(modified, _('not removing %s: file is modified (use -f'
                                  ' to force removal)\n'))
        result = warn(added, _('not removing %s: file has been marked for add'
                               ' (use forget to undo)\n')) or result

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in sorted(remove):
            if ui.verbose or not m.exact(f):
                # addremove in core gets fancy with the name, remove doesn't
                if isaddremove:
                    name = m.uipath(f)
                else:
                    name = m.rel(f)
                ui.status(_('removing %s\n') % name)

            if not opts.get('dry_run'):
                if not after:
                    # Unlink the largefile itself from the working directory.
                    util.unlinkpath(repo.wjoin(f), ignoremissing=True)

        if opts.get('dry_run'):
            return result

        # From here on, operate on the standin names.
        remove = [lfutil.standin(f) for f in remove]
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not isaddremove:
            for f in remove:
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
            repo[None].forget(remove)

        # Sync the largefiles dirstate with the (now removed) standins.
        for f in remove:
            lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
                                  False)

        lfdirstate.write()
    finally:
        wlock.release()

    return result
238
238
239 # For overriding mercurial.hgweb.webcommands so that largefiles will
239 # For overriding mercurial.hgweb.webcommands so that largefiles will
240 # appear at their right place in the manifests.
240 # appear at their right place in the manifests.
def decodepath(orig, path):
    """Map a standin path back to its largefile name for hgweb display.

    Returns ``path`` unchanged when it is not a standin.
    """
    largefile = lfutil.splitstandin(path)
    if largefile:
        return largefile
    return path
243
243
244 # -- Wrappers: modify existing commands --------------------------------
244 # -- Wrappers: modify existing commands --------------------------------
245
245
def overrideadd(orig, ui, repo, *pats, **opts):
    """Wrapper for 'hg add' rejecting --normal together with --large."""
    wantsnormal = opts.get('normal')
    wantslarge = opts.get('large')
    if wantsnormal and wantslarge:
        raise error.Abort(_('--normal cannot be used with --large'))
    return orig(ui, repo, *pats, **opts)
250
250
def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
    """Wrapper for cmdutil.add: handle largefiles first, then delegate
    the remaining normal files to the original implementation."""
    # The --normal flag short circuits this override
    if opts.get('normal'):
        return orig(ui, repo, matcher, prefix, explicitonly, **opts)

    ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
    # Exclude the files just added as largefiles so the normal add does
    # not pick them up a second time.
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
                                             ladded)
    bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)

    bad.extend(lbad)
    return bad
263
263
def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
    """Wrapper for cmdutil.remove: remove normal files via the original
    implementation, then handle the largefiles ourselves."""
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
    result = orig(ui, repo, normalmatcher, prefix, after, force, subrepos)
    lfresult = removelargefiles(ui, repo, False, matcher, after=after,
                                force=force)
    return lfresult or result
269
269
def overridestatusfn(orig, repo, rev2, **opts):
    """Run subrepo status with largefile-aware reporting enabled."""
    try:
        # Flip lfstatus on for the duration of the call only.
        repo._repo.lfstatus = True
        result = orig(repo, rev2, **opts)
    finally:
        repo._repo.lfstatus = False
    return result
276
276
def overridestatus(orig, ui, repo, *pats, **opts):
    """Run 'hg status' with largefile-aware reporting enabled."""
    try:
        # Flip lfstatus on for the duration of the call only.
        repo.lfstatus = True
        result = orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
    return result
283
283
def overridedirty(orig, repo, ignoreupdate=False):
    """Check subrepo dirtiness with largefile-aware status enabled."""
    try:
        # Flip lfstatus on for the duration of the call only.
        repo._repo.lfstatus = True
        result = orig(repo, ignoreupdate)
    finally:
        repo._repo.lfstatus = False
    return result
290
290
def overridelog(orig, ui, repo, *pats, **opts):
    """Wrapper for 'hg log' that folds largefile standins into matching.

    Temporarily installs a matchandpats override (and, for --patch, a
    nofollow log file matcher override) so revisions touching largefiles
    are selected under either their plain or their .hglf/ standin name,
    then restores both hooks in a finally block.
    """
    def overridematchandpats(ctx, pats=(), opts=None, globbed=False,
            default='relpath', badfn=None):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        if opts is None:
            opts = {}
        matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default,
                                       badfn=badfn)
        m, p = copy.copy(matchandpats)

        if m.always():
            # We want to match everything anyway, so there's no benefit trying
            # to add standins.
            return matchandpats

        pats = set(p)

        def fixpats(pat, tostandin=lfutil.standin):
            # Rewrite a pattern so it also covers the standin; filesets
            # ('set:') are left untouched.
            if pat.startswith('set:'):
                return pat

            kindpat = match_._patsplit(pat, None)

            if kindpat[0] is not None:
                return kindpat[0] + ':' + tostandin(kindpat[1])
            return tostandin(kindpat[1])

        if m._cwd:
            hglf = lfutil.shortname
            # Relative prefix ('../' chain) from cwd back to the repo root.
            back = util.pconvert(m.rel(hglf)[:-len(hglf)])

            def tostandin(f):
                # The file may already be a standin, so truncate the back
                # prefix and test before mangling it. This avoids turning
                # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
                if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
                    return f

                # An absolute path is from outside the repo, so truncate the
                # path to the root before building the standin. Otherwise cwd
                # is somewhere in the repo, relative to root, and needs to be
                # prepended before building the standin.
                if os.path.isabs(m._cwd):
                    f = f[len(back):]
                else:
                    f = m._cwd + '/' + f
                return back + lfutil.standin(f)

            pats.update(fixpats(f, tostandin) for f in p)
        else:
            def tostandin(f):
                # Already a standin: keep as-is; otherwise prepend .hglf/.
                if lfutil.splitstandin(f):
                    return f
                return lfutil.standin(f)
            pats.update(fixpats(f, tostandin) for f in p)

        for i in range(0, len(m._files)):
            # Don't add '.hglf' to m.files, since that is already covered by '.'
            if m._files[i] == '.':
                continue
            standin = lfutil.standin(m._files[i])
            # If the "standin" is a directory, append instead of replace to
            # support naming a directory on the command line with only
            # largefiles. The original directory is kept to support normal
            # files.
            if standin in repo[ctx.node()]:
                m._files[i] = standin
            elif m._files[i] not in repo[ctx.node()] \
                    and repo.wvfs.isdir(standin):
                m._files.append(standin)

        m._fileroots = set(m._files)
        m._always = False
        origmatchfn = m.matchfn
        def lfmatchfn(f):
            # Match a standin if its largefile name would match.
            lf = lfutil.splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            r = origmatchfn(f)
            return r
        m.matchfn = lfmatchfn

        ui.debug('updated patterns: %s\n' % sorted(pats))
        return m, pats

    # For hg log --patch, the match object is used in two different senses:
    # (1) to determine what revisions should be printed out, and
    # (2) to determine what files to print out diffs for.
    # The magic matchandpats override should be used for case (1) but not for
    # case (2).
    def overridemakelogfilematcher(repo, pats, opts, badfn=None):
        # Build the diff-file matcher with the ORIGINAL matchandpats so
        # diffs are not polluted with standins.
        wctx = repo[None]
        match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
        return lambda rev: match

    oldmatchandpats = installmatchandpatsfn(overridematchandpats)
    oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
    setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)

    try:
        return orig(ui, repo, *pats, **opts)
    finally:
        # Always undo both monkey patches, even if log raised.
        restorematchandpatsfn()
        setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
399
399
def overrideverify(orig, ui, repo, *pats, **opts):
    """Wrapper for 'hg verify' that can also verify the largefile store."""
    # Strip the largefiles-specific options; the wrapped command does not
    # know about them.
    large = opts.pop('large', False)
    all = opts.pop('lfa', False)
    contents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if large or all or contents:
        if not result:
            result = lfcommands.verifylfiles(ui, repo, all, contents)
    return result
409
409
def overridedebugstate(orig, ui, repo, *pats, **opts):
    """Wrapper for 'hg debugstate': with --large, show the largefiles
    dirstate instead of the regular one."""
    if opts.pop('large', False):
        class fakerepo(object):
            # Minimal stand-in exposing only what debugstate reads.
            dirstate = lfutil.openlfdirstate(ui, repo)
        orig(ui, fakerepo, *pats, **opts)
    else:
        orig(ui, repo, *pats, **opts)
418
418
419 # Before starting the manifest merge, merge.updates will call
419 # Before starting the manifest merge, merge.updates will call
420 # _checkunknownfile to check if there are any files in the merged-in
420 # _checkunknownfile to check if there are any files in the merged-in
421 # changeset that collide with unknown files in the working copy.
421 # changeset that collide with unknown files in the working copy.
422 #
422 #
423 # The largefiles are seen as unknown, so this prevents us from merging
423 # The largefiles are seen as unknown, so this prevents us from merging
424 # in a file 'foo' if we already have a largefile with the same name.
424 # in a file 'foo' if we already have a largefile with the same name.
425 #
425 #
426 # The overridden function filters the unknown files by removing any
426 # The overridden function filters the unknown files by removing any
427 # largefiles. This makes the merge proceed and we can then handle this
427 # largefiles. This makes the merge proceed and we can then handle this
428 # case further in the overridden calculateupdates function below.
428 # case further in the overridden calculateupdates function below.
def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
    # A file tracked as a largefile (i.e. whose standin is in the working
    # context) must not be reported as a colliding unknown file; otherwise
    # defer to the original check.
    normalized = repo.dirstate.normalize(f)
    if lfutil.standin(normalized) in wctx:
        return False
    return origfn(repo, wctx, mctx, f, f2)
433
433
434 # The manifest merge handles conflicts on the manifest level. We want
434 # The manifest merge handles conflicts on the manifest level. We want
435 # to handle changes in largefile-ness of files at this level too.
435 # to handle changes in largefile-ness of files at this level too.
436 #
436 #
437 # The strategy is to run the original calculateupdates and then process
437 # The strategy is to run the original calculateupdates and then process
438 # the action list it outputs. There are two cases we need to deal with:
438 # the action list it outputs. There are two cases we need to deal with:
439 #
439 #
440 # 1. Normal file in p1, largefile in p2. Here the largefile is
440 # 1. Normal file in p1, largefile in p2. Here the largefile is
441 # detected via its standin file, which will enter the working copy
441 # detected via its standin file, which will enter the working copy
442 # with a "get" action. It is not "merge" since the standin is all
442 # with a "get" action. It is not "merge" since the standin is all
443 # Mercurial is concerned with at this level -- the link to the
443 # Mercurial is concerned with at this level -- the link to the
444 # existing normal file is not relevant here.
444 # existing normal file is not relevant here.
445 #
445 #
446 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
446 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
447 # since the largefile will be present in the working copy and
447 # since the largefile will be present in the working copy and
448 # different from the normal file in p2. Mercurial therefore
448 # different from the normal file in p2. Mercurial therefore
449 # triggers a merge action.
449 # triggers a merge action.
450 #
450 #
451 # In both cases, we prompt the user and emit new actions to either
451 # In both cases, we prompt the user and emit new actions to either
452 # remove the standin (if the normal file was kept) or to remove the
452 # remove the standin (if the normal file was kept) or to remove the
453 # normal file and get the standin (if the largefile was kept). The
453 # normal file and get the standin (if the largefile was kept). The
454 # default prompt answer is to use the largefile version since it was
454 # default prompt answer is to use the largefile version since it was
455 # presumably changed on purpose.
455 # presumably changed on purpose.
456 #
456 #
457 # Finally, the merge.applyupdates function will then take care of
457 # Finally, the merge.applyupdates function will then take care of
458 # writing the files into the working copy and lfcommands.updatelfiles
458 # writing the files into the working copy and lfcommands.updatelfiles
459 # will update the largefiles.
459 # will update the largefiles.
460 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
460 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
461 partial, acceptremote, followcopies):
461 partial, acceptremote, followcopies):
462 overwrite = force and not branchmerge
462 overwrite = force and not branchmerge
463 actions, diverge, renamedelete = origfn(
463 actions, diverge, renamedelete = origfn(
464 repo, p1, p2, pas, branchmerge, force, partial, acceptremote,
464 repo, p1, p2, pas, branchmerge, force, partial, acceptremote,
465 followcopies)
465 followcopies)
466
466
467 if overwrite:
467 if overwrite:
468 return actions, diverge, renamedelete
468 return actions, diverge, renamedelete
469
469
470 # Convert to dictionary with filename as key and action as value.
470 # Convert to dictionary with filename as key and action as value.
471 lfiles = set()
471 lfiles = set()
472 for f in actions:
472 for f in actions:
473 splitstandin = f and lfutil.splitstandin(f)
473 splitstandin = f and lfutil.splitstandin(f)
474 if splitstandin in p1:
474 if splitstandin in p1:
475 lfiles.add(splitstandin)
475 lfiles.add(splitstandin)
476 elif lfutil.standin(f) in p1:
476 elif lfutil.standin(f) in p1:
477 lfiles.add(f)
477 lfiles.add(f)
478
478
479 for lfile in lfiles:
479 for lfile in lfiles:
480 standin = lfutil.standin(lfile)
480 standin = lfutil.standin(lfile)
481 (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
481 (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
482 (sm, sargs, smsg) = actions.get(standin, (None, None, None))
482 (sm, sargs, smsg) = actions.get(standin, (None, None, None))
483 if sm in ('g', 'dc') and lm != 'r':
483 if sm in ('g', 'dc') and lm != 'r':
484 if sm == 'dc':
484 if sm == 'dc':
485 f1, f2, fa, move, anc = sargs
485 f1, f2, fa, move, anc = sargs
486 sargs = (p2[f2].flags(),)
486 sargs = (p2[f2].flags(),)
487 # Case 1: normal file in the working copy, largefile in
487 # Case 1: normal file in the working copy, largefile in
488 # the second parent
488 # the second parent
489 usermsg = _('remote turned local normal file %s into a largefile\n'
489 usermsg = _('remote turned local normal file %s into a largefile\n'
490 'use (l)argefile or keep (n)ormal file?'
490 'use (l)argefile or keep (n)ormal file?'
491 '$$ &Largefile $$ &Normal file') % lfile
491 '$$ &Largefile $$ &Normal file') % lfile
492 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
492 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
493 actions[lfile] = ('r', None, 'replaced by standin')
493 actions[lfile] = ('r', None, 'replaced by standin')
494 actions[standin] = ('g', sargs, 'replaces standin')
494 actions[standin] = ('g', sargs, 'replaces standin')
495 else: # keep local normal file
495 else: # keep local normal file
496 actions[lfile] = ('k', None, 'replaces standin')
496 actions[lfile] = ('k', None, 'replaces standin')
497 if branchmerge:
497 if branchmerge:
498 actions[standin] = ('k', None, 'replaced by non-standin')
498 actions[standin] = ('k', None, 'replaced by non-standin')
499 else:
499 else:
500 actions[standin] = ('r', None, 'replaced by non-standin')
500 actions[standin] = ('r', None, 'replaced by non-standin')
501 elif lm in ('g', 'dc') and sm != 'r':
501 elif lm in ('g', 'dc') and sm != 'r':
502 if lm == 'dc':
502 if lm == 'dc':
503 f1, f2, fa, move, anc = largs
503 f1, f2, fa, move, anc = largs
504 largs = (p2[f2].flags(),)
504 largs = (p2[f2].flags(),)
505 # Case 2: largefile in the working copy, normal file in
505 # Case 2: largefile in the working copy, normal file in
506 # the second parent
506 # the second parent
507 usermsg = _('remote turned local largefile %s into a normal file\n'
507 usermsg = _('remote turned local largefile %s into a normal file\n'
508 'keep (l)argefile or use (n)ormal file?'
508 'keep (l)argefile or use (n)ormal file?'
509 '$$ &Largefile $$ &Normal file') % lfile
509 '$$ &Largefile $$ &Normal file') % lfile
510 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
510 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
511 if branchmerge:
511 if branchmerge:
512 # largefile can be restored from standin safely
512 # largefile can be restored from standin safely
513 actions[lfile] = ('k', None, 'replaced by standin')
513 actions[lfile] = ('k', None, 'replaced by standin')
514 actions[standin] = ('k', None, 'replaces standin')
514 actions[standin] = ('k', None, 'replaces standin')
515 else:
515 else:
516 # "lfile" should be marked as "removed" without
516 # "lfile" should be marked as "removed" without
517 # removal of itself
517 # removal of itself
518 actions[lfile] = ('lfmr', None,
518 actions[lfile] = ('lfmr', None,
519 'forget non-standin largefile')
519 'forget non-standin largefile')
520
520
521 # linear-merge should treat this largefile as 're-added'
521 # linear-merge should treat this largefile as 're-added'
522 actions[standin] = ('a', None, 'keep standin')
522 actions[standin] = ('a', None, 'keep standin')
523 else: # pick remote normal file
523 else: # pick remote normal file
524 actions[lfile] = ('g', largs, 'replaces standin')
524 actions[lfile] = ('g', largs, 'replaces standin')
525 actions[standin] = ('r', None, 'replaced by non-standin')
525 actions[standin] = ('r', None, 'replaced by non-standin')
526
526
527 return actions, diverge, renamedelete
527 return actions, diverge, renamedelete
528
528
529 def mergerecordupdates(orig, repo, actions, branchmerge):
529 def mergerecordupdates(orig, repo, actions, branchmerge):
530 if 'lfmr' in actions:
530 if 'lfmr' in actions:
531 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
531 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
532 for lfile, args, msg in actions['lfmr']:
532 for lfile, args, msg in actions['lfmr']:
533 # this should be executed before 'orig', to execute 'remove'
533 # this should be executed before 'orig', to execute 'remove'
534 # before all other actions
534 # before all other actions
535 repo.dirstate.remove(lfile)
535 repo.dirstate.remove(lfile)
536 # make sure lfile doesn't get synclfdirstate'd as normal
536 # make sure lfile doesn't get synclfdirstate'd as normal
537 lfdirstate.add(lfile)
537 lfdirstate.add(lfile)
538 lfdirstate.write()
538 lfdirstate.write()
539
539
540 return orig(repo, actions, branchmerge)
540 return orig(repo, actions, branchmerge)
541
541
542
542
543 # Override filemerge to prompt the user about how they wish to merge
543 # Override filemerge to prompt the user about how they wish to merge
544 # largefiles. This will handle identical edits without prompting the user.
544 # largefiles. This will handle identical edits without prompting the user.
545 def overridefilemerge(origfn, premerge, repo, mynode, orig, fcd, fco, fca,
545 def overridefilemerge(origfn, premerge, repo, mynode, orig, fcd, fco, fca,
546 labels=None):
546 labels=None):
547 if not lfutil.isstandin(orig):
547 if not lfutil.isstandin(orig):
548 return origfn(premerge, repo, mynode, orig, fcd, fco, fca,
548 return origfn(premerge, repo, mynode, orig, fcd, fco, fca,
549 labels=labels)
549 labels=labels)
550
550
551 ahash = fca.data().strip().lower()
551 ahash = fca.data().strip().lower()
552 dhash = fcd.data().strip().lower()
552 dhash = fcd.data().strip().lower()
553 ohash = fco.data().strip().lower()
553 ohash = fco.data().strip().lower()
554 if (ohash != ahash and
554 if (ohash != ahash and
555 ohash != dhash and
555 ohash != dhash and
556 (dhash == ahash or
556 (dhash == ahash or
557 repo.ui.promptchoice(
557 repo.ui.promptchoice(
558 _('largefile %s has a merge conflict\nancestor was %s\n'
558 _('largefile %s has a merge conflict\nancestor was %s\n'
559 'keep (l)ocal %s or\ntake (o)ther %s?'
559 'keep (l)ocal %s or\ntake (o)ther %s?'
560 '$$ &Local $$ &Other') %
560 '$$ &Local $$ &Other') %
561 (lfutil.splitstandin(orig), ahash, dhash, ohash),
561 (lfutil.splitstandin(orig), ahash, dhash, ohash),
562 0) == 1)):
562 0) == 1)):
563 repo.wwrite(fcd.path(), fco.data(), fco.flags())
563 repo.wwrite(fcd.path(), fco.data(), fco.flags())
564 return True, 0
564 return True, 0, False
565
565
566 def copiespathcopies(orig, ctx1, ctx2, match=None):
566 def copiespathcopies(orig, ctx1, ctx2, match=None):
567 copies = orig(ctx1, ctx2, match=match)
567 copies = orig(ctx1, ctx2, match=match)
568 updated = {}
568 updated = {}
569
569
570 for k, v in copies.iteritems():
570 for k, v in copies.iteritems():
571 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
571 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
572
572
573 return updated
573 return updated
574
574
575 # Copy first changes the matchers to match standins instead of
575 # Copy first changes the matchers to match standins instead of
576 # largefiles. Then it overrides util.copyfile in that function it
576 # largefiles. Then it overrides util.copyfile in that function it
577 # checks if the destination largefile already exists. It also keeps a
577 # checks if the destination largefile already exists. It also keeps a
578 # list of copied files so that the largefiles can be copied and the
578 # list of copied files so that the largefiles can be copied and the
579 # dirstate updated.
579 # dirstate updated.
580 def overridecopy(orig, ui, repo, pats, opts, rename=False):
580 def overridecopy(orig, ui, repo, pats, opts, rename=False):
581 # doesn't remove largefile on rename
581 # doesn't remove largefile on rename
582 if len(pats) < 2:
582 if len(pats) < 2:
583 # this isn't legal, let the original function deal with it
583 # this isn't legal, let the original function deal with it
584 return orig(ui, repo, pats, opts, rename)
584 return orig(ui, repo, pats, opts, rename)
585
585
586 # This could copy both lfiles and normal files in one command,
586 # This could copy both lfiles and normal files in one command,
587 # but we don't want to do that. First replace their matcher to
587 # but we don't want to do that. First replace their matcher to
588 # only match normal files and run it, then replace it to just
588 # only match normal files and run it, then replace it to just
589 # match largefiles and run it again.
589 # match largefiles and run it again.
590 nonormalfiles = False
590 nonormalfiles = False
591 nolfiles = False
591 nolfiles = False
592 installnormalfilesmatchfn(repo[None].manifest())
592 installnormalfilesmatchfn(repo[None].manifest())
593 try:
593 try:
594 result = orig(ui, repo, pats, opts, rename)
594 result = orig(ui, repo, pats, opts, rename)
595 except error.Abort as e:
595 except error.Abort as e:
596 if str(e) != _('no files to copy'):
596 if str(e) != _('no files to copy'):
597 raise e
597 raise e
598 else:
598 else:
599 nonormalfiles = True
599 nonormalfiles = True
600 result = 0
600 result = 0
601 finally:
601 finally:
602 restorematchfn()
602 restorematchfn()
603
603
604 # The first rename can cause our current working directory to be removed.
604 # The first rename can cause our current working directory to be removed.
605 # In that case there is nothing left to copy/rename so just quit.
605 # In that case there is nothing left to copy/rename so just quit.
606 try:
606 try:
607 repo.getcwd()
607 repo.getcwd()
608 except OSError:
608 except OSError:
609 return result
609 return result
610
610
611 def makestandin(relpath):
611 def makestandin(relpath):
612 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
612 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
613 return os.path.join(repo.wjoin(lfutil.standin(path)))
613 return os.path.join(repo.wjoin(lfutil.standin(path)))
614
614
615 fullpats = scmutil.expandpats(pats)
615 fullpats = scmutil.expandpats(pats)
616 dest = fullpats[-1]
616 dest = fullpats[-1]
617
617
618 if os.path.isdir(dest):
618 if os.path.isdir(dest):
619 if not os.path.isdir(makestandin(dest)):
619 if not os.path.isdir(makestandin(dest)):
620 os.makedirs(makestandin(dest))
620 os.makedirs(makestandin(dest))
621
621
622 try:
622 try:
623 # When we call orig below it creates the standins but we don't add
623 # When we call orig below it creates the standins but we don't add
624 # them to the dir state until later so lock during that time.
624 # them to the dir state until later so lock during that time.
625 wlock = repo.wlock()
625 wlock = repo.wlock()
626
626
627 manifest = repo[None].manifest()
627 manifest = repo[None].manifest()
628 def overridematch(ctx, pats=(), opts=None, globbed=False,
628 def overridematch(ctx, pats=(), opts=None, globbed=False,
629 default='relpath', badfn=None):
629 default='relpath', badfn=None):
630 if opts is None:
630 if opts is None:
631 opts = {}
631 opts = {}
632 newpats = []
632 newpats = []
633 # The patterns were previously mangled to add the standin
633 # The patterns were previously mangled to add the standin
634 # directory; we need to remove that now
634 # directory; we need to remove that now
635 for pat in pats:
635 for pat in pats:
636 if match_.patkind(pat) is None and lfutil.shortname in pat:
636 if match_.patkind(pat) is None and lfutil.shortname in pat:
637 newpats.append(pat.replace(lfutil.shortname, ''))
637 newpats.append(pat.replace(lfutil.shortname, ''))
638 else:
638 else:
639 newpats.append(pat)
639 newpats.append(pat)
640 match = oldmatch(ctx, newpats, opts, globbed, default, badfn=badfn)
640 match = oldmatch(ctx, newpats, opts, globbed, default, badfn=badfn)
641 m = copy.copy(match)
641 m = copy.copy(match)
642 lfile = lambda f: lfutil.standin(f) in manifest
642 lfile = lambda f: lfutil.standin(f) in manifest
643 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
643 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
644 m._fileroots = set(m._files)
644 m._fileroots = set(m._files)
645 origmatchfn = m.matchfn
645 origmatchfn = m.matchfn
646 m.matchfn = lambda f: (lfutil.isstandin(f) and
646 m.matchfn = lambda f: (lfutil.isstandin(f) and
647 (f in manifest) and
647 (f in manifest) and
648 origmatchfn(lfutil.splitstandin(f)) or
648 origmatchfn(lfutil.splitstandin(f)) or
649 None)
649 None)
650 return m
650 return m
651 oldmatch = installmatchfn(overridematch)
651 oldmatch = installmatchfn(overridematch)
652 listpats = []
652 listpats = []
653 for pat in pats:
653 for pat in pats:
654 if match_.patkind(pat) is not None:
654 if match_.patkind(pat) is not None:
655 listpats.append(pat)
655 listpats.append(pat)
656 else:
656 else:
657 listpats.append(makestandin(pat))
657 listpats.append(makestandin(pat))
658
658
659 try:
659 try:
660 origcopyfile = util.copyfile
660 origcopyfile = util.copyfile
661 copiedfiles = []
661 copiedfiles = []
662 def overridecopyfile(src, dest):
662 def overridecopyfile(src, dest):
663 if (lfutil.shortname in src and
663 if (lfutil.shortname in src and
664 dest.startswith(repo.wjoin(lfutil.shortname))):
664 dest.startswith(repo.wjoin(lfutil.shortname))):
665 destlfile = dest.replace(lfutil.shortname, '')
665 destlfile = dest.replace(lfutil.shortname, '')
666 if not opts['force'] and os.path.exists(destlfile):
666 if not opts['force'] and os.path.exists(destlfile):
667 raise IOError('',
667 raise IOError('',
668 _('destination largefile already exists'))
668 _('destination largefile already exists'))
669 copiedfiles.append((src, dest))
669 copiedfiles.append((src, dest))
670 origcopyfile(src, dest)
670 origcopyfile(src, dest)
671
671
672 util.copyfile = overridecopyfile
672 util.copyfile = overridecopyfile
673 result += orig(ui, repo, listpats, opts, rename)
673 result += orig(ui, repo, listpats, opts, rename)
674 finally:
674 finally:
675 util.copyfile = origcopyfile
675 util.copyfile = origcopyfile
676
676
677 lfdirstate = lfutil.openlfdirstate(ui, repo)
677 lfdirstate = lfutil.openlfdirstate(ui, repo)
678 for (src, dest) in copiedfiles:
678 for (src, dest) in copiedfiles:
679 if (lfutil.shortname in src and
679 if (lfutil.shortname in src and
680 dest.startswith(repo.wjoin(lfutil.shortname))):
680 dest.startswith(repo.wjoin(lfutil.shortname))):
681 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
681 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
682 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
682 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
683 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
683 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
684 if not os.path.isdir(destlfiledir):
684 if not os.path.isdir(destlfiledir):
685 os.makedirs(destlfiledir)
685 os.makedirs(destlfiledir)
686 if rename:
686 if rename:
687 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
687 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
688
688
689 # The file is gone, but this deletes any empty parent
689 # The file is gone, but this deletes any empty parent
690 # directories as a side-effect.
690 # directories as a side-effect.
691 util.unlinkpath(repo.wjoin(srclfile), True)
691 util.unlinkpath(repo.wjoin(srclfile), True)
692 lfdirstate.remove(srclfile)
692 lfdirstate.remove(srclfile)
693 else:
693 else:
694 util.copyfile(repo.wjoin(srclfile),
694 util.copyfile(repo.wjoin(srclfile),
695 repo.wjoin(destlfile))
695 repo.wjoin(destlfile))
696
696
697 lfdirstate.add(destlfile)
697 lfdirstate.add(destlfile)
698 lfdirstate.write()
698 lfdirstate.write()
699 except error.Abort as e:
699 except error.Abort as e:
700 if str(e) != _('no files to copy'):
700 if str(e) != _('no files to copy'):
701 raise e
701 raise e
702 else:
702 else:
703 nolfiles = True
703 nolfiles = True
704 finally:
704 finally:
705 restorematchfn()
705 restorematchfn()
706 wlock.release()
706 wlock.release()
707
707
708 if nolfiles and nonormalfiles:
708 if nolfiles and nonormalfiles:
709 raise error.Abort(_('no files to copy'))
709 raise error.Abort(_('no files to copy'))
710
710
711 return result
711 return result
712
712
713 # When the user calls revert, we have to be careful to not revert any
713 # When the user calls revert, we have to be careful to not revert any
714 # changes to other largefiles accidentally. This means we have to keep
714 # changes to other largefiles accidentally. This means we have to keep
715 # track of the largefiles that are being reverted so we only pull down
715 # track of the largefiles that are being reverted so we only pull down
716 # the necessary largefiles.
716 # the necessary largefiles.
717 #
717 #
718 # Standins are only updated (to match the hash of largefiles) before
718 # Standins are only updated (to match the hash of largefiles) before
719 # commits. Update the standins then run the original revert, changing
719 # commits. Update the standins then run the original revert, changing
720 # the matcher to hit standins instead of largefiles. Based on the
720 # the matcher to hit standins instead of largefiles. Based on the
721 # resulting standins update the largefiles.
721 # resulting standins update the largefiles.
722 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
722 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
723 # Because we put the standins in a bad state (by updating them)
723 # Because we put the standins in a bad state (by updating them)
724 # and then return them to a correct state we need to lock to
724 # and then return them to a correct state we need to lock to
725 # prevent others from changing them in their incorrect state.
725 # prevent others from changing them in their incorrect state.
726 wlock = repo.wlock()
726 wlock = repo.wlock()
727 try:
727 try:
728 lfdirstate = lfutil.openlfdirstate(ui, repo)
728 lfdirstate = lfutil.openlfdirstate(ui, repo)
729 s = lfutil.lfdirstatestatus(lfdirstate, repo)
729 s = lfutil.lfdirstatestatus(lfdirstate, repo)
730 lfdirstate.write()
730 lfdirstate.write()
731 for lfile in s.modified:
731 for lfile in s.modified:
732 lfutil.updatestandin(repo, lfutil.standin(lfile))
732 lfutil.updatestandin(repo, lfutil.standin(lfile))
733 for lfile in s.deleted:
733 for lfile in s.deleted:
734 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
734 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
735 os.unlink(repo.wjoin(lfutil.standin(lfile)))
735 os.unlink(repo.wjoin(lfutil.standin(lfile)))
736
736
737 oldstandins = lfutil.getstandinsstate(repo)
737 oldstandins = lfutil.getstandinsstate(repo)
738
738
739 def overridematch(mctx, pats=(), opts=None, globbed=False,
739 def overridematch(mctx, pats=(), opts=None, globbed=False,
740 default='relpath', badfn=None):
740 default='relpath', badfn=None):
741 if opts is None:
741 if opts is None:
742 opts = {}
742 opts = {}
743 match = oldmatch(mctx, pats, opts, globbed, default, badfn=badfn)
743 match = oldmatch(mctx, pats, opts, globbed, default, badfn=badfn)
744 m = copy.copy(match)
744 m = copy.copy(match)
745
745
746 # revert supports recursing into subrepos, and though largefiles
746 # revert supports recursing into subrepos, and though largefiles
747 # currently doesn't work correctly in that case, this match is
747 # currently doesn't work correctly in that case, this match is
748 # called, so the lfdirstate above may not be the correct one for
748 # called, so the lfdirstate above may not be the correct one for
749 # this invocation of match.
749 # this invocation of match.
750 lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
750 lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
751 False)
751 False)
752
752
753 def tostandin(f):
753 def tostandin(f):
754 standin = lfutil.standin(f)
754 standin = lfutil.standin(f)
755 if standin in ctx or standin in mctx:
755 if standin in ctx or standin in mctx:
756 return standin
756 return standin
757 elif standin in repo[None] or lfdirstate[f] == 'r':
757 elif standin in repo[None] or lfdirstate[f] == 'r':
758 return None
758 return None
759 return f
759 return f
760 m._files = [tostandin(f) for f in m._files]
760 m._files = [tostandin(f) for f in m._files]
761 m._files = [f for f in m._files if f is not None]
761 m._files = [f for f in m._files if f is not None]
762 m._fileroots = set(m._files)
762 m._fileroots = set(m._files)
763 origmatchfn = m.matchfn
763 origmatchfn = m.matchfn
764 def matchfn(f):
764 def matchfn(f):
765 if lfutil.isstandin(f):
765 if lfutil.isstandin(f):
766 return (origmatchfn(lfutil.splitstandin(f)) and
766 return (origmatchfn(lfutil.splitstandin(f)) and
767 (f in ctx or f in mctx))
767 (f in ctx or f in mctx))
768 return origmatchfn(f)
768 return origmatchfn(f)
769 m.matchfn = matchfn
769 m.matchfn = matchfn
770 return m
770 return m
771 oldmatch = installmatchfn(overridematch)
771 oldmatch = installmatchfn(overridematch)
772 try:
772 try:
773 orig(ui, repo, ctx, parents, *pats, **opts)
773 orig(ui, repo, ctx, parents, *pats, **opts)
774 finally:
774 finally:
775 restorematchfn()
775 restorematchfn()
776
776
777 newstandins = lfutil.getstandinsstate(repo)
777 newstandins = lfutil.getstandinsstate(repo)
778 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
778 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
779 # lfdirstate should be 'normallookup'-ed for updated files,
779 # lfdirstate should be 'normallookup'-ed for updated files,
780 # because reverting doesn't touch dirstate for 'normal' files
780 # because reverting doesn't touch dirstate for 'normal' files
781 # when target revision is explicitly specified: in such case,
781 # when target revision is explicitly specified: in such case,
782 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
782 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
783 # of target (standin) file.
783 # of target (standin) file.
784 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
784 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
785 normallookup=True)
785 normallookup=True)
786
786
787 finally:
787 finally:
788 wlock.release()
788 wlock.release()
789
789
790 # after pulling changesets, we need to take some extra care to get
790 # after pulling changesets, we need to take some extra care to get
791 # largefiles updated remotely
791 # largefiles updated remotely
792 def overridepull(orig, ui, repo, source=None, **opts):
792 def overridepull(orig, ui, repo, source=None, **opts):
793 revsprepull = len(repo)
793 revsprepull = len(repo)
794 if not source:
794 if not source:
795 source = 'default'
795 source = 'default'
796 repo.lfpullsource = source
796 repo.lfpullsource = source
797 result = orig(ui, repo, source, **opts)
797 result = orig(ui, repo, source, **opts)
798 revspostpull = len(repo)
798 revspostpull = len(repo)
799 lfrevs = opts.get('lfrev', [])
799 lfrevs = opts.get('lfrev', [])
800 if opts.get('all_largefiles'):
800 if opts.get('all_largefiles'):
801 lfrevs.append('pulled()')
801 lfrevs.append('pulled()')
802 if lfrevs and revspostpull > revsprepull:
802 if lfrevs and revspostpull > revsprepull:
803 numcached = 0
803 numcached = 0
804 repo.firstpulled = revsprepull # for pulled() revset expression
804 repo.firstpulled = revsprepull # for pulled() revset expression
805 try:
805 try:
806 for rev in scmutil.revrange(repo, lfrevs):
806 for rev in scmutil.revrange(repo, lfrevs):
807 ui.note(_('pulling largefiles for revision %s\n') % rev)
807 ui.note(_('pulling largefiles for revision %s\n') % rev)
808 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
808 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
809 numcached += len(cached)
809 numcached += len(cached)
810 finally:
810 finally:
811 del repo.firstpulled
811 del repo.firstpulled
812 ui.status(_("%d largefiles cached\n") % numcached)
812 ui.status(_("%d largefiles cached\n") % numcached)
813 return result
813 return result
814
814
815 def pulledrevsetsymbol(repo, subset, x):
815 def pulledrevsetsymbol(repo, subset, x):
816 """``pulled()``
816 """``pulled()``
817 Changesets that just has been pulled.
817 Changesets that just has been pulled.
818
818
819 Only available with largefiles from pull --lfrev expressions.
819 Only available with largefiles from pull --lfrev expressions.
820
820
821 .. container:: verbose
821 .. container:: verbose
822
822
823 Some examples:
823 Some examples:
824
824
825 - pull largefiles for all new changesets::
825 - pull largefiles for all new changesets::
826
826
827 hg pull -lfrev "pulled()"
827 hg pull -lfrev "pulled()"
828
828
829 - pull largefiles for all new branch heads::
829 - pull largefiles for all new branch heads::
830
830
831 hg pull -lfrev "head(pulled()) and not closed()"
831 hg pull -lfrev "head(pulled()) and not closed()"
832
832
833 """
833 """
834
834
835 try:
835 try:
836 firstpulled = repo.firstpulled
836 firstpulled = repo.firstpulled
837 except AttributeError:
837 except AttributeError:
838 raise error.Abort(_("pulled() only available in --lfrev"))
838 raise error.Abort(_("pulled() only available in --lfrev"))
839 return revset.baseset([r for r in subset if r >= firstpulled])
839 return revset.baseset([r for r in subset if r >= firstpulled])
840
840
841 def overrideclone(orig, ui, source, dest=None, **opts):
841 def overrideclone(orig, ui, source, dest=None, **opts):
842 d = dest
842 d = dest
843 if d is None:
843 if d is None:
844 d = hg.defaultdest(source)
844 d = hg.defaultdest(source)
845 if opts.get('all_largefiles') and not hg.islocal(d):
845 if opts.get('all_largefiles') and not hg.islocal(d):
846 raise error.Abort(_(
846 raise error.Abort(_(
847 '--all-largefiles is incompatible with non-local destination %s') %
847 '--all-largefiles is incompatible with non-local destination %s') %
848 d)
848 d)
849
849
850 return orig(ui, source, dest, **opts)
850 return orig(ui, source, dest, **opts)
851
851
852 def hgclone(orig, ui, opts, *args, **kwargs):
852 def hgclone(orig, ui, opts, *args, **kwargs):
853 result = orig(ui, opts, *args, **kwargs)
853 result = orig(ui, opts, *args, **kwargs)
854
854
855 if result is not None:
855 if result is not None:
856 sourcerepo, destrepo = result
856 sourcerepo, destrepo = result
857 repo = destrepo.local()
857 repo = destrepo.local()
858
858
859 # When cloning to a remote repo (like through SSH), no repo is available
859 # When cloning to a remote repo (like through SSH), no repo is available
860 # from the peer. Therefore the largefiles can't be downloaded and the
860 # from the peer. Therefore the largefiles can't be downloaded and the
861 # hgrc can't be updated.
861 # hgrc can't be updated.
862 if not repo:
862 if not repo:
863 return result
863 return result
864
864
865 # If largefiles is required for this repo, permanently enable it locally
865 # If largefiles is required for this repo, permanently enable it locally
866 if 'largefiles' in repo.requirements:
866 if 'largefiles' in repo.requirements:
867 fp = repo.vfs('hgrc', 'a', text=True)
867 fp = repo.vfs('hgrc', 'a', text=True)
868 try:
868 try:
869 fp.write('\n[extensions]\nlargefiles=\n')
869 fp.write('\n[extensions]\nlargefiles=\n')
870 finally:
870 finally:
871 fp.close()
871 fp.close()
872
872
873 # Caching is implicitly limited to 'rev' option, since the dest repo was
873 # Caching is implicitly limited to 'rev' option, since the dest repo was
874 # truncated at that point. The user may expect a download count with
874 # truncated at that point. The user may expect a download count with
875 # this option, so attempt whether or not this is a largefile repo.
875 # this option, so attempt whether or not this is a largefile repo.
876 if opts.get('all_largefiles'):
876 if opts.get('all_largefiles'):
877 success, missing = lfcommands.downloadlfiles(ui, repo, None)
877 success, missing = lfcommands.downloadlfiles(ui, repo, None)
878
878
879 if missing != 0:
879 if missing != 0:
880 return None
880 return None
881
881
882 return result
882 return result
883
883
884 def overriderebase(orig, ui, repo, **opts):
884 def overriderebase(orig, ui, repo, **opts):
885 if not util.safehasattr(repo, '_largefilesenabled'):
885 if not util.safehasattr(repo, '_largefilesenabled'):
886 return orig(ui, repo, **opts)
886 return orig(ui, repo, **opts)
887
887
888 resuming = opts.get('continue')
888 resuming = opts.get('continue')
889 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
889 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
890 repo._lfstatuswriters.append(lambda *msg, **opts: None)
890 repo._lfstatuswriters.append(lambda *msg, **opts: None)
891 try:
891 try:
892 return orig(ui, repo, **opts)
892 return orig(ui, repo, **opts)
893 finally:
893 finally:
894 repo._lfstatuswriters.pop()
894 repo._lfstatuswriters.pop()
895 repo._lfcommithooks.pop()
895 repo._lfcommithooks.pop()
896
896
897 def overridearchivecmd(orig, ui, repo, dest, **opts):
897 def overridearchivecmd(orig, ui, repo, dest, **opts):
898 repo.unfiltered().lfstatus = True
898 repo.unfiltered().lfstatus = True
899
899
900 try:
900 try:
901 return orig(ui, repo.unfiltered(), dest, **opts)
901 return orig(ui, repo.unfiltered(), dest, **opts)
902 finally:
902 finally:
903 repo.unfiltered().lfstatus = False
903 repo.unfiltered().lfstatus = False
904
904
905 def hgwebarchive(orig, web, req, tmpl):
905 def hgwebarchive(orig, web, req, tmpl):
906 web.repo.lfstatus = True
906 web.repo.lfstatus = True
907
907
908 try:
908 try:
909 return orig(web, req, tmpl)
909 return orig(web, req, tmpl)
910 finally:
910 finally:
911 web.repo.lfstatus = False
911 web.repo.lfstatus = False
912
912
def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
                    prefix='', mtime=None, subrepos=None):
    """largefile-aware version of archival.archive.

    Falls through to the original implementation unless lfstatus is set
    on the repo (or on its unfiltered view).  Standin files in the
    archived revision are replaced by the real largefile contents,
    located via the repo store or the system cache.
    """
    # For some reason setting repo.lfstatus in hgwebarchive only changes the
    # unfiltered repo's attr, so check that as well.
    if not repo.lfstatus and not repo.unfiltered().lfstatus:
        return orig(repo, dest, node, kind, decode, matchfn, prefix, mtime,
                    subrepos)

    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    if node is not None:
        lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise error.Abort(_("unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == 'files':
        if prefix:
            raise error.Abort(
                _('cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        # Skip entries excluded by the caller's matcher; apply the repo's
        # output filters (wwritedata) only when 'decode' was requested.
        if matchfn and not matchfn(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool("ui", "archivemeta", True):
        write('.hg_archival.txt', 0o644, False,
              lambda: archival.buildmetadata(ctx))

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if lfutil.isstandin(f):
            if node is not None:
                # archiving a revision: the standin's content is the hash;
                # locate the largefile in the store or user cache
                path = lfutil.findfile(repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                       _('largefile %s not found in repo store or system cache')
                       % lfutil.splitstandin(f))
            else:
                # archiving the working directory: read the file in place
                path = lfutil.splitstandin(f)

            # archive the largefile under its real name, not the standin's
            f = lfutil.splitstandin(f)

            def getdatafn():
                fd = None
                try:
                    fd = open(path, 'rb')
                    return fd.read()
                finally:
                    if fd:
                        fd.close()

            getdata = getdatafn
        write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.workingsub(subpath)
            submatch = match_.narrowmatcher(subpath, matchfn)
            sub._repo.lfstatus = True
            sub.archive(archiver, prefix, submatch)

    archiver.done()
988
988
def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
    """largefile-aware version of hgsubrepo.archive.

    Falls through to the original implementation when lfstatus is not
    set on the underlying repo.  'prefix' is the parent repo's path
    inside the archive; entries are added under prefix + subrepo path.
    """
    if not repo._repo.lfstatus:
        return orig(repo, archiver, prefix, match)

    repo._get(repo._state + ('hg',))
    rev = repo._state[1]
    ctx = repo._repo[rev]

    if ctx.node() is not None:
        lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())

    def write(name, mode, islink, getdata):
        # At this point, the standin has been replaced with the largefile name,
        # so the normal matcher works here without the lfutil variants.
        # Fix: test the 'name' parameter, not the enclosing loop variable 'f'.
        # The two happened to be identical at every call site, but closing
        # over the loop variable was a latent bug.
        if match and not match(name):
            return
        data = getdata()

        archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if lfutil.isstandin(f):
            if ctx.node() is not None:
                # archiving a revision: resolve the hash in the standin to
                # a path in the store or user cache
                path = lfutil.findfile(repo._repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                       _('largefile %s not found in repo store or system cache')
                       % lfutil.splitstandin(f))
            else:
                path = lfutil.splitstandin(f)

            # archive the largefile under its real name, not the standin's
            f = lfutil.splitstandin(f)

            def getdatafn():
                fd = None
                try:
                    fd = open(os.path.join(prefix, path), 'rb')
                    return fd.read()
                finally:
                    if fd:
                        fd.close()

            getdata = getdatafn

        write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)

    for subpath in sorted(ctx.substate):
        sub = ctx.workingsub(subpath)
        submatch = match_.narrowmatcher(subpath, match)
        sub._repo.lfstatus = True
        sub.archive(archiver, prefix + repo._path + '/', submatch)
1043
1043
1044 # If a largefile is modified, the change is not reflected in its
1044 # If a largefile is modified, the change is not reflected in its
1045 # standin until a commit. cmdutil.bailifchanged() raises an exception
1045 # standin until a commit. cmdutil.bailifchanged() raises an exception
1046 # if the repo has uncommitted changes. Wrap it to also check if
1046 # if the repo has uncommitted changes. Wrap it to also check if
1047 # largefiles were changed. This is used by bisect, backout and fetch.
1047 # largefiles were changed. This is used by bisect, backout and fetch.
def overridebailifchanged(orig, repo, *args, **kwargs):
    """Abort when the working copy has uncommitted changes, including
    modified largefiles.

    A modified largefile is not reflected in its standin until commit,
    so cmdutil.bailifchanged alone is not enough; re-check status with
    lfstatus enabled.  Used by bisect, backout and fetch.
    """
    orig(repo, *args, **kwargs)
    repo.lfstatus = True
    changes = repo.status()
    repo.lfstatus = False
    dirty = (changes.modified or changes.added
             or changes.removed or changes.deleted)
    if dirty:
        raise error.Abort(_('uncommitted changes'))
1055
1055
def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
    """largefile-aware replacement for cmdutil.forget.

    Normal files are handled by the original implementation; largefiles
    matched by 'match' are dropped from the lfdirstate, their standins
    removed from the working directory and forgotten in the repo.
    Returns the combined (bad, forgot) lists, like the original.
    """
    normalmatcher = composenormalfilematcher(match, repo[None].manifest())
    bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
    m = composelargefilematcher(match, repo[None].manifest())

    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    # keep only files that actually have a standin in the dirstate manifest
    forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]

    for f in forget:
        if lfutil.standin(f) not in repo.dirstate and not \
                repo.wvfs.isdir(lfutil.standin(f)):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % m.rel(f))
            bad.append(f)

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            # 'a' (added) entries are dropped; everything else is marked
            # removed so the next commit records the forget
            if lfdirstate[f] == 'a':
                lfdirstate.drop(f)
            else:
                lfdirstate.remove(f)
        lfdirstate.write()
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        rejected = repo[None].forget(standins)
    finally:
        wlock.release()

    bad.extend(f for f in rejected if f in m.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
1101
1101
def _getoutgoings(repo, other, missing, addfunc):
    """Invoke addfunc(filename, lfhash) for each largefile referenced by
    the outgoing revisions in 'missing'.

    Largefiles whose hash already exists on the 'other' repository are
    skipped, and each unique (filename, hash) pair is reported at most
    once.
    """
    seen = set()
    allhashes = set()
    def collect(fn, lfhash):
        pair = (fn, lfhash)
        if pair not in seen:
            seen.add(pair)
            allhashes.add(lfhash)
    lfutil.getlfilestoupload(repo, missing, collect)
    if not allhashes:
        return
    remote = basestore._openstore(repo, other).exists(allhashes)
    for fn, lfhash in seen:
        # skip hashes the other repository already has
        if not remote[lfhash]:
            addfunc(fn, lfhash)
1124
1124
def outgoinghook(ui, repo, other, opts, missing):
    """Hook for 'hg outgoing --large': list largefiles to be uploaded.

    With --debug, the hash of every entity is printed under each file
    name; otherwise only the file names are shown.
    """
    if not opts.pop('large', None):
        return
    lfhashes = set()
    if ui.debugflag:
        # map file name -> list of hashes, so hashes can be shown per file
        toupload = {}
        def addfunc(fn, lfhash):
            toupload.setdefault(fn, []).append(lfhash)
            lfhashes.add(lfhash)
        def showhashes(fn):
            for lfhash in sorted(toupload[fn]):
                ui.debug(' %s\n' % (lfhash))
    else:
        toupload = set()
        def addfunc(fn, lfhash):
            toupload.add(fn)
            lfhashes.add(lfhash)
        def showhashes(fn):
            pass
    _getoutgoings(repo, other, missing, addfunc)

    if not toupload:
        ui.status(_('largefiles: no files to upload\n'))
        return
    ui.status(_('largefiles to upload (%d entities):\n')
              % (len(lfhashes)))
    for file in sorted(toupload):
        ui.status(lfutil.splitstandin(file) + '\n')
        showhashes(file)
    ui.status('\n')
1156
1156
def summaryremotehook(ui, repo, opts, changes):
    """Hook for 'hg summary --remote': report largefiles needing upload.

    When 'changes' is None the hook is being asked what it needs: only
    the outgoing check, and only when --large was given.  Otherwise
    print a one-line summary of outgoing largefile entities.
    """
    largeopt = opts.get('large', False)
    if changes is None:
        # first pass: declare (needs-incoming, needs-outgoing)
        return (False, bool(largeopt))
    if not largeopt:
        return
    url, branch, peer, outgoing = changes[1]
    if peer is None:
        # i18n: column positioning for "hg summary"
        ui.status(_('largefiles: (no remote repo)\n'))
        return

    toupload = set()
    lfhashes = set()
    def addfunc(fn, lfhash):
        toupload.add(fn)
        lfhashes.add(lfhash)
    _getoutgoings(repo, peer, outgoing.missing, addfunc)

    if not toupload:
        # i18n: column positioning for "hg summary"
        ui.status(_('largefiles: (no files to upload)\n'))
    else:
        # i18n: column positioning for "hg summary"
        ui.status(_('largefiles: %d entities for %d files to upload\n')
                  % (len(lfhashes), len(toupload)))
1185
1185
def overridesummary(orig, ui, repo, *pats, **opts):
    """Run 'hg summary' with largefile-aware status reporting."""
    repo.lfstatus = True
    try:
        orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
1192
1192
def scmutiladdremove(orig, repo, matcher, prefix, opts=None, dry_run=None,
                     similarity=None):
    """largefile-aware replacement for scmutil.addremove.

    Missing largefiles are removed and new largefile candidates added by
    the largefiles code; everything else is delegated to the original
    addremove with a matcher that ignores largefiles.
    """
    if opts is None:
        opts = {}
    if not lfutil.islfilesrepo(repo):
        return orig(repo, matcher, prefix, opts, dry_run, similarity)
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
                                  False, False, False)

    # Call into the normal remove code, but the removing of the standin, we
    # want to have handled by original addremove.  Monkey patching here makes
    # sure we don't remove the standin in the largefiles code, preventing a
    # very confused state later.
    if s.deleted:
        m = copy.copy(matcher)

        # The m._files and m._map attributes are not changed to the deleted
        # list because that affects the m.exact() test, which in turn governs
        # whether or not the file name is printed, and how.  Simply limit the
        # original matches to those in the deleted status list.
        matchfn = m.matchfn
        m.matchfn = lambda f: f in s.deleted and matchfn(f)

        removelargefiles(repo.ui, repo, True, m, **opts)
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest.  Make sure it doesn't do anything
    # with largefiles by passing a matcher that will ignore them.
    matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
    return orig(repo, matcher, prefix, opts, dry_run, similarity)
1227
1227
1228 # Calling purge with --all will cause the largefiles to be deleted.
1228 # Calling purge with --all will cause the largefiles to be deleted.
1229 # Override repo.status to prevent this from happening.
1229 # Override repo.status to prevent this from happening.
def overridepurge(orig, ui, repo, *dirs, **opts):
    """Wrap the purge command so largefiles are not treated as junk.

    repo.status is temporarily replaced by a version that filters out of
    the unknown/ignored lists any file the lfdirstate still tracks.
    """
    # XXX Monkey patching a repoview will not work. The assigned attribute
    # will be set on the unfiltered repo, but we will only lookup attributes
    # in the unfiltered repo if the lookup in the repoview object itself
    # fails. As the monkey patched method exists on the repoview class the
    # lookup will not fail. As a result, the original version will shadow the
    # monkey patched one, defeating the monkey patch.
    #
    # As a work around we use an unfiltered repo here. We should do something
    # cleaner instead.
    repo = repo.unfiltered()
    oldstatus = repo.status
    def overridestatus(node1='.', node2=None, match=None, ignored=False,
                       clean=False, unknown=False, listsubrepos=False):
        r = oldstatus(node1, node2, match, ignored, clean, unknown,
                      listsubrepos)
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        # files with a known lfdirstate entry are largefiles, not purgeable
        unknown = [f for f in r.unknown if lfdirstate[f] == '?']
        ignored = [f for f in r.ignored if lfdirstate[f] == '?']
        return scmutil.status(r.modified, r.added, r.removed, r.deleted,
                              unknown, ignored, r.clean)
    repo.status = overridestatus
    orig(ui, repo, *dirs, **opts)
    repo.status = oldstatus
def overriderollback(orig, ui, repo, **opts):
    """Wrap rollback to restore standins and resync the lfdirstate.

    If the rollback moved the dirstate parents, standins are recreated
    from the new parent (or deleted when no longer tracked), and the
    lfdirstate is brought back in sync with the rolled-back state.
    """
    wlock = repo.wlock()
    try:
        before = repo.dirstate.parents()
        # standins tracked before the rollback; any that are no longer in
        # the dirstate afterwards are orphans to be deleted
        orphans = set(f for f in repo.dirstate
                      if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
        result = orig(ui, repo, **opts)
        after = repo.dirstate.parents()
        if before == after:
            return result # no need to restore standins

        pctx = repo['.']
        for f in repo.dirstate:
            if lfutil.isstandin(f):
                orphans.discard(f)
                if repo.dirstate[f] == 'r':
                    repo.wvfs.unlinkpath(f, ignoremissing=True)
                elif f in pctx:
                    # restore the standin's content from the new parent
                    fctx = pctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                else:
                    # content of standin is not so important in 'a',
                    # 'm' or 'n' (coming from the 2nd parent) cases
                    lfutil.writestandin(repo, f, '', False)
        for standin in orphans:
            repo.wvfs.unlinkpath(standin, ignoremissing=True)

        # resync lfdirstate with the surviving largefiles; drop entries
        # for files that no longer exist after the rollback
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        orphans = set(lfdirstate)
        lfiles = lfutil.listlfiles(repo)
        for file in lfiles:
            lfutil.synclfdirstate(repo, lfdirstate, file, True)
            orphans.discard(file)
        for lfile in orphans:
            lfdirstate.drop(lfile)
        lfdirstate.write()
    finally:
        wlock.release()
    return result
1293
1293
def overridetransplant(orig, ui, repo, *revs, **opts):
    """Run transplant with an automated largefiles commit hook installed
    and largefiles status output silenced for the duration of the call."""
    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        return orig(ui, repo, *revs, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
1304
1304
def overridecat(orig, ui, repo, file1, *pats, **opts):
    """largefile-aware implementation of the cat command.

    The matcher is extended so a largefile can be addressed either by
    its plain name or via its standin; largefiles missing from the user
    cache are fetched from the store on demand.  Returns 0 when at least
    one file was written, 1 otherwise.
    """
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    err = 1
    # largefile names matched only via their standin; suppress "bad file"
    # warnings for these
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn
    def lfmatchfn(f):
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)
    m.matchfn = lfmatchfn
    origbadfn = m.bad
    def lfbadfn(f, msg):
        if not f in notbad:
            origbadfn(f, msg)
    m.bad = lfbadfn

    origvisitdirfn = m.visitdir
    def lfvisitdirfn(dir):
        # always descend into the standin directory
        if dir == lfutil.shortname:
            return True
        ret = origvisitdirfn(dir)
        if ret:
            return ret
        lf = lfutil.splitstandin(dir)
        if lf is None:
            return False
        return origvisitdirfn(lf)
    m.visitdir = lfvisitdirfn

    for f in ctx.walk(m):
        fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
                                 pathname=f)
        lf = lfutil.splitstandin(f)
        if lf is None or origmatchfn(f):
            # duplicating unreachable code from commands.cat
            data = ctx[f].data()
            if opts.get('decode'):
                data = repo.wwritedata(f, data)
            fp.write(data)
        else:
            # a largefile requested by its plain name: stream the real
            # contents from the user cache, fetching it if necessary
            hash = lfutil.readstandin(repo, lf, ctx.rev())
            if not lfutil.inusercache(repo.ui, hash):
                store = basestore._openstore(repo)
                success, missing = store.get([(lf, hash)])
                if len(success) != 1:
                    raise error.Abort(
                        _('largefile %s is not in cache and could not be '
                          'downloaded') % lf)
            path = lfutil.usercachepath(repo.ui, hash)
            fpin = open(path, "rb")
            for chunk in util.filechunkiter(fpin, 128 * 1024):
                fp.write(chunk)
            fpin.close()
        fp.close()
        err = 0
    return err
1366
1366
def mergeupdate(orig, repo, node, branchmerge, force, partial,
                *args, **kwargs):
    """Wrap merge.update to keep largefiles in sync with their standins.

    Before the underlying update runs, standins of modified largefiles
    are rewritten so the merge machinery sees the working copy's real
    state; afterwards, largefiles whose standins changed are refreshed
    in the working directory.
    """
    wlock = repo.wlock()
    try:
        # branch |       |         |
        #  merge | force | partial | action
        # -------+-------+---------+--------------
        #    x   |   x   |    x    | linear-merge
        #    o   |   x   |    x    | branch-merge
        #    x   |   o   |    x    | overwrite (as clean update)
        #    o   |   o   |    x    | force-branch-merge (*1)
        #    x   |   x   |    o    |   (*)
        #    o   |   x   |    o    |   (*)
        #    x   |   o   |    o    | overwrite (as revert)
        #    o   |   o   |    o    |   (*)
        #
        # (*) don't care
        # (*1) deprecated, but used internally (e.g: "rebase --collapse")

        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        unsure, s = lfdirstate.status(match_.always(repo.root,
                                                    repo.getcwd()),
                                      [], False, False, False)
        pctx = repo['.']
        for lfile in unsure + s.modified:
            lfileabs = repo.wvfs.join(lfile)
            if not os.path.exists(lfileabs):
                continue
            # refresh the standin from the current largefile contents
            lfhash = lfutil.hashrepofile(repo, lfile)
            standin = lfutil.standin(lfile)
            lfutil.writestandin(repo, standin, lfhash,
                                lfutil.getexecutable(lfileabs))
            if (standin in pctx and
                lfhash == lfutil.readstandin(repo, lfile, '.')):
                # unchanged relative to the parent after all
                lfdirstate.normal(lfile)
        for lfile in s.added:
            lfutil.updatestandin(repo, lfutil.standin(lfile))
        lfdirstate.write()

        oldstandins = lfutil.getstandinsstate(repo)

        result = orig(repo, node, branchmerge, force, partial, *args, **kwargs)

        # refresh largefiles whose standins were changed by the update
        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        if branchmerge or force or partial:
            filelist.extend(s.deleted + s.removed)

        lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
                                normallookup=partial)

        return result
    finally:
        wlock.release()
1421
1421
def scmutilmarktouched(orig, repo, files, *args, **kwargs):
    """Wrap scmutil.marktouched: after marking, refresh largefiles whose
    standins are among the touched files."""
    result = orig(repo, files, *args, **kwargs)

    touched = [lfutil.splitstandin(f) for f in files if lfutil.isstandin(f)]
    if touched:
        lfcommands.updatelfiles(repo.ui, repo, filelist=touched,
                                printmessage=False, normallookup=True)

    return result
@@ -1,633 +1,633
1 # filemerge.py - file-level merge handling for Mercurial
1 # filemerge.py - file-level merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import filecmp
10 import filecmp
11 import os
11 import os
12 import re
12 import re
13 import tempfile
13 import tempfile
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import nullid, short
16 from .node import nullid, short
17
17
18 from . import (
18 from . import (
19 cmdutil,
19 cmdutil,
20 error,
20 error,
21 match,
21 match,
22 simplemerge,
22 simplemerge,
23 tagmerge,
23 tagmerge,
24 templatekw,
24 templatekw,
25 templater,
25 templater,
26 util,
26 util,
27 )
27 )
28
28
29 def _toolstr(ui, tool, part, default=""):
29 def _toolstr(ui, tool, part, default=""):
30 return ui.config("merge-tools", tool + "." + part, default)
30 return ui.config("merge-tools", tool + "." + part, default)
31
31
32 def _toolbool(ui, tool, part, default=False):
32 def _toolbool(ui, tool, part, default=False):
33 return ui.configbool("merge-tools", tool + "." + part, default)
33 return ui.configbool("merge-tools", tool + "." + part, default)
34
34
35 def _toollist(ui, tool, part, default=[]):
35 def _toollist(ui, tool, part, default=[]):
36 return ui.configlist("merge-tools", tool + "." + part, default)
36 return ui.configlist("merge-tools", tool + "." + part, default)
37
37
38 internals = {}
38 internals = {}
39 # Merge tools to document.
39 # Merge tools to document.
40 internalsdoc = {}
40 internalsdoc = {}
41
41
42 # internal tool merge types
42 # internal tool merge types
43 nomerge = None
43 nomerge = None
44 mergeonly = 'mergeonly' # just the full merge, no premerge
44 mergeonly = 'mergeonly' # just the full merge, no premerge
45 fullmerge = 'fullmerge' # both premerge and merge
45 fullmerge = 'fullmerge' # both premerge and merge
46
46
47 class absentfilectx(object):
47 class absentfilectx(object):
48 """Represents a file that's ostensibly in a context but is actually not
48 """Represents a file that's ostensibly in a context but is actually not
49 present in it.
49 present in it.
50
50
51 This is here because it's very specific to the filemerge code for now --
51 This is here because it's very specific to the filemerge code for now --
52 other code is likely going to break with the values this returns."""
52 other code is likely going to break with the values this returns."""
53 def __init__(self, ctx, f):
53 def __init__(self, ctx, f):
54 self._ctx = ctx
54 self._ctx = ctx
55 self._f = f
55 self._f = f
56
56
57 def path(self):
57 def path(self):
58 return self._f
58 return self._f
59
59
60 def size(self):
60 def size(self):
61 return None
61 return None
62
62
63 def data(self):
63 def data(self):
64 return None
64 return None
65
65
66 def filenode(self):
66 def filenode(self):
67 return nullid
67 return nullid
68
68
69 _customcmp = True
69 _customcmp = True
70 def cmp(self, fctx):
70 def cmp(self, fctx):
71 """compare with other file context
71 """compare with other file context
72
72
73 returns True if different from fctx.
73 returns True if different from fctx.
74 """
74 """
75 return not (fctx.isabsent() and
75 return not (fctx.isabsent() and
76 fctx.ctx() == self.ctx() and
76 fctx.ctx() == self.ctx() and
77 fctx.path() == self.path())
77 fctx.path() == self.path())
78
78
79 def flags(self):
79 def flags(self):
80 return ''
80 return ''
81
81
82 def changectx(self):
82 def changectx(self):
83 return self._ctx
83 return self._ctx
84
84
85 def isbinary(self):
85 def isbinary(self):
86 return False
86 return False
87
87
88 def isabsent(self):
88 def isabsent(self):
89 return True
89 return True
90
90
91 def internaltool(name, mergetype, onfailure=None, precheck=None):
91 def internaltool(name, mergetype, onfailure=None, precheck=None):
92 '''return a decorator for populating internal merge tool table'''
92 '''return a decorator for populating internal merge tool table'''
93 def decorator(func):
93 def decorator(func):
94 fullname = ':' + name
94 fullname = ':' + name
95 func.__doc__ = "``%s``\n" % fullname + func.__doc__.strip()
95 func.__doc__ = "``%s``\n" % fullname + func.__doc__.strip()
96 internals[fullname] = func
96 internals[fullname] = func
97 internals['internal:' + name] = func
97 internals['internal:' + name] = func
98 internalsdoc[fullname] = func
98 internalsdoc[fullname] = func
99 func.mergetype = mergetype
99 func.mergetype = mergetype
100 func.onfailure = onfailure
100 func.onfailure = onfailure
101 func.precheck = precheck
101 func.precheck = precheck
102 return func
102 return func
103 return decorator
103 return decorator
104
104
105 def _findtool(ui, tool):
105 def _findtool(ui, tool):
106 if tool in internals:
106 if tool in internals:
107 return tool
107 return tool
108 return findexternaltool(ui, tool)
108 return findexternaltool(ui, tool)
109
109
110 def findexternaltool(ui, tool):
110 def findexternaltool(ui, tool):
111 for kn in ("regkey", "regkeyalt"):
111 for kn in ("regkey", "regkeyalt"):
112 k = _toolstr(ui, tool, kn)
112 k = _toolstr(ui, tool, kn)
113 if not k:
113 if not k:
114 continue
114 continue
115 p = util.lookupreg(k, _toolstr(ui, tool, "regname"))
115 p = util.lookupreg(k, _toolstr(ui, tool, "regname"))
116 if p:
116 if p:
117 p = util.findexe(p + _toolstr(ui, tool, "regappend"))
117 p = util.findexe(p + _toolstr(ui, tool, "regappend"))
118 if p:
118 if p:
119 return p
119 return p
120 exe = _toolstr(ui, tool, "executable", tool)
120 exe = _toolstr(ui, tool, "executable", tool)
121 return util.findexe(util.expandpath(exe))
121 return util.findexe(util.expandpath(exe))
122
122
123 def _picktool(repo, ui, path, binary, symlink):
123 def _picktool(repo, ui, path, binary, symlink):
124 def check(tool, pat, symlink, binary):
124 def check(tool, pat, symlink, binary):
125 tmsg = tool
125 tmsg = tool
126 if pat:
126 if pat:
127 tmsg += " specified for " + pat
127 tmsg += " specified for " + pat
128 if not _findtool(ui, tool):
128 if not _findtool(ui, tool):
129 if pat: # explicitly requested tool deserves a warning
129 if pat: # explicitly requested tool deserves a warning
130 ui.warn(_("couldn't find merge tool %s\n") % tmsg)
130 ui.warn(_("couldn't find merge tool %s\n") % tmsg)
131 else: # configured but non-existing tools are more silent
131 else: # configured but non-existing tools are more silent
132 ui.note(_("couldn't find merge tool %s\n") % tmsg)
132 ui.note(_("couldn't find merge tool %s\n") % tmsg)
133 elif symlink and not _toolbool(ui, tool, "symlink"):
133 elif symlink and not _toolbool(ui, tool, "symlink"):
134 ui.warn(_("tool %s can't handle symlinks\n") % tmsg)
134 ui.warn(_("tool %s can't handle symlinks\n") % tmsg)
135 elif binary and not _toolbool(ui, tool, "binary"):
135 elif binary and not _toolbool(ui, tool, "binary"):
136 ui.warn(_("tool %s can't handle binary\n") % tmsg)
136 ui.warn(_("tool %s can't handle binary\n") % tmsg)
137 elif not util.gui() and _toolbool(ui, tool, "gui"):
137 elif not util.gui() and _toolbool(ui, tool, "gui"):
138 ui.warn(_("tool %s requires a GUI\n") % tmsg)
138 ui.warn(_("tool %s requires a GUI\n") % tmsg)
139 else:
139 else:
140 return True
140 return True
141 return False
141 return False
142
142
143 # internal config: ui.forcemerge
143 # internal config: ui.forcemerge
144 # forcemerge comes from command line arguments, highest priority
144 # forcemerge comes from command line arguments, highest priority
145 force = ui.config('ui', 'forcemerge')
145 force = ui.config('ui', 'forcemerge')
146 if force:
146 if force:
147 toolpath = _findtool(ui, force)
147 toolpath = _findtool(ui, force)
148 if toolpath:
148 if toolpath:
149 return (force, util.shellquote(toolpath))
149 return (force, util.shellquote(toolpath))
150 else:
150 else:
151 # mimic HGMERGE if given tool not found
151 # mimic HGMERGE if given tool not found
152 return (force, force)
152 return (force, force)
153
153
154 # HGMERGE takes next precedence
154 # HGMERGE takes next precedence
155 hgmerge = os.environ.get("HGMERGE")
155 hgmerge = os.environ.get("HGMERGE")
156 if hgmerge:
156 if hgmerge:
157 return (hgmerge, hgmerge)
157 return (hgmerge, hgmerge)
158
158
159 # then patterns
159 # then patterns
160 for pat, tool in ui.configitems("merge-patterns"):
160 for pat, tool in ui.configitems("merge-patterns"):
161 mf = match.match(repo.root, '', [pat])
161 mf = match.match(repo.root, '', [pat])
162 if mf(path) and check(tool, pat, symlink, False):
162 if mf(path) and check(tool, pat, symlink, False):
163 toolpath = _findtool(ui, tool)
163 toolpath = _findtool(ui, tool)
164 return (tool, util.shellquote(toolpath))
164 return (tool, util.shellquote(toolpath))
165
165
166 # then merge tools
166 # then merge tools
167 tools = {}
167 tools = {}
168 disabled = set()
168 disabled = set()
169 for k, v in ui.configitems("merge-tools"):
169 for k, v in ui.configitems("merge-tools"):
170 t = k.split('.')[0]
170 t = k.split('.')[0]
171 if t not in tools:
171 if t not in tools:
172 tools[t] = int(_toolstr(ui, t, "priority", "0"))
172 tools[t] = int(_toolstr(ui, t, "priority", "0"))
173 if _toolbool(ui, t, "disabled", False):
173 if _toolbool(ui, t, "disabled", False):
174 disabled.add(t)
174 disabled.add(t)
175 names = tools.keys()
175 names = tools.keys()
176 tools = sorted([(-p, t) for t, p in tools.items() if t not in disabled])
176 tools = sorted([(-p, t) for t, p in tools.items() if t not in disabled])
177 uimerge = ui.config("ui", "merge")
177 uimerge = ui.config("ui", "merge")
178 if uimerge:
178 if uimerge:
179 if uimerge not in names:
179 if uimerge not in names:
180 return (uimerge, uimerge)
180 return (uimerge, uimerge)
181 tools.insert(0, (None, uimerge)) # highest priority
181 tools.insert(0, (None, uimerge)) # highest priority
182 tools.append((None, "hgmerge")) # the old default, if found
182 tools.append((None, "hgmerge")) # the old default, if found
183 for p, t in tools:
183 for p, t in tools:
184 if check(t, None, symlink, binary):
184 if check(t, None, symlink, binary):
185 toolpath = _findtool(ui, t)
185 toolpath = _findtool(ui, t)
186 return (t, util.shellquote(toolpath))
186 return (t, util.shellquote(toolpath))
187
187
188 # internal merge or prompt as last resort
188 # internal merge or prompt as last resort
189 if symlink or binary:
189 if symlink or binary:
190 return ":prompt", None
190 return ":prompt", None
191 return ":merge", None
191 return ":merge", None
192
192
193 def _eoltype(data):
193 def _eoltype(data):
194 "Guess the EOL type of a file"
194 "Guess the EOL type of a file"
195 if '\0' in data: # binary
195 if '\0' in data: # binary
196 return None
196 return None
197 if '\r\n' in data: # Windows
197 if '\r\n' in data: # Windows
198 return '\r\n'
198 return '\r\n'
199 if '\r' in data: # Old Mac
199 if '\r' in data: # Old Mac
200 return '\r'
200 return '\r'
201 if '\n' in data: # UNIX
201 if '\n' in data: # UNIX
202 return '\n'
202 return '\n'
203 return None # unknown
203 return None # unknown
204
204
205 def _matcheol(file, origfile):
205 def _matcheol(file, origfile):
206 "Convert EOL markers in a file to match origfile"
206 "Convert EOL markers in a file to match origfile"
207 tostyle = _eoltype(util.readfile(origfile))
207 tostyle = _eoltype(util.readfile(origfile))
208 if tostyle:
208 if tostyle:
209 data = util.readfile(file)
209 data = util.readfile(file)
210 style = _eoltype(data)
210 style = _eoltype(data)
211 if style:
211 if style:
212 newdata = data.replace(style, tostyle)
212 newdata = data.replace(style, tostyle)
213 if newdata != data:
213 if newdata != data:
214 util.writefile(file, newdata)
214 util.writefile(file, newdata)
215
215
216 @internaltool('prompt', nomerge)
216 @internaltool('prompt', nomerge)
217 def _iprompt(repo, mynode, orig, fcd, fco, fca, toolconf):
217 def _iprompt(repo, mynode, orig, fcd, fco, fca, toolconf):
218 """Asks the user which of the local or the other version to keep as
218 """Asks the user which of the local or the other version to keep as
219 the merged version."""
219 the merged version."""
220 ui = repo.ui
220 ui = repo.ui
221 fd = fcd.path()
221 fd = fcd.path()
222
222
223 try:
223 try:
224 index = ui.promptchoice(_("no tool found to merge %s\n"
224 index = ui.promptchoice(_("no tool found to merge %s\n"
225 "keep (l)ocal or take (o)ther?"
225 "keep (l)ocal or take (o)ther?"
226 "$$ &Local $$ &Other") % fd, 0)
226 "$$ &Local $$ &Other") % fd, 0)
227 choice = ['local', 'other'][index]
227 choice = ['local', 'other'][index]
228
228
229 if choice == 'other':
229 if choice == 'other':
230 return _iother(repo, mynode, orig, fcd, fco, fca, toolconf)
230 return _iother(repo, mynode, orig, fcd, fco, fca, toolconf)
231 else:
231 else:
232 return _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf)
232 return _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf)
233 except error.ResponseExpected:
233 except error.ResponseExpected:
234 ui.write("\n")
234 ui.write("\n")
235 return 1, False
235 return 1, False
236
236
237 @internaltool('local', nomerge)
237 @internaltool('local', nomerge)
238 def _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf):
238 def _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf):
239 """Uses the local version of files as the merged version."""
239 """Uses the local version of files as the merged version."""
240 return 0, False
240 return 0, False
241
241
242 @internaltool('other', nomerge)
242 @internaltool('other', nomerge)
243 def _iother(repo, mynode, orig, fcd, fco, fca, toolconf):
243 def _iother(repo, mynode, orig, fcd, fco, fca, toolconf):
244 """Uses the other version of files as the merged version."""
244 """Uses the other version of files as the merged version."""
245 repo.wwrite(fcd.path(), fco.data(), fco.flags())
245 repo.wwrite(fcd.path(), fco.data(), fco.flags())
246 return 0, False
246 return 0, False
247
247
248 @internaltool('fail', nomerge)
248 @internaltool('fail', nomerge)
249 def _ifail(repo, mynode, orig, fcd, fco, fca, toolconf):
249 def _ifail(repo, mynode, orig, fcd, fco, fca, toolconf):
250 """
250 """
251 Rather than attempting to merge files that were modified on both
251 Rather than attempting to merge files that were modified on both
252 branches, it marks them as unresolved. The resolve command must be
252 branches, it marks them as unresolved. The resolve command must be
253 used to resolve these conflicts."""
253 used to resolve these conflicts."""
254 return 1, False
254 return 1, False
255
255
256 def _premerge(repo, toolconf, files, labels=None):
256 def _premerge(repo, toolconf, files, labels=None):
257 tool, toolpath, binary, symlink = toolconf
257 tool, toolpath, binary, symlink = toolconf
258 if symlink:
258 if symlink:
259 return 1
259 return 1
260 a, b, c, back = files
260 a, b, c, back = files
261
261
262 ui = repo.ui
262 ui = repo.ui
263
263
264 validkeep = ['keep', 'keep-merge3']
264 validkeep = ['keep', 'keep-merge3']
265
265
266 # do we attempt to simplemerge first?
266 # do we attempt to simplemerge first?
267 try:
267 try:
268 premerge = _toolbool(ui, tool, "premerge", not binary)
268 premerge = _toolbool(ui, tool, "premerge", not binary)
269 except error.ConfigError:
269 except error.ConfigError:
270 premerge = _toolstr(ui, tool, "premerge").lower()
270 premerge = _toolstr(ui, tool, "premerge").lower()
271 if premerge not in validkeep:
271 if premerge not in validkeep:
272 _valid = ', '.join(["'" + v + "'" for v in validkeep])
272 _valid = ', '.join(["'" + v + "'" for v in validkeep])
273 raise error.ConfigError(_("%s.premerge not valid "
273 raise error.ConfigError(_("%s.premerge not valid "
274 "('%s' is neither boolean nor %s)") %
274 "('%s' is neither boolean nor %s)") %
275 (tool, premerge, _valid))
275 (tool, premerge, _valid))
276
276
277 if premerge:
277 if premerge:
278 if premerge == 'keep-merge3':
278 if premerge == 'keep-merge3':
279 if not labels:
279 if not labels:
280 labels = _defaultconflictlabels
280 labels = _defaultconflictlabels
281 if len(labels) < 3:
281 if len(labels) < 3:
282 labels.append('base')
282 labels.append('base')
283 r = simplemerge.simplemerge(ui, a, b, c, quiet=True, label=labels)
283 r = simplemerge.simplemerge(ui, a, b, c, quiet=True, label=labels)
284 if not r:
284 if not r:
285 ui.debug(" premerge successful\n")
285 ui.debug(" premerge successful\n")
286 return 0
286 return 0
287 if premerge not in validkeep:
287 if premerge not in validkeep:
288 util.copyfile(back, a) # restore from backup and try again
288 util.copyfile(back, a) # restore from backup and try again
289 return 1 # continue merging
289 return 1 # continue merging
290
290
291 def _mergecheck(repo, mynode, orig, fcd, fco, fca, toolconf):
291 def _mergecheck(repo, mynode, orig, fcd, fco, fca, toolconf):
292 tool, toolpath, binary, symlink = toolconf
292 tool, toolpath, binary, symlink = toolconf
293 if symlink:
293 if symlink:
294 repo.ui.warn(_('warning: internal %s cannot merge symlinks '
294 repo.ui.warn(_('warning: internal %s cannot merge symlinks '
295 'for %s\n') % (tool, fcd.path()))
295 'for %s\n') % (tool, fcd.path()))
296 return False
296 return False
297 return True
297 return True
298
298
299 def _merge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels, mode):
299 def _merge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels, mode):
300 """
300 """
301 Uses the internal non-interactive simple merge algorithm for merging
301 Uses the internal non-interactive simple merge algorithm for merging
302 files. It will fail if there are any conflicts and leave markers in
302 files. It will fail if there are any conflicts and leave markers in
303 the partially merged file. Markers will have two sections, one for each side
303 the partially merged file. Markers will have two sections, one for each side
304 of merge, unless mode equals 'union' which suppresses the markers."""
304 of merge, unless mode equals 'union' which suppresses the markers."""
305 a, b, c, back = files
305 a, b, c, back = files
306
306
307 ui = repo.ui
307 ui = repo.ui
308
308
309 r = simplemerge.simplemerge(ui, a, b, c, label=labels, mode=mode)
309 r = simplemerge.simplemerge(ui, a, b, c, label=labels, mode=mode)
310 return True, r, False
310 return True, r, False
311
311
312 @internaltool('union', fullmerge,
312 @internaltool('union', fullmerge,
313 _("warning: conflicts while merging %s! "
313 _("warning: conflicts while merging %s! "
314 "(edit, then use 'hg resolve --mark')\n"),
314 "(edit, then use 'hg resolve --mark')\n"),
315 precheck=_mergecheck)
315 precheck=_mergecheck)
316 def _iunion(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
316 def _iunion(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
317 """
317 """
318 Uses the internal non-interactive simple merge algorithm for merging
318 Uses the internal non-interactive simple merge algorithm for merging
319 files. It will use both left and right sides for conflict regions.
319 files. It will use both left and right sides for conflict regions.
320 No markers are inserted."""
320 No markers are inserted."""
321 return _merge(repo, mynode, orig, fcd, fco, fca, toolconf,
321 return _merge(repo, mynode, orig, fcd, fco, fca, toolconf,
322 files, labels, 'union')
322 files, labels, 'union')
323
323
324 @internaltool('merge', fullmerge,
324 @internaltool('merge', fullmerge,
325 _("warning: conflicts while merging %s! "
325 _("warning: conflicts while merging %s! "
326 "(edit, then use 'hg resolve --mark')\n"),
326 "(edit, then use 'hg resolve --mark')\n"),
327 precheck=_mergecheck)
327 precheck=_mergecheck)
328 def _imerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
328 def _imerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
329 """
329 """
330 Uses the internal non-interactive simple merge algorithm for merging
330 Uses the internal non-interactive simple merge algorithm for merging
331 files. It will fail if there are any conflicts and leave markers in
331 files. It will fail if there are any conflicts and leave markers in
332 the partially merged file. Markers will have two sections, one for each side
332 the partially merged file. Markers will have two sections, one for each side
333 of merge."""
333 of merge."""
334 return _merge(repo, mynode, orig, fcd, fco, fca, toolconf,
334 return _merge(repo, mynode, orig, fcd, fco, fca, toolconf,
335 files, labels, 'merge')
335 files, labels, 'merge')
336
336
337 @internaltool('merge3', fullmerge,
337 @internaltool('merge3', fullmerge,
338 _("warning: conflicts while merging %s! "
338 _("warning: conflicts while merging %s! "
339 "(edit, then use 'hg resolve --mark')\n"),
339 "(edit, then use 'hg resolve --mark')\n"),
340 precheck=_mergecheck)
340 precheck=_mergecheck)
341 def _imerge3(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
341 def _imerge3(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
342 """
342 """
343 Uses the internal non-interactive simple merge algorithm for merging
343 Uses the internal non-interactive simple merge algorithm for merging
344 files. It will fail if there are any conflicts and leave markers in
344 files. It will fail if there are any conflicts and leave markers in
345 the partially merged file. Marker will have three sections, one from each
345 the partially merged file. Marker will have three sections, one from each
346 side of the merge and one for the base content."""
346 side of the merge and one for the base content."""
347 if not labels:
347 if not labels:
348 labels = _defaultconflictlabels
348 labels = _defaultconflictlabels
349 if len(labels) < 3:
349 if len(labels) < 3:
350 labels.append('base')
350 labels.append('base')
351 return _imerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels)
351 return _imerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels)
352
352
353 def _imergeauto(repo, mynode, orig, fcd, fco, fca, toolconf, files,
353 def _imergeauto(repo, mynode, orig, fcd, fco, fca, toolconf, files,
354 labels=None, localorother=None):
354 labels=None, localorother=None):
355 """
355 """
356 Generic driver for _imergelocal and _imergeother
356 Generic driver for _imergelocal and _imergeother
357 """
357 """
358 assert localorother is not None
358 assert localorother is not None
359 tool, toolpath, binary, symlink = toolconf
359 tool, toolpath, binary, symlink = toolconf
360 a, b, c, back = files
360 a, b, c, back = files
361 r = simplemerge.simplemerge(repo.ui, a, b, c, label=labels,
361 r = simplemerge.simplemerge(repo.ui, a, b, c, label=labels,
362 localorother=localorother)
362 localorother=localorother)
363 return True, r
363 return True, r
364
364
365 @internaltool('merge-local', mergeonly, precheck=_mergecheck)
365 @internaltool('merge-local', mergeonly, precheck=_mergecheck)
366 def _imergelocal(*args, **kwargs):
366 def _imergelocal(*args, **kwargs):
367 """
367 """
368 Like :merge, but resolve all conflicts non-interactively in favor
368 Like :merge, but resolve all conflicts non-interactively in favor
369 of the local changes."""
369 of the local changes."""
370 success, status = _imergeauto(localorother='local', *args, **kwargs)
370 success, status = _imergeauto(localorother='local', *args, **kwargs)
371 return success, status, False
371 return success, status, False
372
372
373 @internaltool('merge-other', mergeonly, precheck=_mergecheck)
373 @internaltool('merge-other', mergeonly, precheck=_mergecheck)
374 def _imergeother(*args, **kwargs):
374 def _imergeother(*args, **kwargs):
375 """
375 """
376 Like :merge, but resolve all conflicts non-interactively in favor
376 Like :merge, but resolve all conflicts non-interactively in favor
377 of the other changes."""
377 of the other changes."""
378 success, status = _imergeauto(localorother='other', *args, **kwargs)
378 success, status = _imergeauto(localorother='other', *args, **kwargs)
379 return success, status, False
379 return success, status, False
380
380
381 @internaltool('tagmerge', mergeonly,
381 @internaltool('tagmerge', mergeonly,
382 _("automatic tag merging of %s failed! "
382 _("automatic tag merging of %s failed! "
383 "(use 'hg resolve --tool :merge' or another merge "
383 "(use 'hg resolve --tool :merge' or another merge "
384 "tool of your choice)\n"))
384 "tool of your choice)\n"))
385 def _itagmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
385 def _itagmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
386 """
386 """
387 Uses the internal tag merge algorithm (experimental).
387 Uses the internal tag merge algorithm (experimental).
388 """
388 """
389 success, status = tagmerge.merge(repo, fcd, fco, fca)
389 success, status = tagmerge.merge(repo, fcd, fco, fca)
390 return success, status, False
390 return success, status, False
391
391
392 @internaltool('dump', fullmerge)
392 @internaltool('dump', fullmerge)
393 def _idump(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
393 def _idump(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
394 """
394 """
395 Creates three versions of the files to merge, containing the
395 Creates three versions of the files to merge, containing the
396 contents of local, other and base. These files can then be used to
396 contents of local, other and base. These files can then be used to
397 perform a merge manually. If the file to be merged is named
397 perform a merge manually. If the file to be merged is named
398 ``a.txt``, these files will accordingly be named ``a.txt.local``,
398 ``a.txt``, these files will accordingly be named ``a.txt.local``,
399 ``a.txt.other`` and ``a.txt.base`` and they will be placed in the
399 ``a.txt.other`` and ``a.txt.base`` and they will be placed in the
400 same directory as ``a.txt``."""
400 same directory as ``a.txt``."""
401 a, b, c, back = files
401 a, b, c, back = files
402
402
403 fd = fcd.path()
403 fd = fcd.path()
404
404
405 util.copyfile(a, a + ".local")
405 util.copyfile(a, a + ".local")
406 repo.wwrite(fd + ".other", fco.data(), fco.flags())
406 repo.wwrite(fd + ".other", fco.data(), fco.flags())
407 repo.wwrite(fd + ".base", fca.data(), fca.flags())
407 repo.wwrite(fd + ".base", fca.data(), fca.flags())
408 return False, 1, False
408 return False, 1, False
409
409
410 def _xmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
410 def _xmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
411 tool, toolpath, binary, symlink = toolconf
411 tool, toolpath, binary, symlink = toolconf
412 a, b, c, back = files
412 a, b, c, back = files
413 out = ""
413 out = ""
414 env = {'HG_FILE': fcd.path(),
414 env = {'HG_FILE': fcd.path(),
415 'HG_MY_NODE': short(mynode),
415 'HG_MY_NODE': short(mynode),
416 'HG_OTHER_NODE': str(fco.changectx()),
416 'HG_OTHER_NODE': str(fco.changectx()),
417 'HG_BASE_NODE': str(fca.changectx()),
417 'HG_BASE_NODE': str(fca.changectx()),
418 'HG_MY_ISLINK': 'l' in fcd.flags(),
418 'HG_MY_ISLINK': 'l' in fcd.flags(),
419 'HG_OTHER_ISLINK': 'l' in fco.flags(),
419 'HG_OTHER_ISLINK': 'l' in fco.flags(),
420 'HG_BASE_ISLINK': 'l' in fca.flags(),
420 'HG_BASE_ISLINK': 'l' in fca.flags(),
421 }
421 }
422
422
423 ui = repo.ui
423 ui = repo.ui
424
424
425 args = _toolstr(ui, tool, "args", '$local $base $other')
425 args = _toolstr(ui, tool, "args", '$local $base $other')
426 if "$output" in args:
426 if "$output" in args:
427 out, a = a, back # read input from backup, write to original
427 out, a = a, back # read input from backup, write to original
428 replace = {'local': a, 'base': b, 'other': c, 'output': out}
428 replace = {'local': a, 'base': b, 'other': c, 'output': out}
429 args = util.interpolate(r'\$', replace, args,
429 args = util.interpolate(r'\$', replace, args,
430 lambda s: util.shellquote(util.localpath(s)))
430 lambda s: util.shellquote(util.localpath(s)))
431 cmd = toolpath + ' ' + args
431 cmd = toolpath + ' ' + args
432 repo.ui.debug('launching merge tool: %s\n' % cmd)
432 repo.ui.debug('launching merge tool: %s\n' % cmd)
433 r = ui.system(cmd, cwd=repo.root, environ=env)
433 r = ui.system(cmd, cwd=repo.root, environ=env)
434 repo.ui.debug('merge tool returned: %s\n' % r)
434 repo.ui.debug('merge tool returned: %s\n' % r)
435 return True, r, False
435 return True, r, False
436
436
437 def _formatconflictmarker(repo, ctx, template, label, pad):
437 def _formatconflictmarker(repo, ctx, template, label, pad):
438 """Applies the given template to the ctx, prefixed by the label.
438 """Applies the given template to the ctx, prefixed by the label.
439
439
440 Pad is the minimum width of the label prefix, so that multiple markers
440 Pad is the minimum width of the label prefix, so that multiple markers
441 can have aligned templated parts.
441 can have aligned templated parts.
442 """
442 """
443 if ctx.node() is None:
443 if ctx.node() is None:
444 ctx = ctx.p1()
444 ctx = ctx.p1()
445
445
446 props = templatekw.keywords.copy()
446 props = templatekw.keywords.copy()
447 props['templ'] = template
447 props['templ'] = template
448 props['ctx'] = ctx
448 props['ctx'] = ctx
449 props['repo'] = repo
449 props['repo'] = repo
450 templateresult = template('conflictmarker', **props)
450 templateresult = template('conflictmarker', **props)
451
451
452 label = ('%s:' % label).ljust(pad + 1)
452 label = ('%s:' % label).ljust(pad + 1)
453 mark = '%s %s' % (label, templater.stringify(templateresult))
453 mark = '%s %s' % (label, templater.stringify(templateresult))
454
454
455 if mark:
455 if mark:
456 mark = mark.splitlines()[0] # split for safety
456 mark = mark.splitlines()[0] # split for safety
457
457
458 # 8 for the prefix of conflict marker lines (e.g. '<<<<<<< ')
458 # 8 for the prefix of conflict marker lines (e.g. '<<<<<<< ')
459 return util.ellipsis(mark, 80 - 8)
459 return util.ellipsis(mark, 80 - 8)
460
460
461 _defaultconflictmarker = ('{node|short} ' +
461 _defaultconflictmarker = ('{node|short} ' +
462 '{ifeq(tags, "tip", "", "{tags} ")}' +
462 '{ifeq(tags, "tip", "", "{tags} ")}' +
463 '{if(bookmarks, "{bookmarks} ")}' +
463 '{if(bookmarks, "{bookmarks} ")}' +
464 '{ifeq(branch, "default", "", "{branch} ")}' +
464 '{ifeq(branch, "default", "", "{branch} ")}' +
465 '- {author|user}: {desc|firstline}')
465 '- {author|user}: {desc|firstline}')
466
466
467 _defaultconflictlabels = ['local', 'other']
467 _defaultconflictlabels = ['local', 'other']
468
468
469 def _formatlabels(repo, fcd, fco, fca, labels):
469 def _formatlabels(repo, fcd, fco, fca, labels):
470 """Formats the given labels using the conflict marker template.
470 """Formats the given labels using the conflict marker template.
471
471
472 Returns a list of formatted labels.
472 Returns a list of formatted labels.
473 """
473 """
474 cd = fcd.changectx()
474 cd = fcd.changectx()
475 co = fco.changectx()
475 co = fco.changectx()
476 ca = fca.changectx()
476 ca = fca.changectx()
477
477
478 ui = repo.ui
478 ui = repo.ui
479 template = ui.config('ui', 'mergemarkertemplate', _defaultconflictmarker)
479 template = ui.config('ui', 'mergemarkertemplate', _defaultconflictmarker)
480 tmpl = templater.templater(None, cache={'conflictmarker': template})
480 tmpl = templater.templater(None, cache={'conflictmarker': template})
481
481
482 pad = max(len(l) for l in labels)
482 pad = max(len(l) for l in labels)
483
483
484 newlabels = [_formatconflictmarker(repo, cd, tmpl, labels[0], pad),
484 newlabels = [_formatconflictmarker(repo, cd, tmpl, labels[0], pad),
485 _formatconflictmarker(repo, co, tmpl, labels[1], pad)]
485 _formatconflictmarker(repo, co, tmpl, labels[1], pad)]
486 if len(labels) > 2:
486 if len(labels) > 2:
487 newlabels.append(_formatconflictmarker(repo, ca, tmpl, labels[2], pad))
487 newlabels.append(_formatconflictmarker(repo, ca, tmpl, labels[2], pad))
488 return newlabels
488 return newlabels
489
489
490 def _filemerge(premerge, repo, mynode, orig, fcd, fco, fca, labels=None):
490 def _filemerge(premerge, repo, mynode, orig, fcd, fco, fca, labels=None):
491 """perform a 3-way merge in the working directory
491 """perform a 3-way merge in the working directory
492
492
493 premerge = whether this is a premerge
493 premerge = whether this is a premerge
494 mynode = parent node before merge
494 mynode = parent node before merge
495 orig = original local filename before merge
495 orig = original local filename before merge
496 fco = other file context
496 fco = other file context
497 fca = ancestor file context
497 fca = ancestor file context
498 fcd = local file context for current/destination file
498 fcd = local file context for current/destination file
499
499
500 Returns whether the merge is complete, and the return value of the merge.
500 Returns whether the merge is complete, the return value of the merge, and
501 """
501 a boolean indicating whether the file was deleted from disk."""
502
502
503 def temp(prefix, ctx):
503 def temp(prefix, ctx):
504 pre = "%s~%s." % (os.path.basename(ctx.path()), prefix)
504 pre = "%s~%s." % (os.path.basename(ctx.path()), prefix)
505 (fd, name) = tempfile.mkstemp(prefix=pre)
505 (fd, name) = tempfile.mkstemp(prefix=pre)
506 data = repo.wwritedata(ctx.path(), ctx.data())
506 data = repo.wwritedata(ctx.path(), ctx.data())
507 f = os.fdopen(fd, "wb")
507 f = os.fdopen(fd, "wb")
508 f.write(data)
508 f.write(data)
509 f.close()
509 f.close()
510 return name
510 return name
511
511
512 if not fco.cmp(fcd): # files identical?
512 if not fco.cmp(fcd): # files identical?
513 return True, None
513 return True, None, False
514
514
515 ui = repo.ui
515 ui = repo.ui
516 fd = fcd.path()
516 fd = fcd.path()
517 binary = fcd.isbinary() or fco.isbinary() or fca.isbinary()
517 binary = fcd.isbinary() or fco.isbinary() or fca.isbinary()
518 symlink = 'l' in fcd.flags() + fco.flags()
518 symlink = 'l' in fcd.flags() + fco.flags()
519 tool, toolpath = _picktool(repo, ui, fd, binary, symlink)
519 tool, toolpath = _picktool(repo, ui, fd, binary, symlink)
520 if tool in internals and tool.startswith('internal:'):
520 if tool in internals and tool.startswith('internal:'):
521 # normalize to new-style names (':merge' etc)
521 # normalize to new-style names (':merge' etc)
522 tool = tool[len('internal'):]
522 tool = tool[len('internal'):]
523 ui.debug("picked tool '%s' for %s (binary %s symlink %s)\n" %
523 ui.debug("picked tool '%s' for %s (binary %s symlink %s)\n" %
524 (tool, fd, binary, symlink))
524 (tool, fd, binary, symlink))
525
525
526 if tool in internals:
526 if tool in internals:
527 func = internals[tool]
527 func = internals[tool]
528 mergetype = func.mergetype
528 mergetype = func.mergetype
529 onfailure = func.onfailure
529 onfailure = func.onfailure
530 precheck = func.precheck
530 precheck = func.precheck
531 else:
531 else:
532 func = _xmerge
532 func = _xmerge
533 mergetype = fullmerge
533 mergetype = fullmerge
534 onfailure = _("merging %s failed!\n")
534 onfailure = _("merging %s failed!\n")
535 precheck = None
535 precheck = None
536
536
537 toolconf = tool, toolpath, binary, symlink
537 toolconf = tool, toolpath, binary, symlink
538
538
539 if mergetype == nomerge:
539 if mergetype == nomerge:
540 r, deleted = func(repo, mynode, orig, fcd, fco, fca, toolconf)
540 r, deleted = func(repo, mynode, orig, fcd, fco, fca, toolconf)
541 return True, r
541 return True, r, deleted
542
542
543 if premerge:
543 if premerge:
544 if orig != fco.path():
544 if orig != fco.path():
545 ui.status(_("merging %s and %s to %s\n") % (orig, fco.path(), fd))
545 ui.status(_("merging %s and %s to %s\n") % (orig, fco.path(), fd))
546 else:
546 else:
547 ui.status(_("merging %s\n") % fd)
547 ui.status(_("merging %s\n") % fd)
548
548
549 ui.debug("my %s other %s ancestor %s\n" % (fcd, fco, fca))
549 ui.debug("my %s other %s ancestor %s\n" % (fcd, fco, fca))
550
550
551 if precheck and not precheck(repo, mynode, orig, fcd, fco, fca,
551 if precheck and not precheck(repo, mynode, orig, fcd, fco, fca,
552 toolconf):
552 toolconf):
553 if onfailure:
553 if onfailure:
554 ui.warn(onfailure % fd)
554 ui.warn(onfailure % fd)
555 return True, 1
555 return True, 1, False
556
556
557 a = repo.wjoin(fd)
557 a = repo.wjoin(fd)
558 b = temp("base", fca)
558 b = temp("base", fca)
559 c = temp("other", fco)
559 c = temp("other", fco)
560 back = cmdutil.origpath(ui, repo, a)
560 back = cmdutil.origpath(ui, repo, a)
561 if premerge:
561 if premerge:
562 util.copyfile(a, back)
562 util.copyfile(a, back)
563 files = (a, b, c, back)
563 files = (a, b, c, back)
564
564
565 r = 1
565 r = 1
566 try:
566 try:
567 markerstyle = ui.config('ui', 'mergemarkers', 'basic')
567 markerstyle = ui.config('ui', 'mergemarkers', 'basic')
568 if not labels:
568 if not labels:
569 labels = _defaultconflictlabels
569 labels = _defaultconflictlabels
570 if markerstyle != 'basic':
570 if markerstyle != 'basic':
571 labels = _formatlabels(repo, fcd, fco, fca, labels)
571 labels = _formatlabels(repo, fcd, fco, fca, labels)
572
572
573 if premerge and mergetype == fullmerge:
573 if premerge and mergetype == fullmerge:
574 r = _premerge(repo, toolconf, files, labels=labels)
574 r = _premerge(repo, toolconf, files, labels=labels)
575 # complete if premerge successful (r is 0)
575 # complete if premerge successful (r is 0)
576 return not r, r
576 return not r, r, False
577
577
578 needcheck, r, deleted = func(repo, mynode, orig, fcd, fco, fca,
578 needcheck, r, deleted = func(repo, mynode, orig, fcd, fco, fca,
579 toolconf, files, labels=labels)
579 toolconf, files, labels=labels)
580
580
581 if needcheck:
581 if needcheck:
582 r = _check(r, ui, tool, fcd, files)
582 r = _check(r, ui, tool, fcd, files)
583
583
584 if r:
584 if r:
585 if onfailure:
585 if onfailure:
586 ui.warn(onfailure % fd)
586 ui.warn(onfailure % fd)
587
587
588 return True, r
588 return True, r, deleted
589 finally:
589 finally:
590 if not r:
590 if not r:
591 util.unlink(back)
591 util.unlink(back)
592 util.unlink(b)
592 util.unlink(b)
593 util.unlink(c)
593 util.unlink(c)
594
594
595 def _check(r, ui, tool, fcd, files):
595 def _check(r, ui, tool, fcd, files):
596 fd = fcd.path()
596 fd = fcd.path()
597 a, b, c, back = files
597 a, b, c, back = files
598
598
599 if not r and (_toolbool(ui, tool, "checkconflicts") or
599 if not r and (_toolbool(ui, tool, "checkconflicts") or
600 'conflicts' in _toollist(ui, tool, "check")):
600 'conflicts' in _toollist(ui, tool, "check")):
601 if re.search("^(<<<<<<< .*|=======|>>>>>>> .*)$", fcd.data(),
601 if re.search("^(<<<<<<< .*|=======|>>>>>>> .*)$", fcd.data(),
602 re.MULTILINE):
602 re.MULTILINE):
603 r = 1
603 r = 1
604
604
605 checked = False
605 checked = False
606 if 'prompt' in _toollist(ui, tool, "check"):
606 if 'prompt' in _toollist(ui, tool, "check"):
607 checked = True
607 checked = True
608 if ui.promptchoice(_("was merge of '%s' successful (yn)?"
608 if ui.promptchoice(_("was merge of '%s' successful (yn)?"
609 "$$ &Yes $$ &No") % fd, 1):
609 "$$ &Yes $$ &No") % fd, 1):
610 r = 1
610 r = 1
611
611
612 if not r and not checked and (_toolbool(ui, tool, "checkchanged") or
612 if not r and not checked and (_toolbool(ui, tool, "checkchanged") or
613 'changed' in
613 'changed' in
614 _toollist(ui, tool, "check")):
614 _toollist(ui, tool, "check")):
615 if filecmp.cmp(a, back):
615 if filecmp.cmp(a, back):
616 if ui.promptchoice(_(" output file %s appears unchanged\n"
616 if ui.promptchoice(_(" output file %s appears unchanged\n"
617 "was merge successful (yn)?"
617 "was merge successful (yn)?"
618 "$$ &Yes $$ &No") % fd, 1):
618 "$$ &Yes $$ &No") % fd, 1):
619 r = 1
619 r = 1
620
620
621 if _toolbool(ui, tool, "fixeol"):
621 if _toolbool(ui, tool, "fixeol"):
622 _matcheol(a, back)
622 _matcheol(a, back)
623
623
624 return r
624 return r
625
625
626 def premerge(repo, mynode, orig, fcd, fco, fca, labels=None):
626 def premerge(repo, mynode, orig, fcd, fco, fca, labels=None):
627 return _filemerge(True, repo, mynode, orig, fcd, fco, fca, labels=labels)
627 return _filemerge(True, repo, mynode, orig, fcd, fco, fca, labels=labels)
628
628
629 def filemerge(repo, mynode, orig, fcd, fco, fca, labels=None):
629 def filemerge(repo, mynode, orig, fcd, fco, fca, labels=None):
630 return _filemerge(False, repo, mynode, orig, fcd, fco, fca, labels=labels)
630 return _filemerge(False, repo, mynode, orig, fcd, fco, fca, labels=labels)
631
631
632 # tell hggettext to extract docstrings from these functions:
632 # tell hggettext to extract docstrings from these functions:
633 i18nfunctions = internals.values()
633 i18nfunctions = internals.values()
@@ -1,1400 +1,1402
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import shutil
12 import shutil
13 import struct
13 import struct
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 bin,
17 bin,
18 hex,
18 hex,
19 nullhex,
19 nullhex,
20 nullid,
20 nullid,
21 nullrev,
21 nullrev,
22 )
22 )
23 from . import (
23 from . import (
24 copies,
24 copies,
25 destutil,
25 destutil,
26 error,
26 error,
27 filemerge,
27 filemerge,
28 obsolete,
28 obsolete,
29 subrepo,
29 subrepo,
30 util,
30 util,
31 worker,
31 worker,
32 )
32 )
33
33
34 _pack = struct.pack
34 _pack = struct.pack
35 _unpack = struct.unpack
35 _unpack = struct.unpack
36
36
37 def _droponode(data):
37 def _droponode(data):
38 # used for compatibility for v1
38 # used for compatibility for v1
39 bits = data.split('\0')
39 bits = data.split('\0')
40 bits = bits[:-2] + bits[-1:]
40 bits = bits[:-2] + bits[-1:]
41 return '\0'.join(bits)
41 return '\0'.join(bits)
42
42
43 class mergestate(object):
43 class mergestate(object):
44 '''track 3-way merge state of individual files
44 '''track 3-way merge state of individual files
45
45
46 The merge state is stored on disk when needed. Two files are used: one with
46 The merge state is stored on disk when needed. Two files are used: one with
47 an old format (version 1), and one with a new format (version 2). Version 2
47 an old format (version 1), and one with a new format (version 2). Version 2
48 stores a superset of the data in version 1, including new kinds of records
48 stores a superset of the data in version 1, including new kinds of records
49 in the future. For more about the new format, see the documentation for
49 in the future. For more about the new format, see the documentation for
50 `_readrecordsv2`.
50 `_readrecordsv2`.
51
51
52 Each record can contain arbitrary content, and has an associated type. This
52 Each record can contain arbitrary content, and has an associated type. This
53 `type` should be a letter. If `type` is uppercase, the record is mandatory:
53 `type` should be a letter. If `type` is uppercase, the record is mandatory:
54 versions of Mercurial that don't support it should abort. If `type` is
54 versions of Mercurial that don't support it should abort. If `type` is
55 lowercase, the record can be safely ignored.
55 lowercase, the record can be safely ignored.
56
56
57 Currently known records:
57 Currently known records:
58
58
59 L: the node of the "local" part of the merge (hexified version)
59 L: the node of the "local" part of the merge (hexified version)
60 O: the node of the "other" part of the merge (hexified version)
60 O: the node of the "other" part of the merge (hexified version)
61 F: a file to be merged entry
61 F: a file to be merged entry
62 C: a change/delete or delete/change conflict
62 C: a change/delete or delete/change conflict
63 D: a file that the external merge driver will merge internally
63 D: a file that the external merge driver will merge internally
64 (experimental)
64 (experimental)
65 m: the external merge driver defined for this merge plus its run state
65 m: the external merge driver defined for this merge plus its run state
66 (experimental)
66 (experimental)
67 X: unsupported mandatory record type (used in tests)
67 X: unsupported mandatory record type (used in tests)
68 x: unsupported advisory record type (used in tests)
68 x: unsupported advisory record type (used in tests)
69
69
70 Merge driver run states (experimental):
70 Merge driver run states (experimental):
71 u: driver-resolved files unmarked -- needs to be run next time we're about
71 u: driver-resolved files unmarked -- needs to be run next time we're about
72 to resolve or commit
72 to resolve or commit
73 m: driver-resolved files marked -- only needs to be run before commit
73 m: driver-resolved files marked -- only needs to be run before commit
74 s: success/skipped -- does not need to be run any more
74 s: success/skipped -- does not need to be run any more
75
75
76 '''
76 '''
77 statepathv1 = 'merge/state'
77 statepathv1 = 'merge/state'
78 statepathv2 = 'merge/state2'
78 statepathv2 = 'merge/state2'
79
79
80 @staticmethod
80 @staticmethod
81 def clean(repo, node=None, other=None):
81 def clean(repo, node=None, other=None):
82 """Initialize a brand new merge state, removing any existing state on
82 """Initialize a brand new merge state, removing any existing state on
83 disk."""
83 disk."""
84 ms = mergestate(repo)
84 ms = mergestate(repo)
85 ms.reset(node, other)
85 ms.reset(node, other)
86 return ms
86 return ms
87
87
88 @staticmethod
88 @staticmethod
89 def read(repo):
89 def read(repo):
90 """Initialize the merge state, reading it from disk."""
90 """Initialize the merge state, reading it from disk."""
91 ms = mergestate(repo)
91 ms = mergestate(repo)
92 ms._read()
92 ms._read()
93 return ms
93 return ms
94
94
95 def __init__(self, repo):
95 def __init__(self, repo):
96 """Initialize the merge state.
96 """Initialize the merge state.
97
97
98 Do not use this directly! Instead call read() or clean()."""
98 Do not use this directly! Instead call read() or clean()."""
99 self._repo = repo
99 self._repo = repo
100 self._dirty = False
100 self._dirty = False
101
101
102 def reset(self, node=None, other=None):
102 def reset(self, node=None, other=None):
103 self._state = {}
103 self._state = {}
104 self._local = None
104 self._local = None
105 self._other = None
105 self._other = None
106 if 'otherctx' in vars(self):
106 if 'otherctx' in vars(self):
107 del self.otherctx
107 del self.otherctx
108 if node:
108 if node:
109 self._local = node
109 self._local = node
110 self._other = other
110 self._other = other
111 self._readmergedriver = None
111 self._readmergedriver = None
112 if self.mergedriver:
112 if self.mergedriver:
113 self._mdstate = 's'
113 self._mdstate = 's'
114 else:
114 else:
115 self._mdstate = 'u'
115 self._mdstate = 'u'
116 shutil.rmtree(self._repo.join('merge'), True)
116 shutil.rmtree(self._repo.join('merge'), True)
117 self._dirty = False
117 self._dirty = False
118
118
119 def _read(self):
119 def _read(self):
120 """Analyse each record content to restore a serialized state from disk
120 """Analyse each record content to restore a serialized state from disk
121
121
122 This function process "record" entry produced by the de-serialization
122 This function process "record" entry produced by the de-serialization
123 of on disk file.
123 of on disk file.
124 """
124 """
125 self._state = {}
125 self._state = {}
126 self._local = None
126 self._local = None
127 self._other = None
127 self._other = None
128 if 'otherctx' in vars(self):
128 if 'otherctx' in vars(self):
129 del self.otherctx
129 del self.otherctx
130 self._readmergedriver = None
130 self._readmergedriver = None
131 self._mdstate = 's'
131 self._mdstate = 's'
132 unsupported = set()
132 unsupported = set()
133 records = self._readrecords()
133 records = self._readrecords()
134 for rtype, record in records:
134 for rtype, record in records:
135 if rtype == 'L':
135 if rtype == 'L':
136 self._local = bin(record)
136 self._local = bin(record)
137 elif rtype == 'O':
137 elif rtype == 'O':
138 self._other = bin(record)
138 self._other = bin(record)
139 elif rtype == 'm':
139 elif rtype == 'm':
140 bits = record.split('\0', 1)
140 bits = record.split('\0', 1)
141 mdstate = bits[1]
141 mdstate = bits[1]
142 if len(mdstate) != 1 or mdstate not in 'ums':
142 if len(mdstate) != 1 or mdstate not in 'ums':
143 # the merge driver should be idempotent, so just rerun it
143 # the merge driver should be idempotent, so just rerun it
144 mdstate = 'u'
144 mdstate = 'u'
145
145
146 self._readmergedriver = bits[0]
146 self._readmergedriver = bits[0]
147 self._mdstate = mdstate
147 self._mdstate = mdstate
148 elif rtype in 'FDC':
148 elif rtype in 'FDC':
149 bits = record.split('\0')
149 bits = record.split('\0')
150 self._state[bits[0]] = bits[1:]
150 self._state[bits[0]] = bits[1:]
151 elif not rtype.islower():
151 elif not rtype.islower():
152 unsupported.add(rtype)
152 unsupported.add(rtype)
153 self._dirty = False
153 self._dirty = False
154
154
155 if unsupported:
155 if unsupported:
156 raise error.UnsupportedMergeRecords(unsupported)
156 raise error.UnsupportedMergeRecords(unsupported)
157
157
158 def _readrecords(self):
158 def _readrecords(self):
159 """Read merge state from disk and return a list of record (TYPE, data)
159 """Read merge state from disk and return a list of record (TYPE, data)
160
160
161 We read data from both v1 and v2 files and decide which one to use.
161 We read data from both v1 and v2 files and decide which one to use.
162
162
163 V1 has been used by version prior to 2.9.1 and contains less data than
163 V1 has been used by version prior to 2.9.1 and contains less data than
164 v2. We read both versions and check if no data in v2 contradicts
164 v2. We read both versions and check if no data in v2 contradicts
165 v1. If there is not contradiction we can safely assume that both v1
165 v1. If there is not contradiction we can safely assume that both v1
166 and v2 were written at the same time and use the extract data in v2. If
166 and v2 were written at the same time and use the extract data in v2. If
167 there is contradiction we ignore v2 content as we assume an old version
167 there is contradiction we ignore v2 content as we assume an old version
168 of Mercurial has overwritten the mergestate file and left an old v2
168 of Mercurial has overwritten the mergestate file and left an old v2
169 file around.
169 file around.
170
170
171 returns list of record [(TYPE, data), ...]"""
171 returns list of record [(TYPE, data), ...]"""
172 v1records = self._readrecordsv1()
172 v1records = self._readrecordsv1()
173 v2records = self._readrecordsv2()
173 v2records = self._readrecordsv2()
174 if self._v1v2match(v1records, v2records):
174 if self._v1v2match(v1records, v2records):
175 return v2records
175 return v2records
176 else:
176 else:
177 # v1 file is newer than v2 file, use it
177 # v1 file is newer than v2 file, use it
178 # we have to infer the "other" changeset of the merge
178 # we have to infer the "other" changeset of the merge
179 # we cannot do better than that with v1 of the format
179 # we cannot do better than that with v1 of the format
180 mctx = self._repo[None].parents()[-1]
180 mctx = self._repo[None].parents()[-1]
181 v1records.append(('O', mctx.hex()))
181 v1records.append(('O', mctx.hex()))
182 # add place holder "other" file node information
182 # add place holder "other" file node information
183 # nobody is using it yet so we do no need to fetch the data
183 # nobody is using it yet so we do no need to fetch the data
184 # if mctx was wrong `mctx[bits[-2]]` may fails.
184 # if mctx was wrong `mctx[bits[-2]]` may fails.
185 for idx, r in enumerate(v1records):
185 for idx, r in enumerate(v1records):
186 if r[0] == 'F':
186 if r[0] == 'F':
187 bits = r[1].split('\0')
187 bits = r[1].split('\0')
188 bits.insert(-2, '')
188 bits.insert(-2, '')
189 v1records[idx] = (r[0], '\0'.join(bits))
189 v1records[idx] = (r[0], '\0'.join(bits))
190 return v1records
190 return v1records
191
191
192 def _v1v2match(self, v1records, v2records):
192 def _v1v2match(self, v1records, v2records):
193 oldv2 = set() # old format version of v2 record
193 oldv2 = set() # old format version of v2 record
194 for rec in v2records:
194 for rec in v2records:
195 if rec[0] == 'L':
195 if rec[0] == 'L':
196 oldv2.add(rec)
196 oldv2.add(rec)
197 elif rec[0] == 'F':
197 elif rec[0] == 'F':
198 # drop the onode data (not contained in v1)
198 # drop the onode data (not contained in v1)
199 oldv2.add(('F', _droponode(rec[1])))
199 oldv2.add(('F', _droponode(rec[1])))
200 for rec in v1records:
200 for rec in v1records:
201 if rec not in oldv2:
201 if rec not in oldv2:
202 return False
202 return False
203 else:
203 else:
204 return True
204 return True
205
205
206 def _readrecordsv1(self):
206 def _readrecordsv1(self):
207 """read on disk merge state for version 1 file
207 """read on disk merge state for version 1 file
208
208
209 returns list of record [(TYPE, data), ...]
209 returns list of record [(TYPE, data), ...]
210
210
211 Note: the "F" data from this file are one entry short
211 Note: the "F" data from this file are one entry short
212 (no "other file node" entry)
212 (no "other file node" entry)
213 """
213 """
214 records = []
214 records = []
215 try:
215 try:
216 f = self._repo.vfs(self.statepathv1)
216 f = self._repo.vfs(self.statepathv1)
217 for i, l in enumerate(f):
217 for i, l in enumerate(f):
218 if i == 0:
218 if i == 0:
219 records.append(('L', l[:-1]))
219 records.append(('L', l[:-1]))
220 else:
220 else:
221 records.append(('F', l[:-1]))
221 records.append(('F', l[:-1]))
222 f.close()
222 f.close()
223 except IOError as err:
223 except IOError as err:
224 if err.errno != errno.ENOENT:
224 if err.errno != errno.ENOENT:
225 raise
225 raise
226 return records
226 return records
227
227
228 def _readrecordsv2(self):
228 def _readrecordsv2(self):
229 """read on disk merge state for version 2 file
229 """read on disk merge state for version 2 file
230
230
231 This format is a list of arbitrary records of the form:
231 This format is a list of arbitrary records of the form:
232
232
233 [type][length][content]
233 [type][length][content]
234
234
235 `type` is a single character, `length` is a 4 byte integer, and
235 `type` is a single character, `length` is a 4 byte integer, and
236 `content` is an arbitrary byte sequence of length `length`.
236 `content` is an arbitrary byte sequence of length `length`.
237
237
238 Mercurial versions prior to 3.7 have a bug where if there are
238 Mercurial versions prior to 3.7 have a bug where if there are
239 unsupported mandatory merge records, attempting to clear out the merge
239 unsupported mandatory merge records, attempting to clear out the merge
240 state with hg update --clean or similar aborts. The 't' record type
240 state with hg update --clean or similar aborts. The 't' record type
241 works around that by writing out what those versions treat as an
241 works around that by writing out what those versions treat as an
242 advisory record, but later versions interpret as special: the first
242 advisory record, but later versions interpret as special: the first
243 character is the 'real' record type and everything onwards is the data.
243 character is the 'real' record type and everything onwards is the data.
244
244
245 Returns list of records [(TYPE, data), ...]."""
245 Returns list of records [(TYPE, data), ...]."""
246 records = []
246 records = []
247 try:
247 try:
248 f = self._repo.vfs(self.statepathv2)
248 f = self._repo.vfs(self.statepathv2)
249 data = f.read()
249 data = f.read()
250 off = 0
250 off = 0
251 end = len(data)
251 end = len(data)
252 while off < end:
252 while off < end:
253 rtype = data[off]
253 rtype = data[off]
254 off += 1
254 off += 1
255 length = _unpack('>I', data[off:(off + 4)])[0]
255 length = _unpack('>I', data[off:(off + 4)])[0]
256 off += 4
256 off += 4
257 record = data[off:(off + length)]
257 record = data[off:(off + length)]
258 off += length
258 off += length
259 if rtype == 't':
259 if rtype == 't':
260 rtype, record = record[0], record[1:]
260 rtype, record = record[0], record[1:]
261 records.append((rtype, record))
261 records.append((rtype, record))
262 f.close()
262 f.close()
263 except IOError as err:
263 except IOError as err:
264 if err.errno != errno.ENOENT:
264 if err.errno != errno.ENOENT:
265 raise
265 raise
266 return records
266 return records
267
267
268 @util.propertycache
268 @util.propertycache
269 def mergedriver(self):
269 def mergedriver(self):
270 # protect against the following:
270 # protect against the following:
271 # - A configures a malicious merge driver in their hgrc, then
271 # - A configures a malicious merge driver in their hgrc, then
272 # pauses the merge
272 # pauses the merge
273 # - A edits their hgrc to remove references to the merge driver
273 # - A edits their hgrc to remove references to the merge driver
274 # - A gives a copy of their entire repo, including .hg, to B
274 # - A gives a copy of their entire repo, including .hg, to B
275 # - B inspects .hgrc and finds it to be clean
275 # - B inspects .hgrc and finds it to be clean
276 # - B then continues the merge and the malicious merge driver
276 # - B then continues the merge and the malicious merge driver
277 # gets invoked
277 # gets invoked
278 configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
278 configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
279 if (self._readmergedriver is not None
279 if (self._readmergedriver is not None
280 and self._readmergedriver != configmergedriver):
280 and self._readmergedriver != configmergedriver):
281 raise error.ConfigError(
281 raise error.ConfigError(
282 _("merge driver changed since merge started"),
282 _("merge driver changed since merge started"),
283 hint=_("revert merge driver change or abort merge"))
283 hint=_("revert merge driver change or abort merge"))
284
284
285 return configmergedriver
285 return configmergedriver
286
286
287 @util.propertycache
287 @util.propertycache
288 def otherctx(self):
288 def otherctx(self):
289 return self._repo[self._other]
289 return self._repo[self._other]
290
290
291 def active(self):
291 def active(self):
292 """Whether mergestate is active.
292 """Whether mergestate is active.
293
293
294 Returns True if there appears to be mergestate. This is a rough proxy
294 Returns True if there appears to be mergestate. This is a rough proxy
295 for "is a merge in progress."
295 for "is a merge in progress."
296 """
296 """
297 # Check local variables before looking at filesystem for performance
297 # Check local variables before looking at filesystem for performance
298 # reasons.
298 # reasons.
299 return bool(self._local) or bool(self._state) or \
299 return bool(self._local) or bool(self._state) or \
300 self._repo.vfs.exists(self.statepathv1) or \
300 self._repo.vfs.exists(self.statepathv1) or \
301 self._repo.vfs.exists(self.statepathv2)
301 self._repo.vfs.exists(self.statepathv2)
302
302
303 def commit(self):
303 def commit(self):
304 """Write current state on disk (if necessary)"""
304 """Write current state on disk (if necessary)"""
305 if self._dirty:
305 if self._dirty:
306 records = self._makerecords()
306 records = self._makerecords()
307 self._writerecords(records)
307 self._writerecords(records)
308 self._dirty = False
308 self._dirty = False
309
309
310 def _makerecords(self):
310 def _makerecords(self):
311 records = []
311 records = []
312 records.append(('L', hex(self._local)))
312 records.append(('L', hex(self._local)))
313 records.append(('O', hex(self._other)))
313 records.append(('O', hex(self._other)))
314 if self.mergedriver:
314 if self.mergedriver:
315 records.append(('m', '\0'.join([
315 records.append(('m', '\0'.join([
316 self.mergedriver, self._mdstate])))
316 self.mergedriver, self._mdstate])))
317 for d, v in self._state.iteritems():
317 for d, v in self._state.iteritems():
318 if v[0] == 'd':
318 if v[0] == 'd':
319 records.append(('D', '\0'.join([d] + v)))
319 records.append(('D', '\0'.join([d] + v)))
320 # v[1] == local ('cd'), v[6] == other ('dc') -- not supported by
320 # v[1] == local ('cd'), v[6] == other ('dc') -- not supported by
321 # older versions of Mercurial
321 # older versions of Mercurial
322 elif v[1] == nullhex or v[6] == nullhex:
322 elif v[1] == nullhex or v[6] == nullhex:
323 records.append(('C', '\0'.join([d] + v)))
323 records.append(('C', '\0'.join([d] + v)))
324 else:
324 else:
325 records.append(('F', '\0'.join([d] + v)))
325 records.append(('F', '\0'.join([d] + v)))
326 return records
326 return records
327
327
328 def _writerecords(self, records):
328 def _writerecords(self, records):
329 """Write current state on disk (both v1 and v2)"""
329 """Write current state on disk (both v1 and v2)"""
330 self._writerecordsv1(records)
330 self._writerecordsv1(records)
331 self._writerecordsv2(records)
331 self._writerecordsv2(records)
332
332
333 def _writerecordsv1(self, records):
333 def _writerecordsv1(self, records):
334 """Write current state on disk in a version 1 file"""
334 """Write current state on disk in a version 1 file"""
335 f = self._repo.vfs(self.statepathv1, 'w')
335 f = self._repo.vfs(self.statepathv1, 'w')
336 irecords = iter(records)
336 irecords = iter(records)
337 lrecords = irecords.next()
337 lrecords = irecords.next()
338 assert lrecords[0] == 'L'
338 assert lrecords[0] == 'L'
339 f.write(hex(self._local) + '\n')
339 f.write(hex(self._local) + '\n')
340 for rtype, data in irecords:
340 for rtype, data in irecords:
341 if rtype == 'F':
341 if rtype == 'F':
342 f.write('%s\n' % _droponode(data))
342 f.write('%s\n' % _droponode(data))
343 f.close()
343 f.close()
344
344
345 def _writerecordsv2(self, records):
345 def _writerecordsv2(self, records):
346 """Write current state on disk in a version 2 file
346 """Write current state on disk in a version 2 file
347
347
348 See the docstring for _readrecordsv2 for why we use 't'."""
348 See the docstring for _readrecordsv2 for why we use 't'."""
349 # these are the records that all version 2 clients can read
349 # these are the records that all version 2 clients can read
350 whitelist = 'LOF'
350 whitelist = 'LOF'
351 f = self._repo.vfs(self.statepathv2, 'w')
351 f = self._repo.vfs(self.statepathv2, 'w')
352 for key, data in records:
352 for key, data in records:
353 assert len(key) == 1
353 assert len(key) == 1
354 if key not in whitelist:
354 if key not in whitelist:
355 key, data = 't', '%s%s' % (key, data)
355 key, data = 't', '%s%s' % (key, data)
356 format = '>sI%is' % len(data)
356 format = '>sI%is' % len(data)
357 f.write(_pack(format, key, len(data), data))
357 f.write(_pack(format, key, len(data), data))
358 f.close()
358 f.close()
359
359
360 def add(self, fcl, fco, fca, fd):
360 def add(self, fcl, fco, fca, fd):
361 """add a new (potentially?) conflicting file the merge state
361 """add a new (potentially?) conflicting file the merge state
362 fcl: file context for local,
362 fcl: file context for local,
363 fco: file context for remote,
363 fco: file context for remote,
364 fca: file context for ancestors,
364 fca: file context for ancestors,
365 fd: file path of the resulting merge.
365 fd: file path of the resulting merge.
366
366
367 note: also write the local version to the `.hg/merge` directory.
367 note: also write the local version to the `.hg/merge` directory.
368 """
368 """
369 hash = util.sha1(fcl.path()).hexdigest()
369 hash = util.sha1(fcl.path()).hexdigest()
370 self._repo.vfs.write('merge/' + hash, fcl.data())
370 self._repo.vfs.write('merge/' + hash, fcl.data())
371 self._state[fd] = ['u', hash, fcl.path(),
371 self._state[fd] = ['u', hash, fcl.path(),
372 fca.path(), hex(fca.filenode()),
372 fca.path(), hex(fca.filenode()),
373 fco.path(), hex(fco.filenode()),
373 fco.path(), hex(fco.filenode()),
374 fcl.flags()]
374 fcl.flags()]
375 self._dirty = True
375 self._dirty = True
376
376
377 def __contains__(self, dfile):
377 def __contains__(self, dfile):
378 return dfile in self._state
378 return dfile in self._state
379
379
380 def __getitem__(self, dfile):
380 def __getitem__(self, dfile):
381 return self._state[dfile][0]
381 return self._state[dfile][0]
382
382
383 def __iter__(self):
383 def __iter__(self):
384 return iter(sorted(self._state))
384 return iter(sorted(self._state))
385
385
386 def files(self):
386 def files(self):
387 return self._state.keys()
387 return self._state.keys()
388
388
389 def mark(self, dfile, state):
389 def mark(self, dfile, state):
390 self._state[dfile][0] = state
390 self._state[dfile][0] = state
391 self._dirty = True
391 self._dirty = True
392
392
393 def mdstate(self):
393 def mdstate(self):
394 return self._mdstate
394 return self._mdstate
395
395
396 def unresolved(self):
396 def unresolved(self):
397 """Obtain the paths of unresolved files."""
397 """Obtain the paths of unresolved files."""
398
398
399 for f, entry in self._state.items():
399 for f, entry in self._state.items():
400 if entry[0] == 'u':
400 if entry[0] == 'u':
401 yield f
401 yield f
402
402
403 def driverresolved(self):
403 def driverresolved(self):
404 """Obtain the paths of driver-resolved files."""
404 """Obtain the paths of driver-resolved files."""
405
405
406 for f, entry in self._state.items():
406 for f, entry in self._state.items():
407 if entry[0] == 'd':
407 if entry[0] == 'd':
408 yield f
408 yield f
409
409
410 def _resolve(self, preresolve, dfile, wctx, labels=None):
410 def _resolve(self, preresolve, dfile, wctx, labels=None):
411 """rerun merge process for file path `dfile`"""
411 """rerun merge process for file path `dfile`"""
412 if self[dfile] in 'rd':
412 if self[dfile] in 'rd':
413 return True, 0
413 return True, 0
414 stateentry = self._state[dfile]
414 stateentry = self._state[dfile]
415 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
415 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
416 octx = self._repo[self._other]
416 octx = self._repo[self._other]
417 fcd = wctx[dfile]
417 fcd = wctx[dfile]
418 fco = octx[ofile]
418 fco = octx[ofile]
419 fca = self._repo.filectx(afile, fileid=anode)
419 fca = self._repo.filectx(afile, fileid=anode)
420 # "premerge" x flags
420 # "premerge" x flags
421 flo = fco.flags()
421 flo = fco.flags()
422 fla = fca.flags()
422 fla = fca.flags()
423 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
423 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
424 if fca.node() == nullid:
424 if fca.node() == nullid:
425 if preresolve:
425 if preresolve:
426 self._repo.ui.warn(
426 self._repo.ui.warn(
427 _('warning: cannot merge flags for %s\n') % afile)
427 _('warning: cannot merge flags for %s\n') % afile)
428 elif flags == fla:
428 elif flags == fla:
429 flags = flo
429 flags = flo
430 if preresolve:
430 if preresolve:
431 # restore local
431 # restore local
432 f = self._repo.vfs('merge/' + hash)
432 f = self._repo.vfs('merge/' + hash)
433 self._repo.wwrite(dfile, f.read(), flags)
433 self._repo.wwrite(dfile, f.read(), flags)
434 f.close()
434 f.close()
435 complete, r = filemerge.premerge(self._repo, self._local, lfile,
435 complete, r, deleted = filemerge.premerge(self._repo, self._local,
436 fcd, fco, fca, labels=labels)
436 lfile, fcd, fco, fca,
437 labels=labels)
437 else:
438 else:
438 complete, r = filemerge.filemerge(self._repo, self._local, lfile,
439 complete, r, deleted = filemerge.filemerge(self._repo, self._local,
439 fcd, fco, fca, labels=labels)
440 lfile, fcd, fco, fca,
441 labels=labels)
440 if r is None:
442 if r is None:
441 # no real conflict
443 # no real conflict
442 del self._state[dfile]
444 del self._state[dfile]
443 self._dirty = True
445 self._dirty = True
444 elif not r:
446 elif not r:
445 self.mark(dfile, 'r')
447 self.mark(dfile, 'r')
446 return complete, r
448 return complete, r
447
449
448 def preresolve(self, dfile, wctx, labels=None):
450 def preresolve(self, dfile, wctx, labels=None):
449 """run premerge process for dfile
451 """run premerge process for dfile
450
452
451 Returns whether the merge is complete, and the exit code."""
453 Returns whether the merge is complete, and the exit code."""
452 return self._resolve(True, dfile, wctx, labels=labels)
454 return self._resolve(True, dfile, wctx, labels=labels)
453
455
454 def resolve(self, dfile, wctx, labels=None):
456 def resolve(self, dfile, wctx, labels=None):
455 """run merge process (assuming premerge was run) for dfile
457 """run merge process (assuming premerge was run) for dfile
456
458
457 Returns the exit code of the merge."""
459 Returns the exit code of the merge."""
458 return self._resolve(False, dfile, wctx, labels=labels)[1]
460 return self._resolve(False, dfile, wctx, labels=labels)[1]
459
461
460 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
462 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
461 if f2 is None:
463 if f2 is None:
462 f2 = f
464 f2 = f
463 return (os.path.isfile(repo.wjoin(f))
465 return (os.path.isfile(repo.wjoin(f))
464 and repo.wvfs.audit.check(f)
466 and repo.wvfs.audit.check(f)
465 and repo.dirstate.normalize(f) not in repo.dirstate
467 and repo.dirstate.normalize(f) not in repo.dirstate
466 and mctx[f2].cmp(wctx[f]))
468 and mctx[f2].cmp(wctx[f]))
467
469
468 def _checkunknownfiles(repo, wctx, mctx, force, actions):
470 def _checkunknownfiles(repo, wctx, mctx, force, actions):
469 """
471 """
470 Considers any actions that care about the presence of conflicting unknown
472 Considers any actions that care about the presence of conflicting unknown
471 files. For some actions, the result is to abort; for others, it is to
473 files. For some actions, the result is to abort; for others, it is to
472 choose a different action.
474 choose a different action.
473 """
475 """
474 aborts = []
476 aborts = []
475 if not force:
477 if not force:
476 for f, (m, args, msg) in actions.iteritems():
478 for f, (m, args, msg) in actions.iteritems():
477 if m in ('c', 'dc'):
479 if m in ('c', 'dc'):
478 if _checkunknownfile(repo, wctx, mctx, f):
480 if _checkunknownfile(repo, wctx, mctx, f):
479 aborts.append(f)
481 aborts.append(f)
480 elif m == 'dg':
482 elif m == 'dg':
481 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
483 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
482 aborts.append(f)
484 aborts.append(f)
483
485
484 for f in sorted(aborts):
486 for f in sorted(aborts):
485 repo.ui.warn(_("%s: untracked file differs\n") % f)
487 repo.ui.warn(_("%s: untracked file differs\n") % f)
486 if aborts:
488 if aborts:
487 raise error.Abort(_("untracked files in working directory differ "
489 raise error.Abort(_("untracked files in working directory differ "
488 "from files in requested revision"))
490 "from files in requested revision"))
489
491
490 for f, (m, args, msg) in actions.iteritems():
492 for f, (m, args, msg) in actions.iteritems():
491 if m == 'c':
493 if m == 'c':
492 actions[f] = ('g', args, msg)
494 actions[f] = ('g', args, msg)
493 elif m == 'cm':
495 elif m == 'cm':
494 fl2, anc = args
496 fl2, anc = args
495 different = _checkunknownfile(repo, wctx, mctx, f)
497 different = _checkunknownfile(repo, wctx, mctx, f)
496 if different:
498 if different:
497 actions[f] = ('m', (f, f, None, False, anc),
499 actions[f] = ('m', (f, f, None, False, anc),
498 "remote differs from untracked local")
500 "remote differs from untracked local")
499 else:
501 else:
500 actions[f] = ('g', (fl2,), "remote created")
502 actions[f] = ('g', (fl2,), "remote created")
501
503
502 def _forgetremoved(wctx, mctx, branchmerge):
504 def _forgetremoved(wctx, mctx, branchmerge):
503 """
505 """
504 Forget removed files
506 Forget removed files
505
507
506 If we're jumping between revisions (as opposed to merging), and if
508 If we're jumping between revisions (as opposed to merging), and if
507 neither the working directory nor the target rev has the file,
509 neither the working directory nor the target rev has the file,
508 then we need to remove it from the dirstate, to prevent the
510 then we need to remove it from the dirstate, to prevent the
509 dirstate from listing the file when it is no longer in the
511 dirstate from listing the file when it is no longer in the
510 manifest.
512 manifest.
511
513
512 If we're merging, and the other revision has removed a file
514 If we're merging, and the other revision has removed a file
513 that is not present in the working directory, we need to mark it
515 that is not present in the working directory, we need to mark it
514 as removed.
516 as removed.
515 """
517 """
516
518
517 actions = {}
519 actions = {}
518 m = 'f'
520 m = 'f'
519 if branchmerge:
521 if branchmerge:
520 m = 'r'
522 m = 'r'
521 for f in wctx.deleted():
523 for f in wctx.deleted():
522 if f not in mctx:
524 if f not in mctx:
523 actions[f] = m, None, "forget deleted"
525 actions[f] = m, None, "forget deleted"
524
526
525 if not branchmerge:
527 if not branchmerge:
526 for f in wctx.removed():
528 for f in wctx.removed():
527 if f not in mctx:
529 if f not in mctx:
528 actions[f] = 'f', None, "forget removed"
530 actions[f] = 'f', None, "forget removed"
529
531
530 return actions
532 return actions
531
533
532 def _checkcollision(repo, wmf, actions):
534 def _checkcollision(repo, wmf, actions):
533 # build provisional merged manifest up
535 # build provisional merged manifest up
534 pmmf = set(wmf)
536 pmmf = set(wmf)
535
537
536 if actions:
538 if actions:
537 # k, dr, e and rd are no-op
539 # k, dr, e and rd are no-op
538 for m in 'a', 'f', 'g', 'cd', 'dc':
540 for m in 'a', 'f', 'g', 'cd', 'dc':
539 for f, args, msg in actions[m]:
541 for f, args, msg in actions[m]:
540 pmmf.add(f)
542 pmmf.add(f)
541 for f, args, msg in actions['r']:
543 for f, args, msg in actions['r']:
542 pmmf.discard(f)
544 pmmf.discard(f)
543 for f, args, msg in actions['dm']:
545 for f, args, msg in actions['dm']:
544 f2, flags = args
546 f2, flags = args
545 pmmf.discard(f2)
547 pmmf.discard(f2)
546 pmmf.add(f)
548 pmmf.add(f)
547 for f, args, msg in actions['dg']:
549 for f, args, msg in actions['dg']:
548 pmmf.add(f)
550 pmmf.add(f)
549 for f, args, msg in actions['m']:
551 for f, args, msg in actions['m']:
550 f1, f2, fa, move, anc = args
552 f1, f2, fa, move, anc = args
551 if move:
553 if move:
552 pmmf.discard(f1)
554 pmmf.discard(f1)
553 pmmf.add(f)
555 pmmf.add(f)
554
556
555 # check case-folding collision in provisional merged manifest
557 # check case-folding collision in provisional merged manifest
556 foldmap = {}
558 foldmap = {}
557 for f in sorted(pmmf):
559 for f in sorted(pmmf):
558 fold = util.normcase(f)
560 fold = util.normcase(f)
559 if fold in foldmap:
561 if fold in foldmap:
560 raise error.Abort(_("case-folding collision between %s and %s")
562 raise error.Abort(_("case-folding collision between %s and %s")
561 % (f, foldmap[fold]))
563 % (f, foldmap[fold]))
562 foldmap[fold] = f
564 foldmap[fold] = f
563
565
564 # check case-folding of directories
566 # check case-folding of directories
565 foldprefix = unfoldprefix = lastfull = ''
567 foldprefix = unfoldprefix = lastfull = ''
566 for fold, f in sorted(foldmap.items()):
568 for fold, f in sorted(foldmap.items()):
567 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
569 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
568 # the folded prefix matches but actual casing is different
570 # the folded prefix matches but actual casing is different
569 raise error.Abort(_("case-folding collision between "
571 raise error.Abort(_("case-folding collision between "
570 "%s and directory of %s") % (lastfull, f))
572 "%s and directory of %s") % (lastfull, f))
571 foldprefix = fold + '/'
573 foldprefix = fold + '/'
572 unfoldprefix = f + '/'
574 unfoldprefix = f + '/'
573 lastfull = f
575 lastfull = f
574
576
575 def driverpreprocess(repo, ms, wctx, labels=None):
577 def driverpreprocess(repo, ms, wctx, labels=None):
576 """run the preprocess step of the merge driver, if any
578 """run the preprocess step of the merge driver, if any
577
579
578 This is currently not implemented -- it's an extension point."""
580 This is currently not implemented -- it's an extension point."""
579 return True
581 return True
580
582
581 def driverconclude(repo, ms, wctx, labels=None):
583 def driverconclude(repo, ms, wctx, labels=None):
582 """run the conclude step of the merge driver, if any
584 """run the conclude step of the merge driver, if any
583
585
584 This is currently not implemented -- it's an extension point."""
586 This is currently not implemented -- it's an extension point."""
585 return True
587 return True
586
588
587 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
589 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
588 acceptremote, followcopies):
590 acceptremote, followcopies):
589 """
591 """
590 Merge p1 and p2 with ancestor pa and generate merge action list
592 Merge p1 and p2 with ancestor pa and generate merge action list
591
593
592 branchmerge and force are as passed in to update
594 branchmerge and force are as passed in to update
593 partial = function to filter file lists
595 partial = function to filter file lists
594 acceptremote = accept the incoming changes without prompting
596 acceptremote = accept the incoming changes without prompting
595 """
597 """
596
598
597 copy, movewithdir, diverge, renamedelete = {}, {}, {}, {}
599 copy, movewithdir, diverge, renamedelete = {}, {}, {}, {}
598
600
599 # manifests fetched in order are going to be faster, so prime the caches
601 # manifests fetched in order are going to be faster, so prime the caches
600 [x.manifest() for x in
602 [x.manifest() for x in
601 sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]
603 sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]
602
604
603 if followcopies:
605 if followcopies:
604 ret = copies.mergecopies(repo, wctx, p2, pa)
606 ret = copies.mergecopies(repo, wctx, p2, pa)
605 copy, movewithdir, diverge, renamedelete = ret
607 copy, movewithdir, diverge, renamedelete = ret
606
608
607 repo.ui.note(_("resolving manifests\n"))
609 repo.ui.note(_("resolving manifests\n"))
608 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
610 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
609 % (bool(branchmerge), bool(force), bool(partial)))
611 % (bool(branchmerge), bool(force), bool(partial)))
610 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
612 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
611
613
612 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
614 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
613 copied = set(copy.values())
615 copied = set(copy.values())
614 copied.update(movewithdir.values())
616 copied.update(movewithdir.values())
615
617
616 if '.hgsubstate' in m1:
618 if '.hgsubstate' in m1:
617 # check whether sub state is modified
619 # check whether sub state is modified
618 for s in sorted(wctx.substate):
620 for s in sorted(wctx.substate):
619 if wctx.sub(s).dirty():
621 if wctx.sub(s).dirty():
620 m1['.hgsubstate'] += '+'
622 m1['.hgsubstate'] += '+'
621 break
623 break
622
624
623 # Compare manifests
625 # Compare manifests
624 diff = m1.diff(m2)
626 diff = m1.diff(m2)
625
627
626 actions = {}
628 actions = {}
627 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
629 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
628 if partial and not partial(f):
630 if partial and not partial(f):
629 continue
631 continue
630 if n1 and n2: # file exists on both local and remote side
632 if n1 and n2: # file exists on both local and remote side
631 if f not in ma:
633 if f not in ma:
632 fa = copy.get(f, None)
634 fa = copy.get(f, None)
633 if fa is not None:
635 if fa is not None:
634 actions[f] = ('m', (f, f, fa, False, pa.node()),
636 actions[f] = ('m', (f, f, fa, False, pa.node()),
635 "both renamed from " + fa)
637 "both renamed from " + fa)
636 else:
638 else:
637 actions[f] = ('m', (f, f, None, False, pa.node()),
639 actions[f] = ('m', (f, f, None, False, pa.node()),
638 "both created")
640 "both created")
639 else:
641 else:
640 a = ma[f]
642 a = ma[f]
641 fla = ma.flags(f)
643 fla = ma.flags(f)
642 nol = 'l' not in fl1 + fl2 + fla
644 nol = 'l' not in fl1 + fl2 + fla
643 if n2 == a and fl2 == fla:
645 if n2 == a and fl2 == fla:
644 actions[f] = ('k' , (), "remote unchanged")
646 actions[f] = ('k' , (), "remote unchanged")
645 elif n1 == a and fl1 == fla: # local unchanged - use remote
647 elif n1 == a and fl1 == fla: # local unchanged - use remote
646 if n1 == n2: # optimization: keep local content
648 if n1 == n2: # optimization: keep local content
647 actions[f] = ('e', (fl2,), "update permissions")
649 actions[f] = ('e', (fl2,), "update permissions")
648 else:
650 else:
649 actions[f] = ('g', (fl2,), "remote is newer")
651 actions[f] = ('g', (fl2,), "remote is newer")
650 elif nol and n2 == a: # remote only changed 'x'
652 elif nol and n2 == a: # remote only changed 'x'
651 actions[f] = ('e', (fl2,), "update permissions")
653 actions[f] = ('e', (fl2,), "update permissions")
652 elif nol and n1 == a: # local only changed 'x'
654 elif nol and n1 == a: # local only changed 'x'
653 actions[f] = ('g', (fl1,), "remote is newer")
655 actions[f] = ('g', (fl1,), "remote is newer")
654 else: # both changed something
656 else: # both changed something
655 actions[f] = ('m', (f, f, f, False, pa.node()),
657 actions[f] = ('m', (f, f, f, False, pa.node()),
656 "versions differ")
658 "versions differ")
657 elif n1: # file exists only on local side
659 elif n1: # file exists only on local side
658 if f in copied:
660 if f in copied:
659 pass # we'll deal with it on m2 side
661 pass # we'll deal with it on m2 side
660 elif f in movewithdir: # directory rename, move local
662 elif f in movewithdir: # directory rename, move local
661 f2 = movewithdir[f]
663 f2 = movewithdir[f]
662 if f2 in m2:
664 if f2 in m2:
663 actions[f2] = ('m', (f, f2, None, True, pa.node()),
665 actions[f2] = ('m', (f, f2, None, True, pa.node()),
664 "remote directory rename, both created")
666 "remote directory rename, both created")
665 else:
667 else:
666 actions[f2] = ('dm', (f, fl1),
668 actions[f2] = ('dm', (f, fl1),
667 "remote directory rename - move from " + f)
669 "remote directory rename - move from " + f)
668 elif f in copy:
670 elif f in copy:
669 f2 = copy[f]
671 f2 = copy[f]
670 actions[f] = ('m', (f, f2, f2, False, pa.node()),
672 actions[f] = ('m', (f, f2, f2, False, pa.node()),
671 "local copied/moved from " + f2)
673 "local copied/moved from " + f2)
672 elif f in ma: # clean, a different, no remote
674 elif f in ma: # clean, a different, no remote
673 if n1 != ma[f]:
675 if n1 != ma[f]:
674 if acceptremote:
676 if acceptremote:
675 actions[f] = ('r', None, "remote delete")
677 actions[f] = ('r', None, "remote delete")
676 else:
678 else:
677 actions[f] = ('cd', (f, None, f, False, pa.node()),
679 actions[f] = ('cd', (f, None, f, False, pa.node()),
678 "prompt changed/deleted")
680 "prompt changed/deleted")
679 elif n1[20:] == 'a':
681 elif n1[20:] == 'a':
680 # This extra 'a' is added by working copy manifest to mark
682 # This extra 'a' is added by working copy manifest to mark
681 # the file as locally added. We should forget it instead of
683 # the file as locally added. We should forget it instead of
682 # deleting it.
684 # deleting it.
683 actions[f] = ('f', None, "remote deleted")
685 actions[f] = ('f', None, "remote deleted")
684 else:
686 else:
685 actions[f] = ('r', None, "other deleted")
687 actions[f] = ('r', None, "other deleted")
686 elif n2: # file exists only on remote side
688 elif n2: # file exists only on remote side
687 if f in copied:
689 if f in copied:
688 pass # we'll deal with it on m1 side
690 pass # we'll deal with it on m1 side
689 elif f in movewithdir:
691 elif f in movewithdir:
690 f2 = movewithdir[f]
692 f2 = movewithdir[f]
691 if f2 in m1:
693 if f2 in m1:
692 actions[f2] = ('m', (f2, f, None, False, pa.node()),
694 actions[f2] = ('m', (f2, f, None, False, pa.node()),
693 "local directory rename, both created")
695 "local directory rename, both created")
694 else:
696 else:
695 actions[f2] = ('dg', (f, fl2),
697 actions[f2] = ('dg', (f, fl2),
696 "local directory rename - get from " + f)
698 "local directory rename - get from " + f)
697 elif f in copy:
699 elif f in copy:
698 f2 = copy[f]
700 f2 = copy[f]
699 if f2 in m2:
701 if f2 in m2:
700 actions[f] = ('m', (f2, f, f2, False, pa.node()),
702 actions[f] = ('m', (f2, f, f2, False, pa.node()),
701 "remote copied from " + f2)
703 "remote copied from " + f2)
702 else:
704 else:
703 actions[f] = ('m', (f2, f, f2, True, pa.node()),
705 actions[f] = ('m', (f2, f, f2, True, pa.node()),
704 "remote moved from " + f2)
706 "remote moved from " + f2)
705 elif f not in ma:
707 elif f not in ma:
706 # local unknown, remote created: the logic is described by the
708 # local unknown, remote created: the logic is described by the
707 # following table:
709 # following table:
708 #
710 #
709 # force branchmerge different | action
711 # force branchmerge different | action
710 # n * * | create
712 # n * * | create
711 # y n * | create
713 # y n * | create
712 # y y n | create
714 # y y n | create
713 # y y y | merge
715 # y y y | merge
714 #
716 #
715 # Checking whether the files are different is expensive, so we
717 # Checking whether the files are different is expensive, so we
716 # don't do that when we can avoid it.
718 # don't do that when we can avoid it.
717 if not force:
719 if not force:
718 actions[f] = ('c', (fl2,), "remote created")
720 actions[f] = ('c', (fl2,), "remote created")
719 elif not branchmerge:
721 elif not branchmerge:
720 actions[f] = ('c', (fl2,), "remote created")
722 actions[f] = ('c', (fl2,), "remote created")
721 else:
723 else:
722 actions[f] = ('cm', (fl2, pa.node()),
724 actions[f] = ('cm', (fl2, pa.node()),
723 "remote created, get or merge")
725 "remote created, get or merge")
724 elif n2 != ma[f]:
726 elif n2 != ma[f]:
725 if acceptremote:
727 if acceptremote:
726 actions[f] = ('c', (fl2,), "remote recreating")
728 actions[f] = ('c', (fl2,), "remote recreating")
727 else:
729 else:
728 actions[f] = ('dc', (None, f, f, False, pa.node()),
730 actions[f] = ('dc', (None, f, f, False, pa.node()),
729 "prompt deleted/changed")
731 "prompt deleted/changed")
730
732
731 return actions, diverge, renamedelete
733 return actions, diverge, renamedelete
732
734
733 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
735 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
734 """Resolves false conflicts where the nodeid changed but the content
736 """Resolves false conflicts where the nodeid changed but the content
735 remained the same."""
737 remained the same."""
736
738
737 for f, (m, args, msg) in actions.items():
739 for f, (m, args, msg) in actions.items():
738 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
740 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
739 # local did change but ended up with same content
741 # local did change but ended up with same content
740 actions[f] = 'r', None, "prompt same"
742 actions[f] = 'r', None, "prompt same"
741 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
743 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
742 # remote did change but ended up with same content
744 # remote did change but ended up with same content
743 del actions[f] # don't get = keep local deleted
745 del actions[f] # don't get = keep local deleted
744
746
745 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force, partial,
747 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force, partial,
746 acceptremote, followcopies):
748 acceptremote, followcopies):
747 "Calculate the actions needed to merge mctx into wctx using ancestors"
749 "Calculate the actions needed to merge mctx into wctx using ancestors"
748
750
749 if len(ancestors) == 1: # default
751 if len(ancestors) == 1: # default
750 actions, diverge, renamedelete = manifestmerge(
752 actions, diverge, renamedelete = manifestmerge(
751 repo, wctx, mctx, ancestors[0], branchmerge, force, partial,
753 repo, wctx, mctx, ancestors[0], branchmerge, force, partial,
752 acceptremote, followcopies)
754 acceptremote, followcopies)
753 _checkunknownfiles(repo, wctx, mctx, force, actions)
755 _checkunknownfiles(repo, wctx, mctx, force, actions)
754
756
755 else: # only when merge.preferancestor=* - the default
757 else: # only when merge.preferancestor=* - the default
756 repo.ui.note(
758 repo.ui.note(
757 _("note: merging %s and %s using bids from ancestors %s\n") %
759 _("note: merging %s and %s using bids from ancestors %s\n") %
758 (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))
760 (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))
759
761
760 # Call for bids
762 # Call for bids
761 fbids = {} # mapping filename to bids (action method to list af actions)
763 fbids = {} # mapping filename to bids (action method to list af actions)
762 diverge, renamedelete = None, None
764 diverge, renamedelete = None, None
763 for ancestor in ancestors:
765 for ancestor in ancestors:
764 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
766 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
765 actions, diverge1, renamedelete1 = manifestmerge(
767 actions, diverge1, renamedelete1 = manifestmerge(
766 repo, wctx, mctx, ancestor, branchmerge, force, partial,
768 repo, wctx, mctx, ancestor, branchmerge, force, partial,
767 acceptremote, followcopies)
769 acceptremote, followcopies)
768 _checkunknownfiles(repo, wctx, mctx, force, actions)
770 _checkunknownfiles(repo, wctx, mctx, force, actions)
769
771
770 # Track the shortest set of warning on the theory that bid
772 # Track the shortest set of warning on the theory that bid
771 # merge will correctly incorporate more information
773 # merge will correctly incorporate more information
772 if diverge is None or len(diverge1) < len(diverge):
774 if diverge is None or len(diverge1) < len(diverge):
773 diverge = diverge1
775 diverge = diverge1
774 if renamedelete is None or len(renamedelete) < len(renamedelete1):
776 if renamedelete is None or len(renamedelete) < len(renamedelete1):
775 renamedelete = renamedelete1
777 renamedelete = renamedelete1
776
778
777 for f, a in sorted(actions.iteritems()):
779 for f, a in sorted(actions.iteritems()):
778 m, args, msg = a
780 m, args, msg = a
779 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
781 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
780 if f in fbids:
782 if f in fbids:
781 d = fbids[f]
783 d = fbids[f]
782 if m in d:
784 if m in d:
783 d[m].append(a)
785 d[m].append(a)
784 else:
786 else:
785 d[m] = [a]
787 d[m] = [a]
786 else:
788 else:
787 fbids[f] = {m: [a]}
789 fbids[f] = {m: [a]}
788
790
789 # Pick the best bid for each file
791 # Pick the best bid for each file
790 repo.ui.note(_('\nauction for merging merge bids\n'))
792 repo.ui.note(_('\nauction for merging merge bids\n'))
791 actions = {}
793 actions = {}
792 for f, bids in sorted(fbids.items()):
794 for f, bids in sorted(fbids.items()):
793 # bids is a mapping from action method to list af actions
795 # bids is a mapping from action method to list af actions
794 # Consensus?
796 # Consensus?
795 if len(bids) == 1: # all bids are the same kind of method
797 if len(bids) == 1: # all bids are the same kind of method
796 m, l = bids.items()[0]
798 m, l = bids.items()[0]
797 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
799 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
798 repo.ui.note(" %s: consensus for %s\n" % (f, m))
800 repo.ui.note(" %s: consensus for %s\n" % (f, m))
799 actions[f] = l[0]
801 actions[f] = l[0]
800 continue
802 continue
801 # If keep is an option, just do it.
803 # If keep is an option, just do it.
802 if 'k' in bids:
804 if 'k' in bids:
803 repo.ui.note(" %s: picking 'keep' action\n" % f)
805 repo.ui.note(" %s: picking 'keep' action\n" % f)
804 actions[f] = bids['k'][0]
806 actions[f] = bids['k'][0]
805 continue
807 continue
806 # If there are gets and they all agree [how could they not?], do it.
808 # If there are gets and they all agree [how could they not?], do it.
807 if 'g' in bids:
809 if 'g' in bids:
808 ga0 = bids['g'][0]
810 ga0 = bids['g'][0]
809 if all(a == ga0 for a in bids['g'][1:]):
811 if all(a == ga0 for a in bids['g'][1:]):
810 repo.ui.note(" %s: picking 'get' action\n" % f)
812 repo.ui.note(" %s: picking 'get' action\n" % f)
811 actions[f] = ga0
813 actions[f] = ga0
812 continue
814 continue
813 # TODO: Consider other simple actions such as mode changes
815 # TODO: Consider other simple actions such as mode changes
814 # Handle inefficient democrazy.
816 # Handle inefficient democrazy.
815 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
817 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
816 for m, l in sorted(bids.items()):
818 for m, l in sorted(bids.items()):
817 for _f, args, msg in l:
819 for _f, args, msg in l:
818 repo.ui.note(' %s -> %s\n' % (msg, m))
820 repo.ui.note(' %s -> %s\n' % (msg, m))
819 # Pick random action. TODO: Instead, prompt user when resolving
821 # Pick random action. TODO: Instead, prompt user when resolving
820 m, l = bids.items()[0]
822 m, l = bids.items()[0]
821 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
823 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
822 (f, m))
824 (f, m))
823 actions[f] = l[0]
825 actions[f] = l[0]
824 continue
826 continue
825 repo.ui.note(_('end of auction\n\n'))
827 repo.ui.note(_('end of auction\n\n'))
826
828
827 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
829 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
828
830
829 if wctx.rev() is None:
831 if wctx.rev() is None:
830 fractions = _forgetremoved(wctx, mctx, branchmerge)
832 fractions = _forgetremoved(wctx, mctx, branchmerge)
831 actions.update(fractions)
833 actions.update(fractions)
832
834
833 return actions, diverge, renamedelete
835 return actions, diverge, renamedelete
834
836
def batchremove(repo, actions):
    """Delete files from the working directory.

    ``actions`` is an iterable of ``(file, args, msg)`` tuples describing
    files to remove.  Yields ``(count, file)`` progress tuples roughly once
    per hundred processed entries so callers can drive a progress bar.
    """
    chatty = repo.ui.verbose
    dounlink = util.unlinkpath
    topath = repo.wjoin
    checkpath = repo.wvfs.audit
    pending = 0
    for f, args, msg in actions:
        repo.ui.debug(" %s: %s -> r\n" % (f, msg))
        if chatty:
            repo.ui.note(_("removing %s\n") % f)
        # refuse to touch paths that escape the working directory
        checkpath(f)
        try:
            # a file already gone is fine -- we only care about real failures
            dounlink(topath(f), ignoremissing=True)
        except OSError as err:
            repo.ui.warn(_("update failed to remove %s: %s!\n") %
                         (f, err.strerror))
        if pending == 100:
            yield pending, f
            pending = 0
        pending += 1
    # flush whatever is left in the final partial batch
    if pending > 0:
        yield pending, f
861
863
def batchget(repo, mctx, actions):
    """Write file contents from ``mctx`` into the working directory.

    ``mctx`` is the context to read file data from.  ``actions`` is an
    iterable of ``(file, args, msg)`` tuples; ``args[0]`` carries the file
    flags to apply.  Yields ``(count, file)`` progress tuples roughly once
    per hundred processed entries.
    """
    chatty = repo.ui.verbose
    readctx = mctx.filectx
    writefile = repo.wwrite
    pending = 0
    for f, args, msg in actions:
        repo.ui.debug(" %s: %s -> g\n" % (f, msg))
        if chatty:
            repo.ui.note(_("getting %s\n") % f)
        writefile(f, readctx(f).data(), args[0])
        if pending == 100:
            yield pending, f
            pending = 0
        pending += 1
    # flush whatever is left in the final partial batch
    if pending > 0:
        yield pending, f
884
886
def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy

    actions maps action codes to lists of (file, args, msg) tuples; codes
    seen here: 'r' remove, 'g' get, 'f' forget, 'a' re-add, 'k' keep,
    'dm'/'dg' directory rename (move local / get), 'e' exec-flag change,
    'm' merge.

    Return a tuple of counts (updated, merged, removed, unresolved) that
    describes how many files were affected by the update.
    """

    updated, merged, removed, unresolved = 0, 0, 0, 0
    ms = mergestate.clean(repo, wctx.p1().node(), mctx.node())
    moves = []
    # sort each action list in place for deterministic processing order
    for m, l in actions.items():
        l.sort()

    # prescan for merges: register every 'm' action with the mergestate
    # before touching the working directory, so local content is preserved
    for f, args, msg in actions['m']:
        f1, f2, fa, move, anc = args
        if f == '.hgsubstate': # merged internally
            continue
        repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
        fcl = wctx[f1]
        fco = mctx[f2]
        actx = repo[anc]
        if fa in actx:
            fca = actx[fa]
        else:
            # ancestor lacks the file: use an empty filectx as the base
            fca = repo.filectx(f1, fileid=nullrev)
        ms.add(fcl, fco, fca, f)
        if f1 != f and move:
            moves.append(f1)

    audit = repo.wvfs.audit
    _updating = _('updating')
    _files = _('files')
    progress = repo.ui.progress

    # remove renamed files after safely stored (in the mergestate above)
    for f in moves:
        if os.path.lexists(repo.wjoin(f)):
            repo.ui.debug("removing %s\n" % f)
            audit(f)
            util.unlinkpath(repo.wjoin(f))

    # 'k' (keep) actions are no-ops and do not count towards progress
    numupdates = sum(len(l) for m, l in actions.items() if m != 'k')

    if [a for a in actions['r'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite)

    # remove in parallel (must come first)
    z = 0
    prog = worker.worker(repo.ui, 0.001, batchremove, (repo,), actions['r'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    removed = len(actions['r'])

    # get in parallel
    prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx), actions['g'])
    for i, item in prog:
        z += i
        progress(_updating, z, item=item, total=numupdates, unit=_files)
    updated = len(actions['g'])

    if [a for a in actions['g'] if a[0] == '.hgsubstate']:
        subrepo.submerge(repo, wctx, mctx, wctx, overwrite)

    # forget (manifest only, just log it) (must come first)
    for f, args, msg in actions['f']:
        repo.ui.debug(" %s: %s -> f\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # re-add (manifest only, just log it)
    for f, args, msg in actions['a']:
        repo.ui.debug(" %s: %s -> a\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)

    # keep (noop, just log it)
    for f, args, msg in actions['k']:
        repo.ui.debug(" %s: %s -> k\n" % (f, msg))
        # no progress

    # directory rename, move local
    for f, args, msg in actions['dm']:
        repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("moving %s to %s\n") % (f0, f))
        audit(f)
        # write the new name first, then unlink the old one
        repo.wwrite(f, wctx.filectx(f0).data(), flags)
        util.unlinkpath(repo.wjoin(f0))
        updated += 1

    # local directory rename, get
    for f, args, msg in actions['dg']:
        repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        f0, flags = args
        repo.ui.note(_("getting %s to %s\n") % (f0, f))
        repo.wwrite(f, mctx.filectx(f0).data(), flags)
        updated += 1

    # exec (flag-only change: symlink/exec bits)
    for f, args, msg in actions['e']:
        repo.ui.debug(" %s: %s -> e\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        flags, = args
        audit(f)
        util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
        updated += 1

    mergeactions = actions['m']
    # the ordering is important here -- ms.mergedriver will raise if the merge
    # driver has changed, and we want to be able to bypass it when overwrite is
    # True
    usemergedriver = not overwrite and mergeactions and ms.mergedriver

    if usemergedriver:
        ms.commit()
        proceed = driverpreprocess(repo, ms, wctx, labels=labels)
        # the driver might leave some files unresolved
        unresolvedf = set(ms.unresolved())
        if not proceed:
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            return updated, merged, removed, max(len(unresolvedf), 1)
        # only the files the driver left unresolved still need merging
        newactions = []
        for f, args, msg in mergeactions:
            if f in unresolvedf:
                newactions.append((f, args, msg))
        mergeactions = newactions

    # premerge
    tocomplete = []
    for f, args, msg in mergeactions:
        repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        if f == '.hgsubstate': # subrepo states need updating
            subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
                             overwrite)
            continue
        audit(f)
        # r: None -> no merge needed (counts as updated); 0 -> merged
        # cleanly; >0 -> conflicts remain (unresolved)
        complete, r = ms.preresolve(f, wctx, labels=labels)
        if complete:
            if r is not None and r > 0:
                unresolved += 1
            else:
                if r is None:
                    updated += 1
                else:
                    merged += 1
        else:
            # the real resolve below is an extra progress step
            numupdates += 1
            tocomplete.append((f, args, msg))

    # merge
    for f, args, msg in tocomplete:
        repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
        z += 1
        progress(_updating, z, item=f, total=numupdates, unit=_files)
        r = ms.resolve(f, wctx, labels=labels)
        if r is not None and r > 0:
            unresolved += 1
        else:
            if r is None:
                updated += 1
            else:
                merged += 1

    ms.commit()

    if usemergedriver and not unresolved and ms.mdstate() != 's':
        if not driverconclude(repo, ms, wctx, labels=labels):
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            unresolved = max(unresolved, 1)

    ms.commit()

    # closing call (pos=None) clears the progress bar
    progress(_updating, None, total=numupdates, unit=_files)

    return updated, merged, removed, unresolved
1074
1076
def recordupdates(repo, actions, branchmerge):
    """Record merge actions to the dirstate.

    ``actions`` is the same action-code-to-list-of-(file, args, msg) dict
    consumed by applyupdates(); only dirstate bookkeeping happens here --
    the working directory was already modified by applyupdates().
    ``branchmerge`` tells whether this is a two-parent merge (affects
    whether files are marked removed/merged vs. simply dropped/normal).
    """
    # remove (must come first)
    for f, args, msg in actions['r']:
        if branchmerge:
            # keep the removal recorded so the merge commit registers it
            repo.dirstate.remove(f)
        else:
            # linear update: the file simply stops being tracked
            repo.dirstate.drop(f)

    # forget (must come first)
    for f, args, msg in actions['f']:
        repo.dirstate.drop(f)

    # re-add
    for f, args, msg in actions['a']:
        if not branchmerge:
            repo.dirstate.add(f)

    # exec change
    for f, args, msg in actions['e']:
        # flags changed on disk; force a fresh stat on next status check
        repo.dirstate.normallookup(f)

    # keep (no dirstate change needed)
    for f, args, msg in actions['k']:
        pass

    # get
    for f, args, msg in actions['g']:
        if branchmerge:
            # content comes from the other parent of the merge
            repo.dirstate.otherparent(f)
        else:
            repo.dirstate.normal(f)

    # merge
    for f, args, msg in actions['m']:
        f1, f2, fa, move, anc = args
        if branchmerge:
            # We've done a branch merge, mark this file as merged
            # so that we properly record the merger later
            repo.dirstate.merge(f)
            if f1 != f2: # copy/rename
                if move:
                    repo.dirstate.remove(f1)
                if f1 != f:
                    repo.dirstate.copy(f1, f)
                else:
                    repo.dirstate.copy(f2, f)
        else:
            # We've update-merged a locally modified file, so
            # we set the dirstate to emulate a normal checkout
            # of that file some time in the past. Thus our
            # merge will appear as a normal local file
            # modification.
            if f2 == f: # file not locally copied/moved
                repo.dirstate.normallookup(f)
            if move:
                repo.dirstate.drop(f1)

    # directory rename, move local
    for f, args, msg in actions['dm']:
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.remove(f0)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
            repo.dirstate.drop(f0)

    # directory rename, get
    for f, args, msg in actions['dg']:
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
def update(repo, node, branchmerge, force, partial, ancestor=None,
           mergeancestor=False, labels=None):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to, or None if unspecified
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    partial = a function to filter file lists (dirstate not updated)
    mergeancestor = whether it is merging with an ancestor. If true,
      we should accept the incoming changes for any prompts that occur.
      If false, merging with an ancestor (fast-forward) is only allowed
      between different named branches. This flag is used by rebase extension
      as a temporary fix and should be avoided in general.

    The table below shows all the behaviors of the update command
    given the -c and -C or no options, whether the working directory
    is dirty, whether a revision is specified, and the relationship of
    the parent rev to the target rev (linear, on the same named
    branch, or on another named branch).

    This logic is tested by test-update-branches.t.

    -c  -C  dirty  rev  |  linear   same  cross
     n   n    n     n   |    ok     (1)     x
     n   n    n     y   |    ok     ok     ok
     n   n    y     n   |   merge   (2)    (2)
     n   n    y     y   |   merge   (3)    (3)
     n   y    *     *   |  discard discard discard
     y   n    y     *   |    (4)    (4)    (4)
     y   n    n     *   |    ok     ok     ok
     y   y    *     *   |    (5)    (5)    (5)

    x = can't happen
    * = don't-care
    1 = abort: not a linear update (merge or update --check to force update)
    2 = abort: uncommitted changes (commit and merge, or update --clean to
                 discard changes)
    3 = abort: uncommitted changes (commit or update --clean to discard changes)
    4 = abort: uncommitted changes (checked in commands.py)
    5 = incompatible options (checked in commands.py)

    Return the same tuple as applyupdates().
    """

    # remember the caller-supplied node; it may be rewritten below when the
    # destination has to be computed, and several checks care whether an
    # explicit revision was given
    onode = node
    wlock = repo.wlock()
    try:
        wc = repo[None]
        pl = wc.parents()
        p1 = pl[0]
        pas = [None]
        if ancestor is not None:
            pas = [repo[ancestor]]

        if node is None:
            # updating with no explicit target is a deprecated call pattern;
            # compute the destination here but warn in devel mode
            if (repo.ui.configbool('devel', 'all-warnings')
                    or repo.ui.configbool('devel', 'oldapi')):
                repo.ui.develwarn('update with no target')
            rev, _mark, _act = destutil.destupdate(repo)
            node = repo[rev].node()

        overwrite = force and not branchmerge

        p2 = repo[node]
        if pas[0] is None:
            if repo.ui.configlist('merge', 'preferancestor', ['*']) == ['*']:
                cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
                pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
            else:
                pas = [p1.ancestor(p2, warn=branchmerge)]

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)

        ### check phase
        if not overwrite and len(pl) > 1:
            raise error.Abort(_("outstanding uncommitted merge"))
        if branchmerge:
            if pas == [p2]:
                raise error.Abort(_("merging with a working directory ancestor"
                                   " has no effect"))
            elif pas == [p1]:
                if not mergeancestor and p1.branch() == p2.branch():
                    raise error.Abort(_("nothing to merge"),
                                     hint=_("use 'hg update' "
                                            "or check 'hg heads'"))
            if not force and (wc.files() or wc.deleted()):
                raise error.Abort(_("uncommitted changes"),
                                 hint=_("use 'hg status' to list changes"))
            for s in sorted(wc.substate):
                wc.sub(s).bailifchanged()

        elif not overwrite:
            if p1 == p2: # no-op update
                # call the hooks and exit early
                repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
                repo.hook('update', parent1=xp2, parent2='', error=0)
                return 0, 0, 0, 0

            if pas not in ([p1], [p2]): # nonlinear
                dirty = wc.dirty(missing=True)
                if dirty or onode is None:
                    # Branching is a bit strange to ensure we do the minimal
                    # amount of call to obsolete.background.
                    foreground = obsolete.foreground(repo, [p1.node()])
                    # note: the <node> variable contains a random identifier
                    if repo[node].node() in foreground:
                        pas = [p1] # allow updating to successors
                    elif dirty:
                        msg = _("uncommitted changes")
                        if onode is None:
                            hint = _("commit and merge, or update --clean to"
                                     " discard changes")
                        else:
                            hint = _("commit or update --clean to discard"
                                     " changes")
                        raise error.Abort(msg, hint=hint)
                    else: # node is none
                        msg = _("not a linear update")
                        hint = _("merge or update --check to force update")
                        raise error.Abort(msg, hint=hint)
                else:
                    # Allow jumping branches if clean and specific rev given
                    pas = [p1]

        # deprecated config: merge.followcopies
        followcopies = False
        if overwrite:
            pas = [wc]
        elif pas == [p2]: # backwards
            pas = [wc.p1()]
        elif not branchmerge and not wc.dirty(missing=True):
            pass
        elif pas[0] and repo.ui.configbool('merge', 'followcopies', True):
            followcopies = True

        ### calculate phase
        actionbyfile, diverge, renamedelete = calculateupdates(
            repo, wc, p2, pas, branchmerge, force, partial, mergeancestor,
            followcopies)
        # Convert to dictionary-of-lists format
        actions = dict((m, []) for m in 'a f g cd dc r dm dg m e k'.split())
        for f, (m, args, msg) in actionbyfile.iteritems():
            if m not in actions:
                actions[m] = []
            actions[m].append((f, args, msg))

        # on case-insensitive filesystems, refuse updates that would make
        # two tracked files collide on the same on-disk name
        if not util.checkcase(repo.path):
            # check collision between files only in p2 for clean update
            if (not branchmerge and
                (force or not wc.dirty(missing=True, branch=False))):
                _checkcollision(repo, p2.manifest(), None)
            else:
                _checkcollision(repo, wc.manifest(), actions)

        # Prompt and create actions. TODO: Move this towards resolve phase.
        for f, args, msg in sorted(actions['cd']):
            if repo.ui.promptchoice(
                _("local changed %s which remote deleted\n"
                  "use (c)hanged version or (d)elete?"
                  "$$ &Changed $$ &Delete") % f, 0):
                actions['r'].append((f, None, "prompt delete"))
            else:
                actions['a'].append((f, None, "prompt keep"))

        for f, args, msg in sorted(actions['dc']):
            f1, f2, fa, move, anc = args
            flags = p2[f2].flags()
            if repo.ui.promptchoice(
                _("remote changed %s which local deleted\n"
                  "use (c)hanged version or leave (d)eleted?"
                  "$$ &Changed $$ &Deleted") % f, 0) == 0:
                actions['g'].append((f, (flags,), "prompt recreating"))

        # divergent renames
        for f, fl in sorted(diverge.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was renamed "
                           "multiple times to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        # rename and delete
        for f, fl in sorted(renamedelete.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was deleted "
                           "and renamed to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        ### apply phase
        if not branchmerge: # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
        if not partial:
            repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
            # note that we're in the middle of an update
            repo.vfs.write('updatestate', p2.hex())

        stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)

        if not partial:
            repo.dirstate.beginparentchange()
            repo.setparents(fp1, fp2)
            recordupdates(repo, actions, branchmerge)
            # update completed, clear state
            util.unlink(repo.join('updatestate'))

            if not branchmerge:
                repo.dirstate.setbranch(p2.branch())
            repo.dirstate.endparentchange()
    finally:
        wlock.release()

    if not partial:
        # stats[3] is the unresolved-file count from applyupdates()
        repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
    return stats
1367
1369
1368 def graft(repo, ctx, pctx, labels):
1370 def graft(repo, ctx, pctx, labels):
1369 """Do a graft-like merge.
1371 """Do a graft-like merge.
1370
1372
1371 This is a merge where the merge ancestor is chosen such that one
1373 This is a merge where the merge ancestor is chosen such that one
1372 or more changesets are grafted onto the current changeset. In
1374 or more changesets are grafted onto the current changeset. In
1373 addition to the merge, this fixes up the dirstate to include only
1375 addition to the merge, this fixes up the dirstate to include only
1374 a single parent and tries to duplicate any renames/copies
1376 a single parent and tries to duplicate any renames/copies
1375 appropriately.
1377 appropriately.
1376
1378
1377 ctx - changeset to rebase
1379 ctx - changeset to rebase
1378 pctx - merge base, usually ctx.p1()
1380 pctx - merge base, usually ctx.p1()
1379 labels - merge labels eg ['local', 'graft']
1381 labels - merge labels eg ['local', 'graft']
1380
1382
1381 """
1383 """
1382 # If we're grafting a descendant onto an ancestor, be sure to pass
1384 # If we're grafting a descendant onto an ancestor, be sure to pass
1383 # mergeancestor=True to update. This does two things: 1) allows the merge if
1385 # mergeancestor=True to update. This does two things: 1) allows the merge if
1384 # the destination is the same as the parent of the ctx (so we can use graft
1386 # the destination is the same as the parent of the ctx (so we can use graft
1385 # to copy commits), and 2) informs update that the incoming changes are
1387 # to copy commits), and 2) informs update that the incoming changes are
1386 # newer than the destination so it doesn't prompt about "remote changed foo
1388 # newer than the destination so it doesn't prompt about "remote changed foo
1387 # which local deleted".
1389 # which local deleted".
1388 mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())
1390 mergeancestor = repo.changelog.isancestor(repo['.'].node(), ctx.node())
1389
1391
1390 stats = update(repo, ctx.node(), True, True, False, pctx.node(),
1392 stats = update(repo, ctx.node(), True, True, False, pctx.node(),
1391 mergeancestor=mergeancestor, labels=labels)
1393 mergeancestor=mergeancestor, labels=labels)
1392
1394
1393 # drop the second merge parent
1395 # drop the second merge parent
1394 repo.dirstate.beginparentchange()
1396 repo.dirstate.beginparentchange()
1395 repo.setparents(repo['.'].node(), nullid)
1397 repo.setparents(repo['.'].node(), nullid)
1396 repo.dirstate.write(repo.currenttransaction())
1398 repo.dirstate.write(repo.currenttransaction())
1397 # fix up dirstate for copies and renames
1399 # fix up dirstate for copies and renames
1398 copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
1400 copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
1399 repo.dirstate.endparentchange()
1401 repo.dirstate.endparentchange()
1400 return stats
1402 return stats
General Comments 0
You need to be logged in to leave comments. Login now