##// END OF EJS Templates
merge: make 'cd' and 'dc' actions store the same arguments as 'm'...
Siddharth Agarwal -
r26962:fa2daf0e default
parent child Browse files
Show More
@@ -1,1424 +1,1430
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10
10
11 import os
11 import os
12 import copy
12 import copy
13
13
14 from mercurial import hg, util, cmdutil, scmutil, match as match_, \
14 from mercurial import hg, util, cmdutil, scmutil, match as match_, \
15 archival, pathutil, revset, error
15 archival, pathutil, revset, error
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17
17
18 import lfutil
18 import lfutil
19 import lfcommands
19 import lfcommands
20 import basestore
20 import basestore
21
21
22 # -- Utility functions: commonly/repeatedly needed functionality ---------------
22 # -- Utility functions: commonly/repeatedly needed functionality ---------------
23
23
def composelargefilematcher(match, manifest):
    '''create a matcher that matches only the largefiles in the original
    matcher'''
    m = copy.copy(match)

    def islfile(f):
        # a file counts as "large" iff its standin is tracked in the manifest
        return lfutil.standin(f) in manifest

    m._files = [f for f in m._files if islfile(f)]
    m._fileroots = set(m._files)
    m._always = False
    origmatchfn = m.matchfn
    m.matchfn = lambda f: islfile(f) and origmatchfn(f)
    return m
35
35
def composenormalfilematcher(match, manifest, exclude=None):
    """Return a copy of ``match`` restricted to non-largefile paths.

    Standins, files whose standin appears in ``manifest``, and anything in
    ``exclude`` are filtered out of both the file list and the matchfn.
    """
    excluded = set() if exclude is None else set(exclude)

    m = copy.copy(match)

    def isnormal(f):
        if lfutil.isstandin(f):
            return False
        if lfutil.standin(f) in manifest:
            return False
        return f not in excluded

    m._files = [f for f in m._files if isnormal(f)]
    m._fileroots = set(m._files)
    m._always = False
    origmatchfn = m.matchfn
    m.matchfn = lambda f: isnormal(f) and origmatchfn(f)
    return m
50
50
def installnormalfilesmatchfn(manifest):
    '''installmatchfn with a matchfn that ignores all largefiles'''
    def overridematch(ctx, pats=(), opts=None, globbed=False,
                      default='relpath', badfn=None):
        # Must keep the exact signature of scmutil.match, which this replaces.
        if opts is None:
            opts = {}
        match = oldmatch(ctx, pats, opts, globbed, default, badfn=badfn)
        # Strip largefiles and standins from whatever the real matcher built.
        return composenormalfilematcher(match, manifest)
    # Monkey-patches scmutil.match; callers must later call restorematchfn().
    oldmatch = installmatchfn(overridematch)
60
60
def installmatchfn(f):
    """Replace scmutil.match with *f* and return the displaced function.

    The displaced function is also stashed on ``f.oldmatch`` so that
    restorematchfn() can undo the patch.
    Warning: this monkey-patches the scmutil _module_ at runtime and is
    not thread safe!
    """
    displaced = scmutil.match
    f.oldmatch = displaced
    scmutil.match = f
    return displaced
68
68
def restorematchfn():
    '''restores scmutil.match to what it was before installmatchfn
    was called. no-op if scmutil.match is its original function.

    Note that n calls to installmatchfn will require n calls to
    restore the original matchfn.'''
    # Fall back to the current function when no 'oldmatch' is stashed, so
    # this really is a no-op on a pristine scmutil.match. The previous
    # two-argument getattr raised AttributeError in that case, contradicting
    # the docstring; this now mirrors restorematchandpatsfn below.
    scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
76
76
def installmatchandpatsfn(f):
    """Replace scmutil.matchandpats with *f*, returning the displaced one.

    The previous function is stashed on ``f.oldmatchandpats`` so that
    restorematchandpatsfn() can undo the patch.
    """
    previous = scmutil.matchandpats
    f.oldmatchandpats = previous
    scmutil.matchandpats = f
    return previous
82
82
def restorematchandpatsfn():
    """Undo one installmatchandpatsfn() call.

    Restores scmutil.matchandpats to the previously stashed value; a no-op
    when scmutil.matchandpats is already the original function. Note that
    n install calls require n restore calls.
    """
    current = scmutil.matchandpats
    scmutil.matchandpats = getattr(current, 'oldmatchandpats', current)
92
92
def addlargefiles(ui, repo, isaddremove, matcher, **opts):
    """Add the files selected by ``matcher`` as largefiles.

    A file becomes a largefile when --large was given, when its size meets
    the configured minimum, or when it matches the largefiles 'patterns'
    config. Creates empty-hash standins, updates the lfdirstate, and adds
    the standins to the repo. Returns a pair (added, bad) of filename lists.
    """
    large = opts.get('large')
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.get('lfsize'))

    # matcher built from [largefiles] patterns config, if any
    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
        if lfpats:
            lfmatcher = match_.match(repo.root, '', list(lfpats))

    lfnames = []
    m = matcher

    wctx = repo[None]
    # silence bad-file warnings during the walk; the real add reports them
    for f in repo.walk(match_.badmatch(m, lambda x, y: None)):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # addremove in core gets fancy with the name, add doesn't
        if isaddremove:
            name = m.uipath(f)
        else:
            name = m.rel(f)

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_('%s already a largefile\n') % name)
            continue

        if (exact or not exists) and not lfutil.isstandin(f):
            # In case the file was removed previously, but not committed
            # (issue3507)
            if not repo.wvfs.exists(f):
                continue

            abovemin = (lfsize and
                        repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_('adding %s as a largefile\n') % name)

    bad = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    wlock = repo.wlock()
    try:
        if not opts.get('dry_run'):
            standins = []
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                # hash is filled in at commit time; only the exec bit matters
                lfutil.writestandin(repo, standinname, hash='',
                    executable=lfutil.getexecutable(repo.wjoin(f)))
                standins.append(standinname)
                # 'r' means previously removed: resurrect rather than re-add
                if lfdirstate[f] == 'r':
                    lfdirstate.normallookup(f)
                else:
                    lfdirstate.add(f)
            lfdirstate.write()
            # map rejected standins back to the largefile names the user gave
            bad += [lfutil.splitstandin(f)
                    for f in repo[None].add(standins)
                    if f in m.files()]

        added = [f for f in lfnames if f not in bad]
    finally:
        wlock.release()
    return added, bad
167
167
def removelargefiles(ui, repo, isaddremove, matcher, **opts):
    """Remove the largefiles selected by ``matcher``.

    Warns (without removing) about files whose state forbids removal:
    still-existing files with --after, and modified or freshly-added files
    otherwise. Deletes the working-copy files and their standins, forgets
    the standins (unless called from addremove, which handles that itself),
    and syncs the lf dirstate. Returns 1 if any warning was issued, else 0.
    """
    after = opts.get('after')
    m = composelargefilematcher(matcher, repo[None].manifest())
    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=not isaddremove)
    finally:
        repo.lfstatus = False
    manifest = repo[None].manifest()
    # keep only names whose standin is actually tracked; the loop variable is
    # 'files' rather than the original 'list' to avoid shadowing the builtin
    modified, added, deleted, clean = [[f for f in files
                                        if lfutil.standin(f) in manifest]
                                       for files in (s.modified, s.added,
                                                     s.deleted, s.clean)]

    def warn(files, msg):
        # emit one warning per file; report whether anything was warned about
        for f in files:
            ui.warn(msg % m.rel(f))
        return int(len(files) > 0)

    result = 0

    if after:
        remove = deleted
        result = warn(modified + added + clean,
                      _('not removing %s: file still exists\n'))
    else:
        remove = deleted + clean
        result = warn(modified, _('not removing %s: file is modified (use -f'
                                  ' to force removal)\n'))
        result = warn(added, _('not removing %s: file has been marked for add'
                               ' (use forget to undo)\n')) or result

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in sorted(remove):
            if ui.verbose or not m.exact(f):
                # addremove in core gets fancy with the name, remove doesn't
                if isaddremove:
                    name = m.uipath(f)
                else:
                    name = m.rel(f)
                ui.status(_('removing %s\n') % name)

            if not opts.get('dry_run'):
                if not after:
                    util.unlinkpath(repo.wjoin(f), ignoremissing=True)

        if opts.get('dry_run'):
            return result

        remove = [lfutil.standin(f) for f in remove]
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not isaddremove:
            for f in remove:
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
            repo[None].forget(remove)

        for f in remove:
            lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
                                  False)

        lfdirstate.write()
    finally:
        wlock.release()

    return result
238
238
239 # For overriding mercurial.hgweb.webcommands so that largefiles will
239 # For overriding mercurial.hgweb.webcommands so that largefiles will
240 # appear at their right place in the manifests.
240 # appear at their right place in the manifests.
def decodepath(orig, path):
    """Map a standin path back to the largefile path it represents.

    Returns ``path`` unchanged when it is not a standin. Used to override
    mercurial.hgweb.webcommands so largefiles appear at their right place
    in the manifests.
    """
    stripped = lfutil.splitstandin(path)
    if stripped:
        return stripped
    return path
243
243
244 # -- Wrappers: modify existing commands --------------------------------
244 # -- Wrappers: modify existing commands --------------------------------
245
245
def overrideadd(orig, ui, repo, *pats, **opts):
    """Reject the conflicting --normal/--large combination, then delegate."""
    normal = opts.get('normal')
    large = opts.get('large')
    if normal and large:
        raise error.Abort(_('--normal cannot be used with --large'))
    return orig(ui, repo, *pats, **opts)
250
250
def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
    """Add largefiles first, then let the wrapped cmdutil.add do the rest.

    Returns the combined list of files that could not be added.
    """
    # --normal short circuits this override entirely
    if opts.get('normal'):
        return orig(ui, repo, matcher, prefix, explicitonly, **opts)

    ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
    # hide largefiles (and the ones just added) from the normal add
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
                                             ladded)
    bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)

    bad.extend(lbad)
    return bad
263
263
def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
    """Remove normal files via the wrapped command, then the largefiles.

    Returns nonzero if either the wrapped remove or the largefiles
    removal reported a problem.
    """
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
    result = orig(ui, repo, normalmatcher, prefix, after, force, subrepos)
    lfresult = removelargefiles(ui, repo, False, matcher, after=after,
                                force=force)
    return lfresult or result
269
269
def overridestatusfn(orig, repo, rev2, **opts):
    """Call the wrapped subrepo status with largefiles reporting enabled.

    ``lfstatus`` on the underlying repo is set for the duration of the
    call and always cleared again, even when *orig* raises.
    """
    repo._repo.lfstatus = True
    try:
        return orig(repo, rev2, **opts)
    finally:
        repo._repo.lfstatus = False
276
276
def overridestatus(orig, ui, repo, *pats, **opts):
    """Run the wrapped status command with largefiles reporting enabled.

    ``repo.lfstatus`` is set for the duration of the call and always
    cleared again, even when *orig* raises.
    """
    repo.lfstatus = True
    try:
        return orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
283
283
def overridedirty(orig, repo, ignoreupdate=False):
    """Evaluate subrepo dirtiness with largefiles status enabled.

    ``lfstatus`` on the underlying repo is set for the duration of the
    call and always cleared again, even when *orig* raises.
    """
    repo._repo.lfstatus = True
    try:
        return orig(repo, ignoreupdate)
    finally:
        repo._repo.lfstatus = False
290
290
def overridelog(orig, ui, repo, *pats, **opts):
    """Run log so that largefiles are reported under their visible names.

    Temporarily monkey-patches scmutil.matchandpats (and, for --patch,
    cmdutil._makenofollowlogfilematcher) so that every user pattern also
    matches the corresponding .hglf standin; both patches are undone in
    the finally block.
    """
    def overridematchandpats(ctx, pats=(), opts=None, globbed=False,
                             default='relpath', badfn=None):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        if opts is None:
            opts = {}
        matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default,
                                       badfn=badfn)
        m, p = copy.copy(matchandpats)

        if m.always():
            # We want to match everything anyway, so there's no benefit trying
            # to add standins.
            return matchandpats

        pats = set(p)

        def fixpats(pat, tostandin=lfutil.standin):
            # rewrite one pattern so it targets the standin as well;
            # filesets ('set:') are left alone
            if pat.startswith('set:'):
                return pat

            kindpat = match_._patsplit(pat, None)

            # reattach the explicit kind prefix ('glob:', 'path:', ...)
            if kindpat[0] is not None:
                return kindpat[0] + ':' + tostandin(kindpat[1])
            return tostandin(kindpat[1])

        if m._cwd:
            # patterns are cwd-relative; 'back' is the cwd-relative prefix
            # leading to the repo root (e.g. '../../')
            hglf = lfutil.shortname
            back = util.pconvert(m.rel(hglf)[:-len(hglf)])

            def tostandin(f):
                # The file may already be a standin, so truncate the back
                # prefix and test before mangling it. This avoids turning
                # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
                if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
                    return f

                # An absolute path is from outside the repo, so truncate the
                # path to the root before building the standin. Otherwise cwd
                # is somewhere in the repo, relative to root, and needs to be
                # prepended before building the standin.
                if os.path.isabs(m._cwd):
                    f = f[len(back):]
                else:
                    f = m._cwd + '/' + f
                return back + lfutil.standin(f)

            pats.update(fixpats(f, tostandin) for f in p)
        else:
            # cwd is the repo root: a plain standin() prefix suffices
            def tostandin(f):
                if lfutil.splitstandin(f):
                    return f
                return lfutil.standin(f)
            pats.update(fixpats(f, tostandin) for f in p)

        for i in range(0, len(m._files)):
            # Don't add '.hglf' to m.files, since that is already covered by '.'
            if m._files[i] == '.':
                continue
            standin = lfutil.standin(m._files[i])
            # If the "standin" is a directory, append instead of replace to
            # support naming a directory on the command line with only
            # largefiles. The original directory is kept to support normal
            # files.
            if standin in repo[ctx.node()]:
                m._files[i] = standin
            elif m._files[i] not in repo[ctx.node()] \
                    and repo.wvfs.isdir(standin):
                m._files.append(standin)

        m._fileroots = set(m._files)
        m._always = False
        origmatchfn = m.matchfn
        def lfmatchfn(f):
            # accept f if either f itself or its de-standined name matches
            lf = lfutil.splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            r = origmatchfn(f)
            return r
        m.matchfn = lfmatchfn

        ui.debug('updated patterns: %s\n' % sorted(pats))
        return m, pats

    # For hg log --patch, the match object is used in two different senses:
    # (1) to determine what revisions should be printed out, and
    # (2) to determine what files to print out diffs for.
    # The magic matchandpats override should be used for case (1) but not for
    # case (2).
    def overridemakelogfilematcher(repo, pats, opts, badfn=None):
        # build the diff matcher from the *unpatched* matchandpats
        wctx = repo[None]
        match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
        return lambda rev: match

    oldmatchandpats = installmatchandpatsfn(overridematchandpats)
    oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
    setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)

    try:
        return orig(ui, repo, *pats, **opts)
    finally:
        # undo both monkey-patches regardless of how orig exits
        restorematchandpatsfn()
        setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
399
399
def overrideverify(orig, ui, repo, *pats, **opts):
    """Run verify, optionally verifying largefiles as well.

    The largefiles-specific flags (--large, --lfa, --lfc) are stripped
    from ``opts`` before the wrapped verify runs; if any of them was
    given, lfcommands.verifylfiles runs afterwards. Returns the first
    nonzero result.
    """
    # renamed from 'all' to avoid shadowing the builtin
    large = opts.pop('large', False)
    verifyall = opts.pop('lfa', False)
    contents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if large or verifyall or contents:
        result = result or lfcommands.verifylfiles(ui, repo, verifyall,
                                                   contents)
    return result
409
409
def overridedebugstate(orig, ui, repo, *pats, **opts):
    """Show the largefiles dirstate instead when --large was given."""
    large = opts.pop('large', False)
    if not large:
        orig(ui, repo, *pats, **opts)
        return
    # Present the lfdirstate through a minimal repo-shaped shim; debugstate
    # only reads the .dirstate attribute of what it is handed.
    class fakerepo(object):
        dirstate = lfutil.openlfdirstate(ui, repo)
    orig(ui, fakerepo, *pats, **opts)
418
418
419 # Before starting the manifest merge, merge.updates will call
419 # Before starting the manifest merge, merge.updates will call
420 # _checkunknownfile to check if there are any files in the merged-in
420 # _checkunknownfile to check if there are any files in the merged-in
421 # changeset that collide with unknown files in the working copy.
421 # changeset that collide with unknown files in the working copy.
422 #
422 #
423 # The largefiles are seen as unknown, so this prevents us from merging
423 # The largefiles are seen as unknown, so this prevents us from merging
424 # in a file 'foo' if we already have a largefile with the same name.
424 # in a file 'foo' if we already have a largefile with the same name.
425 #
425 #
426 # The overridden function filters the unknown files by removing any
426 # The overridden function filters the unknown files by removing any
427 # largefiles. This makes the merge proceed and we can then handle this
427 # largefiles. This makes the merge proceed and we can then handle this
428 # case further in the overridden calculateupdates function below.
428 # case further in the overridden calculateupdates function below.
def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
    """Never report a tracked largefile as a colliding unknown file.

    When the working context already tracks a standin for ``f``, report
    no collision; otherwise defer to the wrapped check.
    """
    standin = lfutil.standin(repo.dirstate.normalize(f))
    if standin in wctx:
        return False
    return origfn(repo, wctx, mctx, f, f2)
433
433
434 # The manifest merge handles conflicts on the manifest level. We want
434 # The manifest merge handles conflicts on the manifest level. We want
435 # to handle changes in largefile-ness of files at this level too.
435 # to handle changes in largefile-ness of files at this level too.
436 #
436 #
437 # The strategy is to run the original calculateupdates and then process
437 # The strategy is to run the original calculateupdates and then process
438 # the action list it outputs. There are two cases we need to deal with:
438 # the action list it outputs. There are two cases we need to deal with:
439 #
439 #
440 # 1. Normal file in p1, largefile in p2. Here the largefile is
440 # 1. Normal file in p1, largefile in p2. Here the largefile is
441 # detected via its standin file, which will enter the working copy
441 # detected via its standin file, which will enter the working copy
442 # with a "get" action. It is not "merge" since the standin is all
442 # with a "get" action. It is not "merge" since the standin is all
443 # Mercurial is concerned with at this level -- the link to the
443 # Mercurial is concerned with at this level -- the link to the
444 # existing normal file is not relevant here.
444 # existing normal file is not relevant here.
445 #
445 #
446 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
446 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
447 # since the largefile will be present in the working copy and
447 # since the largefile will be present in the working copy and
448 # different from the normal file in p2. Mercurial therefore
448 # different from the normal file in p2. Mercurial therefore
449 # triggers a merge action.
449 # triggers a merge action.
450 #
450 #
451 # In both cases, we prompt the user and emit new actions to either
451 # In both cases, we prompt the user and emit new actions to either
452 # remove the standin (if the normal file was kept) or to remove the
452 # remove the standin (if the normal file was kept) or to remove the
453 # normal file and get the standin (if the largefile was kept). The
453 # normal file and get the standin (if the largefile was kept). The
454 # default prompt answer is to use the largefile version since it was
454 # default prompt answer is to use the largefile version since it was
455 # presumably changed on purpose.
455 # presumably changed on purpose.
456 #
456 #
457 # Finally, the merge.applyupdates function will then take care of
457 # Finally, the merge.applyupdates function will then take care of
458 # writing the files into the working copy and lfcommands.updatelfiles
458 # writing the files into the working copy and lfcommands.updatelfiles
459 # will update the largefiles.
459 # will update the largefiles.
def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
                             partial, acceptremote, followcopies):
    """Wrap merge.calculateupdates to resolve largefile/normal-file clashes.

    Runs the wrapped calculateupdates, then, for every file that is a
    largefile in p1 (found either by its own name or via its standin),
    inspects the action computed for the file and for its standin.  When
    one side turned a normal file into a largefile (or vice versa), the
    user is prompted and both actions are rewritten so only the chosen
    variant survives.  Overwrite-style updates (force and not branchmerge)
    are passed through untouched.
    """
    overwrite = force and not branchmerge
    actions, diverge, renamedelete = origfn(
        repo, p1, p2, pas, branchmerge, force, partial, acceptremote,
        followcopies)

    if overwrite:
        return actions, diverge, renamedelete

    # Convert to dictionary with filename as key and action as value.
    lfiles = set()
    for f in actions:
        splitstandin = f and lfutil.splitstandin(f)
        if splitstandin in p1:
            lfiles.add(splitstandin)
        elif lfutil.standin(f) in p1:
            lfiles.add(f)

    for lfile in lfiles:
        standin = lfutil.standin(lfile)
        (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
        (sm, sargs, smsg) = actions.get(standin, (None, None, None))
        if sm in ('g', 'dc') and lm != 'r':
            if sm == 'dc':
                # 'dc' actions carry merge-style arguments; reduce them to
                # the (flags,) tuple that a 'g' (get) action expects.
                f1, f2, fa, move, anc = sargs
                sargs = (p2[f2].flags(),)
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            usermsg = _('remote turned local normal file %s into a largefile\n'
                        'use (l)argefile or keep (n)ormal file?'
                        '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
                actions[lfile] = ('r', None, 'replaced by standin')
                actions[standin] = ('g', sargs, 'replaces standin')
            else: # keep local normal file
                actions[lfile] = ('k', None, 'replaces standin')
                if branchmerge:
                    actions[standin] = ('k', None, 'replaced by non-standin')
                else:
                    actions[standin] = ('r', None, 'replaced by non-standin')
        elif lm in ('g', 'dc') and sm != 'r':
            if lm == 'dc':
                # Same reduction as above, for the largefile's own action.
                f1, f2, fa, move, anc = largs
                largs = (p2[f2].flags(),)
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            usermsg = _('remote turned local largefile %s into a normal file\n'
                        'keep (l)argefile or use (n)ormal file?'
                        '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
                if branchmerge:
                    # largefile can be restored from standin safely
                    actions[lfile] = ('k', None, 'replaced by standin')
                    actions[standin] = ('k', None, 'replaces standin')
                else:
                    # "lfile" should be marked as "removed" without
                    # removal of itself
                    actions[lfile] = ('lfmr', None,
                                      'forget non-standin largefile')

                    # linear-merge should treat this largefile as 're-added'
                    actions[standin] = ('a', None, 'keep standin')
            else: # pick remote normal file
                actions[lfile] = ('g', largs, 'replaces standin')
                actions[standin] = ('r', None, 'replaced by non-standin')

    return actions, diverge, renamedelete
522
528
def mergerecordupdates(orig, repo, actions, branchmerge):
    """Process 'lfmr' (largefile mark-removed) actions, then delegate.

    Files scheduled as 'lfmr' are dropped from the repo dirstate and added
    to the largefiles dirstate before any other action is recorded, so the
    removal happens first and the file is not synced back as 'normal'.
    """
    if 'lfmr' in actions:
        lfds = lfutil.openlfdirstate(repo.ui, repo)
        for lfile, _args, _msg in actions['lfmr']:
            # removal must be recorded before 'orig' handles everything else
            repo.dirstate.remove(lfile)
            # keep lfile from being synclfdirstate'd as normal
            lfds.add(lfile)
        lfds.write()
    return orig(repo, actions, branchmerge)
535
541
536
542
# Override filemerge to prompt the user about how they wish to merge
# largefiles. This will handle identical edits without prompting the user.
def overridefilemerge(origfn, premerge, repo, mynode, orig, fcd, fco, fca,
                      labels=None):
    """Merge a standin by comparing largefile hashes instead of contents.

    Non-standins are delegated to the wrapped filemerge.  For standins,
    the standin contents (the largefile hashes) of ancestor, local and
    other are compared: the other side's standin is written out when only
    the other side changed, or when both sides changed and the user picks
    (o)ther at the prompt.  Always returns (True, 0) for standins.
    """
    if not lfutil.isstandin(orig):
        return origfn(premerge, repo, mynode, orig, fcd, fco, fca,
                      labels=labels)

    # standin files contain the largefile hash; normalize for comparison
    ahash = fca.data().strip().lower()
    dhash = fcd.data().strip().lower()
    ohash = fco.data().strip().lower()
    # only act when the other side actually changed; if local also changed,
    # ask the user which largefile to keep (answer 1 == take other)
    if (ohash != ahash and
        ohash != dhash and
        (dhash == ahash or
         repo.ui.promptchoice(
             _('largefile %s has a merge conflict\nancestor was %s\n'
               'keep (l)ocal %s or\ntake (o)ther %s?'
               '$$ &Local $$ &Other') %
             (lfutil.splitstandin(orig), ahash, dhash, ohash),
             0) == 1)):
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return True, 0
559
565
def copiespathcopies(orig, ctx1, ctx2, match=None):
    """Return pathcopies with standin names translated to largefile names.

    Both the destination (key) and source (value) of every copy record
    are mapped through lfutil.splitstandin, so callers see largefile
    paths rather than their standins.
    """
    rawcopies = orig(ctx1, ctx2, match=match)
    return dict((lfutil.splitstandin(dst) or dst,
                 lfutil.splitstandin(src) or src)
                for dst, src in rawcopies.iteritems())
568
574
# Copy first changes the matchers to match standins instead of
# largefiles. Then it overrides util.copyfile in that function it
# checks if the destination largefile already exists. It also keeps a
# list of copied files so that the largefiles can be copied and the
# dirstate updated.
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    """Wrap cmdutil copy/rename so largefiles are copied alongside standins.

    Runs the wrapped command twice: once with a matcher restricted to
    normal files, then again with the matcher (and util.copyfile)
    overridden to operate on standins, recording each standin copy so the
    corresponding largefile can be copied/renamed and the largefiles
    dirstate updated.  An 'no files to copy' abort is only re-raised if
    both passes found nothing.
    """
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    installnormalfilesmatchfn(repo[None].manifest())
    try:
        result = orig(ui, repo, pats, opts, rename)
    except error.Abort as e:
        # 'no files to copy' here may just mean all matches were largefiles
        if str(e) != _('no files to copy'):
            raise e
        else:
            nonormalfiles = True
            result = 0
    finally:
        restorematchfn()

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    def makestandin(relpath):
        # map a working-directory-relative path to its standin's full path
        path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
        return os.path.join(repo.wjoin(lfutil.standin(path)))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))

    try:
        # When we call orig below it creates the standins but we don't add
        # them to the dir state until later so lock during that time.
        wlock = repo.wlock()

        manifest = repo[None].manifest()
        def overridematch(ctx, pats=(), opts=None, globbed=False,
                          default='relpath', badfn=None):
            # matcher that matches only standins of tracked largefiles
            if opts is None:
                opts = {}
            newpats = []
            # The patterns were previously mangled to add the standin
            # directory; we need to remove that now
            for pat in pats:
                if match_.patkind(pat) is None and lfutil.shortname in pat:
                    newpats.append(pat.replace(lfutil.shortname, ''))
                else:
                    newpats.append(pat)
            match = oldmatch(ctx, newpats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)
            lfile = lambda f: lfutil.standin(f) in manifest
            m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
            m._fileroots = set(m._files)
            origmatchfn = m.matchfn
            m.matchfn = lambda f: (lfutil.isstandin(f) and
                                   (f in manifest) and
                                   origmatchfn(lfutil.splitstandin(f)) or
                                   None)
            return m
        oldmatch = installmatchfn(overridematch)
        listpats = []
        for pat in pats:
            if match_.patkind(pat) is not None:
                listpats.append(pat)
            else:
                listpats.append(makestandin(pat))

        try:
            origcopyfile = util.copyfile
            copiedfiles = []
            def overridecopyfile(src, dest):
                # refuse to clobber an existing largefile unless --force,
                # and remember every standin copy for the largefile pass
                if (lfutil.shortname in src and
                    dest.startswith(repo.wjoin(lfutil.shortname))):
                    destlfile = dest.replace(lfutil.shortname, '')
                    if not opts['force'] and os.path.exists(destlfile):
                        raise IOError('',
                            _('destination largefile already exists'))
                copiedfiles.append((src, dest))
                origcopyfile(src, dest)

            util.copyfile = overridecopyfile
            result += orig(ui, repo, listpats, opts, rename)
        finally:
            util.copyfile = origcopyfile

        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for (src, dest) in copiedfiles:
            if (lfutil.shortname in src and
                dest.startswith(repo.wjoin(lfutil.shortname))):
                # translate the recorded standin copy into a largefile copy
                srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
                destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
                destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
                if not os.path.isdir(destlfiledir):
                    os.makedirs(destlfiledir)
                if rename:
                    os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))

                    # The file is gone, but this deletes any empty parent
                    # directories as a side-effect.
                    util.unlinkpath(repo.wjoin(srclfile), True)
                    lfdirstate.remove(srclfile)
                else:
                    util.copyfile(repo.wjoin(srclfile),
                                  repo.wjoin(destlfile))

                    lfdirstate.add(destlfile)
        lfdirstate.write()
    except error.Abort as e:
        if str(e) != _('no files to copy'):
            raise e
        else:
            nolfiles = True
    finally:
        restorematchfn()
        wlock.release()

    if nolfiles and nonormalfiles:
        raise error.Abort(_('no files to copy'))

    return result
706
712
# When the user calls revert, we have to be careful to not revert any
# changes to other largefiles accidentally. This means we have to keep
# track of the largefiles that are being reverted so we only pull down
# the necessary largefiles.
#
# Standins are only updated (to match the hash of largefiles) before
# commits. Update the standins then run the original revert, changing
# the matcher to hit standins instead of largefiles. Based on the
# resulting standins update the largefiles.
def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
    """Revert largefiles by reverting their standins.

    Brings standins up to date with the working copy, runs the wrapped
    revert with a matcher that maps largefile names to standins, then
    updates the largefiles whose standins changed.  Holds the wlock for
    the whole operation because the standins are in an inconsistent state
    in between.
    """
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        s = lfutil.lfdirstatestatus(lfdirstate, repo)
        lfdirstate.write()
        # sync standins with the current largefile contents so the revert
        # below sees accurate standin state
        for lfile in s.modified:
            lfutil.updatestandin(repo, lfutil.standin(lfile))
        for lfile in s.deleted:
            if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
                os.unlink(repo.wjoin(lfutil.standin(lfile)))

        oldstandins = lfutil.getstandinsstate(repo)

        def overridematch(mctx, pats=(), opts=None, globbed=False,
                          default='relpath', badfn=None):
            if opts is None:
                opts = {}
            match = oldmatch(mctx, pats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)

            # revert supports recursing into subrepos, and though largefiles
            # currently doesn't work correctly in that case, this match is
            # called, so the lfdirstate above may not be the correct one for
            # this invocation of match.
            lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
                                               False)

            def tostandin(f):
                # replace a largefile name by its standin where one exists
                standin = lfutil.standin(f)
                if standin in ctx or standin in mctx:
                    return standin
                elif standin in repo[None] or lfdirstate[f] == 'r':
                    return None
                return f
            m._files = [tostandin(f) for f in m._files]
            m._files = [f for f in m._files if f is not None]
            m._fileroots = set(m._files)
            origmatchfn = m.matchfn
            def matchfn(f):
                if lfutil.isstandin(f):
                    return (origmatchfn(lfutil.splitstandin(f)) and
                            (f in ctx or f in mctx))
                return origmatchfn(f)
            m.matchfn = matchfn
            return m
        oldmatch = installmatchfn(overridematch)
        try:
            orig(ui, repo, ctx, parents, *pats, **opts)
        finally:
            restorematchfn()

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        # lfdirstate should be 'normallookup'-ed for updated files,
        # because reverting doesn't touch dirstate for 'normal' files
        # when target revision is explicitly specified: in such case,
        # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
        # of target (standin) file.
        lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
                                normallookup=True)

    finally:
        wlock.release()
783
789
# after pulling changesets, we need to take some extra care to get
# largefiles updated remotely
def overridepull(orig, ui, repo, source=None, **opts):
    """Pull, then cache largefiles for the revisions selected by --lfrev.

    'pulled()' is added to the revsets when --all-largefiles is given.
    repo.firstpulled is set temporarily so the pulled() revset symbol can
    resolve while the largefile revsets are evaluated, and a summary of
    how many largefiles were cached is printed at the end.
    """
    numbefore = len(repo)
    source = source or 'default'
    repo.lfpullsource = source
    result = orig(ui, repo, source, **opts)
    numafter = len(repo)

    lfrevs = opts.get('lfrev', [])
    if opts.get('all_largefiles'):
        lfrevs.append('pulled()')
    if lfrevs and numafter > numbefore:
        totalcached = 0
        repo.firstpulled = numbefore # for pulled() revset expression
        try:
            for rev in scmutil.revrange(repo, lfrevs):
                ui.note(_('pulling largefiles for revision %s\n') % rev)
                cached, missing = lfcommands.cachelfiles(ui, repo, rev)
                totalcached += len(cached)
        finally:
            del repo.firstpulled
        ui.status(_("%d largefiles cached\n") % totalcached)
    return result
808
814
def pulledrevsetsymbol(repo, subset, x):
    """``pulled()``
    Changesets that just has been pulled.

    Only available with largefiles from pull --lfrev expressions.

    .. container:: verbose

      Some examples:

      - pull largefiles for all new changesets::

          hg pull -lfrev "pulled()"

      - pull largefiles for all new branch heads::

          hg pull -lfrev "head(pulled()) and not closed()"

    """

    # repo.firstpulled is only set (by overridepull) while the --lfrev
    # revsets are being evaluated; outside that window abort.
    try:
        firstpulled = repo.firstpulled
    except AttributeError:
        raise error.Abort(_("pulled() only available in --lfrev"))
    # everything at or above the pre-pull tip was just pulled
    return revset.baseset([r for r in subset if r >= firstpulled])
834
840
def overrideclone(orig, ui, source, dest=None, **opts):
    """Refuse --all-largefiles clones to non-local destinations.

    Largefiles can only be downloaded into a local repository, so the
    combination is rejected up front; everything else is delegated
    unchanged to the wrapped clone.
    """
    target = dest if dest is not None else hg.defaultdest(source)
    if opts.get('all_largefiles') and not hg.islocal(target):
        raise error.Abort(_(
            '--all-largefiles is incompatible with non-local destination %s') %
            target)

    return orig(ui, source, dest, **opts)
845
851
def hgclone(orig, ui, opts, *args, **kwargs):
    """Post-process hg.clone for largefiles.

    After the wrapped clone finishes, enable the largefiles extension in
    the destination's hgrc when the new repository requires it, and
    download every largefile when --all-largefiles was given.  Returns
    None instead of the clone result if any largefile was missing.
    """
    result = orig(ui, opts, *args, **kwargs)
    if result is None:
        return result

    sourcerepo, destrepo = result
    repo = destrepo.local()

    # When cloning to a remote repo (like through SSH), no repo is available
    # from the peer. Therefore the largefiles can't be downloaded and the
    # hgrc can't be updated.
    if not repo:
        return result

    # If largefiles is required for this repo, permanently enable it locally
    if 'largefiles' in repo.requirements:
        fp = repo.vfs('hgrc', 'a', text=True)
        try:
            fp.write('\n[extensions]\nlargefiles=\n')
        finally:
            fp.close()

    # Caching is implicitly limited to 'rev' option, since the dest repo was
    # truncated at that point. The user may expect a download count with
    # this option, so attempt whether or not this is a largefile repo.
    if opts.get('all_largefiles'):
        success, missing = lfcommands.downloadlfiles(ui, repo, None)
        if missing != 0:
            return None

    return result
877
883
def overriderebase(orig, ui, repo, **opts):
    """Rebase with largefile status output silenced and commits automated.

    On repositories without largefiles enabled the wrapped rebase runs
    untouched.  Otherwise a commit hook that auto-answers prompts and a
    no-op status writer are pushed for the duration of the rebase and
    popped again afterwards.
    """
    if not util.safehasattr(repo, '_largefilesenabled'):
        return orig(ui, repo, **opts)

    # suppress per-file status messages and automate largefile commit
    # prompts while the rebase runs; restore both stacks afterwards
    repo._lfcommithooks.append(
        lfutil.automatedcommithook(opts.get('continue')))
    repo._lfstatuswriters.append(lambda *msg, **kwargs: None)
    try:
        return orig(ui, repo, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
890
896
def overridearchivecmd(orig, ui, repo, dest, **opts):
    """Run the archive command with largefile status enabled.

    lfstatus is set on the unfiltered repo so the archive contains
    largefile contents rather than standins; it is always reset when the
    wrapped command returns.
    """
    unfiltered = repo.unfiltered()
    unfiltered.lfstatus = True
    try:
        return orig(ui, unfiltered, dest, **opts)
    finally:
        unfiltered.lfstatus = False
898
904
def hgwebarchive(orig, web, req, tmpl):
    """Serve an hgweb archive request with largefile status enabled.

    lfstatus makes the repo report largefile contents instead of
    standins while the archive is generated; it is always reset when the
    wrapped handler returns.
    """
    repo = web.repo
    repo.lfstatus = True
    try:
        return orig(web, req, tmpl)
    finally:
        repo.lfstatus = False
906
912
907 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
913 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
908 prefix='', mtime=None, subrepos=None):
914 prefix='', mtime=None, subrepos=None):
909 # For some reason setting repo.lfstatus in hgwebarchive only changes the
915 # For some reason setting repo.lfstatus in hgwebarchive only changes the
910 # unfiltered repo's attr, so check that as well.
916 # unfiltered repo's attr, so check that as well.
911 if not repo.lfstatus and not repo.unfiltered().lfstatus:
917 if not repo.lfstatus and not repo.unfiltered().lfstatus:
912 return orig(repo, dest, node, kind, decode, matchfn, prefix, mtime,
918 return orig(repo, dest, node, kind, decode, matchfn, prefix, mtime,
913 subrepos)
919 subrepos)
914
920
915 # No need to lock because we are only reading history and
921 # No need to lock because we are only reading history and
916 # largefile caches, neither of which are modified.
922 # largefile caches, neither of which are modified.
917 if node is not None:
923 if node is not None:
918 lfcommands.cachelfiles(repo.ui, repo, node)
924 lfcommands.cachelfiles(repo.ui, repo, node)
919
925
920 if kind not in archival.archivers:
926 if kind not in archival.archivers:
921 raise error.Abort(_("unknown archive type '%s'") % kind)
927 raise error.Abort(_("unknown archive type '%s'") % kind)
922
928
923 ctx = repo[node]
929 ctx = repo[node]
924
930
925 if kind == 'files':
931 if kind == 'files':
926 if prefix:
932 if prefix:
927 raise error.Abort(
933 raise error.Abort(
928 _('cannot give prefix when archiving to files'))
934 _('cannot give prefix when archiving to files'))
929 else:
935 else:
930 prefix = archival.tidyprefix(dest, kind, prefix)
936 prefix = archival.tidyprefix(dest, kind, prefix)
931
937
932 def write(name, mode, islink, getdata):
938 def write(name, mode, islink, getdata):
933 if matchfn and not matchfn(name):
939 if matchfn and not matchfn(name):
934 return
940 return
935 data = getdata()
941 data = getdata()
936 if decode:
942 if decode:
937 data = repo.wwritedata(name, data)
943 data = repo.wwritedata(name, data)
938 archiver.addfile(prefix + name, mode, islink, data)
944 archiver.addfile(prefix + name, mode, islink, data)
939
945
940 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
946 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
941
947
942 if repo.ui.configbool("ui", "archivemeta", True):
948 if repo.ui.configbool("ui", "archivemeta", True):
943 write('.hg_archival.txt', 0o644, False,
949 write('.hg_archival.txt', 0o644, False,
944 lambda: archival.buildmetadata(ctx))
950 lambda: archival.buildmetadata(ctx))
945
951
946 for f in ctx:
952 for f in ctx:
947 ff = ctx.flags(f)
953 ff = ctx.flags(f)
948 getdata = ctx[f].data
954 getdata = ctx[f].data
949 if lfutil.isstandin(f):
955 if lfutil.isstandin(f):
950 if node is not None:
956 if node is not None:
951 path = lfutil.findfile(repo, getdata().strip())
957 path = lfutil.findfile(repo, getdata().strip())
952
958
953 if path is None:
959 if path is None:
954 raise error.Abort(
960 raise error.Abort(
955 _('largefile %s not found in repo store or system cache')
961 _('largefile %s not found in repo store or system cache')
956 % lfutil.splitstandin(f))
962 % lfutil.splitstandin(f))
957 else:
963 else:
958 path = lfutil.splitstandin(f)
964 path = lfutil.splitstandin(f)
959
965
960 f = lfutil.splitstandin(f)
966 f = lfutil.splitstandin(f)
961
967
962 def getdatafn():
968 def getdatafn():
963 fd = None
969 fd = None
964 try:
970 try:
965 fd = open(path, 'rb')
971 fd = open(path, 'rb')
966 return fd.read()
972 return fd.read()
967 finally:
973 finally:
968 if fd:
974 if fd:
969 fd.close()
975 fd.close()
970
976
971 getdata = getdatafn
977 getdata = getdatafn
972 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
978 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
973
979
974 if subrepos:
980 if subrepos:
975 for subpath in sorted(ctx.substate):
981 for subpath in sorted(ctx.substate):
976 sub = ctx.workingsub(subpath)
982 sub = ctx.workingsub(subpath)
977 submatch = match_.narrowmatcher(subpath, matchfn)
983 submatch = match_.narrowmatcher(subpath, matchfn)
978 sub._repo.lfstatus = True
984 sub._repo.lfstatus = True
979 sub.archive(archiver, prefix, submatch)
985 sub.archive(archiver, prefix, submatch)
980
986
981 archiver.done()
987 archiver.done()
982
988
983 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
989 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
984 if not repo._repo.lfstatus:
990 if not repo._repo.lfstatus:
985 return orig(repo, archiver, prefix, match)
991 return orig(repo, archiver, prefix, match)
986
992
987 repo._get(repo._state + ('hg',))
993 repo._get(repo._state + ('hg',))
988 rev = repo._state[1]
994 rev = repo._state[1]
989 ctx = repo._repo[rev]
995 ctx = repo._repo[rev]
990
996
991 if ctx.node() is not None:
997 if ctx.node() is not None:
992 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
998 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
993
999
994 def write(name, mode, islink, getdata):
1000 def write(name, mode, islink, getdata):
995 # At this point, the standin has been replaced with the largefile name,
1001 # At this point, the standin has been replaced with the largefile name,
996 # so the normal matcher works here without the lfutil variants.
1002 # so the normal matcher works here without the lfutil variants.
997 if match and not match(f):
1003 if match and not match(f):
998 return
1004 return
999 data = getdata()
1005 data = getdata()
1000
1006
1001 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
1007 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
1002
1008
1003 for f in ctx:
1009 for f in ctx:
1004 ff = ctx.flags(f)
1010 ff = ctx.flags(f)
1005 getdata = ctx[f].data
1011 getdata = ctx[f].data
1006 if lfutil.isstandin(f):
1012 if lfutil.isstandin(f):
1007 if ctx.node() is not None:
1013 if ctx.node() is not None:
1008 path = lfutil.findfile(repo._repo, getdata().strip())
1014 path = lfutil.findfile(repo._repo, getdata().strip())
1009
1015
1010 if path is None:
1016 if path is None:
1011 raise error.Abort(
1017 raise error.Abort(
1012 _('largefile %s not found in repo store or system cache')
1018 _('largefile %s not found in repo store or system cache')
1013 % lfutil.splitstandin(f))
1019 % lfutil.splitstandin(f))
1014 else:
1020 else:
1015 path = lfutil.splitstandin(f)
1021 path = lfutil.splitstandin(f)
1016
1022
1017 f = lfutil.splitstandin(f)
1023 f = lfutil.splitstandin(f)
1018
1024
1019 def getdatafn():
1025 def getdatafn():
1020 fd = None
1026 fd = None
1021 try:
1027 try:
1022 fd = open(os.path.join(prefix, path), 'rb')
1028 fd = open(os.path.join(prefix, path), 'rb')
1023 return fd.read()
1029 return fd.read()
1024 finally:
1030 finally:
1025 if fd:
1031 if fd:
1026 fd.close()
1032 fd.close()
1027
1033
1028 getdata = getdatafn
1034 getdata = getdatafn
1029
1035
1030 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
1036 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
1031
1037
1032 for subpath in sorted(ctx.substate):
1038 for subpath in sorted(ctx.substate):
1033 sub = ctx.workingsub(subpath)
1039 sub = ctx.workingsub(subpath)
1034 submatch = match_.narrowmatcher(subpath, match)
1040 submatch = match_.narrowmatcher(subpath, match)
1035 sub._repo.lfstatus = True
1041 sub._repo.lfstatus = True
1036 sub.archive(archiver, prefix + repo._path + '/', submatch)
1042 sub.archive(archiver, prefix + repo._path + '/', submatch)
1037
1043
1038 # If a largefile is modified, the change is not reflected in its
1044 # If a largefile is modified, the change is not reflected in its
1039 # standin until a commit. cmdutil.bailifchanged() raises an exception
1045 # standin until a commit. cmdutil.bailifchanged() raises an exception
1040 # if the repo has uncommitted changes. Wrap it to also check if
1046 # if the repo has uncommitted changes. Wrap it to also check if
1041 # largefiles were changed. This is used by bisect, backout and fetch.
1047 # largefiles were changed. This is used by bisect, backout and fetch.
1042 def overridebailifchanged(orig, repo, *args, **kwargs):
1048 def overridebailifchanged(orig, repo, *args, **kwargs):
1043 orig(repo, *args, **kwargs)
1049 orig(repo, *args, **kwargs)
1044 repo.lfstatus = True
1050 repo.lfstatus = True
1045 s = repo.status()
1051 s = repo.status()
1046 repo.lfstatus = False
1052 repo.lfstatus = False
1047 if s.modified or s.added or s.removed or s.deleted:
1053 if s.modified or s.added or s.removed or s.deleted:
1048 raise error.Abort(_('uncommitted changes'))
1054 raise error.Abort(_('uncommitted changes'))
1049
1055
1050 def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
1056 def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
1051 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1057 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1052 bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
1058 bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
1053 m = composelargefilematcher(match, repo[None].manifest())
1059 m = composelargefilematcher(match, repo[None].manifest())
1054
1060
1055 try:
1061 try:
1056 repo.lfstatus = True
1062 repo.lfstatus = True
1057 s = repo.status(match=m, clean=True)
1063 s = repo.status(match=m, clean=True)
1058 finally:
1064 finally:
1059 repo.lfstatus = False
1065 repo.lfstatus = False
1060 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1066 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1061 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
1067 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
1062
1068
1063 for f in forget:
1069 for f in forget:
1064 if lfutil.standin(f) not in repo.dirstate and not \
1070 if lfutil.standin(f) not in repo.dirstate and not \
1065 repo.wvfs.isdir(lfutil.standin(f)):
1071 repo.wvfs.isdir(lfutil.standin(f)):
1066 ui.warn(_('not removing %s: file is already untracked\n')
1072 ui.warn(_('not removing %s: file is already untracked\n')
1067 % m.rel(f))
1073 % m.rel(f))
1068 bad.append(f)
1074 bad.append(f)
1069
1075
1070 for f in forget:
1076 for f in forget:
1071 if ui.verbose or not m.exact(f):
1077 if ui.verbose or not m.exact(f):
1072 ui.status(_('removing %s\n') % m.rel(f))
1078 ui.status(_('removing %s\n') % m.rel(f))
1073
1079
1074 # Need to lock because standin files are deleted then removed from the
1080 # Need to lock because standin files are deleted then removed from the
1075 # repository and we could race in-between.
1081 # repository and we could race in-between.
1076 wlock = repo.wlock()
1082 wlock = repo.wlock()
1077 try:
1083 try:
1078 lfdirstate = lfutil.openlfdirstate(ui, repo)
1084 lfdirstate = lfutil.openlfdirstate(ui, repo)
1079 for f in forget:
1085 for f in forget:
1080 if lfdirstate[f] == 'a':
1086 if lfdirstate[f] == 'a':
1081 lfdirstate.drop(f)
1087 lfdirstate.drop(f)
1082 else:
1088 else:
1083 lfdirstate.remove(f)
1089 lfdirstate.remove(f)
1084 lfdirstate.write()
1090 lfdirstate.write()
1085 standins = [lfutil.standin(f) for f in forget]
1091 standins = [lfutil.standin(f) for f in forget]
1086 for f in standins:
1092 for f in standins:
1087 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
1093 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
1088 rejected = repo[None].forget(standins)
1094 rejected = repo[None].forget(standins)
1089 finally:
1095 finally:
1090 wlock.release()
1096 wlock.release()
1091
1097
1092 bad.extend(f for f in rejected if f in m.files())
1098 bad.extend(f for f in rejected if f in m.files())
1093 forgot.extend(f for f in forget if f not in rejected)
1099 forgot.extend(f for f in forget if f not in rejected)
1094 return bad, forgot
1100 return bad, forgot
1095
1101
1096 def _getoutgoings(repo, other, missing, addfunc):
1102 def _getoutgoings(repo, other, missing, addfunc):
1097 """get pairs of filename and largefile hash in outgoing revisions
1103 """get pairs of filename and largefile hash in outgoing revisions
1098 in 'missing'.
1104 in 'missing'.
1099
1105
1100 largefiles already existing on 'other' repository are ignored.
1106 largefiles already existing on 'other' repository are ignored.
1101
1107
1102 'addfunc' is invoked with each unique pairs of filename and
1108 'addfunc' is invoked with each unique pairs of filename and
1103 largefile hash value.
1109 largefile hash value.
1104 """
1110 """
1105 knowns = set()
1111 knowns = set()
1106 lfhashes = set()
1112 lfhashes = set()
1107 def dedup(fn, lfhash):
1113 def dedup(fn, lfhash):
1108 k = (fn, lfhash)
1114 k = (fn, lfhash)
1109 if k not in knowns:
1115 if k not in knowns:
1110 knowns.add(k)
1116 knowns.add(k)
1111 lfhashes.add(lfhash)
1117 lfhashes.add(lfhash)
1112 lfutil.getlfilestoupload(repo, missing, dedup)
1118 lfutil.getlfilestoupload(repo, missing, dedup)
1113 if lfhashes:
1119 if lfhashes:
1114 lfexists = basestore._openstore(repo, other).exists(lfhashes)
1120 lfexists = basestore._openstore(repo, other).exists(lfhashes)
1115 for fn, lfhash in knowns:
1121 for fn, lfhash in knowns:
1116 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1122 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1117 addfunc(fn, lfhash)
1123 addfunc(fn, lfhash)
1118
1124
1119 def outgoinghook(ui, repo, other, opts, missing):
1125 def outgoinghook(ui, repo, other, opts, missing):
1120 if opts.pop('large', None):
1126 if opts.pop('large', None):
1121 lfhashes = set()
1127 lfhashes = set()
1122 if ui.debugflag:
1128 if ui.debugflag:
1123 toupload = {}
1129 toupload = {}
1124 def addfunc(fn, lfhash):
1130 def addfunc(fn, lfhash):
1125 if fn not in toupload:
1131 if fn not in toupload:
1126 toupload[fn] = []
1132 toupload[fn] = []
1127 toupload[fn].append(lfhash)
1133 toupload[fn].append(lfhash)
1128 lfhashes.add(lfhash)
1134 lfhashes.add(lfhash)
1129 def showhashes(fn):
1135 def showhashes(fn):
1130 for lfhash in sorted(toupload[fn]):
1136 for lfhash in sorted(toupload[fn]):
1131 ui.debug(' %s\n' % (lfhash))
1137 ui.debug(' %s\n' % (lfhash))
1132 else:
1138 else:
1133 toupload = set()
1139 toupload = set()
1134 def addfunc(fn, lfhash):
1140 def addfunc(fn, lfhash):
1135 toupload.add(fn)
1141 toupload.add(fn)
1136 lfhashes.add(lfhash)
1142 lfhashes.add(lfhash)
1137 def showhashes(fn):
1143 def showhashes(fn):
1138 pass
1144 pass
1139 _getoutgoings(repo, other, missing, addfunc)
1145 _getoutgoings(repo, other, missing, addfunc)
1140
1146
1141 if not toupload:
1147 if not toupload:
1142 ui.status(_('largefiles: no files to upload\n'))
1148 ui.status(_('largefiles: no files to upload\n'))
1143 else:
1149 else:
1144 ui.status(_('largefiles to upload (%d entities):\n')
1150 ui.status(_('largefiles to upload (%d entities):\n')
1145 % (len(lfhashes)))
1151 % (len(lfhashes)))
1146 for file in sorted(toupload):
1152 for file in sorted(toupload):
1147 ui.status(lfutil.splitstandin(file) + '\n')
1153 ui.status(lfutil.splitstandin(file) + '\n')
1148 showhashes(file)
1154 showhashes(file)
1149 ui.status('\n')
1155 ui.status('\n')
1150
1156
1151 def summaryremotehook(ui, repo, opts, changes):
1157 def summaryremotehook(ui, repo, opts, changes):
1152 largeopt = opts.get('large', False)
1158 largeopt = opts.get('large', False)
1153 if changes is None:
1159 if changes is None:
1154 if largeopt:
1160 if largeopt:
1155 return (False, True) # only outgoing check is needed
1161 return (False, True) # only outgoing check is needed
1156 else:
1162 else:
1157 return (False, False)
1163 return (False, False)
1158 elif largeopt:
1164 elif largeopt:
1159 url, branch, peer, outgoing = changes[1]
1165 url, branch, peer, outgoing = changes[1]
1160 if peer is None:
1166 if peer is None:
1161 # i18n: column positioning for "hg summary"
1167 # i18n: column positioning for "hg summary"
1162 ui.status(_('largefiles: (no remote repo)\n'))
1168 ui.status(_('largefiles: (no remote repo)\n'))
1163 return
1169 return
1164
1170
1165 toupload = set()
1171 toupload = set()
1166 lfhashes = set()
1172 lfhashes = set()
1167 def addfunc(fn, lfhash):
1173 def addfunc(fn, lfhash):
1168 toupload.add(fn)
1174 toupload.add(fn)
1169 lfhashes.add(lfhash)
1175 lfhashes.add(lfhash)
1170 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1176 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1171
1177
1172 if not toupload:
1178 if not toupload:
1173 # i18n: column positioning for "hg summary"
1179 # i18n: column positioning for "hg summary"
1174 ui.status(_('largefiles: (no files to upload)\n'))
1180 ui.status(_('largefiles: (no files to upload)\n'))
1175 else:
1181 else:
1176 # i18n: column positioning for "hg summary"
1182 # i18n: column positioning for "hg summary"
1177 ui.status(_('largefiles: %d entities for %d files to upload\n')
1183 ui.status(_('largefiles: %d entities for %d files to upload\n')
1178 % (len(lfhashes), len(toupload)))
1184 % (len(lfhashes), len(toupload)))
1179
1185
1180 def overridesummary(orig, ui, repo, *pats, **opts):
1186 def overridesummary(orig, ui, repo, *pats, **opts):
1181 try:
1187 try:
1182 repo.lfstatus = True
1188 repo.lfstatus = True
1183 orig(ui, repo, *pats, **opts)
1189 orig(ui, repo, *pats, **opts)
1184 finally:
1190 finally:
1185 repo.lfstatus = False
1191 repo.lfstatus = False
1186
1192
1187 def scmutiladdremove(orig, repo, matcher, prefix, opts=None, dry_run=None,
1193 def scmutiladdremove(orig, repo, matcher, prefix, opts=None, dry_run=None,
1188 similarity=None):
1194 similarity=None):
1189 if opts is None:
1195 if opts is None:
1190 opts = {}
1196 opts = {}
1191 if not lfutil.islfilesrepo(repo):
1197 if not lfutil.islfilesrepo(repo):
1192 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1198 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1193 # Get the list of missing largefiles so we can remove them
1199 # Get the list of missing largefiles so we can remove them
1194 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1200 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1195 unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
1201 unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
1196 False, False, False)
1202 False, False, False)
1197
1203
1198 # Call into the normal remove code, but the removing of the standin, we want
1204 # Call into the normal remove code, but the removing of the standin, we want
1199 # to have handled by original addremove. Monkey patching here makes sure
1205 # to have handled by original addremove. Monkey patching here makes sure
1200 # we don't remove the standin in the largefiles code, preventing a very
1206 # we don't remove the standin in the largefiles code, preventing a very
1201 # confused state later.
1207 # confused state later.
1202 if s.deleted:
1208 if s.deleted:
1203 m = copy.copy(matcher)
1209 m = copy.copy(matcher)
1204
1210
1205 # The m._files and m._map attributes are not changed to the deleted list
1211 # The m._files and m._map attributes are not changed to the deleted list
1206 # because that affects the m.exact() test, which in turn governs whether
1212 # because that affects the m.exact() test, which in turn governs whether
1207 # or not the file name is printed, and how. Simply limit the original
1213 # or not the file name is printed, and how. Simply limit the original
1208 # matches to those in the deleted status list.
1214 # matches to those in the deleted status list.
1209 matchfn = m.matchfn
1215 matchfn = m.matchfn
1210 m.matchfn = lambda f: f in s.deleted and matchfn(f)
1216 m.matchfn = lambda f: f in s.deleted and matchfn(f)
1211
1217
1212 removelargefiles(repo.ui, repo, True, m, **opts)
1218 removelargefiles(repo.ui, repo, True, m, **opts)
1213 # Call into the normal add code, and any files that *should* be added as
1219 # Call into the normal add code, and any files that *should* be added as
1214 # largefiles will be
1220 # largefiles will be
1215 added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
1221 added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
1216 # Now that we've handled largefiles, hand off to the original addremove
1222 # Now that we've handled largefiles, hand off to the original addremove
1217 # function to take care of the rest. Make sure it doesn't do anything with
1223 # function to take care of the rest. Make sure it doesn't do anything with
1218 # largefiles by passing a matcher that will ignore them.
1224 # largefiles by passing a matcher that will ignore them.
1219 matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
1225 matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
1220 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1226 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1221
1227
1222 # Calling purge with --all will cause the largefiles to be deleted.
1228 # Calling purge with --all will cause the largefiles to be deleted.
1223 # Override repo.status to prevent this from happening.
1229 # Override repo.status to prevent this from happening.
1224 def overridepurge(orig, ui, repo, *dirs, **opts):
1230 def overridepurge(orig, ui, repo, *dirs, **opts):
1225 # XXX Monkey patching a repoview will not work. The assigned attribute will
1231 # XXX Monkey patching a repoview will not work. The assigned attribute will
1226 # be set on the unfiltered repo, but we will only lookup attributes in the
1232 # be set on the unfiltered repo, but we will only lookup attributes in the
1227 # unfiltered repo if the lookup in the repoview object itself fails. As the
1233 # unfiltered repo if the lookup in the repoview object itself fails. As the
1228 # monkey patched method exists on the repoview class the lookup will not
1234 # monkey patched method exists on the repoview class the lookup will not
1229 # fail. As a result, the original version will shadow the monkey patched
1235 # fail. As a result, the original version will shadow the monkey patched
1230 # one, defeating the monkey patch.
1236 # one, defeating the monkey patch.
1231 #
1237 #
1232 # As a work around we use an unfiltered repo here. We should do something
1238 # As a work around we use an unfiltered repo here. We should do something
1233 # cleaner instead.
1239 # cleaner instead.
1234 repo = repo.unfiltered()
1240 repo = repo.unfiltered()
1235 oldstatus = repo.status
1241 oldstatus = repo.status
1236 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1242 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1237 clean=False, unknown=False, listsubrepos=False):
1243 clean=False, unknown=False, listsubrepos=False):
1238 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1244 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1239 listsubrepos)
1245 listsubrepos)
1240 lfdirstate = lfutil.openlfdirstate(ui, repo)
1246 lfdirstate = lfutil.openlfdirstate(ui, repo)
1241 unknown = [f for f in r.unknown if lfdirstate[f] == '?']
1247 unknown = [f for f in r.unknown if lfdirstate[f] == '?']
1242 ignored = [f for f in r.ignored if lfdirstate[f] == '?']
1248 ignored = [f for f in r.ignored if lfdirstate[f] == '?']
1243 return scmutil.status(r.modified, r.added, r.removed, r.deleted,
1249 return scmutil.status(r.modified, r.added, r.removed, r.deleted,
1244 unknown, ignored, r.clean)
1250 unknown, ignored, r.clean)
1245 repo.status = overridestatus
1251 repo.status = overridestatus
1246 orig(ui, repo, *dirs, **opts)
1252 orig(ui, repo, *dirs, **opts)
1247 repo.status = oldstatus
1253 repo.status = oldstatus
1248 def overriderollback(orig, ui, repo, **opts):
1254 def overriderollback(orig, ui, repo, **opts):
1249 wlock = repo.wlock()
1255 wlock = repo.wlock()
1250 try:
1256 try:
1251 before = repo.dirstate.parents()
1257 before = repo.dirstate.parents()
1252 orphans = set(f for f in repo.dirstate
1258 orphans = set(f for f in repo.dirstate
1253 if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
1259 if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
1254 result = orig(ui, repo, **opts)
1260 result = orig(ui, repo, **opts)
1255 after = repo.dirstate.parents()
1261 after = repo.dirstate.parents()
1256 if before == after:
1262 if before == after:
1257 return result # no need to restore standins
1263 return result # no need to restore standins
1258
1264
1259 pctx = repo['.']
1265 pctx = repo['.']
1260 for f in repo.dirstate:
1266 for f in repo.dirstate:
1261 if lfutil.isstandin(f):
1267 if lfutil.isstandin(f):
1262 orphans.discard(f)
1268 orphans.discard(f)
1263 if repo.dirstate[f] == 'r':
1269 if repo.dirstate[f] == 'r':
1264 repo.wvfs.unlinkpath(f, ignoremissing=True)
1270 repo.wvfs.unlinkpath(f, ignoremissing=True)
1265 elif f in pctx:
1271 elif f in pctx:
1266 fctx = pctx[f]
1272 fctx = pctx[f]
1267 repo.wwrite(f, fctx.data(), fctx.flags())
1273 repo.wwrite(f, fctx.data(), fctx.flags())
1268 else:
1274 else:
1269 # content of standin is not so important in 'a',
1275 # content of standin is not so important in 'a',
1270 # 'm' or 'n' (coming from the 2nd parent) cases
1276 # 'm' or 'n' (coming from the 2nd parent) cases
1271 lfutil.writestandin(repo, f, '', False)
1277 lfutil.writestandin(repo, f, '', False)
1272 for standin in orphans:
1278 for standin in orphans:
1273 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1279 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1274
1280
1275 lfdirstate = lfutil.openlfdirstate(ui, repo)
1281 lfdirstate = lfutil.openlfdirstate(ui, repo)
1276 orphans = set(lfdirstate)
1282 orphans = set(lfdirstate)
1277 lfiles = lfutil.listlfiles(repo)
1283 lfiles = lfutil.listlfiles(repo)
1278 for file in lfiles:
1284 for file in lfiles:
1279 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1285 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1280 orphans.discard(file)
1286 orphans.discard(file)
1281 for lfile in orphans:
1287 for lfile in orphans:
1282 lfdirstate.drop(lfile)
1288 lfdirstate.drop(lfile)
1283 lfdirstate.write()
1289 lfdirstate.write()
1284 finally:
1290 finally:
1285 wlock.release()
1291 wlock.release()
1286 return result
1292 return result
1287
1293
1288 def overridetransplant(orig, ui, repo, *revs, **opts):
1294 def overridetransplant(orig, ui, repo, *revs, **opts):
1289 resuming = opts.get('continue')
1295 resuming = opts.get('continue')
1290 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1296 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1291 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1297 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1292 try:
1298 try:
1293 result = orig(ui, repo, *revs, **opts)
1299 result = orig(ui, repo, *revs, **opts)
1294 finally:
1300 finally:
1295 repo._lfstatuswriters.pop()
1301 repo._lfstatuswriters.pop()
1296 repo._lfcommithooks.pop()
1302 repo._lfcommithooks.pop()
1297 return result
1303 return result
1298
1304
1299 def overridecat(orig, ui, repo, file1, *pats, **opts):
1305 def overridecat(orig, ui, repo, file1, *pats, **opts):
1300 ctx = scmutil.revsingle(repo, opts.get('rev'))
1306 ctx = scmutil.revsingle(repo, opts.get('rev'))
1301 err = 1
1307 err = 1
1302 notbad = set()
1308 notbad = set()
1303 m = scmutil.match(ctx, (file1,) + pats, opts)
1309 m = scmutil.match(ctx, (file1,) + pats, opts)
1304 origmatchfn = m.matchfn
1310 origmatchfn = m.matchfn
1305 def lfmatchfn(f):
1311 def lfmatchfn(f):
1306 if origmatchfn(f):
1312 if origmatchfn(f):
1307 return True
1313 return True
1308 lf = lfutil.splitstandin(f)
1314 lf = lfutil.splitstandin(f)
1309 if lf is None:
1315 if lf is None:
1310 return False
1316 return False
1311 notbad.add(lf)
1317 notbad.add(lf)
1312 return origmatchfn(lf)
1318 return origmatchfn(lf)
1313 m.matchfn = lfmatchfn
1319 m.matchfn = lfmatchfn
1314 origbadfn = m.bad
1320 origbadfn = m.bad
1315 def lfbadfn(f, msg):
1321 def lfbadfn(f, msg):
1316 if not f in notbad:
1322 if not f in notbad:
1317 origbadfn(f, msg)
1323 origbadfn(f, msg)
1318 m.bad = lfbadfn
1324 m.bad = lfbadfn
1319
1325
1320 origvisitdirfn = m.visitdir
1326 origvisitdirfn = m.visitdir
1321 def lfvisitdirfn(dir):
1327 def lfvisitdirfn(dir):
1322 if dir == lfutil.shortname:
1328 if dir == lfutil.shortname:
1323 return True
1329 return True
1324 ret = origvisitdirfn(dir)
1330 ret = origvisitdirfn(dir)
1325 if ret:
1331 if ret:
1326 return ret
1332 return ret
1327 lf = lfutil.splitstandin(dir)
1333 lf = lfutil.splitstandin(dir)
1328 if lf is None:
1334 if lf is None:
1329 return False
1335 return False
1330 return origvisitdirfn(lf)
1336 return origvisitdirfn(lf)
1331 m.visitdir = lfvisitdirfn
1337 m.visitdir = lfvisitdirfn
1332
1338
1333 for f in ctx.walk(m):
1339 for f in ctx.walk(m):
1334 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1340 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1335 pathname=f)
1341 pathname=f)
1336 lf = lfutil.splitstandin(f)
1342 lf = lfutil.splitstandin(f)
1337 if lf is None or origmatchfn(f):
1343 if lf is None or origmatchfn(f):
1338 # duplicating unreachable code from commands.cat
1344 # duplicating unreachable code from commands.cat
1339 data = ctx[f].data()
1345 data = ctx[f].data()
1340 if opts.get('decode'):
1346 if opts.get('decode'):
1341 data = repo.wwritedata(f, data)
1347 data = repo.wwritedata(f, data)
1342 fp.write(data)
1348 fp.write(data)
1343 else:
1349 else:
1344 hash = lfutil.readstandin(repo, lf, ctx.rev())
1350 hash = lfutil.readstandin(repo, lf, ctx.rev())
1345 if not lfutil.inusercache(repo.ui, hash):
1351 if not lfutil.inusercache(repo.ui, hash):
1346 store = basestore._openstore(repo)
1352 store = basestore._openstore(repo)
1347 success, missing = store.get([(lf, hash)])
1353 success, missing = store.get([(lf, hash)])
1348 if len(success) != 1:
1354 if len(success) != 1:
1349 raise error.Abort(
1355 raise error.Abort(
1350 _('largefile %s is not in cache and could not be '
1356 _('largefile %s is not in cache and could not be '
1351 'downloaded') % lf)
1357 'downloaded') % lf)
1352 path = lfutil.usercachepath(repo.ui, hash)
1358 path = lfutil.usercachepath(repo.ui, hash)
1353 fpin = open(path, "rb")
1359 fpin = open(path, "rb")
1354 for chunk in util.filechunkiter(fpin, 128 * 1024):
1360 for chunk in util.filechunkiter(fpin, 128 * 1024):
1355 fp.write(chunk)
1361 fp.write(chunk)
1356 fpin.close()
1362 fpin.close()
1357 fp.close()
1363 fp.close()
1358 err = 0
1364 err = 0
1359 return err
1365 return err
1360
1366
1361 def mergeupdate(orig, repo, node, branchmerge, force, partial,
1367 def mergeupdate(orig, repo, node, branchmerge, force, partial,
1362 *args, **kwargs):
1368 *args, **kwargs):
1363 wlock = repo.wlock()
1369 wlock = repo.wlock()
1364 try:
1370 try:
1365 # branch | | |
1371 # branch | | |
1366 # merge | force | partial | action
1372 # merge | force | partial | action
1367 # -------+-------+---------+--------------
1373 # -------+-------+---------+--------------
1368 # x | x | x | linear-merge
1374 # x | x | x | linear-merge
1369 # o | x | x | branch-merge
1375 # o | x | x | branch-merge
1370 # x | o | x | overwrite (as clean update)
1376 # x | o | x | overwrite (as clean update)
1371 # o | o | x | force-branch-merge (*1)
1377 # o | o | x | force-branch-merge (*1)
1372 # x | x | o | (*)
1378 # x | x | o | (*)
1373 # o | x | o | (*)
1379 # o | x | o | (*)
1374 # x | o | o | overwrite (as revert)
1380 # x | o | o | overwrite (as revert)
1375 # o | o | o | (*)
1381 # o | o | o | (*)
1376 #
1382 #
1377 # (*) don't care
1383 # (*) don't care
1378 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1384 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1379
1385
1380 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1386 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1381 unsure, s = lfdirstate.status(match_.always(repo.root,
1387 unsure, s = lfdirstate.status(match_.always(repo.root,
1382 repo.getcwd()),
1388 repo.getcwd()),
1383 [], False, False, False)
1389 [], False, False, False)
1384 pctx = repo['.']
1390 pctx = repo['.']
1385 for lfile in unsure + s.modified:
1391 for lfile in unsure + s.modified:
1386 lfileabs = repo.wvfs.join(lfile)
1392 lfileabs = repo.wvfs.join(lfile)
1387 if not os.path.exists(lfileabs):
1393 if not os.path.exists(lfileabs):
1388 continue
1394 continue
1389 lfhash = lfutil.hashrepofile(repo, lfile)
1395 lfhash = lfutil.hashrepofile(repo, lfile)
1390 standin = lfutil.standin(lfile)
1396 standin = lfutil.standin(lfile)
1391 lfutil.writestandin(repo, standin, lfhash,
1397 lfutil.writestandin(repo, standin, lfhash,
1392 lfutil.getexecutable(lfileabs))
1398 lfutil.getexecutable(lfileabs))
1393 if (standin in pctx and
1399 if (standin in pctx and
1394 lfhash == lfutil.readstandin(repo, lfile, '.')):
1400 lfhash == lfutil.readstandin(repo, lfile, '.')):
1395 lfdirstate.normal(lfile)
1401 lfdirstate.normal(lfile)
1396 for lfile in s.added:
1402 for lfile in s.added:
1397 lfutil.updatestandin(repo, lfutil.standin(lfile))
1403 lfutil.updatestandin(repo, lfutil.standin(lfile))
1398 lfdirstate.write()
1404 lfdirstate.write()
1399
1405
1400 oldstandins = lfutil.getstandinsstate(repo)
1406 oldstandins = lfutil.getstandinsstate(repo)
1401
1407
1402 result = orig(repo, node, branchmerge, force, partial, *args, **kwargs)
1408 result = orig(repo, node, branchmerge, force, partial, *args, **kwargs)
1403
1409
1404 newstandins = lfutil.getstandinsstate(repo)
1410 newstandins = lfutil.getstandinsstate(repo)
1405 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1411 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1406 if branchmerge or force or partial:
1412 if branchmerge or force or partial:
1407 filelist.extend(s.deleted + s.removed)
1413 filelist.extend(s.deleted + s.removed)
1408
1414
1409 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1415 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1410 normallookup=partial)
1416 normallookup=partial)
1411
1417
1412 return result
1418 return result
1413 finally:
1419 finally:
1414 wlock.release()
1420 wlock.release()
1415
1421
1416 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1422 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1417 result = orig(repo, files, *args, **kwargs)
1423 result = orig(repo, files, *args, **kwargs)
1418
1424
1419 filelist = [lfutil.splitstandin(f) for f in files if lfutil.isstandin(f)]
1425 filelist = [lfutil.splitstandin(f) for f in files if lfutil.isstandin(f)]
1420 if filelist:
1426 if filelist:
1421 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1427 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1422 printmessage=False, normallookup=True)
1428 printmessage=False, normallookup=True)
1423
1429
1424 return result
1430 return result
@@ -1,1347 +1,1350
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import shutil
12 import shutil
13 import struct
13 import struct
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 bin,
17 bin,
18 hex,
18 hex,
19 nullid,
19 nullid,
20 nullrev,
20 nullrev,
21 )
21 )
22 from . import (
22 from . import (
23 copies,
23 copies,
24 destutil,
24 destutil,
25 error,
25 error,
26 filemerge,
26 filemerge,
27 obsolete,
27 obsolete,
28 subrepo,
28 subrepo,
29 util,
29 util,
30 worker,
30 worker,
31 )
31 )
32
32
33 _pack = struct.pack
33 _pack = struct.pack
34 _unpack = struct.unpack
34 _unpack = struct.unpack
35
35
36 def _droponode(data):
36 def _droponode(data):
37 # used for compatibility for v1
37 # used for compatibility for v1
38 bits = data.split('\0')
38 bits = data.split('\0')
39 bits = bits[:-2] + bits[-1:]
39 bits = bits[:-2] + bits[-1:]
40 return '\0'.join(bits)
40 return '\0'.join(bits)
41
41
class mergestate(object):
    '''track 3-way merge state of individual files

    it is stored on disk when needed. Two file are used, one with an old
    format, one with a new format. Both contains similar data, but the new
    format can store new kinds of field.

    Current new format is a list of arbitrary record of the form:

        [type][length][content]

    Type is a single character, length is a 4 bytes integer, content is an
    arbitrary suites of bytes of length `length`.

    Type should be a letter. Capital letter are mandatory record, Mercurial
    should abort if they are unknown. lower case record can be safely ignored.

    Currently known record:

    L: the node of the "local" part of the merge (hexified version)
    O: the node of the "other" part of the merge (hexified version)
    F: a file to be merged entry
    D: a file that the external merge driver will merge internally
       (experimental)
    m: the external merge driver defined for this merge plus its run state
       (experimental)

    Merge driver run states (experimental):
    u: driver-resolved files unmarked -- needs to be run next time we're about
       to resolve or commit
    m: driver-resolved files marked -- only needs to be run before commit
    s: success/skipped -- does not need to be run any more
    '''
    # on-disk locations of the two state-file formats
    statepathv1 = 'merge/state'
    statepathv2 = 'merge/state2'

    def __init__(self, repo):
        self._repo = repo
        self._dirty = False
        self._read()

    def reset(self, node=None, other=None):
        """Discard all state; optionally seed the local/other nodes."""
        self._state = {}
        self._local = None
        self._other = None
        # drop the cached otherctx property if it has been computed
        if 'otherctx' in vars(self):
            del self.otherctx
        if node:
            self._local = node
            self._other = other
        self._readmergedriver = None
        if self.mergedriver:
            self._mdstate = 's'
        else:
            self._mdstate = 'u'
        shutil.rmtree(self._repo.join('merge'), True)
        self._dirty = False

    def _read(self):
        """Analyse each record content to restore a serialized state from disk

        This function process "record" entry produced by the de-serialization
        of on disk file.
        """
        self._state = {}
        self._local = None
        self._other = None
        if 'otherctx' in vars(self):
            del self.otherctx
        self._readmergedriver = None
        self._mdstate = 's'
        records = self._readrecords()
        for rtype, record in records:
            if rtype == 'L':
                self._local = bin(record)
            elif rtype == 'O':
                self._other = bin(record)
            elif rtype == 'm':
                bits = record.split('\0', 1)
                mdstate = bits[1]
                if len(mdstate) != 1 or mdstate not in 'ums':
                    # the merge driver should be idempotent, so just rerun it
                    mdstate = 'u'

                self._readmergedriver = bits[0]
                self._mdstate = mdstate
            elif rtype in 'FD':
                bits = record.split('\0')
                self._state[bits[0]] = bits[1:]
            elif not rtype.islower():
                # capital-letter records are mandatory; refuse unknown ones
                raise error.Abort(_('unsupported merge state record: %s')
                                  % rtype)
        self._dirty = False

    def _readrecords(self):
        """Read merge state from disk and return a list of record (TYPE, data)

        We read data from both v1 and v2 files and decide which one to use.

        V1 has been used by version prior to 2.9.1 and contains less data than
        v2. We read both versions and check if no data in v2 contradicts
        v1. If there is not contradiction we can safely assume that both v1
        and v2 were written at the same time and use the extract data in v2. If
        there is contradiction we ignore v2 content as we assume an old version
        of Mercurial has overwritten the mergestate file and left an old v2
        file around.

        returns list of record [(TYPE, data), ...]"""
        v1records = self._readrecordsv1()
        v2records = self._readrecordsv2()
        if self._v1v2match(v1records, v2records):
            return v2records
        else:
            # v1 file is newer than v2 file, use it
            # we have to infer the "other" changeset of the merge
            # we cannot do better than that with v1 of the format
            mctx = self._repo[None].parents()[-1]
            v1records.append(('O', mctx.hex()))
            # add place holder "other" file node information
            # nobody is using it yet so we do no need to fetch the data
            # if mctx was wrong `mctx[bits[-2]]` may fails.
            for idx, r in enumerate(v1records):
                if r[0] == 'F':
                    bits = r[1].split('\0')
                    bits.insert(-2, '')
                    v1records[idx] = (r[0], '\0'.join(bits))
            return v1records

    def _v1v2match(self, v1records, v2records):
        """Return True if every v1 record is consistent with v2records."""
        oldv2 = set() # old format version of v2 record
        for rec in v2records:
            if rec[0] == 'L':
                oldv2.add(rec)
            elif rec[0] == 'F':
                # drop the onode data (not contained in v1)
                oldv2.add(('F', _droponode(rec[1])))
        for rec in v1records:
            if rec not in oldv2:
                return False
        else:
            return True

    def _readrecordsv1(self):
        """read on disk merge state for version 1 file

        returns list of record [(TYPE, data), ...]

        Note: the "F" data from this file are one entry short
        (no "other file node" entry)
        """
        records = []
        try:
            f = self._repo.vfs(self.statepathv1)
            for i, l in enumerate(f):
                if i == 0:
                    records.append(('L', l[:-1]))
                else:
                    records.append(('F', l[:-1]))
            f.close()
        except IOError as err:
            # a missing state file simply means no merge in progress
            if err.errno != errno.ENOENT:
                raise
        return records

    def _readrecordsv2(self):
        """read on disk merge state for version 2 file

        returns list of record [(TYPE, data), ...]
        """
        records = []
        try:
            f = self._repo.vfs(self.statepathv2)
            data = f.read()
            off = 0
            end = len(data)
            while off < end:
                # [type:1][length:4][content:length]
                rtype = data[off]
                off += 1
                length = _unpack('>I', data[off:(off + 4)])[0]
                off += 4
                record = data[off:(off + length)]
                off += length
                records.append((rtype, record))
            f.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return records

    @util.propertycache
    def mergedriver(self):
        # protect against the following:
        # - A configures a malicious merge driver in their hgrc, then
        #   pauses the merge
        # - A edits their hgrc to remove references to the merge driver
        # - A gives a copy of their entire repo, including .hg, to B
        # - B inspects .hgrc and finds it to be clean
        # - B then continues the merge and the malicious merge driver
        #   gets invoked
        configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
        if (self._readmergedriver is not None
            and self._readmergedriver != configmergedriver):
            raise error.ConfigError(
                _("merge driver changed since merge started"),
                hint=_("revert merge driver change or abort merge"))

        return configmergedriver

    @util.propertycache
    def otherctx(self):
        # changectx of the "other" side, resolved lazily from the O record
        return self._repo[self._other]

    def active(self):
        """Whether mergestate is active.

        Returns True if there appears to be mergestate. This is a rough proxy
        for "is a merge in progress."
        """
        # Check local variables before looking at filesystem for performance
        # reasons.
        return bool(self._local) or bool(self._state) or \
               self._repo.vfs.exists(self.statepathv1) or \
               self._repo.vfs.exists(self.statepathv2)

    def commit(self):
        """Write current state on disk (if necessary)"""
        if self._dirty:
            records = []
            records.append(('L', hex(self._local)))
            records.append(('O', hex(self._other)))
            if self.mergedriver:
                records.append(('m', '\0'.join([
                    self.mergedriver, self._mdstate])))
            for d, v in self._state.iteritems():
                if v[0] == 'd':
                    records.append(('D', '\0'.join([d] + v)))
                else:
                    records.append(('F', '\0'.join([d] + v)))
            self._writerecords(records)
            self._dirty = False

    def _writerecords(self, records):
        """Write current state on disk (both v1 and v2)"""
        self._writerecordsv1(records)
        self._writerecordsv2(records)

    def _writerecordsv1(self, records):
        """Write current state on disk in a version 1 file"""
        f = self._repo.vfs(self.statepathv1, 'w')
        irecords = iter(records)
        lrecords = irecords.next()
        assert lrecords[0] == 'L'
        f.write(hex(self._local) + '\n')
        for rtype, data in irecords:
            if rtype == 'F':
                f.write('%s\n' % _droponode(data))
        f.close()

    def _writerecordsv2(self, records):
        """Write current state on disk in a version 2 file"""
        f = self._repo.vfs(self.statepathv2, 'w')
        for key, data in records:
            assert len(key) == 1
            format = '>sI%is' % len(data)
            f.write(_pack(format, key, len(data), data))
        f.close()

    def add(self, fcl, fco, fca, fd):
        """add a new (potentially?) conflicting file the merge state
        fcl: file context for local,
        fco: file context for remote,
        fca: file context for ancestors,
        fd:  file path of the resulting merge.

        note: also write the local version to the `.hg/merge` directory.
        """
        hash = util.sha1(fcl.path()).hexdigest()
        self._repo.vfs.write('merge/' + hash, fcl.data())
        self._state[fd] = ['u', hash, fcl.path(),
                           fca.path(), hex(fca.filenode()),
                           fco.path(), hex(fco.filenode()),
                           fcl.flags()]
        self._dirty = True

    def __contains__(self, dfile):
        return dfile in self._state

    def __getitem__(self, dfile):
        # first field of the state entry is the per-file resolve state
        return self._state[dfile][0]

    def __iter__(self):
        return iter(sorted(self._state))

    def files(self):
        return self._state.keys()

    def mark(self, dfile, state):
        """Set the resolve state character for ``dfile``."""
        self._state[dfile][0] = state
        self._dirty = True

    def mdstate(self):
        return self._mdstate

    def unresolved(self):
        """Obtain the paths of unresolved files."""

        for f, entry in self._state.items():
            if entry[0] == 'u':
                yield f

    def driverresolved(self):
        """Obtain the paths of driver-resolved files."""

        for f, entry in self._state.items():
            if entry[0] == 'd':
                yield f

    def _resolve(self, preresolve, dfile, wctx, labels=None):
        """rerun merge process for file path `dfile`"""
        if self[dfile] in 'rd':
            # already resolved or driver-resolved: nothing to do
            return True, 0
        stateentry = self._state[dfile]
        state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
        octx = self._repo[self._other]
        fcd = wctx[dfile]
        fco = octx[ofile]
        fca = self._repo.filectx(afile, fileid=anode)
        # "premerge" x flags
        flo = fco.flags()
        fla = fca.flags()
        if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
            if fca.node() == nullid:
                if preresolve:
                    self._repo.ui.warn(
                        _('warning: cannot merge flags for %s\n') % afile)
            elif flags == fla:
                flags = flo
        if preresolve:
            # restore local
            f = self._repo.vfs('merge/' + hash)
            self._repo.wwrite(dfile, f.read(), flags)
            f.close()
            complete, r = filemerge.premerge(self._repo, self._local, lfile,
                                             fcd, fco, fca, labels=labels)
        else:
            complete, r = filemerge.filemerge(self._repo, self._local, lfile,
                                              fcd, fco, fca, labels=labels)
        if r is None:
            # no real conflict
            del self._state[dfile]
            self._dirty = True
        elif not r:
            self.mark(dfile, 'r')
        return complete, r

    def preresolve(self, dfile, wctx, labels=None):
        """run premerge process for dfile

        Returns whether the merge is complete, and the exit code."""
        return self._resolve(True, dfile, wctx, labels=labels)

    def resolve(self, dfile, wctx, labels=None):
        """run merge process (assuming premerge was run) for dfile

        Returns the exit code of the merge."""
        return self._resolve(False, dfile, wctx, labels=labels)[1]
408
409 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
409 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
410 if f2 is None:
410 if f2 is None:
411 f2 = f
411 f2 = f
412 return (os.path.isfile(repo.wjoin(f))
412 return (os.path.isfile(repo.wjoin(f))
413 and repo.wvfs.audit.check(f)
413 and repo.wvfs.audit.check(f)
414 and repo.dirstate.normalize(f) not in repo.dirstate
414 and repo.dirstate.normalize(f) not in repo.dirstate
415 and mctx[f2].cmp(wctx[f]))
415 and mctx[f2].cmp(wctx[f]))
416
416
def _checkunknownfiles(repo, wctx, mctx, force, actions):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.
    """
    aborts = []
    if not force:
        # collect files whose untracked working-copy version conflicts
        for f, (m, args, msg) in actions.iteritems():
            if m in ('c', 'dc'):
                if _checkunknownfile(repo, wctx, mctx, f):
                    aborts.append(f)
            elif m == 'dg':
                if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                    aborts.append(f)

    for f in sorted(aborts):
        repo.ui.warn(_("%s: untracked file differs\n") % f)
    if aborts:
        raise error.Abort(_("untracked files in working directory differ "
                            "from files in requested revision"))

    # downgrade 'c'/'cm' actions now that we know there is no conflict,
    # or upgrade 'cm' to a real merge when the untracked local differs
    for f, (m, args, msg) in actions.iteritems():
        if m == 'c':
            actions[f] = ('g', args, msg)
        elif m == 'cm':
            fl2, anc = args
            different = _checkunknownfile(repo, wctx, mctx, f)
            if different:
                actions[f] = ('m', (f, f, None, False, anc),
                              "remote differs from untracked local")
            else:
                actions[f] = ('g', (fl2,), "remote created")
450
450
451 def _forgetremoved(wctx, mctx, branchmerge):
451 def _forgetremoved(wctx, mctx, branchmerge):
452 """
452 """
453 Forget removed files
453 Forget removed files
454
454
455 If we're jumping between revisions (as opposed to merging), and if
455 If we're jumping between revisions (as opposed to merging), and if
456 neither the working directory nor the target rev has the file,
456 neither the working directory nor the target rev has the file,
457 then we need to remove it from the dirstate, to prevent the
457 then we need to remove it from the dirstate, to prevent the
458 dirstate from listing the file when it is no longer in the
458 dirstate from listing the file when it is no longer in the
459 manifest.
459 manifest.
460
460
461 If we're merging, and the other revision has removed a file
461 If we're merging, and the other revision has removed a file
462 that is not present in the working directory, we need to mark it
462 that is not present in the working directory, we need to mark it
463 as removed.
463 as removed.
464 """
464 """
465
465
466 actions = {}
466 actions = {}
467 m = 'f'
467 m = 'f'
468 if branchmerge:
468 if branchmerge:
469 m = 'r'
469 m = 'r'
470 for f in wctx.deleted():
470 for f in wctx.deleted():
471 if f not in mctx:
471 if f not in mctx:
472 actions[f] = m, None, "forget deleted"
472 actions[f] = m, None, "forget deleted"
473
473
474 if not branchmerge:
474 if not branchmerge:
475 for f in wctx.removed():
475 for f in wctx.removed():
476 if f not in mctx:
476 if f not in mctx:
477 actions[f] = 'f', None, "forget removed"
477 actions[f] = 'f', None, "forget removed"
478
478
479 return actions
479 return actions
480
480
def _checkcollision(repo, wmf, actions):
    """Abort if applying ``actions`` to working manifest ``wmf`` would
    create a case-folding collision (two paths equal under normcase).
    """
    # build provisional merged manifest up
    pmmf = set(wmf)

    if actions:
        # k, dr, e and rd are no-op
        for m in 'a', 'f', 'g', 'cd', 'dc':
            for f, args, msg in actions[m]:
                pmmf.add(f)
        for f, args, msg in actions['r']:
            pmmf.discard(f)
        for f, args, msg in actions['dm']:
            f2, flags = args
            pmmf.discard(f2)
            pmmf.add(f)
        for f, args, msg in actions['dg']:
            pmmf.add(f)
        for f, args, msg in actions['m']:
            f1, f2, fa, move, anc = args
            if move:
                pmmf.discard(f1)
            pmmf.add(f)

    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for f in sorted(pmmf):
        fold = util.normcase(f)
        if fold in foldmap:
            raise error.Abort(_("case-folding collision between %s and %s")
                              % (f, foldmap[fold]))
        foldmap[fold] = f

    # check case-folding of directories
    foldprefix = unfoldprefix = lastfull = ''
    for fold, f in sorted(foldmap.items()):
        if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            raise error.Abort(_("case-folding collision between "
                                "%s and directory of %s") % (lastfull, f))
        foldprefix = fold + '/'
        unfoldprefix = f + '/'
        lastfull = f
523
523
def driverpreprocess(repo, ms, wctx, labels=None):
    """run the preprocess step of the merge driver, if any

    This is currently not implemented -- it's an extension point."""
    return True
529
529
530 def driverconclude(repo, ms, wctx, labels=None):
530 def driverconclude(repo, ms, wctx, labels=None):
531 """run the conclude step of the merge driver, if any
531 """run the conclude step of the merge driver, if any
532
532
533 This is currently not implemented -- it's an extension point."""
533 This is currently not implemented -- it's an extension point."""
534 return True
534 return True
535
535
536 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
536 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
537 acceptremote, followcopies):
537 acceptremote, followcopies):
538 """
538 """
539 Merge p1 and p2 with ancestor pa and generate merge action list
539 Merge p1 and p2 with ancestor pa and generate merge action list
540
540
541 branchmerge and force are as passed in to update
541 branchmerge and force are as passed in to update
542 partial = function to filter file lists
542 partial = function to filter file lists
543 acceptremote = accept the incoming changes without prompting
543 acceptremote = accept the incoming changes without prompting
544 """
544 """
545
545
546 copy, movewithdir, diverge, renamedelete = {}, {}, {}, {}
546 copy, movewithdir, diverge, renamedelete = {}, {}, {}, {}
547
547
548 # manifests fetched in order are going to be faster, so prime the caches
548 # manifests fetched in order are going to be faster, so prime the caches
549 [x.manifest() for x in
549 [x.manifest() for x in
550 sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]
550 sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]
551
551
552 if followcopies:
552 if followcopies:
553 ret = copies.mergecopies(repo, wctx, p2, pa)
553 ret = copies.mergecopies(repo, wctx, p2, pa)
554 copy, movewithdir, diverge, renamedelete = ret
554 copy, movewithdir, diverge, renamedelete = ret
555
555
556 repo.ui.note(_("resolving manifests\n"))
556 repo.ui.note(_("resolving manifests\n"))
557 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
557 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
558 % (bool(branchmerge), bool(force), bool(partial)))
558 % (bool(branchmerge), bool(force), bool(partial)))
559 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
559 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
560
560
561 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
561 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
562 copied = set(copy.values())
562 copied = set(copy.values())
563 copied.update(movewithdir.values())
563 copied.update(movewithdir.values())
564
564
565 if '.hgsubstate' in m1:
565 if '.hgsubstate' in m1:
566 # check whether sub state is modified
566 # check whether sub state is modified
567 for s in sorted(wctx.substate):
567 for s in sorted(wctx.substate):
568 if wctx.sub(s).dirty():
568 if wctx.sub(s).dirty():
569 m1['.hgsubstate'] += '+'
569 m1['.hgsubstate'] += '+'
570 break
570 break
571
571
572 # Compare manifests
572 # Compare manifests
573 diff = m1.diff(m2)
573 diff = m1.diff(m2)
574
574
575 actions = {}
575 actions = {}
576 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
576 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
577 if partial and not partial(f):
577 if partial and not partial(f):
578 continue
578 continue
579 if n1 and n2: # file exists on both local and remote side
579 if n1 and n2: # file exists on both local and remote side
580 if f not in ma:
580 if f not in ma:
581 fa = copy.get(f, None)
581 fa = copy.get(f, None)
582 if fa is not None:
582 if fa is not None:
583 actions[f] = ('m', (f, f, fa, False, pa.node()),
583 actions[f] = ('m', (f, f, fa, False, pa.node()),
584 "both renamed from " + fa)
584 "both renamed from " + fa)
585 else:
585 else:
586 actions[f] = ('m', (f, f, None, False, pa.node()),
586 actions[f] = ('m', (f, f, None, False, pa.node()),
587 "both created")
587 "both created")
588 else:
588 else:
589 a = ma[f]
589 a = ma[f]
590 fla = ma.flags(f)
590 fla = ma.flags(f)
591 nol = 'l' not in fl1 + fl2 + fla
591 nol = 'l' not in fl1 + fl2 + fla
592 if n2 == a and fl2 == fla:
592 if n2 == a and fl2 == fla:
593 actions[f] = ('k' , (), "remote unchanged")
593 actions[f] = ('k' , (), "remote unchanged")
594 elif n1 == a and fl1 == fla: # local unchanged - use remote
594 elif n1 == a and fl1 == fla: # local unchanged - use remote
595 if n1 == n2: # optimization: keep local content
595 if n1 == n2: # optimization: keep local content
596 actions[f] = ('e', (fl2,), "update permissions")
596 actions[f] = ('e', (fl2,), "update permissions")
597 else:
597 else:
598 actions[f] = ('g', (fl2,), "remote is newer")
598 actions[f] = ('g', (fl2,), "remote is newer")
599 elif nol and n2 == a: # remote only changed 'x'
599 elif nol and n2 == a: # remote only changed 'x'
600 actions[f] = ('e', (fl2,), "update permissions")
600 actions[f] = ('e', (fl2,), "update permissions")
601 elif nol and n1 == a: # local only changed 'x'
601 elif nol and n1 == a: # local only changed 'x'
602 actions[f] = ('g', (fl1,), "remote is newer")
602 actions[f] = ('g', (fl1,), "remote is newer")
603 else: # both changed something
603 else: # both changed something
604 actions[f] = ('m', (f, f, f, False, pa.node()),
604 actions[f] = ('m', (f, f, f, False, pa.node()),
605 "versions differ")
605 "versions differ")
606 elif n1: # file exists only on local side
606 elif n1: # file exists only on local side
607 if f in copied:
607 if f in copied:
608 pass # we'll deal with it on m2 side
608 pass # we'll deal with it on m2 side
609 elif f in movewithdir: # directory rename, move local
609 elif f in movewithdir: # directory rename, move local
610 f2 = movewithdir[f]
610 f2 = movewithdir[f]
611 if f2 in m2:
611 if f2 in m2:
612 actions[f2] = ('m', (f, f2, None, True, pa.node()),
612 actions[f2] = ('m', (f, f2, None, True, pa.node()),
613 "remote directory rename, both created")
613 "remote directory rename, both created")
614 else:
614 else:
615 actions[f2] = ('dm', (f, fl1),
615 actions[f2] = ('dm', (f, fl1),
616 "remote directory rename - move from " + f)
616 "remote directory rename - move from " + f)
617 elif f in copy:
617 elif f in copy:
618 f2 = copy[f]
618 f2 = copy[f]
619 actions[f] = ('m', (f, f2, f2, False, pa.node()),
619 actions[f] = ('m', (f, f2, f2, False, pa.node()),
620 "local copied/moved from " + f2)
620 "local copied/moved from " + f2)
621 elif f in ma: # clean, a different, no remote
621 elif f in ma: # clean, a different, no remote
622 if n1 != ma[f]:
622 if n1 != ma[f]:
623 if acceptremote:
623 if acceptremote:
624 actions[f] = ('r', None, "remote delete")
624 actions[f] = ('r', None, "remote delete")
625 else:
625 else:
626 actions[f] = ('cd', None, "prompt changed/deleted")
626 actions[f] = ('cd', (f, None, f, False, pa.node()),
627 "prompt changed/deleted")
627 elif n1[20:] == 'a':
628 elif n1[20:] == 'a':
628 # This extra 'a' is added by working copy manifest to mark
629 # This extra 'a' is added by working copy manifest to mark
629 # the file as locally added. We should forget it instead of
630 # the file as locally added. We should forget it instead of
630 # deleting it.
631 # deleting it.
631 actions[f] = ('f', None, "remote deleted")
632 actions[f] = ('f', None, "remote deleted")
632 else:
633 else:
633 actions[f] = ('r', None, "other deleted")
634 actions[f] = ('r', None, "other deleted")
634 elif n2: # file exists only on remote side
635 elif n2: # file exists only on remote side
635 if f in copied:
636 if f in copied:
636 pass # we'll deal with it on m1 side
637 pass # we'll deal with it on m1 side
637 elif f in movewithdir:
638 elif f in movewithdir:
638 f2 = movewithdir[f]
639 f2 = movewithdir[f]
639 if f2 in m1:
640 if f2 in m1:
640 actions[f2] = ('m', (f2, f, None, False, pa.node()),
641 actions[f2] = ('m', (f2, f, None, False, pa.node()),
641 "local directory rename, both created")
642 "local directory rename, both created")
642 else:
643 else:
643 actions[f2] = ('dg', (f, fl2),
644 actions[f2] = ('dg', (f, fl2),
644 "local directory rename - get from " + f)
645 "local directory rename - get from " + f)
645 elif f in copy:
646 elif f in copy:
646 f2 = copy[f]
647 f2 = copy[f]
647 if f2 in m2:
648 if f2 in m2:
648 actions[f] = ('m', (f2, f, f2, False, pa.node()),
649 actions[f] = ('m', (f2, f, f2, False, pa.node()),
649 "remote copied from " + f2)
650 "remote copied from " + f2)
650 else:
651 else:
651 actions[f] = ('m', (f2, f, f2, True, pa.node()),
652 actions[f] = ('m', (f2, f, f2, True, pa.node()),
652 "remote moved from " + f2)
653 "remote moved from " + f2)
653 elif f not in ma:
654 elif f not in ma:
654 # local unknown, remote created: the logic is described by the
655 # local unknown, remote created: the logic is described by the
655 # following table:
656 # following table:
656 #
657 #
657 # force branchmerge different | action
658 # force branchmerge different | action
658 # n * * | create
659 # n * * | create
659 # y n * | create
660 # y n * | create
660 # y y n | create
661 # y y n | create
661 # y y y | merge
662 # y y y | merge
662 #
663 #
663 # Checking whether the files are different is expensive, so we
664 # Checking whether the files are different is expensive, so we
664 # don't do that when we can avoid it.
665 # don't do that when we can avoid it.
665 if not force:
666 if not force:
666 actions[f] = ('c', (fl2,), "remote created")
667 actions[f] = ('c', (fl2,), "remote created")
667 elif not branchmerge:
668 elif not branchmerge:
668 actions[f] = ('c', (fl2,), "remote created")
669 actions[f] = ('c', (fl2,), "remote created")
669 else:
670 else:
670 actions[f] = ('cm', (fl2, pa.node()),
671 actions[f] = ('cm', (fl2, pa.node()),
671 "remote created, get or merge")
672 "remote created, get or merge")
672 elif n2 != ma[f]:
673 elif n2 != ma[f]:
673 if acceptremote:
674 if acceptremote:
674 actions[f] = ('c', (fl2,), "remote recreating")
675 actions[f] = ('c', (fl2,), "remote recreating")
675 else:
676 else:
676 actions[f] = ('dc', (fl2,), "prompt deleted/changed")
677 actions[f] = ('dc', (None, f, f, False, pa.node()),
678 "prompt deleted/changed")
677
679
678 return actions, diverge, renamedelete
680 return actions, diverge, renamedelete
679
681
680 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
682 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
681 """Resolves false conflicts where the nodeid changed but the content
683 """Resolves false conflicts where the nodeid changed but the content
682 remained the same."""
684 remained the same."""
683
685
684 for f, (m, args, msg) in actions.items():
686 for f, (m, args, msg) in actions.items():
685 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
687 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
686 # local did change but ended up with same content
688 # local did change but ended up with same content
687 actions[f] = 'r', None, "prompt same"
689 actions[f] = 'r', None, "prompt same"
688 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
690 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
689 # remote did change but ended up with same content
691 # remote did change but ended up with same content
690 del actions[f] # don't get = keep local deleted
692 del actions[f] # don't get = keep local deleted
691
693
692 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force, partial,
694 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force, partial,
693 acceptremote, followcopies):
695 acceptremote, followcopies):
694 "Calculate the actions needed to merge mctx into wctx using ancestors"
696 "Calculate the actions needed to merge mctx into wctx using ancestors"
695
697
696 if len(ancestors) == 1: # default
698 if len(ancestors) == 1: # default
697 actions, diverge, renamedelete = manifestmerge(
699 actions, diverge, renamedelete = manifestmerge(
698 repo, wctx, mctx, ancestors[0], branchmerge, force, partial,
700 repo, wctx, mctx, ancestors[0], branchmerge, force, partial,
699 acceptremote, followcopies)
701 acceptremote, followcopies)
700 _checkunknownfiles(repo, wctx, mctx, force, actions)
702 _checkunknownfiles(repo, wctx, mctx, force, actions)
701
703
702 else: # only when merge.preferancestor=* - the default
704 else: # only when merge.preferancestor=* - the default
703 repo.ui.note(
705 repo.ui.note(
704 _("note: merging %s and %s using bids from ancestors %s\n") %
706 _("note: merging %s and %s using bids from ancestors %s\n") %
705 (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))
707 (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))
706
708
707 # Call for bids
709 # Call for bids
708 fbids = {} # mapping filename to bids (action method to list af actions)
710 fbids = {} # mapping filename to bids (action method to list af actions)
709 diverge, renamedelete = None, None
711 diverge, renamedelete = None, None
710 for ancestor in ancestors:
712 for ancestor in ancestors:
711 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
713 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
712 actions, diverge1, renamedelete1 = manifestmerge(
714 actions, diverge1, renamedelete1 = manifestmerge(
713 repo, wctx, mctx, ancestor, branchmerge, force, partial,
715 repo, wctx, mctx, ancestor, branchmerge, force, partial,
714 acceptremote, followcopies)
716 acceptremote, followcopies)
715 _checkunknownfiles(repo, wctx, mctx, force, actions)
717 _checkunknownfiles(repo, wctx, mctx, force, actions)
716
718
717 # Track the shortest set of warning on the theory that bid
719 # Track the shortest set of warning on the theory that bid
718 # merge will correctly incorporate more information
720 # merge will correctly incorporate more information
719 if diverge is None or len(diverge1) < len(diverge):
721 if diverge is None or len(diverge1) < len(diverge):
720 diverge = diverge1
722 diverge = diverge1
721 if renamedelete is None or len(renamedelete) < len(renamedelete1):
723 if renamedelete is None or len(renamedelete) < len(renamedelete1):
722 renamedelete = renamedelete1
724 renamedelete = renamedelete1
723
725
724 for f, a in sorted(actions.iteritems()):
726 for f, a in sorted(actions.iteritems()):
725 m, args, msg = a
727 m, args, msg = a
726 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
728 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
727 if f in fbids:
729 if f in fbids:
728 d = fbids[f]
730 d = fbids[f]
729 if m in d:
731 if m in d:
730 d[m].append(a)
732 d[m].append(a)
731 else:
733 else:
732 d[m] = [a]
734 d[m] = [a]
733 else:
735 else:
734 fbids[f] = {m: [a]}
736 fbids[f] = {m: [a]}
735
737
736 # Pick the best bid for each file
738 # Pick the best bid for each file
737 repo.ui.note(_('\nauction for merging merge bids\n'))
739 repo.ui.note(_('\nauction for merging merge bids\n'))
738 actions = {}
740 actions = {}
739 for f, bids in sorted(fbids.items()):
741 for f, bids in sorted(fbids.items()):
740 # bids is a mapping from action method to list af actions
742 # bids is a mapping from action method to list af actions
741 # Consensus?
743 # Consensus?
742 if len(bids) == 1: # all bids are the same kind of method
744 if len(bids) == 1: # all bids are the same kind of method
743 m, l = bids.items()[0]
745 m, l = bids.items()[0]
744 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
746 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
745 repo.ui.note(" %s: consensus for %s\n" % (f, m))
747 repo.ui.note(" %s: consensus for %s\n" % (f, m))
746 actions[f] = l[0]
748 actions[f] = l[0]
747 continue
749 continue
748 # If keep is an option, just do it.
750 # If keep is an option, just do it.
749 if 'k' in bids:
751 if 'k' in bids:
750 repo.ui.note(" %s: picking 'keep' action\n" % f)
752 repo.ui.note(" %s: picking 'keep' action\n" % f)
751 actions[f] = bids['k'][0]
753 actions[f] = bids['k'][0]
752 continue
754 continue
753 # If there are gets and they all agree [how could they not?], do it.
755 # If there are gets and they all agree [how could they not?], do it.
754 if 'g' in bids:
756 if 'g' in bids:
755 ga0 = bids['g'][0]
757 ga0 = bids['g'][0]
756 if all(a == ga0 for a in bids['g'][1:]):
758 if all(a == ga0 for a in bids['g'][1:]):
757 repo.ui.note(" %s: picking 'get' action\n" % f)
759 repo.ui.note(" %s: picking 'get' action\n" % f)
758 actions[f] = ga0
760 actions[f] = ga0
759 continue
761 continue
760 # TODO: Consider other simple actions such as mode changes
762 # TODO: Consider other simple actions such as mode changes
761 # Handle inefficient democrazy.
763 # Handle inefficient democrazy.
762 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
764 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
763 for m, l in sorted(bids.items()):
765 for m, l in sorted(bids.items()):
764 for _f, args, msg in l:
766 for _f, args, msg in l:
765 repo.ui.note(' %s -> %s\n' % (msg, m))
767 repo.ui.note(' %s -> %s\n' % (msg, m))
766 # Pick random action. TODO: Instead, prompt user when resolving
768 # Pick random action. TODO: Instead, prompt user when resolving
767 m, l = bids.items()[0]
769 m, l = bids.items()[0]
768 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
770 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
769 (f, m))
771 (f, m))
770 actions[f] = l[0]
772 actions[f] = l[0]
771 continue
773 continue
772 repo.ui.note(_('end of auction\n\n'))
774 repo.ui.note(_('end of auction\n\n'))
773
775
774 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
776 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
775
777
776 if wctx.rev() is None:
778 if wctx.rev() is None:
777 fractions = _forgetremoved(wctx, mctx, branchmerge)
779 fractions = _forgetremoved(wctx, mctx, branchmerge)
778 actions.update(fractions)
780 actions.update(fractions)
779
781
780 return actions, diverge, renamedelete
782 return actions, diverge, renamedelete
781
783
782 def batchremove(repo, actions):
784 def batchremove(repo, actions):
783 """apply removes to the working directory
785 """apply removes to the working directory
784
786
785 yields tuples for progress updates
787 yields tuples for progress updates
786 """
788 """
787 verbose = repo.ui.verbose
789 verbose = repo.ui.verbose
788 unlink = util.unlinkpath
790 unlink = util.unlinkpath
789 wjoin = repo.wjoin
791 wjoin = repo.wjoin
790 audit = repo.wvfs.audit
792 audit = repo.wvfs.audit
791 i = 0
793 i = 0
792 for f, args, msg in actions:
794 for f, args, msg in actions:
793 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
795 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
794 if verbose:
796 if verbose:
795 repo.ui.note(_("removing %s\n") % f)
797 repo.ui.note(_("removing %s\n") % f)
796 audit(f)
798 audit(f)
797 try:
799 try:
798 unlink(wjoin(f), ignoremissing=True)
800 unlink(wjoin(f), ignoremissing=True)
799 except OSError as inst:
801 except OSError as inst:
800 repo.ui.warn(_("update failed to remove %s: %s!\n") %
802 repo.ui.warn(_("update failed to remove %s: %s!\n") %
801 (f, inst.strerror))
803 (f, inst.strerror))
802 if i == 100:
804 if i == 100:
803 yield i, f
805 yield i, f
804 i = 0
806 i = 0
805 i += 1
807 i += 1
806 if i > 0:
808 if i > 0:
807 yield i, f
809 yield i, f
808
810
809 def batchget(repo, mctx, actions):
811 def batchget(repo, mctx, actions):
810 """apply gets to the working directory
812 """apply gets to the working directory
811
813
812 mctx is the context to get from
814 mctx is the context to get from
813
815
814 yields tuples for progress updates
816 yields tuples for progress updates
815 """
817 """
816 verbose = repo.ui.verbose
818 verbose = repo.ui.verbose
817 fctx = mctx.filectx
819 fctx = mctx.filectx
818 wwrite = repo.wwrite
820 wwrite = repo.wwrite
819 i = 0
821 i = 0
820 for f, args, msg in actions:
822 for f, args, msg in actions:
821 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
823 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
822 if verbose:
824 if verbose:
823 repo.ui.note(_("getting %s\n") % f)
825 repo.ui.note(_("getting %s\n") % f)
824 wwrite(f, fctx(f).data(), args[0])
826 wwrite(f, fctx(f).data(), args[0])
825 if i == 100:
827 if i == 100:
826 yield i, f
828 yield i, f
827 i = 0
829 i = 0
828 i += 1
830 i += 1
829 if i > 0:
831 if i > 0:
830 yield i, f
832 yield i, f
831
833
832 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
834 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
833 """apply the merge action list to the working directory
835 """apply the merge action list to the working directory
834
836
835 wctx is the working copy context
837 wctx is the working copy context
836 mctx is the context to be merged into the working copy
838 mctx is the context to be merged into the working copy
837
839
838 Return a tuple of counts (updated, merged, removed, unresolved) that
840 Return a tuple of counts (updated, merged, removed, unresolved) that
839 describes how many files were affected by the update.
841 describes how many files were affected by the update.
840 """
842 """
841
843
842 updated, merged, removed, unresolved = 0, 0, 0, 0
844 updated, merged, removed, unresolved = 0, 0, 0, 0
843 ms = mergestate(repo)
845 ms = mergestate(repo)
844 ms.reset(wctx.p1().node(), mctx.node())
846 ms.reset(wctx.p1().node(), mctx.node())
845 moves = []
847 moves = []
846 for m, l in actions.items():
848 for m, l in actions.items():
847 l.sort()
849 l.sort()
848
850
849 # prescan for merges
851 # prescan for merges
850 for f, args, msg in actions['m']:
852 for f, args, msg in actions['m']:
851 f1, f2, fa, move, anc = args
853 f1, f2, fa, move, anc = args
852 if f == '.hgsubstate': # merged internally
854 if f == '.hgsubstate': # merged internally
853 continue
855 continue
854 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
856 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
855 fcl = wctx[f1]
857 fcl = wctx[f1]
856 fco = mctx[f2]
858 fco = mctx[f2]
857 actx = repo[anc]
859 actx = repo[anc]
858 if fa in actx:
860 if fa in actx:
859 fca = actx[fa]
861 fca = actx[fa]
860 else:
862 else:
861 fca = repo.filectx(f1, fileid=nullrev)
863 fca = repo.filectx(f1, fileid=nullrev)
862 ms.add(fcl, fco, fca, f)
864 ms.add(fcl, fco, fca, f)
863 if f1 != f and move:
865 if f1 != f and move:
864 moves.append(f1)
866 moves.append(f1)
865
867
866 audit = repo.wvfs.audit
868 audit = repo.wvfs.audit
867 _updating = _('updating')
869 _updating = _('updating')
868 _files = _('files')
870 _files = _('files')
869 progress = repo.ui.progress
871 progress = repo.ui.progress
870
872
871 # remove renamed files after safely stored
873 # remove renamed files after safely stored
872 for f in moves:
874 for f in moves:
873 if os.path.lexists(repo.wjoin(f)):
875 if os.path.lexists(repo.wjoin(f)):
874 repo.ui.debug("removing %s\n" % f)
876 repo.ui.debug("removing %s\n" % f)
875 audit(f)
877 audit(f)
876 util.unlinkpath(repo.wjoin(f))
878 util.unlinkpath(repo.wjoin(f))
877
879
878 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
880 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
879
881
880 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
882 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
881 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
883 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
882
884
883 # remove in parallel (must come first)
885 # remove in parallel (must come first)
884 z = 0
886 z = 0
885 prog = worker.worker(repo.ui, 0.001, batchremove, (repo,), actions['r'])
887 prog = worker.worker(repo.ui, 0.001, batchremove, (repo,), actions['r'])
886 for i, item in prog:
888 for i, item in prog:
887 z += i
889 z += i
888 progress(_updating, z, item=item, total=numupdates, unit=_files)
890 progress(_updating, z, item=item, total=numupdates, unit=_files)
889 removed = len(actions['r'])
891 removed = len(actions['r'])
890
892
891 # get in parallel
893 # get in parallel
892 prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx), actions['g'])
894 prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx), actions['g'])
893 for i, item in prog:
895 for i, item in prog:
894 z += i
896 z += i
895 progress(_updating, z, item=item, total=numupdates, unit=_files)
897 progress(_updating, z, item=item, total=numupdates, unit=_files)
896 updated = len(actions['g'])
898 updated = len(actions['g'])
897
899
898 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
900 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
899 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
901 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
900
902
901 # forget (manifest only, just log it) (must come first)
903 # forget (manifest only, just log it) (must come first)
902 for f, args, msg in actions['f']:
904 for f, args, msg in actions['f']:
903 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
905 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
904 z += 1
906 z += 1
905 progress(_updating, z, item=f, total=numupdates, unit=_files)
907 progress(_updating, z, item=f, total=numupdates, unit=_files)
906
908
907 # re-add (manifest only, just log it)
909 # re-add (manifest only, just log it)
908 for f, args, msg in actions['a']:
910 for f, args, msg in actions['a']:
909 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
911 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
910 z += 1
912 z += 1
911 progress(_updating, z, item=f, total=numupdates, unit=_files)
913 progress(_updating, z, item=f, total=numupdates, unit=_files)
912
914
913 # keep (noop, just log it)
915 # keep (noop, just log it)
914 for f, args, msg in actions['k']:
916 for f, args, msg in actions['k']:
915 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
917 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
916 # no progress
918 # no progress
917
919
918 # directory rename, move local
920 # directory rename, move local
919 for f, args, msg in actions['dm']:
921 for f, args, msg in actions['dm']:
920 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
922 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
921 z += 1
923 z += 1
922 progress(_updating, z, item=f, total=numupdates, unit=_files)
924 progress(_updating, z, item=f, total=numupdates, unit=_files)
923 f0, flags = args
925 f0, flags = args
924 repo.ui.note(_("moving %s to %s\n") % (f0, f))
926 repo.ui.note(_("moving %s to %s\n") % (f0, f))
925 audit(f)
927 audit(f)
926 repo.wwrite(f, wctx.filectx(f0).data(), flags)
928 repo.wwrite(f, wctx.filectx(f0).data(), flags)
927 util.unlinkpath(repo.wjoin(f0))
929 util.unlinkpath(repo.wjoin(f0))
928 updated += 1
930 updated += 1
929
931
930 # local directory rename, get
932 # local directory rename, get
931 for f, args, msg in actions['dg']:
933 for f, args, msg in actions['dg']:
932 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
934 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
933 z += 1
935 z += 1
934 progress(_updating, z, item=f, total=numupdates, unit=_files)
936 progress(_updating, z, item=f, total=numupdates, unit=_files)
935 f0, flags = args
937 f0, flags = args
936 repo.ui.note(_("getting %s to %s\n") % (f0, f))
938 repo.ui.note(_("getting %s to %s\n") % (f0, f))
937 repo.wwrite(f, mctx.filectx(f0).data(), flags)
939 repo.wwrite(f, mctx.filectx(f0).data(), flags)
938 updated += 1
940 updated += 1
939
941
940 # exec
942 # exec
941 for f, args, msg in actions['e']:
943 for f, args, msg in actions['e']:
942 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
944 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
943 z += 1
945 z += 1
944 progress(_updating, z, item=f, total=numupdates, unit=_files)
946 progress(_updating, z, item=f, total=numupdates, unit=_files)
945 flags, = args
947 flags, = args
946 audit(f)
948 audit(f)
947 util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
949 util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
948 updated += 1
950 updated += 1
949
951
950 mergeactions = actions['m']
952 mergeactions = actions['m']
951 # the ordering is important here -- ms.mergedriver will raise if the merge
953 # the ordering is important here -- ms.mergedriver will raise if the merge
952 # driver has changed, and we want to be able to bypass it when overwrite is
954 # driver has changed, and we want to be able to bypass it when overwrite is
953 # True
955 # True
954 usemergedriver = not overwrite and mergeactions and ms.mergedriver
956 usemergedriver = not overwrite and mergeactions and ms.mergedriver
955
957
956 if usemergedriver:
958 if usemergedriver:
957 ms.commit()
959 ms.commit()
958 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
960 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
959 # the driver might leave some files unresolved
961 # the driver might leave some files unresolved
960 unresolvedf = set(ms.unresolved())
962 unresolvedf = set(ms.unresolved())
961 if not proceed:
963 if not proceed:
962 # XXX setting unresolved to at least 1 is a hack to make sure we
964 # XXX setting unresolved to at least 1 is a hack to make sure we
963 # error out
965 # error out
964 return updated, merged, removed, max(len(unresolvedf), 1)
966 return updated, merged, removed, max(len(unresolvedf), 1)
965 newactions = []
967 newactions = []
966 for f, args, msg in mergeactions:
968 for f, args, msg in mergeactions:
967 if f in unresolvedf:
969 if f in unresolvedf:
968 newactions.append((f, args, msg))
970 newactions.append((f, args, msg))
969 mergeactions = newactions
971 mergeactions = newactions
970
972
971 # premerge
973 # premerge
972 tocomplete = []
974 tocomplete = []
973 for f, args, msg in mergeactions:
975 for f, args, msg in mergeactions:
974 repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
976 repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
975 z += 1
977 z += 1
976 progress(_updating, z, item=f, total=numupdates, unit=_files)
978 progress(_updating, z, item=f, total=numupdates, unit=_files)
977 if f == '.hgsubstate': # subrepo states need updating
979 if f == '.hgsubstate': # subrepo states need updating
978 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
980 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
979 overwrite)
981 overwrite)
980 continue
982 continue
981 audit(f)
983 audit(f)
982 complete, r = ms.preresolve(f, wctx, labels=labels)
984 complete, r = ms.preresolve(f, wctx, labels=labels)
983 if complete:
985 if complete:
984 if r is not None and r > 0:
986 if r is not None and r > 0:
985 unresolved += 1
987 unresolved += 1
986 else:
988 else:
987 if r is None:
989 if r is None:
988 updated += 1
990 updated += 1
989 else:
991 else:
990 merged += 1
992 merged += 1
991 else:
993 else:
992 numupdates += 1
994 numupdates += 1
993 tocomplete.append((f, args, msg))
995 tocomplete.append((f, args, msg))
994
996
995 # merge
997 # merge
996 for f, args, msg in tocomplete:
998 for f, args, msg in tocomplete:
997 repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
999 repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
998 z += 1
1000 z += 1
999 progress(_updating, z, item=f, total=numupdates, unit=_files)
1001 progress(_updating, z, item=f, total=numupdates, unit=_files)
1000 r = ms.resolve(f, wctx, labels=labels)
1002 r = ms.resolve(f, wctx, labels=labels)
1001 if r is not None and r > 0:
1003 if r is not None and r > 0:
1002 unresolved += 1
1004 unresolved += 1
1003 else:
1005 else:
1004 if r is None:
1006 if r is None:
1005 updated += 1
1007 updated += 1
1006 else:
1008 else:
1007 merged += 1
1009 merged += 1
1008
1010
1009 ms.commit()
1011 ms.commit()
1010
1012
1011 if usemergedriver and not unresolved and ms.mdstate() != 's':
1013 if usemergedriver and not unresolved and ms.mdstate() != 's':
1012 if not driverconclude(repo, ms, wctx, labels=labels):
1014 if not driverconclude(repo, ms, wctx, labels=labels):
1013 # XXX setting unresolved to at least 1 is a hack to make sure we
1015 # XXX setting unresolved to at least 1 is a hack to make sure we
1014 # error out
1016 # error out
1015 return updated, merged, removed, max(unresolved, 1)
1017 return updated, merged, removed, max(unresolved, 1)
1016
1018
1017 ms.commit()
1019 ms.commit()
1018
1020
1019 progress(_updating, None, total=numupdates, unit=_files)
1021 progress(_updating, None, total=numupdates, unit=_files)
1020
1022
1021 return updated, merged, removed, unresolved
1023 return updated, merged, removed, unresolved
1022
1024
def recordupdates(repo, actions, branchmerge):
    "record merge actions to the dirstate"
    ds = repo.dirstate

    # remove (must come first, so later add/copy records win)
    for fname, _args, _msg in actions['r']:
        if branchmerge:
            ds.remove(fname)
        else:
            ds.drop(fname)

    # forget (must come first)
    for fname, _args, _msg in actions['f']:
        ds.drop(fname)

    # re-add
    for fname, _args, _msg in actions['a']:
        if not branchmerge:
            ds.add(fname)

    # exec-bit change: recheck the file contents on next status
    for fname, _args, _msg in actions['e']:
        ds.normallookup(fname)

    # keep: nothing to record
    for fname, _args, _msg in actions['k']:
        pass

    # get
    for fname, _args, _msg in actions['g']:
        if branchmerge:
            ds.otherparent(fname)
        else:
            ds.normal(fname)

    # merge
    for fname, mergeargs, _msg in actions['m']:
        f1, f2, fa, move, anc = mergeargs
        if branchmerge:
            # We've done a branch merge, mark this file as merged
            # so that we properly record the merger later
            ds.merge(fname)
            if f1 != f2: # copy/rename
                if move:
                    ds.remove(f1)
                # record where the merged file was copied from
                ds.copy(f1 if f1 != fname else f2, fname)
        else:
            # We've update-merged a locally modified file, so
            # we set the dirstate to emulate a normal checkout
            # of that file some time in the past. Thus our
            # merge will appear as a normal local file
            # modification.
            if f2 == fname: # file not locally copied/moved
                ds.normallookup(fname)
            if move:
                ds.drop(f1)

    # directory rename, move local
    for fname, dmargs, _msg in actions['dm']:
        f0, _flag = dmargs
        if branchmerge:
            ds.add(fname)
            ds.remove(f0)
            ds.copy(f0, fname)
        else:
            ds.normal(fname)
            ds.drop(f0)

    # directory rename, get
    for fname, dgargs, _msg in actions['dg']:
        f0, _flag = dgargs
        if branchmerge:
            ds.add(fname)
            ds.copy(f0, fname)
        else:
            ds.normal(fname)
1100
1102
def update(repo, node, branchmerge, force, partial, ancestor=None,
           mergeancestor=False, labels=None):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to, or None if unspecified
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    partial = a function to filter file lists (dirstate not updated)
    mergeancestor = whether it is merging with an ancestor. If true,
      we should accept the incoming changes for any prompts that occur.
      If false, merging with an ancestor (fast-forward) is only allowed
      between different named branches. This flag is used by rebase extension
      as a temporary fix and should be avoided in general.

    The table below shows all the behaviors of the update command
    given the -c and -C or no options, whether the working directory
    is dirty, whether a revision is specified, and the relationship of
    the parent rev to the target rev (linear, on the same named
    branch, or on another named branch).

    This logic is tested by test-update-branches.t.

    -c  -C  dirty  rev  |  linear   same  cross
     n   n    n     n   |    ok     (1)     x
     n   n    n     y   |    ok     ok     ok
     n   n    y     n   |   merge   (2)    (2)
     n   n    y     y   |   merge   (3)    (3)
     n   y    *     *   |  discard discard discard
     y   n    y     *   |    (4)    (4)    (4)
     y   n    n     *   |    ok     ok     ok
     y   y    *     *   |    (5)    (5)    (5)

    x = can't happen
    * = don't-care
    1 = abort: not a linear update (merge or update --check to force update)
    2 = abort: uncommitted changes (commit and merge, or update --clean to
                 discard changes)
    3 = abort: uncommitted changes (commit or update --clean to discard changes)
    4 = abort: uncommitted changes (checked in commands.py)
    5 = incompatible options (checked in commands.py)

    Return the same tuple as applyupdates().
    """

    # remember the originally requested node (possibly None) so the error
    # messages below can distinguish "hg update" from "hg update REV"
    onode = node
    wlock = repo.wlock()
    try:
        wc = repo[None]
        pl = wc.parents()
        p1 = pl[0]
        # pas: candidate merge ancestors; [None] until computed below
        pas = [None]
        if ancestor is not None:
            pas = [repo[ancestor]]

        if node is None:
            # calling update() without a target is deprecated API behavior
            if (repo.ui.configbool('devel', 'all-warnings')
                or repo.ui.configbool('devel', 'oldapi')):
                repo.ui.develwarn('update with no target')
            rev, _mark, _act = destutil.destupdate(repo)
            node = repo[rev].node()

        overwrite = force and not branchmerge

        p2 = repo[node]
        if pas[0] is None:
            # no explicit ancestor: pick one (or several, if the user asked
            # to consider all common ancestor heads via merge.preferancestor)
            if repo.ui.configlist('merge', 'preferancestor', ['*']) == ['*']:
                cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
                pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
            else:
                pas = [p1.ancestor(p2, warn=branchmerge)]

        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)

        ### check phase
        if not overwrite and len(pl) > 1:
            raise error.Abort(_("outstanding uncommitted merge"))
        if branchmerge:
            if pas == [p2]:
                raise error.Abort(_("merging with a working directory ancestor"
                                    " has no effect"))
            elif pas == [p1]:
                if not mergeancestor and p1.branch() == p2.branch():
                    raise error.Abort(_("nothing to merge"),
                                      hint=_("use 'hg update' "
                                             "or check 'hg heads'"))
            if not force and (wc.files() or wc.deleted()):
                raise error.Abort(_("uncommitted changes"),
                                  hint=_("use 'hg status' to list changes"))
            for s in sorted(wc.substate):
                wc.sub(s).bailifchanged()

        elif not overwrite:
            if p1 == p2: # no-op update
                # call the hooks and exit early
                repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
                repo.hook('update', parent1=xp2, parent2='', error=0)
                return 0, 0, 0, 0

            if pas not in ([p1], [p2]):  # nonlinear
                dirty = wc.dirty(missing=True)
                if dirty or onode is None:
                    # Branching is a bit strange to ensure we do the minimal
                    # amount of call to obsolete.background.
                    foreground = obsolete.foreground(repo, [p1.node()])
                    # note: the <node> variable contains a random identifier
                    if repo[node].node() in foreground:
                        pas = [p1] # allow updating to successors
                    elif dirty:
                        msg = _("uncommitted changes")
                        if onode is None:
                            hint = _("commit and merge, or update --clean to"
                                     " discard changes")
                        else:
                            hint = _("commit or update --clean to discard"
                                     " changes")
                        raise error.Abort(msg, hint=hint)
                    else: # node is none
                        msg = _("not a linear update")
                        hint = _("merge or update --check to force update")
                        raise error.Abort(msg, hint=hint)
                else:
                    # Allow jumping branches if clean and specific rev given
                    pas = [p1]

        # deprecated config: merge.followcopies
        followcopies = False
        if overwrite:
            pas = [wc]
        elif pas == [p2]: # backwards
            pas = [wc.p1()]
        elif not branchmerge and not wc.dirty(missing=True):
            pass
        elif pas[0] and repo.ui.configbool('merge', 'followcopies', True):
            followcopies = True

        ### calculate phase
        actionbyfile, diverge, renamedelete = calculateupdates(
            repo, wc, p2, pas, branchmerge, force, partial, mergeancestor,
            followcopies)
        # Convert to dictionary-of-lists format
        actions = dict((m, []) for m in 'a f g cd dc r dm dg m e k'.split())
        for f, (m, args, msg) in actionbyfile.iteritems():
            if m not in actions:
                actions[m] = []
            actions[m].append((f, args, msg))

        if not util.checkcase(repo.path):
            # check collision between files only in p2 for clean update
            if (not branchmerge and
                (force or not wc.dirty(missing=True, branch=False))):
                _checkcollision(repo, p2.manifest(), None)
            else:
                _checkcollision(repo, wc.manifest(), actions)

        # Prompt and create actions. TODO: Move this towards resolve phase.
        for f, args, msg in sorted(actions['cd']):
            # change/delete conflict: local changed, remote deleted
            if repo.ui.promptchoice(
                _("local changed %s which remote deleted\n"
                  "use (c)hanged version or (d)elete?"
                  "$$ &Changed $$ &Delete") % f, 0):
                actions['r'].append((f, None, "prompt delete"))
            else:
                actions['a'].append((f, None, "prompt keep"))

        for f, args, msg in sorted(actions['dc']):
            # delete/change conflict: 'dc' args now mirror the 'm' action's
            # (f1, f2, fa, move, anc) shape; look up the remote file's flags
            f1, f2, fa, move, anc = args
            flags = p2[f2].flags()
            if repo.ui.promptchoice(
                _("remote changed %s which local deleted\n"
                  "use (c)hanged version or leave (d)eleted?"
                  "$$ &Changed $$ &Deleted") % f, 0) == 0:
                actions['g'].append((f, (flags,), "prompt recreating"))

        # divergent renames
        for f, fl in sorted(diverge.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was renamed "
                           "multiple times to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        # rename and delete
        for f, fl in sorted(renamedelete.iteritems()):
            repo.ui.warn(_("note: possible conflict - %s was deleted "
                           "and renamed to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)

        ### apply phase
        if not branchmerge: # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
        if not partial:
            repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
            # note that we're in the middle of an update
            repo.vfs.write('updatestate', p2.hex())

        stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)

        if not partial:
            repo.dirstate.beginparentchange()
            repo.setparents(fp1, fp2)
            recordupdates(repo, actions, branchmerge)
            # update completed, clear state
            util.unlink(repo.join('updatestate'))

            if not branchmerge:
                repo.dirstate.setbranch(p2.branch())
            repo.dirstate.endparentchange()
    finally:
        wlock.release()

    # the 'update' hook runs outside the wlock
    if not partial:
        repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
    return stats
1314
1317
def graft(repo, ctx, pctx, labels):
    """Perform a graft-style merge of ``ctx`` onto the working directory.

    The merge ancestor is chosen such that one or more changesets are
    grafted onto the current changeset.  Besides doing the merge itself,
    this collapses the dirstate back to a single parent and duplicates
    any renames/copies appropriately.

    ctx - changeset to rebase
    pctx - merge base, usually ctx.p1()
    labels - merge labels eg ['local', 'graft']

    """
    # Pass mergeancestor=True to update when grafting a descendant onto
    # an ancestor.  That does two things: 1) it permits the merge even
    # when the destination equals ctx's parent (so graft can be used to
    # copy commits), and 2) it tells update the incoming changes are
    # newer than the destination, suppressing the "remote changed foo
    # which local deleted" prompt.
    ancestorgraft = repo.changelog.isancestor(repo['.'].node(), ctx.node())

    mergestats = update(repo, ctx.node(), True, True, False, pctx.node(),
                        mergeancestor=ancestorgraft, labels=labels)

    # Collapse back to a single parent: drop the second merge parent,
    # then replicate copy/rename data from the grafted revision.
    repo.dirstate.beginparentchange()
    repo.setparents(repo['.'].node(), nullid)
    repo.dirstate.write(repo.currenttransaction())
    copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
    repo.dirstate.endparentchange()
    return mergestats
General Comments 0
You need to be logged in to leave comments. Login now