##// END OF EJS Templates
merge: add a new 'backup' argument to get actions...
Siddharth Agarwal -
r27655:af13eaf9 default
parent child Browse files
Show More
@@ -1,1435 +1,1435
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10
10
11 import os
11 import os
12 import copy
12 import copy
13
13
14 from mercurial import hg, util, cmdutil, scmutil, match as match_, \
14 from mercurial import hg, util, cmdutil, scmutil, match as match_, \
15 archival, pathutil, revset, error
15 archival, pathutil, revset, error
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17
17
18 import lfutil
18 import lfutil
19 import lfcommands
19 import lfcommands
20 import basestore
20 import basestore
21
21
22 # -- Utility functions: commonly/repeatedly needed functionality ---------------
22 # -- Utility functions: commonly/repeatedly needed functionality ---------------
23
23
def composelargefilematcher(match, manifest):
    '''create a matcher that matches only the largefiles in the original
    matcher'''
    lfmatcher = copy.copy(match)
    islfile = lambda f: lfutil.standin(f) in manifest
    # narrow the explicit file list to largefiles only
    lfmatcher._files = [f for f in lfmatcher._files if islfile(f)]
    lfmatcher._fileroots = set(lfmatcher._files)
    lfmatcher._always = False
    basematchfn = lfmatcher.matchfn
    lfmatcher.matchfn = lambda f: islfile(f) and basematchfn(f)
    return lfmatcher
35
35
def composenormalfilematcher(match, manifest, exclude=None):
    '''create a matcher from the original that skips standins, largefiles
    tracked in the manifest, and any explicitly excluded files'''
    excluded = set() if exclude is None else set(exclude)

    nm = copy.copy(match)

    def isnormal(f):
        # normal means: not a standin, no standin in the manifest, and not
        # explicitly excluded by the caller
        return not (lfutil.isstandin(f)
                    or lfutil.standin(f) in manifest
                    or f in excluded)

    nm._files = [f for f in nm._files if isnormal(f)]
    nm._fileroots = set(nm._files)
    nm._always = False
    basematchfn = nm.matchfn
    nm.matchfn = lambda f: isnormal(f) and basematchfn(f)
    return nm
50
50
def installnormalfilesmatchfn(manifest):
    '''installmatchfn with a matchfn that ignores all largefiles'''
    def overridematch(ctx, pats=(), opts=None, globbed=False,
                      default='relpath', badfn=None):
        # delegate to the previously installed match function, then strip
        # largefiles/standins from the result
        match = oldmatch(ctx, pats, opts or {}, globbed, default, badfn=badfn)
        return composenormalfilematcher(match, manifest)
    oldmatch = installmatchfn(overridematch)
60
60
def installmatchfn(f):
    '''monkey patch the scmutil module with a custom match function.
    Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
    previous = scmutil.match
    # remember the replaced function on the override itself so that
    # restorematchfn can unwind the patch later
    f.oldmatch = previous
    scmutil.match = f
    return previous
68
68
def restorematchfn():
    '''restores scmutil.match to what it was before installmatchfn
    was called. no-op if scmutil.match is its original function.

    Note that n calls to installmatchfn will require n calls to
    restore the original matchfn.'''
    # Fall back to the current function when no 'oldmatch' attribute is
    # present: without the default, getattr raises AttributeError when no
    # override is installed, contradicting the documented no-op behavior
    # (restorematchandpatsfn below already uses this defensive form).
    scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
76
76
def installmatchandpatsfn(f):
    '''monkey patch scmutil.matchandpats with f; returns the replaced
    function so it can later be restored'''
    previous = scmutil.matchandpats
    # stash the replaced function on the override for restorematchandpatsfn
    f.oldmatchandpats = previous
    scmutil.matchandpats = f
    return previous
82
82
def restorematchandpatsfn():
    '''restores scmutil.matchandpats to what it was before
    installmatchandpatsfn was called. No-op if scmutil.matchandpats
    is its original function.

    Note that n calls to installmatchandpatsfn will require n calls
    to restore the original matchfn.'''
    current = scmutil.matchandpats
    # keep the current function when it carries no saved original
    scmutil.matchandpats = getattr(current, 'oldmatchandpats', current)
92
92
def addlargefiles(ui, repo, isaddremove, matcher, **opts):
    '''Add working-directory files as largefiles.

    Walks the files selected by ``matcher`` and decides which ones should
    become largefiles (forced via --large, above the configured minimum
    size, or matching the configured largefiles patterns), then creates and
    adds the corresponding standins under the repo wlock.

    Returns a ``(added, bad)`` pair of filename lists.
    '''
    large = opts.get('large')
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.get('lfsize'))

    # optional pattern-based matcher from the [largefiles] patterns config
    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
        if lfpats:
            lfmatcher = match_.match(repo.root, '', list(lfpats))

    lfnames = []
    m = matcher

    wctx = repo[None]
    # walk with a no-op bad-file callback: missing files are handled below
    for f in repo.walk(match_.badmatch(m, lambda x, y: None)):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # addremove in core gets fancy with the name, add doesn't
        if isaddremove:
            name = m.uipath(f)
        else:
            name = m.rel(f)

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_('%s already a largefile\n') % name)
            continue

        if (exact or not exists) and not lfutil.isstandin(f):
            # In case the file was removed previously, but not committed
            # (issue3507)
            if not repo.wvfs.exists(f):
                continue

            abovemin = (lfsize and
                        repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_('adding %s as a largefile\n') % name)

    bad = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    wlock = repo.wlock()
    try:
        if not opts.get('dry_run'):
            standins = []
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                # empty hash: the real content hash is filled in at commit
                lfutil.writestandin(repo, standinname, hash='',
                    executable=lfutil.getexecutable(repo.wjoin(f)))
                standins.append(standinname)
                # a previously-removed file goes back to normallookup so
                # its state is re-examined rather than re-added
                if lfdirstate[f] == 'r':
                    lfdirstate.normallookup(f)
                else:
                    lfdirstate.add(f)
            lfdirstate.write()
            # report standins the repo refused to add, translated back to
            # the largefile names the user asked for
            bad += [lfutil.splitstandin(f)
                    for f in repo[None].add(standins)
                    if f in m.files()]

        added = [f for f in lfnames if f not in bad]
    finally:
        wlock.release()
    return added, bad
167
167
def removelargefiles(ui, repo, isaddremove, matcher, **opts):
    '''Remove largefiles selected by ``matcher`` from the working directory
    and/or tracking.

    With ``after`` set, only files already deleted from disk are removed
    from tracking; otherwise clean files are unlinked too.  Warns (and sets
    a non-zero result) for files that cannot be removed.  Returns an int
    result code (0 on full success).
    '''
    after = opts.get('after')
    m = composelargefilematcher(matcher, repo[None].manifest())
    try:
        # temporarily report status in terms of largefiles, not standins
        repo.lfstatus = True
        s = repo.status(match=m, clean=not isaddremove)
    finally:
        repo.lfstatus = False
    manifest = repo[None].manifest()
    # keep only files that are actually tracked as largefiles
    modified, added, deleted, clean = [[f for f in list
                                        if lfutil.standin(f) in manifest]
                                       for list in (s.modified, s.added,
                                                    s.deleted, s.clean)]

    def warn(files, msg):
        # emit one warning per file; returns 1 if anything was warned about
        for f in files:
            ui.warn(msg % m.rel(f))
        return int(len(files) > 0)

    result = 0

    if after:
        remove = deleted
        result = warn(modified + added + clean,
                      _('not removing %s: file still exists\n'))
    else:
        remove = deleted + clean
        result = warn(modified, _('not removing %s: file is modified (use -f'
                                  ' to force removal)\n'))
        result = warn(added, _('not removing %s: file has been marked for add'
                               ' (use forget to undo)\n')) or result

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in sorted(remove):
            if ui.verbose or not m.exact(f):
                # addremove in core gets fancy with the name, remove doesn't
                if isaddremove:
                    name = m.uipath(f)
                else:
                    name = m.rel(f)
                ui.status(_('removing %s\n') % name)

            if not opts.get('dry_run'):
                if not after:
                    util.unlinkpath(repo.wjoin(f), ignoremissing=True)

        # dry run stops before touching the dirstate or standins
        if opts.get('dry_run'):
            return result

        remove = [lfutil.standin(f) for f in remove]
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not isaddremove:
            for f in remove:
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
            repo[None].forget(remove)

        for f in remove:
            lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
                                  False)

        lfdirstate.write()
    finally:
        wlock.release()

    return result
238
238
# For overriding mercurial.hgweb.webcommands so that largefiles will
# appear at their right place in the manifests.
def decodepath(orig, path):
    # map a standin path back to its largefile name; anything that is not
    # a standin passes through unchanged
    largefile = lfutil.splitstandin(path)
    if largefile:
        return largefile
    return path
243
243
244 # -- Wrappers: modify existing commands --------------------------------
244 # -- Wrappers: modify existing commands --------------------------------
245
245
def overrideadd(orig, ui, repo, *pats, **opts):
    # reject the contradictory flag combination up front
    if opts.get('large') and opts.get('normal'):
        raise error.Abort(_('--normal cannot be used with --large'))
    return orig(ui, repo, *pats, **opts)
250
250
def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
    # The --normal flag short circuits this override
    if opts.get('normal'):
        return orig(ui, repo, matcher, prefix, explicitonly, **opts)

    # add largefiles first, then hand the remaining (normal) files to the
    # original add, excluding what was just added as largefiles
    ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
                                             ladded)
    bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)

    bad.extend(lbad)
    return bad
263
263
def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
    # remove normal files via the original implementation, then largefiles
    # via our own path; either failure makes the whole call fail
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
    normalresult = orig(ui, repo, normalmatcher, prefix, after, force,
                        subrepos)
    lfresult = removelargefiles(ui, repo, False, matcher, after=after,
                                force=force)
    return lfresult or normalresult
269
269
def overridestatusfn(orig, repo, rev2, **opts):
    # run the subrepo status with largefile awareness enabled, restoring
    # the flag afterwards even if orig raises
    repo._repo.lfstatus = True
    try:
        return orig(repo, rev2, **opts)
    finally:
        repo._repo.lfstatus = False
276
276
def overridestatus(orig, ui, repo, *pats, **opts):
    # enable largefile-aware status reporting for the duration of the call
    repo.lfstatus = True
    try:
        return orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
283
283
def overridedirty(orig, repo, ignoreupdate=False):
    # evaluate subrepo dirtiness with largefile status tracking turned on
    repo._repo.lfstatus = True
    try:
        return orig(repo, ignoreupdate)
    finally:
        repo._repo.lfstatus = False
290
290
def overridelog(orig, ui, repo, *pats, **opts):
    '''Run log with matching extended to cover standins.

    Temporarily monkey-patches scmutil.matchandpats (and, for --patch,
    cmdutil._makenofollowlogfilematcher) so revisions touching largefiles
    are selected, then restores both before returning.
    '''
    def overridematchandpats(ctx, pats=(), opts=None, globbed=False,
            default='relpath', badfn=None):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        if opts is None:
            opts = {}
        matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default,
                                       badfn=badfn)
        m, p = copy.copy(matchandpats)

        if m.always():
            # We want to match everything anyway, so there's no benefit trying
            # to add standins.
            return matchandpats

        pats = set(p)

        def fixpats(pat, tostandin=lfutil.standin):
            # filesets can already see both names; leave them alone
            if pat.startswith('set:'):
                return pat

            kindpat = match_._patsplit(pat, None)

            # re-attach the pattern kind prefix (glob:, path:, ...) if any
            if kindpat[0] is not None:
                return kindpat[0] + ':' + tostandin(kindpat[1])
            return tostandin(kindpat[1])

        if m._cwd:
            hglf = lfutil.shortname
            # relative path from cwd back up to the repo root
            back = util.pconvert(m.rel(hglf)[:-len(hglf)])

            def tostandin(f):
                # The file may already be a standin, so truncate the back
                # prefix and test before mangling it. This avoids turning
                # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
                if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
                    return f

                # An absolute path is from outside the repo, so truncate the
                # path to the root before building the standin. Otherwise cwd
                # is somewhere in the repo, relative to root, and needs to be
                # prepended before building the standin.
                if os.path.isabs(m._cwd):
                    f = f[len(back):]
                else:
                    f = m._cwd + '/' + f
                return back + lfutil.standin(f)

            pats.update(fixpats(f, tostandin) for f in p)
        else:
            def tostandin(f):
                # already a standin: keep as-is
                if lfutil.splitstandin(f):
                    return f
                return lfutil.standin(f)
            pats.update(fixpats(f, tostandin) for f in p)

        for i in range(0, len(m._files)):
            # Don't add '.hglf' to m.files, since that is already covered by '.'
            if m._files[i] == '.':
                continue
            standin = lfutil.standin(m._files[i])
            # If the "standin" is a directory, append instead of replace to
            # support naming a directory on the command line with only
            # largefiles. The original directory is kept to support normal
            # files.
            if standin in repo[ctx.node()]:
                m._files[i] = standin
            elif m._files[i] not in repo[ctx.node()] \
                    and repo.wvfs.isdir(standin):
                m._files.append(standin)

        m._fileroots = set(m._files)
        m._always = False
        origmatchfn = m.matchfn
        def lfmatchfn(f):
            # accept a standin whenever its largefile name would match
            lf = lfutil.splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            r = origmatchfn(f)
            return r
        m.matchfn = lfmatchfn

        ui.debug('updated patterns: %s\n' % sorted(pats))
        return m, pats

    # For hg log --patch, the match object is used in two different senses:
    # (1) to determine what revisions should be printed out, and
    # (2) to determine what files to print out diffs for.
    # The magic matchandpats override should be used for case (1) but not for
    # case (2).
    def overridemakelogfilematcher(repo, pats, opts, badfn=None):
        # build the diff matcher from the *original* matchandpats so diffs
        # show the largefile names, not standins
        wctx = repo[None]
        match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
        return lambda rev: match

    oldmatchandpats = installmatchandpatsfn(overridematchandpats)
    oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
    setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)

    try:
        return orig(ui, repo, *pats, **opts)
    finally:
        # undo both monkey patches no matter how orig exits
        restorematchandpatsfn()
        setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
399
399
def overrideverify(orig, ui, repo, *pats, **opts):
    # pop extension-specific flags before delegating to core verify
    large = opts.pop('large', False)
    verifyall = opts.pop('lfa', False)
    contents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if large or verifyall or contents:
        # only verify largefiles when core verify succeeded
        if not result:
            result = lfcommands.verifylfiles(ui, repo, verifyall, contents)
    return result
409
409
def overridedebugstate(orig, ui, repo, *pats, **opts):
    if not opts.pop('large', False):
        orig(ui, repo, *pats, **opts)
        return
    # present the largefiles dirstate as if it were the repo's dirstate
    class fakerepo(object):
        dirstate = lfutil.openlfdirstate(ui, repo)
    orig(ui, fakerepo, *pats, **opts)
418
418
# Before starting the manifest merge, merge.updates will call
# _checkunknownfile to check if there are any files in the merged-in
# changeset that collide with unknown files in the working copy.
#
# The largefiles are seen as unknown, so this prevents us from merging
# in a file 'foo' if we already have a largefile with the same name.
#
# The overridden function filters the unknown files by removing any
# largefiles. This makes the merge proceed and we can then handle this
# case further in the overridden calculateupdates function below.
def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
    # a tracked largefile with this name means the "unknown" file is ours,
    # not a genuine collision — report no conflict
    normalized = repo.dirstate.normalize(f)
    if lfutil.standin(normalized) in wctx:
        return False
    return origfn(repo, wctx, mctx, f, f2)
433
433
434 # The manifest merge handles conflicts on the manifest level. We want
434 # The manifest merge handles conflicts on the manifest level. We want
435 # to handle changes in largefile-ness of files at this level too.
435 # to handle changes in largefile-ness of files at this level too.
436 #
436 #
437 # The strategy is to run the original calculateupdates and then process
437 # The strategy is to run the original calculateupdates and then process
438 # the action list it outputs. There are two cases we need to deal with:
438 # the action list it outputs. There are two cases we need to deal with:
439 #
439 #
440 # 1. Normal file in p1, largefile in p2. Here the largefile is
440 # 1. Normal file in p1, largefile in p2. Here the largefile is
441 # detected via its standin file, which will enter the working copy
441 # detected via its standin file, which will enter the working copy
442 # with a "get" action. It is not "merge" since the standin is all
442 # with a "get" action. It is not "merge" since the standin is all
443 # Mercurial is concerned with at this level -- the link to the
443 # Mercurial is concerned with at this level -- the link to the
444 # existing normal file is not relevant here.
444 # existing normal file is not relevant here.
445 #
445 #
446 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
446 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
447 # since the largefile will be present in the working copy and
447 # since the largefile will be present in the working copy and
448 # different from the normal file in p2. Mercurial therefore
448 # different from the normal file in p2. Mercurial therefore
449 # triggers a merge action.
449 # triggers a merge action.
450 #
450 #
451 # In both cases, we prompt the user and emit new actions to either
451 # In both cases, we prompt the user and emit new actions to either
452 # remove the standin (if the normal file was kept) or to remove the
452 # remove the standin (if the normal file was kept) or to remove the
453 # normal file and get the standin (if the largefile was kept). The
453 # normal file and get the standin (if the largefile was kept). The
454 # default prompt answer is to use the largefile version since it was
454 # default prompt answer is to use the largefile version since it was
455 # presumably changed on purpose.
455 # presumably changed on purpose.
456 #
456 #
457 # Finally, the merge.applyupdates function will then take care of
457 # Finally, the merge.applyupdates function will then take care of
458 # writing the files into the working copy and lfcommands.updatelfiles
458 # writing the files into the working copy and lfcommands.updatelfiles
459 # will update the largefiles.
459 # will update the largefiles.
def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
                             acceptremote, followcopies, matcher=None):
    """Post-process merge actions so largefile/normal-file flips prompt.

    Runs the wrapped calculateupdates first, then rewrites action pairs
    where one side has a largefile (standin) and the other a normal file
    of the same name, asking the user which version to keep.  Returns the
    (actions, diverge, renamedelete) triple unchanged in structure.
    """
    overwrite = force and not branchmerge
    actions, diverge, renamedelete = origfn(
        repo, p1, p2, pas, branchmerge, force, acceptremote,
        followcopies, matcher=matcher)

    # An overwrite (e.g. 'update --clean') never needs conflict prompting.
    if overwrite:
        return actions, diverge, renamedelete

    # Collect the largefile names involved on either side of the merge.
    lfiles = set()
    for f in actions:
        splitstandin = f and lfutil.splitstandin(f)
        if splitstandin in p1:
            lfiles.add(splitstandin)
        elif lfutil.standin(f) in p1:
            lfiles.add(f)

    for lfile in lfiles:
        standin = lfutil.standin(lfile)
        (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
        (sm, sargs, smsg) = actions.get(standin, (None, None, None))
        if sm in ('g', 'dc') and lm != 'r':
            if sm == 'dc':
                f1, f2, fa, move, anc = sargs
                # Rebuild 'g' args: (flags, backup) -- second element is
                # the new 'backup' argument of get actions.
                sargs = (p2[f2].flags(), False)
            # Case 1: normal file in the working copy, largefile in
            # the second parent.
            usermsg = _('remote turned local normal file %s into a largefile\n'
                        'use (l)argefile or keep (n)ormal file?'
                        '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
                actions[lfile] = ('r', None, 'replaced by standin')
                actions[standin] = ('g', sargs, 'replaces standin')
            else: # keep local normal file
                actions[lfile] = ('k', None, 'replaces standin')
                if branchmerge:
                    actions[standin] = ('k', None, 'replaced by non-standin')
                else:
                    actions[standin] = ('r', None, 'replaced by non-standin')
        elif lm in ('g', 'dc') and sm != 'r':
            if lm == 'dc':
                f1, f2, fa, move, anc = largs
                # Same (flags, backup) rebuild as in case 1 above.
                largs = (p2[f2].flags(), False)
            # Case 2: largefile in the working copy, normal file in
            # the second parent.
            usermsg = _('remote turned local largefile %s into a normal file\n'
                        'keep (l)argefile or use (n)ormal file?'
                        '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
                if branchmerge:
                    # largefile can be restored from standin safely
                    actions[lfile] = ('k', None, 'replaced by standin')
                    actions[standin] = ('k', None, 'replaces standin')
                else:
                    # "lfile" should be marked as "removed" without
                    # removal of itself
                    actions[lfile] = ('lfmr', None,
                                      'forget non-standin largefile')

                    # linear-merge should treat this largefile as 're-added'
                    actions[standin] = ('a', None, 'keep standin')
            else: # pick remote normal file
                actions[lfile] = ('g', largs, 'replaces standin')
                actions[standin] = ('r', None, 'replaced by non-standin')

    return actions, diverge, renamedelete
528
528
def mergerecordupdates(orig, repo, actions, branchmerge):
    """Record largefiles' custom 'lfmr' actions, then delegate to the
    wrapped recorder for everything else.

    'lfmr' marks a largefile as removed in the normal dirstate while
    keeping it tracked in the largefiles dirstate.
    """
    if 'lfmr' in actions:
        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        for lfile, args, msg in actions['lfmr']:
            # This must happen before 'orig' so that the 'remove' is
            # recorded ahead of all other dirstate updates.
            repo.dirstate.remove(lfile)
            # make sure lfile doesn't get synclfdirstate'd as normal
            lfdirstate.add(lfile)
        lfdirstate.write()

    return orig(repo, actions, branchmerge)
541
541
542
542
543 # Override filemerge to prompt the user about how they wish to merge
543 # Override filemerge to prompt the user about how they wish to merge
544 # largefiles. This will handle identical edits without prompting the user.
544 # largefiles. This will handle identical edits without prompting the user.
def overridefilemerge(origfn, premerge, repo, mynode, orig, fcd, fco, fca,
                      labels=None):
    """Merge two versions of a standin, prompting only on real conflicts.

    A standin's content is the largefile's hash, so identical edits on
    both sides compare equal and are taken silently; only a genuine
    divergence asks the user to pick local or other.  Non-standins (and
    absent sides) fall through to the wrapped filemerge.
    """
    if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
        return origfn(premerge, repo, mynode, orig, fcd, fco, fca,
                      labels=labels)

    # Normalize the three hashes for comparison.
    ahash = fca.data().strip().lower()
    dhash = fcd.data().strip().lower()
    ohash = fco.data().strip().lower()
    if (ohash != ahash and
        ohash != dhash and
        (dhash == ahash or
         repo.ui.promptchoice(
             _('largefile %s has a merge conflict\nancestor was %s\n'
               'keep (l)ocal %s or\ntake (o)ther %s?'
               '$$ &Local $$ &Other') %
             (lfutil.splitstandin(orig), ahash, dhash, ohash),
             0) == 1)):
        # Take the other side: overwrite the working-copy standin.
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return True, 0, False
565
565
def copiespathcopies(orig, ctx1, ctx2, match=None):
    """Map standin names back to largefile names in a pathcopies result.

    Entries that are not standins pass through unchanged.
    """
    copies = orig(ctx1, ctx2, match=match)
    return dict((lfutil.splitstandin(k) or k,
                 lfutil.splitstandin(v) or v)
                for k, v in copies.iteritems())
574
574
# Copy first changes the matchers to match standins instead of
# largefiles. Then it overrides util.copyfile; in that function it
# checks if the destination largefile already exists. It also keeps a
# list of copied files so that the largefiles can be copied and the
# dirstate updated.
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    """Copy/rename both the standins and the largefiles they stand for.

    Runs the wrapped copy twice: once restricted to normal files, once
    restricted to standins (with the patterns rewritten to the standin
    directory), then mirrors each copied standin by copying or moving
    the corresponding largefile and updating the largefiles dirstate.

    Raises error.Abort('no files to copy') only when neither pass copied
    anything.
    """
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    installnormalfilesmatchfn(repo[None].manifest())
    try:
        result = orig(ui, repo, pats, opts, rename)
    except error.Abort as e:
        if str(e) != _('no files to copy'):
            # bare raise preserves the original traceback ('raise e'
            # would rebuild it from here)
            raise
        else:
            nonormalfiles = True
            result = 0
    finally:
        restorematchfn()

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    def makestandin(relpath):
        # Absolute working-copy path of the standin for 'relpath'.
        path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
        return os.path.join(repo.wjoin(lfutil.standin(path)))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))

    try:
        # When we call orig below it creates the standins but we don't add
        # them to the dir state until later so lock during that time.
        # NOTE(review): if wlock() itself raises, 'wlock' is unbound in the
        # finally clause below -- confirm against other callers before
        # reordering.
        wlock = repo.wlock()

        manifest = repo[None].manifest()
        def overridematch(ctx, pats=(), opts=None, globbed=False,
                          default='relpath', badfn=None):
            if opts is None:
                opts = {}
            newpats = []
            # The patterns were previously mangled to add the standin
            # directory; we need to remove that now
            for pat in pats:
                if match_.patkind(pat) is None and lfutil.shortname in pat:
                    newpats.append(pat.replace(lfutil.shortname, ''))
                else:
                    newpats.append(pat)
            match = oldmatch(ctx, newpats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)
            lfile = lambda f: lfutil.standin(f) in manifest
            m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
            m._fileroots = set(m._files)
            origmatchfn = m.matchfn
            m.matchfn = lambda f: (lfutil.isstandin(f) and
                                   (f in manifest) and
                                   origmatchfn(lfutil.splitstandin(f)) or
                                   None)
            return m
        oldmatch = installmatchfn(overridematch)
        listpats = []
        for pat in pats:
            if match_.patkind(pat) is not None:
                listpats.append(pat)
            else:
                listpats.append(makestandin(pat))

        try:
            origcopyfile = util.copyfile
            copiedfiles = []
            def overridecopyfile(src, dest):
                if (lfutil.shortname in src and
                    dest.startswith(repo.wjoin(lfutil.shortname))):
                    destlfile = dest.replace(lfutil.shortname, '')
                    if not opts['force'] and os.path.exists(destlfile):
                        raise IOError('',
                            _('destination largefile already exists'))
                copiedfiles.append((src, dest))
                origcopyfile(src, dest)

            util.copyfile = overridecopyfile
            result += orig(ui, repo, listpats, opts, rename)
        finally:
            util.copyfile = origcopyfile

        # Mirror every copied standin with the matching largefile.
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for (src, dest) in copiedfiles:
            if (lfutil.shortname in src and
                dest.startswith(repo.wjoin(lfutil.shortname))):
                srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
                destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
                destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
                if not os.path.isdir(destlfiledir):
                    os.makedirs(destlfiledir)
                if rename:
                    os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))

                    # The file is gone, but this deletes any empty parent
                    # directories as a side-effect.
                    util.unlinkpath(repo.wjoin(srclfile), True)
                    lfdirstate.remove(srclfile)
                else:
                    util.copyfile(repo.wjoin(srclfile),
                                  repo.wjoin(destlfile))

                lfdirstate.add(destlfile)
        lfdirstate.write()
    except error.Abort as e:
        if str(e) != _('no files to copy'):
            # see note above: bare raise keeps the original traceback
            raise
        else:
            nolfiles = True
    finally:
        restorematchfn()
        wlock.release()

    if nolfiles and nonormalfiles:
        raise error.Abort(_('no files to copy'))

    return result
712
712
# When the user calls revert, we have to be careful not to revert any
# changes to other largefiles accidentally. This means we have to keep
# track of the largefiles that are being reverted so we only pull down
# the necessary largefiles.
#
# Standins are only updated (to match the hash of largefiles) before
# commits. Update the standins, then run the original revert, changing
# the matcher to hit standins instead of largefiles. Based on the
# resulting standins, update the largefiles.
def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
    """Revert largefiles by reverting their standins, then syncing.

    Standins are brought up to date first, the wrapped revert is run with
    a matcher translated to standin names, and finally the largefiles are
    refreshed from whichever standins the revert changed.
    """
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        s = lfutil.lfdirstatestatus(lfdirstate, repo)
        lfdirstate.write()
        for lfile in s.modified:
            lfutil.updatestandin(repo, lfutil.standin(lfile))
        for lfile in s.deleted:
            if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
                os.unlink(repo.wjoin(lfutil.standin(lfile)))

        oldstandins = lfutil.getstandinsstate(repo)

        def overridematch(mctx, pats=(), opts=None, globbed=False,
                          default='relpath', badfn=None):
            if opts is None:
                opts = {}
            match = oldmatch(mctx, pats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)

            # revert supports recursing into subrepos, and though largefiles
            # currently doesn't work correctly in that case, this match is
            # called, so the lfdirstate above may not be the correct one for
            # this invocation of match.
            lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
                                               False)

            def tostandin(f):
                standin = lfutil.standin(f)
                if standin in ctx or standin in mctx:
                    return standin
                elif standin in repo[None] or lfdirstate[f] == 'r':
                    return None
                return f
            m._files = [tostandin(f) for f in m._files]
            m._files = [f for f in m._files if f is not None]
            m._fileroots = set(m._files)
            origmatchfn = m.matchfn
            def matchfn(f):
                if lfutil.isstandin(f):
                    return (origmatchfn(lfutil.splitstandin(f)) and
                            (f in ctx or f in mctx))
                return origmatchfn(f)
            m.matchfn = matchfn
            return m
        oldmatch = installmatchfn(overridematch)
        try:
            orig(ui, repo, ctx, parents, *pats, **opts)
        finally:
            restorematchfn()

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        # lfdirstate should be 'normallookup'-ed for updated files,
        # because reverting doesn't touch dirstate for 'normal' files
        # when target revision is explicitly specified: in such case,
        # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
        # of target (standin) file.
        lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
                                normallookup=True)

    finally:
        wlock.release()
789
789
# After pulling changesets, we need to take some extra care to get
# largefiles updated remotely.
def overridepull(orig, ui, repo, source=None, **opts):
    """Wrap 'hg pull' to optionally cache largefiles for new revisions.

    After the normal pull, any revsets given via --lfrev (plus the
    synthetic 'pulled()' revset when --all-largefiles was passed) are
    resolved and the largefiles for those revisions are downloaded into
    the local cache.  Returns the wrapped command's result.
    """
    revsprepull = len(repo)
    if not source:
        source = 'default'
    repo.lfpullsource = source
    result = orig(ui, repo, source, **opts)
    revspostpull = len(repo)
    # Work on a copy: appending 'pulled()' directly to opts['lfrev']
    # would mutate the caller's list in place.
    lfrevs = list(opts.get('lfrev', []))
    if opts.get('all_largefiles'):
        lfrevs.append('pulled()')
    if lfrevs and revspostpull > revsprepull:
        numcached = 0
        repo.firstpulled = revsprepull # for pulled() revset expression
        try:
            for rev in scmutil.revrange(repo, lfrevs):
                ui.note(_('pulling largefiles for revision %s\n') % rev)
                (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
                numcached += len(cached)
        finally:
            del repo.firstpulled
        ui.status(_("%d largefiles cached\n") % numcached)
    return result
814
814
revsetpredicate = revset.extpredicate()

@revsetpredicate('pulled()')
def pulledrevsetsymbol(repo, subset, x):
    """Changesets that just has been pulled.

    Only available with largefiles from pull --lfrev expressions.

    .. container:: verbose

      Some examples:

      - pull largefiles for all new changesets::

          hg pull -lfrev "pulled()"

      - pull largefiles for all new branch heads::

          hg pull -lfrev "head(pulled()) and not closed()"

    """

    # repo.firstpulled is only set by overridepull while it is running;
    # outside that window the predicate is meaningless.
    try:
        firstpulled = repo.firstpulled
    except AttributeError:
        raise error.Abort(_("pulled() only available in --lfrev"))
    return revset.baseset([r for r in subset if r >= firstpulled])
842
842
def overrideclone(orig, ui, source, dest=None, **opts):
    """Reject 'clone --all-largefiles' to a non-local destination, then
    defer to the wrapped clone command."""
    destpath = dest
    if destpath is None:
        destpath = hg.defaultdest(source)
    if opts.get('all_largefiles') and not hg.islocal(destpath):
        raise error.Abort(_(
            '--all-largefiles is incompatible with non-local destination %s') %
            destpath)

    return orig(ui, source, dest, **opts)
853
853
def hgclone(orig, ui, opts, *args, **kwargs):
    """Post-process hg.clone for largefiles.

    Permanently enables the extension in the new clone's hgrc when the
    source repo requires it, and pre-downloads largefiles when
    --all-largefiles was given.  Returns the wrapped clone's result, or
    None when --all-largefiles left some largefiles missing.
    """
    result = orig(ui, opts, *args, **kwargs)
    if result is None:
        return result

    sourcerepo, destrepo = result
    repo = destrepo.local()

    # When cloning to a remote repo (like through SSH), no repo is available
    # from the peer. Therefore the largefiles can't be downloaded and the
    # hgrc can't be updated.
    if not repo:
        return result

    # If largefiles is required for this repo, permanently enable it locally
    if 'largefiles' in repo.requirements:
        fp = repo.vfs('hgrc', 'a', text=True)
        try:
            fp.write('\n[extensions]\nlargefiles=\n')
        finally:
            fp.close()

    # Caching is implicitly limited to 'rev' option, since the dest repo was
    # truncated at that point. The user may expect a download count with
    # this option, so attempt whether or not this is a largefile repo.
    if opts.get('all_largefiles'):
        success, missing = lfcommands.downloadlfiles(ui, repo, None)

        if missing != 0:
            return None

    return result
885
885
def overriderebase(orig, ui, repo, **opts):
    """Run rebase with largefiles' automated commit hook installed and
    per-file largefile status output suppressed; both are removed again
    once the rebase finishes."""
    if not util.safehasattr(repo, '_largefilesenabled'):
        return orig(ui, repo, **opts)

    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        return orig(ui, repo, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
898
898
def overridearchivecmd(orig, ui, repo, dest, **opts):
    """Run the archive command with largefile status enabled.

    lfstatus is flipped on the unfiltered repo (the view the archive code
    uses) for the duration of the call and always restored afterwards.
    """
    lfrepo = repo.unfiltered()
    lfrepo.lfstatus = True
    try:
        return orig(ui, lfrepo, dest, **opts)
    finally:
        lfrepo.lfstatus = False
906
906
def hgwebarchive(orig, web, req, tmpl):
    """hgweb archive handler with largefile status enabled: turn
    lfstatus on for the duration of the request, restoring it even when
    the wrapped handler raises."""
    web.repo.lfstatus = True
    try:
        return orig(web, req, tmpl)
    finally:
        web.repo.lfstatus = False
914
914
def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
                    prefix='', mtime=None, subrepos=None):
    '''Wrap archival.archive() so archives contain largefile contents
    instead of the tiny standin files tracked in history.'''
    # For some reason setting repo.lfstatus in hgwebarchive only changes the
    # unfiltered repo's attr, so check that as well.
    if not repo.lfstatus and not repo.unfiltered().lfstatus:
        return orig(repo, dest, node, kind, decode, matchfn, prefix, mtime,
                    subrepos)

    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    if node is not None:
        lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise error.Abort(_("unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == 'files':
        if prefix:
            raise error.Abort(
                _('cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        # Skip entries excluded by the caller's matcher.
        if matchfn and not matchfn(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool("ui", "archivemeta", True):
        write('.hg_archival.txt', 0o644, False,
              lambda: archival.buildmetadata(ctx))

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if lfutil.isstandin(f):
            if node is not None:
                # Committed revision: the standin holds the largefile hash;
                # locate the blob in the repo store or user cache.
                path = lfutil.findfile(repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                       _('largefile %s not found in repo store or system cache')
                       % lfutil.splitstandin(f))
            else:
                # Working directory: read the largefile straight from disk.
                path = lfutil.splitstandin(f)

            f = lfutil.splitstandin(f)

            def getdatafn():
                # Late-bound reader: archive the largefile content instead
                # of the standin content.
                fd = None
                try:
                    fd = open(path, 'rb')
                    return fd.read()
                finally:
                    if fd:
                        fd.close()

            getdata = getdatafn
        write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.workingsub(subpath)
            submatch = match_.narrowmatcher(subpath, matchfn)
            # Propagate lfstatus so hgsubrepoarchive takes over below.
            sub._repo.lfstatus = True
            sub.archive(archiver, prefix, submatch)

    archiver.done()
990
990
def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
    """Archive an hg subrepo, substituting largefile contents for standins.

    Mirrors overridearchive() for the subrepo case: each standin in the
    subrepo's context is replaced by the real largefile data, fetched from
    the store/cache for committed revisions or read from the working
    directory otherwise.
    """
    if not repo._repo.lfstatus:
        return orig(repo, archiver, prefix, match)

    repo._get(repo._state + ('hg',))
    rev = repo._state[1]
    ctx = repo._repo[rev]

    if ctx.node() is not None:
        lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())

    def write(name, mode, islink, getdata):
        # At this point, the standin has been replaced with the largefile name,
        # so the normal matcher works here without the lfutil variants.
        # Bug fix: test the entry actually being written ('name'), not the
        # loop variable 'f' captured from the enclosing scope, matching the
        # equivalent check in overridearchive's write().
        if match and not match(name):
            return
        data = getdata()

        archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if lfutil.isstandin(f):
            if ctx.node() is not None:
                # Committed revision: resolve the largefile hash in the
                # repo store or user cache.
                path = lfutil.findfile(repo._repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                       _('largefile %s not found in repo store or system cache')
                       % lfutil.splitstandin(f))
            else:
                # Working directory: read the largefile from disk.
                path = lfutil.splitstandin(f)

            f = lfutil.splitstandin(f)

            def getdatafn():
                fd = None
                try:
                    fd = open(os.path.join(prefix, path), 'rb')
                    return fd.read()
                finally:
                    if fd:
                        fd.close()

            getdata = getdatafn

        write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)

    for subpath in sorted(ctx.substate):
        sub = ctx.workingsub(subpath)
        submatch = match_.narrowmatcher(subpath, match)
        sub._repo.lfstatus = True
        sub.archive(archiver, prefix + repo._path + '/', submatch)
1045
1045
# If a largefile is modified, the change is not reflected in its
# standin until a commit. cmdutil.bailifchanged() raises an exception
# if the repo has uncommitted changes. Wrap it to also check if
# largefiles were changed. This is used by bisect, backout and fetch.
def overridebailifchanged(orig, repo, *args, **kwargs):
    """Abort if the working directory is dirty, largefiles included."""
    orig(repo, *args, **kwargs)
    # Re-run status with largefile awareness: a modified largefile does not
    # show up in the standin's status until commit time.
    repo.lfstatus = True
    status = repo.status()
    repo.lfstatus = False
    dirty = (status.modified or status.added
             or status.removed or status.deleted)
    if dirty:
        raise error.Abort(_('uncommitted changes'))
1057
1057
def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
    """Wrap cmdutil.forget() to also forget matched largefiles.

    Normal files are handled by the original implementation; matched
    largefiles are untracked in the lfdirstate and their standins are
    unlinked and forgotten.  Returns the combined (bad, forgot) lists.
    """
    normalmatcher = composenormalfilematcher(match, repo[None].manifest())
    bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
    m = composelargefilematcher(match, repo[None].manifest())

    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    # Only forget files whose standin is actually tracked.
    forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]

    for f in forget:
        if lfutil.standin(f) not in repo.dirstate and not \
                repo.wvfs.isdir(lfutil.standin(f)):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % m.rel(f))
            bad.append(f)

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            if lfdirstate[f] == 'a':
                # Never committed: just drop the dirstate entry.
                lfdirstate.drop(f)
            else:
                lfdirstate.remove(f)
        lfdirstate.write()
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        rejected = repo[None].forget(standins)
    finally:
        wlock.release()

    bad.extend(f for f in rejected if f in m.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
1103
1103
def _getoutgoings(repo, other, missing, addfunc):
    """get pairs of filename and largefile hash in outgoing revisions
    in 'missing'.

    largefiles already existing on 'other' repository are ignored.

    'addfunc' is invoked with each unique pairs of filename and
    largefile hash value.
    """
    seen = set()
    lfhashes = set()

    def dedup(fn, lfhash):
        # Record each (filename, hash) pair at most once.
        key = (fn, lfhash)
        if key not in seen:
            seen.add(key)
            lfhashes.add(lfhash)

    lfutil.getlfilestoupload(repo, missing, dedup)
    if not lfhashes:
        return
    # Single batched existence query against the remote store.
    lfexists = basestore._openstore(repo, other).exists(lfhashes)
    for fn, lfhash in seen:
        if not lfexists[lfhash]: # lfhash doesn't exist on "other"
            addfunc(fn, lfhash)
1126
1126
def outgoinghook(ui, repo, other, opts, missing):
    """Hook for 'hg outgoing --large': list largefiles needing upload.

    With --debug, every largefile hash is printed per filename; otherwise
    only the filenames are listed.
    """
    if opts.pop('large', None):
        lfhashes = set()
        if ui.debugflag:
            # Map filename -> list of hashes so each can be shown.
            toupload = {}
            def addfunc(fn, lfhash):
                if fn not in toupload:
                    toupload[fn] = []
                toupload[fn].append(lfhash)
                lfhashes.add(lfhash)
            def showhashes(fn):
                for lfhash in sorted(toupload[fn]):
                    ui.debug(' %s\n' % (lfhash))
        else:
            # Only filenames are needed; hashes are counted but not shown.
            toupload = set()
            def addfunc(fn, lfhash):
                toupload.add(fn)
                lfhashes.add(lfhash)
            def showhashes(fn):
                pass
        _getoutgoings(repo, other, missing, addfunc)

        if not toupload:
            ui.status(_('largefiles: no files to upload\n'))
        else:
            ui.status(_('largefiles to upload (%d entities):\n')
                      % (len(lfhashes)))
            for file in sorted(toupload):
                ui.status(lfutil.splitstandin(file) + '\n')
                showhashes(file)
            ui.status('\n')
1158
1158
def summaryremotehook(ui, repo, opts, changes):
    """Hook for 'hg summary --remote --large': report largefiles to upload.

    When 'changes' is None this is the capability query: return which of
    (incoming, outgoing) checks the hook needs — only outgoing, and only
    when --large was given.
    """
    largeopt = opts.get('large', False)
    if changes is None:
        if largeopt:
            return (False, True) # only outgoing check is needed
        else:
            return (False, False)
    elif largeopt:
        url, branch, peer, outgoing = changes[1]
        if peer is None:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: (no remote repo)\n'))
            return

        toupload = set()
        lfhashes = set()
        def addfunc(fn, lfhash):
            toupload.add(fn)
            lfhashes.add(lfhash)
        _getoutgoings(repo, peer, outgoing.missing, addfunc)

        if not toupload:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: (no files to upload)\n'))
        else:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: %d entities for %d files to upload\n')
                      % (len(lfhashes), len(toupload)))
1187
1187
def overridesummary(orig, ui, repo, *pats, **opts):
    """Run 'hg summary' with largefile-aware status reporting."""
    repo.lfstatus = True
    try:
        orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
1194
1194
def scmutiladdremove(orig, repo, matcher, prefix, opts=None, dry_run=None,
                     similarity=None):
    """Wrap scmutil.addremove() so largefiles are added/removed correctly.

    Largefile removals and additions are handled here first; the original
    addremove then runs with a matcher that excludes all largefiles.
    """
    if opts is None:
        opts = {}
    if not lfutil.islfilesrepo(repo):
        return orig(repo, matcher, prefix, opts, dry_run, similarity)
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
                                  False, False, False)

    # Call into the normal remove code, but the removing of the standin, we want
    # to have handled by original addremove. Monkey patching here makes sure
    # we don't remove the standin in the largefiles code, preventing a very
    # confused state later.
    if s.deleted:
        m = copy.copy(matcher)

        # The m._files and m._map attributes are not changed to the deleted list
        # because that affects the m.exact() test, which in turn governs whether
        # or not the file name is printed, and how. Simply limit the original
        # matches to those in the deleted status list.
        matchfn = m.matchfn
        m.matchfn = lambda f: f in s.deleted and matchfn(f)

        removelargefiles(repo.ui, repo, True, m, **opts)
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest. Make sure it doesn't do anything with
    # largefiles by passing a matcher that will ignore them.
    matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
    return orig(repo, matcher, prefix, opts, dry_run, similarity)
1229
1229
# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
def overridepurge(orig, ui, repo, *dirs, **opts):
    """Wrap the purge command so tracked largefiles are not reported as
    unknown (and therefore not deleted by 'purge --all')."""
    # XXX Monkey patching a repoview will not work. The assigned attribute will
    # be set on the unfiltered repo, but we will only lookup attributes in the
    # unfiltered repo if the lookup in the repoview object itself fails. As the
    # monkey patched method exists on the repoview class the lookup will not
    # fail. As a result, the original version will shadow the monkey patched
    # one, defeating the monkey patch.
    #
    # As a work around we use an unfiltered repo here. We should do something
    # cleaner instead.
    repo = repo.unfiltered()
    oldstatus = repo.status
    def overridestatus(node1='.', node2=None, match=None, ignored=False,
                       clean=False, unknown=False, listsubrepos=False):
        r = oldstatus(node1, node2, match, ignored, clean, unknown,
                      listsubrepos)
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        # Only files the lfdirstate knows nothing about ('?') are truly
        # unknown/ignored; tracked largefiles are filtered out here.
        unknown = [f for f in r.unknown if lfdirstate[f] == '?']
        ignored = [f for f in r.ignored if lfdirstate[f] == '?']
        return scmutil.status(r.modified, r.added, r.removed, r.deleted,
                              unknown, ignored, r.clean)
    repo.status = overridestatus
    orig(ui, repo, *dirs, **opts)
    repo.status = oldstatus
def overriderollback(orig, ui, repo, **opts):
    """Wrap rollback to restore standins and resync the lfdirstate to the
    rolled-back parent revision."""
    wlock = repo.wlock()
    try:
        before = repo.dirstate.parents()
        # Standins tracked before the rollback; any left in this set after
        # the restore loop belong only to the rolled-back transaction.
        orphans = set(f for f in repo.dirstate
                      if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
        result = orig(ui, repo, **opts)
        after = repo.dirstate.parents()
        if before == after:
            return result # no need to restore standins

        pctx = repo['.']
        for f in repo.dirstate:
            if lfutil.isstandin(f):
                orphans.discard(f)
                if repo.dirstate[f] == 'r':
                    repo.wvfs.unlinkpath(f, ignoremissing=True)
                elif f in pctx:
                    # Restore standin content from the new parent.
                    fctx = pctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                else:
                    # content of standin is not so important in 'a',
                    # 'm' or 'n' (coming from the 2nd parent) cases
                    lfutil.writestandin(repo, f, '', False)
        for standin in orphans:
            repo.wvfs.unlinkpath(standin, ignoremissing=True)

        # Resync the largefile dirstate and drop entries whose largefile
        # no longer exists after the rollback.
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        orphans = set(lfdirstate)
        lfiles = lfutil.listlfiles(repo)
        for file in lfiles:
            lfutil.synclfdirstate(repo, lfdirstate, file, True)
            orphans.discard(file)
        for lfile in orphans:
            lfdirstate.drop(lfile)
        lfdirstate.write()
    finally:
        wlock.release()
    return result
1295
1295
def overridetransplant(orig, ui, repo, *revs, **opts):
    """Run transplant with automated largefile commit hooks installed."""
    resuming = opts.get('continue')
    # Install an automated commit hook and silence largefile status output
    # while transplant creates its commits.
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        return orig(ui, repo, *revs, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
1306
1306
def overridecat(orig, ui, repo, file1, *pats, **opts):
    """Wrap 'hg cat' so largefiles can be printed by their largefile name.

    The matcher is extended to accept standins for requested largefiles;
    missing largefile blobs are downloaded into the user cache on demand.
    """
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn
    def lfmatchfn(f):
        # Also match a standin when its largefile name was requested.
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)
    m.matchfn = lfmatchfn
    origbadfn = m.bad
    def lfbadfn(f, msg):
        # Suppress "no such file" for largefiles matched via standins.
        if not f in notbad:
            origbadfn(f, msg)
    m.bad = lfbadfn

    origvisitdirfn = m.visitdir
    def lfvisitdirfn(dir):
        # Always descend into the standin directory.
        if dir == lfutil.shortname:
            return True
        ret = origvisitdirfn(dir)
        if ret:
            return ret
        lf = lfutil.splitstandin(dir)
        if lf is None:
            return False
        return origvisitdirfn(lf)
    m.visitdir = lfvisitdirfn

    for f in ctx.walk(m):
        fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
                                 pathname=f)
        lf = lfutil.splitstandin(f)
        if lf is None or origmatchfn(f):
            # duplicating unreachable code from commands.cat
            data = ctx[f].data()
            if opts.get('decode'):
                data = repo.wwritedata(f, data)
            fp.write(data)
        else:
            # Largefile requested by name: stream its content, fetching it
            # into the user cache first if necessary.
            hash = lfutil.readstandin(repo, lf, ctx.rev())
            if not lfutil.inusercache(repo.ui, hash):
                store = basestore._openstore(repo)
                success, missing = store.get([(lf, hash)])
                if len(success) != 1:
                    raise error.Abort(
                        _('largefile %s is not in cache and could not be '
                          'downloaded') % lf)
            path = lfutil.usercachepath(repo.ui, hash)
            fpin = open(path, "rb")
            for chunk in util.filechunkiter(fpin, 128 * 1024):
                fp.write(chunk)
            fpin.close()
        fp.close()
        err = 0
    return err
1368
1368
def mergeupdate(orig, repo, node, branchmerge, force,
                *args, **kwargs):
    """Wrap merge.update() to keep standins and largefiles in sync.

    Before the update, standins are refreshed from locally modified
    largefiles; afterwards, largefiles whose standins changed are updated
    in the working directory.
    """
    matcher = kwargs.get('matcher', None)
    # note if this is a partial update
    partial = matcher and not matcher.always()
    wlock = repo.wlock()
    try:
        # branch |       |         |
        #  merge | force | partial | action
        # -------+-------+---------+--------------
        #    x   |   x   |    x    | linear-merge
        #    o   |   x   |    x    | branch-merge
        #    x   |   o   |    x    | overwrite (as clean update)
        #    o   |   o   |    x    | force-branch-merge (*1)
        #    x   |   x   |    o    |   (*)
        #    o   |   x   |    o    |   (*)
        #    x   |   o   |    o    | overwrite (as revert)
        #    o   |   o   |    o    |   (*)
        #
        # (*) don't care
        # (*1) deprecated, but used internally (e.g: "rebase --collapse")

        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        unsure, s = lfdirstate.status(match_.always(repo.root,
                                                    repo.getcwd()),
                                      [], False, False, False)
        pctx = repo['.']
        for lfile in unsure + s.modified:
            lfileabs = repo.wvfs.join(lfile)
            if not os.path.exists(lfileabs):
                continue
            # Refresh the standin so the update sees current largefile
            # content; mark clean if it matches the parent's standin.
            lfhash = lfutil.hashrepofile(repo, lfile)
            standin = lfutil.standin(lfile)
            lfutil.writestandin(repo, standin, lfhash,
                                lfutil.getexecutable(lfileabs))
            if (standin in pctx and
                lfhash == lfutil.readstandin(repo, lfile, '.')):
                lfdirstate.normal(lfile)
        for lfile in s.added:
            lfutil.updatestandin(repo, lfutil.standin(lfile))
        lfdirstate.write()

        oldstandins = lfutil.getstandinsstate(repo)

        result = orig(repo, node, branchmerge, force, *args, **kwargs)

        # Update only the largefiles whose standins actually changed.
        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        if branchmerge or force or partial:
            filelist.extend(s.deleted + s.removed)

        lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
                                normallookup=partial)

        return result
    finally:
        wlock.release()
1426
1426
def scmutilmarktouched(orig, repo, files, *args, **kwargs):
    """After marking files touched, refresh largefiles whose standins
    are among the touched files."""
    result = orig(repo, files, *args, **kwargs)

    touched = []
    for f in files:
        if lfutil.isstandin(f):
            touched.append(lfutil.splitstandin(f))
    if touched:
        lfcommands.updatelfiles(repo.ui, repo, filelist=touched,
                                printmessage=False, normallookup=True)

    return result
@@ -1,1543 +1,1544
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import shutil
12 import shutil
13 import struct
13 import struct
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 bin,
17 bin,
18 hex,
18 hex,
19 nullhex,
19 nullhex,
20 nullid,
20 nullid,
21 nullrev,
21 nullrev,
22 )
22 )
23 from . import (
23 from . import (
24 copies,
24 copies,
25 destutil,
25 destutil,
26 error,
26 error,
27 filemerge,
27 filemerge,
28 obsolete,
28 obsolete,
29 subrepo,
29 subrepo,
30 util,
30 util,
31 worker,
31 worker,
32 )
32 )
33
33
34 _pack = struct.pack
34 _pack = struct.pack
35 _unpack = struct.unpack
35 _unpack = struct.unpack
36
36
37 def _droponode(data):
37 def _droponode(data):
38 # used for compatibility for v1
38 # used for compatibility for v1
39 bits = data.split('\0')
39 bits = data.split('\0')
40 bits = bits[:-2] + bits[-1:]
40 bits = bits[:-2] + bits[-1:]
41 return '\0'.join(bits)
41 return '\0'.join(bits)
42
42
43 class mergestate(object):
43 class mergestate(object):
44 '''track 3-way merge state of individual files
44 '''track 3-way merge state of individual files
45
45
46 The merge state is stored on disk when needed. Two files are used: one with
46 The merge state is stored on disk when needed. Two files are used: one with
47 an old format (version 1), and one with a new format (version 2). Version 2
47 an old format (version 1), and one with a new format (version 2). Version 2
48 stores a superset of the data in version 1, including new kinds of records
48 stores a superset of the data in version 1, including new kinds of records
49 in the future. For more about the new format, see the documentation for
49 in the future. For more about the new format, see the documentation for
50 `_readrecordsv2`.
50 `_readrecordsv2`.
51
51
52 Each record can contain arbitrary content, and has an associated type. This
52 Each record can contain arbitrary content, and has an associated type. This
53 `type` should be a letter. If `type` is uppercase, the record is mandatory:
53 `type` should be a letter. If `type` is uppercase, the record is mandatory:
54 versions of Mercurial that don't support it should abort. If `type` is
54 versions of Mercurial that don't support it should abort. If `type` is
55 lowercase, the record can be safely ignored.
55 lowercase, the record can be safely ignored.
56
56
57 Currently known records:
57 Currently known records:
58
58
59 L: the node of the "local" part of the merge (hexified version)
59 L: the node of the "local" part of the merge (hexified version)
60 O: the node of the "other" part of the merge (hexified version)
60 O: the node of the "other" part of the merge (hexified version)
61 F: a file to be merged entry
61 F: a file to be merged entry
62 C: a change/delete or delete/change conflict
62 C: a change/delete or delete/change conflict
63 D: a file that the external merge driver will merge internally
63 D: a file that the external merge driver will merge internally
64 (experimental)
64 (experimental)
65 m: the external merge driver defined for this merge plus its run state
65 m: the external merge driver defined for this merge plus its run state
66 (experimental)
66 (experimental)
67 X: unsupported mandatory record type (used in tests)
67 X: unsupported mandatory record type (used in tests)
68 x: unsupported advisory record type (used in tests)
68 x: unsupported advisory record type (used in tests)
69
69
70 Merge driver run states (experimental):
70 Merge driver run states (experimental):
71 u: driver-resolved files unmarked -- needs to be run next time we're about
71 u: driver-resolved files unmarked -- needs to be run next time we're about
72 to resolve or commit
72 to resolve or commit
73 m: driver-resolved files marked -- only needs to be run before commit
73 m: driver-resolved files marked -- only needs to be run before commit
74 s: success/skipped -- does not need to be run any more
74 s: success/skipped -- does not need to be run any more
75
75
76 '''
76 '''
77 statepathv1 = 'merge/state'
77 statepathv1 = 'merge/state'
78 statepathv2 = 'merge/state2'
78 statepathv2 = 'merge/state2'
79
79
80 @staticmethod
80 @staticmethod
81 def clean(repo, node=None, other=None):
81 def clean(repo, node=None, other=None):
82 """Initialize a brand new merge state, removing any existing state on
82 """Initialize a brand new merge state, removing any existing state on
83 disk."""
83 disk."""
84 ms = mergestate(repo)
84 ms = mergestate(repo)
85 ms.reset(node, other)
85 ms.reset(node, other)
86 return ms
86 return ms
87
87
88 @staticmethod
88 @staticmethod
89 def read(repo):
89 def read(repo):
90 """Initialize the merge state, reading it from disk."""
90 """Initialize the merge state, reading it from disk."""
91 ms = mergestate(repo)
91 ms = mergestate(repo)
92 ms._read()
92 ms._read()
93 return ms
93 return ms
94
94
95 def __init__(self, repo):
95 def __init__(self, repo):
96 """Initialize the merge state.
96 """Initialize the merge state.
97
97
98 Do not use this directly! Instead call read() or clean()."""
98 Do not use this directly! Instead call read() or clean()."""
99 self._repo = repo
99 self._repo = repo
100 self._dirty = False
100 self._dirty = False
101
101
102 def reset(self, node=None, other=None):
102 def reset(self, node=None, other=None):
103 self._state = {}
103 self._state = {}
104 self._local = None
104 self._local = None
105 self._other = None
105 self._other = None
106 for var in ('localctx', 'otherctx'):
106 for var in ('localctx', 'otherctx'):
107 if var in vars(self):
107 if var in vars(self):
108 delattr(self, var)
108 delattr(self, var)
109 if node:
109 if node:
110 self._local = node
110 self._local = node
111 self._other = other
111 self._other = other
112 self._readmergedriver = None
112 self._readmergedriver = None
113 if self.mergedriver:
113 if self.mergedriver:
114 self._mdstate = 's'
114 self._mdstate = 's'
115 else:
115 else:
116 self._mdstate = 'u'
116 self._mdstate = 'u'
117 shutil.rmtree(self._repo.join('merge'), True)
117 shutil.rmtree(self._repo.join('merge'), True)
118 self._results = {}
118 self._results = {}
119 self._dirty = False
119 self._dirty = False
120
120
121 def _read(self):
121 def _read(self):
122 """Analyse each record content to restore a serialized state from disk
122 """Analyse each record content to restore a serialized state from disk
123
123
124 This function process "record" entry produced by the de-serialization
124 This function process "record" entry produced by the de-serialization
125 of on disk file.
125 of on disk file.
126 """
126 """
127 self._state = {}
127 self._state = {}
128 self._local = None
128 self._local = None
129 self._other = None
129 self._other = None
130 for var in ('localctx', 'otherctx'):
130 for var in ('localctx', 'otherctx'):
131 if var in vars(self):
131 if var in vars(self):
132 delattr(self, var)
132 delattr(self, var)
133 self._readmergedriver = None
133 self._readmergedriver = None
134 self._mdstate = 's'
134 self._mdstate = 's'
135 unsupported = set()
135 unsupported = set()
136 records = self._readrecords()
136 records = self._readrecords()
137 for rtype, record in records:
137 for rtype, record in records:
138 if rtype == 'L':
138 if rtype == 'L':
139 self._local = bin(record)
139 self._local = bin(record)
140 elif rtype == 'O':
140 elif rtype == 'O':
141 self._other = bin(record)
141 self._other = bin(record)
142 elif rtype == 'm':
142 elif rtype == 'm':
143 bits = record.split('\0', 1)
143 bits = record.split('\0', 1)
144 mdstate = bits[1]
144 mdstate = bits[1]
145 if len(mdstate) != 1 or mdstate not in 'ums':
145 if len(mdstate) != 1 or mdstate not in 'ums':
146 # the merge driver should be idempotent, so just rerun it
146 # the merge driver should be idempotent, so just rerun it
147 mdstate = 'u'
147 mdstate = 'u'
148
148
149 self._readmergedriver = bits[0]
149 self._readmergedriver = bits[0]
150 self._mdstate = mdstate
150 self._mdstate = mdstate
151 elif rtype in 'FDC':
151 elif rtype in 'FDC':
152 bits = record.split('\0')
152 bits = record.split('\0')
153 self._state[bits[0]] = bits[1:]
153 self._state[bits[0]] = bits[1:]
154 elif not rtype.islower():
154 elif not rtype.islower():
155 unsupported.add(rtype)
155 unsupported.add(rtype)
156 self._results = {}
156 self._results = {}
157 self._dirty = False
157 self._dirty = False
158
158
159 if unsupported:
159 if unsupported:
160 raise error.UnsupportedMergeRecords(unsupported)
160 raise error.UnsupportedMergeRecords(unsupported)
161
161
162 def _readrecords(self):
162 def _readrecords(self):
163 """Read merge state from disk and return a list of record (TYPE, data)
163 """Read merge state from disk and return a list of record (TYPE, data)
164
164
165 We read data from both v1 and v2 files and decide which one to use.
165 We read data from both v1 and v2 files and decide which one to use.
166
166
167 V1 has been used by version prior to 2.9.1 and contains less data than
167 V1 has been used by version prior to 2.9.1 and contains less data than
168 v2. We read both versions and check if no data in v2 contradicts
168 v2. We read both versions and check if no data in v2 contradicts
169 v1. If there is not contradiction we can safely assume that both v1
169 v1. If there is not contradiction we can safely assume that both v1
170 and v2 were written at the same time and use the extract data in v2. If
170 and v2 were written at the same time and use the extract data in v2. If
171 there is contradiction we ignore v2 content as we assume an old version
171 there is contradiction we ignore v2 content as we assume an old version
172 of Mercurial has overwritten the mergestate file and left an old v2
172 of Mercurial has overwritten the mergestate file and left an old v2
173 file around.
173 file around.
174
174
175 returns list of record [(TYPE, data), ...]"""
175 returns list of record [(TYPE, data), ...]"""
176 v1records = self._readrecordsv1()
176 v1records = self._readrecordsv1()
177 v2records = self._readrecordsv2()
177 v2records = self._readrecordsv2()
178 if self._v1v2match(v1records, v2records):
178 if self._v1v2match(v1records, v2records):
179 return v2records
179 return v2records
180 else:
180 else:
181 # v1 file is newer than v2 file, use it
181 # v1 file is newer than v2 file, use it
182 # we have to infer the "other" changeset of the merge
182 # we have to infer the "other" changeset of the merge
183 # we cannot do better than that with v1 of the format
183 # we cannot do better than that with v1 of the format
184 mctx = self._repo[None].parents()[-1]
184 mctx = self._repo[None].parents()[-1]
185 v1records.append(('O', mctx.hex()))
185 v1records.append(('O', mctx.hex()))
186 # add place holder "other" file node information
186 # add place holder "other" file node information
187 # nobody is using it yet so we do no need to fetch the data
187 # nobody is using it yet so we do no need to fetch the data
188 # if mctx was wrong `mctx[bits[-2]]` may fails.
188 # if mctx was wrong `mctx[bits[-2]]` may fails.
189 for idx, r in enumerate(v1records):
189 for idx, r in enumerate(v1records):
190 if r[0] == 'F':
190 if r[0] == 'F':
191 bits = r[1].split('\0')
191 bits = r[1].split('\0')
192 bits.insert(-2, '')
192 bits.insert(-2, '')
193 v1records[idx] = (r[0], '\0'.join(bits))
193 v1records[idx] = (r[0], '\0'.join(bits))
194 return v1records
194 return v1records
195
195
196 def _v1v2match(self, v1records, v2records):
196 def _v1v2match(self, v1records, v2records):
197 oldv2 = set() # old format version of v2 record
197 oldv2 = set() # old format version of v2 record
198 for rec in v2records:
198 for rec in v2records:
199 if rec[0] == 'L':
199 if rec[0] == 'L':
200 oldv2.add(rec)
200 oldv2.add(rec)
201 elif rec[0] == 'F':
201 elif rec[0] == 'F':
202 # drop the onode data (not contained in v1)
202 # drop the onode data (not contained in v1)
203 oldv2.add(('F', _droponode(rec[1])))
203 oldv2.add(('F', _droponode(rec[1])))
204 for rec in v1records:
204 for rec in v1records:
205 if rec not in oldv2:
205 if rec not in oldv2:
206 return False
206 return False
207 else:
207 else:
208 return True
208 return True
209
209
210 def _readrecordsv1(self):
210 def _readrecordsv1(self):
211 """read on disk merge state for version 1 file
211 """read on disk merge state for version 1 file
212
212
213 returns list of record [(TYPE, data), ...]
213 returns list of record [(TYPE, data), ...]
214
214
215 Note: the "F" data from this file are one entry short
215 Note: the "F" data from this file are one entry short
216 (no "other file node" entry)
216 (no "other file node" entry)
217 """
217 """
218 records = []
218 records = []
219 try:
219 try:
220 f = self._repo.vfs(self.statepathv1)
220 f = self._repo.vfs(self.statepathv1)
221 for i, l in enumerate(f):
221 for i, l in enumerate(f):
222 if i == 0:
222 if i == 0:
223 records.append(('L', l[:-1]))
223 records.append(('L', l[:-1]))
224 else:
224 else:
225 records.append(('F', l[:-1]))
225 records.append(('F', l[:-1]))
226 f.close()
226 f.close()
227 except IOError as err:
227 except IOError as err:
228 if err.errno != errno.ENOENT:
228 if err.errno != errno.ENOENT:
229 raise
229 raise
230 return records
230 return records
231
231
232 def _readrecordsv2(self):
232 def _readrecordsv2(self):
233 """read on disk merge state for version 2 file
233 """read on disk merge state for version 2 file
234
234
235 This format is a list of arbitrary records of the form:
235 This format is a list of arbitrary records of the form:
236
236
237 [type][length][content]
237 [type][length][content]
238
238
239 `type` is a single character, `length` is a 4 byte integer, and
239 `type` is a single character, `length` is a 4 byte integer, and
240 `content` is an arbitrary byte sequence of length `length`.
240 `content` is an arbitrary byte sequence of length `length`.
241
241
242 Mercurial versions prior to 3.7 have a bug where if there are
242 Mercurial versions prior to 3.7 have a bug where if there are
243 unsupported mandatory merge records, attempting to clear out the merge
243 unsupported mandatory merge records, attempting to clear out the merge
244 state with hg update --clean or similar aborts. The 't' record type
244 state with hg update --clean or similar aborts. The 't' record type
245 works around that by writing out what those versions treat as an
245 works around that by writing out what those versions treat as an
246 advisory record, but later versions interpret as special: the first
246 advisory record, but later versions interpret as special: the first
247 character is the 'real' record type and everything onwards is the data.
247 character is the 'real' record type and everything onwards is the data.
248
248
249 Returns list of records [(TYPE, data), ...]."""
249 Returns list of records [(TYPE, data), ...]."""
250 records = []
250 records = []
251 try:
251 try:
252 f = self._repo.vfs(self.statepathv2)
252 f = self._repo.vfs(self.statepathv2)
253 data = f.read()
253 data = f.read()
254 off = 0
254 off = 0
255 end = len(data)
255 end = len(data)
256 while off < end:
256 while off < end:
257 rtype = data[off]
257 rtype = data[off]
258 off += 1
258 off += 1
259 length = _unpack('>I', data[off:(off + 4)])[0]
259 length = _unpack('>I', data[off:(off + 4)])[0]
260 off += 4
260 off += 4
261 record = data[off:(off + length)]
261 record = data[off:(off + length)]
262 off += length
262 off += length
263 if rtype == 't':
263 if rtype == 't':
264 rtype, record = record[0], record[1:]
264 rtype, record = record[0], record[1:]
265 records.append((rtype, record))
265 records.append((rtype, record))
266 f.close()
266 f.close()
267 except IOError as err:
267 except IOError as err:
268 if err.errno != errno.ENOENT:
268 if err.errno != errno.ENOENT:
269 raise
269 raise
270 return records
270 return records
271
271
272 @util.propertycache
272 @util.propertycache
273 def mergedriver(self):
273 def mergedriver(self):
274 # protect against the following:
274 # protect against the following:
275 # - A configures a malicious merge driver in their hgrc, then
275 # - A configures a malicious merge driver in their hgrc, then
276 # pauses the merge
276 # pauses the merge
277 # - A edits their hgrc to remove references to the merge driver
277 # - A edits their hgrc to remove references to the merge driver
278 # - A gives a copy of their entire repo, including .hg, to B
278 # - A gives a copy of their entire repo, including .hg, to B
279 # - B inspects .hgrc and finds it to be clean
279 # - B inspects .hgrc and finds it to be clean
280 # - B then continues the merge and the malicious merge driver
280 # - B then continues the merge and the malicious merge driver
281 # gets invoked
281 # gets invoked
282 configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
282 configmergedriver = self._repo.ui.config('experimental', 'mergedriver')
283 if (self._readmergedriver is not None
283 if (self._readmergedriver is not None
284 and self._readmergedriver != configmergedriver):
284 and self._readmergedriver != configmergedriver):
285 raise error.ConfigError(
285 raise error.ConfigError(
286 _("merge driver changed since merge started"),
286 _("merge driver changed since merge started"),
287 hint=_("revert merge driver change or abort merge"))
287 hint=_("revert merge driver change or abort merge"))
288
288
289 return configmergedriver
289 return configmergedriver
290
290
291 @util.propertycache
291 @util.propertycache
292 def localctx(self):
292 def localctx(self):
293 if self._local is None:
293 if self._local is None:
294 raise RuntimeError("localctx accessed but self._local isn't set")
294 raise RuntimeError("localctx accessed but self._local isn't set")
295 return self._repo[self._local]
295 return self._repo[self._local]
296
296
297 @util.propertycache
297 @util.propertycache
298 def otherctx(self):
298 def otherctx(self):
299 if self._other is None:
299 if self._other is None:
300 raise RuntimeError("localctx accessed but self._local isn't set")
300 raise RuntimeError("localctx accessed but self._local isn't set")
301 return self._repo[self._other]
301 return self._repo[self._other]
302
302
303 def active(self):
303 def active(self):
304 """Whether mergestate is active.
304 """Whether mergestate is active.
305
305
306 Returns True if there appears to be mergestate. This is a rough proxy
306 Returns True if there appears to be mergestate. This is a rough proxy
307 for "is a merge in progress."
307 for "is a merge in progress."
308 """
308 """
309 # Check local variables before looking at filesystem for performance
309 # Check local variables before looking at filesystem for performance
310 # reasons.
310 # reasons.
311 return bool(self._local) or bool(self._state) or \
311 return bool(self._local) or bool(self._state) or \
312 self._repo.vfs.exists(self.statepathv1) or \
312 self._repo.vfs.exists(self.statepathv1) or \
313 self._repo.vfs.exists(self.statepathv2)
313 self._repo.vfs.exists(self.statepathv2)
314
314
315 def commit(self):
315 def commit(self):
316 """Write current state on disk (if necessary)"""
316 """Write current state on disk (if necessary)"""
317 if self._dirty:
317 if self._dirty:
318 records = self._makerecords()
318 records = self._makerecords()
319 self._writerecords(records)
319 self._writerecords(records)
320 self._dirty = False
320 self._dirty = False
321
321
322 def _makerecords(self):
322 def _makerecords(self):
323 records = []
323 records = []
324 records.append(('L', hex(self._local)))
324 records.append(('L', hex(self._local)))
325 records.append(('O', hex(self._other)))
325 records.append(('O', hex(self._other)))
326 if self.mergedriver:
326 if self.mergedriver:
327 records.append(('m', '\0'.join([
327 records.append(('m', '\0'.join([
328 self.mergedriver, self._mdstate])))
328 self.mergedriver, self._mdstate])))
329 for d, v in self._state.iteritems():
329 for d, v in self._state.iteritems():
330 if v[0] == 'd':
330 if v[0] == 'd':
331 records.append(('D', '\0'.join([d] + v)))
331 records.append(('D', '\0'.join([d] + v)))
332 # v[1] == local ('cd'), v[6] == other ('dc') -- not supported by
332 # v[1] == local ('cd'), v[6] == other ('dc') -- not supported by
333 # older versions of Mercurial
333 # older versions of Mercurial
334 elif v[1] == nullhex or v[6] == nullhex:
334 elif v[1] == nullhex or v[6] == nullhex:
335 records.append(('C', '\0'.join([d] + v)))
335 records.append(('C', '\0'.join([d] + v)))
336 else:
336 else:
337 records.append(('F', '\0'.join([d] + v)))
337 records.append(('F', '\0'.join([d] + v)))
338 return records
338 return records
339
339
340 def _writerecords(self, records):
340 def _writerecords(self, records):
341 """Write current state on disk (both v1 and v2)"""
341 """Write current state on disk (both v1 and v2)"""
342 self._writerecordsv1(records)
342 self._writerecordsv1(records)
343 self._writerecordsv2(records)
343 self._writerecordsv2(records)
344
344
345 def _writerecordsv1(self, records):
345 def _writerecordsv1(self, records):
346 """Write current state on disk in a version 1 file"""
346 """Write current state on disk in a version 1 file"""
347 f = self._repo.vfs(self.statepathv1, 'w')
347 f = self._repo.vfs(self.statepathv1, 'w')
348 irecords = iter(records)
348 irecords = iter(records)
349 lrecords = irecords.next()
349 lrecords = irecords.next()
350 assert lrecords[0] == 'L'
350 assert lrecords[0] == 'L'
351 f.write(hex(self._local) + '\n')
351 f.write(hex(self._local) + '\n')
352 for rtype, data in irecords:
352 for rtype, data in irecords:
353 if rtype == 'F':
353 if rtype == 'F':
354 f.write('%s\n' % _droponode(data))
354 f.write('%s\n' % _droponode(data))
355 f.close()
355 f.close()
356
356
357 def _writerecordsv2(self, records):
357 def _writerecordsv2(self, records):
358 """Write current state on disk in a version 2 file
358 """Write current state on disk in a version 2 file
359
359
360 See the docstring for _readrecordsv2 for why we use 't'."""
360 See the docstring for _readrecordsv2 for why we use 't'."""
361 # these are the records that all version 2 clients can read
361 # these are the records that all version 2 clients can read
362 whitelist = 'LOF'
362 whitelist = 'LOF'
363 f = self._repo.vfs(self.statepathv2, 'w')
363 f = self._repo.vfs(self.statepathv2, 'w')
364 for key, data in records:
364 for key, data in records:
365 assert len(key) == 1
365 assert len(key) == 1
366 if key not in whitelist:
366 if key not in whitelist:
367 key, data = 't', '%s%s' % (key, data)
367 key, data = 't', '%s%s' % (key, data)
368 format = '>sI%is' % len(data)
368 format = '>sI%is' % len(data)
369 f.write(_pack(format, key, len(data), data))
369 f.write(_pack(format, key, len(data), data))
370 f.close()
370 f.close()
371
371
372 def add(self, fcl, fco, fca, fd):
372 def add(self, fcl, fco, fca, fd):
373 """add a new (potentially?) conflicting file the merge state
373 """add a new (potentially?) conflicting file the merge state
374 fcl: file context for local,
374 fcl: file context for local,
375 fco: file context for remote,
375 fco: file context for remote,
376 fca: file context for ancestors,
376 fca: file context for ancestors,
377 fd: file path of the resulting merge.
377 fd: file path of the resulting merge.
378
378
379 note: also write the local version to the `.hg/merge` directory.
379 note: also write the local version to the `.hg/merge` directory.
380 """
380 """
381 if fcl.isabsent():
381 if fcl.isabsent():
382 hash = nullhex
382 hash = nullhex
383 else:
383 else:
384 hash = util.sha1(fcl.path()).hexdigest()
384 hash = util.sha1(fcl.path()).hexdigest()
385 self._repo.vfs.write('merge/' + hash, fcl.data())
385 self._repo.vfs.write('merge/' + hash, fcl.data())
386 self._state[fd] = ['u', hash, fcl.path(),
386 self._state[fd] = ['u', hash, fcl.path(),
387 fca.path(), hex(fca.filenode()),
387 fca.path(), hex(fca.filenode()),
388 fco.path(), hex(fco.filenode()),
388 fco.path(), hex(fco.filenode()),
389 fcl.flags()]
389 fcl.flags()]
390 self._dirty = True
390 self._dirty = True
391
391
392 def __contains__(self, dfile):
392 def __contains__(self, dfile):
393 return dfile in self._state
393 return dfile in self._state
394
394
395 def __getitem__(self, dfile):
395 def __getitem__(self, dfile):
396 return self._state[dfile][0]
396 return self._state[dfile][0]
397
397
398 def __iter__(self):
398 def __iter__(self):
399 return iter(sorted(self._state))
399 return iter(sorted(self._state))
400
400
401 def files(self):
401 def files(self):
402 return self._state.keys()
402 return self._state.keys()
403
403
404 def mark(self, dfile, state):
404 def mark(self, dfile, state):
405 self._state[dfile][0] = state
405 self._state[dfile][0] = state
406 self._dirty = True
406 self._dirty = True
407
407
408 def mdstate(self):
408 def mdstate(self):
409 return self._mdstate
409 return self._mdstate
410
410
411 def unresolved(self):
411 def unresolved(self):
412 """Obtain the paths of unresolved files."""
412 """Obtain the paths of unresolved files."""
413
413
414 for f, entry in self._state.items():
414 for f, entry in self._state.items():
415 if entry[0] == 'u':
415 if entry[0] == 'u':
416 yield f
416 yield f
417
417
418 def driverresolved(self):
418 def driverresolved(self):
419 """Obtain the paths of driver-resolved files."""
419 """Obtain the paths of driver-resolved files."""
420
420
421 for f, entry in self._state.items():
421 for f, entry in self._state.items():
422 if entry[0] == 'd':
422 if entry[0] == 'd':
423 yield f
423 yield f
424
424
425 def _resolve(self, preresolve, dfile, wctx, labels=None):
425 def _resolve(self, preresolve, dfile, wctx, labels=None):
426 """rerun merge process for file path `dfile`"""
426 """rerun merge process for file path `dfile`"""
427 if self[dfile] in 'rd':
427 if self[dfile] in 'rd':
428 return True, 0
428 return True, 0
429 stateentry = self._state[dfile]
429 stateentry = self._state[dfile]
430 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
430 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
431 octx = self._repo[self._other]
431 octx = self._repo[self._other]
432 fcd = self._filectxorabsent(hash, wctx, dfile)
432 fcd = self._filectxorabsent(hash, wctx, dfile)
433 fco = self._filectxorabsent(onode, octx, ofile)
433 fco = self._filectxorabsent(onode, octx, ofile)
434 # TODO: move this to filectxorabsent
434 # TODO: move this to filectxorabsent
435 fca = self._repo.filectx(afile, fileid=anode)
435 fca = self._repo.filectx(afile, fileid=anode)
436 # "premerge" x flags
436 # "premerge" x flags
437 flo = fco.flags()
437 flo = fco.flags()
438 fla = fca.flags()
438 fla = fca.flags()
439 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
439 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
440 if fca.node() == nullid:
440 if fca.node() == nullid:
441 if preresolve:
441 if preresolve:
442 self._repo.ui.warn(
442 self._repo.ui.warn(
443 _('warning: cannot merge flags for %s\n') % afile)
443 _('warning: cannot merge flags for %s\n') % afile)
444 elif flags == fla:
444 elif flags == fla:
445 flags = flo
445 flags = flo
446 if preresolve:
446 if preresolve:
447 # restore local
447 # restore local
448 if hash != nullhex:
448 if hash != nullhex:
449 f = self._repo.vfs('merge/' + hash)
449 f = self._repo.vfs('merge/' + hash)
450 self._repo.wwrite(dfile, f.read(), flags)
450 self._repo.wwrite(dfile, f.read(), flags)
451 f.close()
451 f.close()
452 else:
452 else:
453 self._repo.wvfs.unlinkpath(dfile, ignoremissing=True)
453 self._repo.wvfs.unlinkpath(dfile, ignoremissing=True)
454 complete, r, deleted = filemerge.premerge(self._repo, self._local,
454 complete, r, deleted = filemerge.premerge(self._repo, self._local,
455 lfile, fcd, fco, fca,
455 lfile, fcd, fco, fca,
456 labels=labels)
456 labels=labels)
457 else:
457 else:
458 complete, r, deleted = filemerge.filemerge(self._repo, self._local,
458 complete, r, deleted = filemerge.filemerge(self._repo, self._local,
459 lfile, fcd, fco, fca,
459 lfile, fcd, fco, fca,
460 labels=labels)
460 labels=labels)
461 if r is None:
461 if r is None:
462 # no real conflict
462 # no real conflict
463 del self._state[dfile]
463 del self._state[dfile]
464 self._dirty = True
464 self._dirty = True
465 elif not r:
465 elif not r:
466 self.mark(dfile, 'r')
466 self.mark(dfile, 'r')
467
467
468 if complete:
468 if complete:
469 action = None
469 action = None
470 if deleted:
470 if deleted:
471 if fcd.isabsent():
471 if fcd.isabsent():
472 # dc: local picked. Need to drop if present, which may
472 # dc: local picked. Need to drop if present, which may
473 # happen on re-resolves.
473 # happen on re-resolves.
474 action = 'f'
474 action = 'f'
475 else:
475 else:
476 # cd: remote picked (or otherwise deleted)
476 # cd: remote picked (or otherwise deleted)
477 action = 'r'
477 action = 'r'
478 else:
478 else:
479 if fcd.isabsent(): # dc: remote picked
479 if fcd.isabsent(): # dc: remote picked
480 action = 'g'
480 action = 'g'
481 elif fco.isabsent(): # cd: local picked
481 elif fco.isabsent(): # cd: local picked
482 if dfile in self.localctx:
482 if dfile in self.localctx:
483 action = 'am'
483 action = 'am'
484 else:
484 else:
485 action = 'a'
485 action = 'a'
486 # else: regular merges (no action necessary)
486 # else: regular merges (no action necessary)
487 self._results[dfile] = r, action
487 self._results[dfile] = r, action
488
488
489 return complete, r
489 return complete, r
490
490
491 def _filectxorabsent(self, hexnode, ctx, f):
491 def _filectxorabsent(self, hexnode, ctx, f):
492 if hexnode == nullhex:
492 if hexnode == nullhex:
493 return filemerge.absentfilectx(ctx, f)
493 return filemerge.absentfilectx(ctx, f)
494 else:
494 else:
495 return ctx[f]
495 return ctx[f]
496
496
497 def preresolve(self, dfile, wctx, labels=None):
497 def preresolve(self, dfile, wctx, labels=None):
498 """run premerge process for dfile
498 """run premerge process for dfile
499
499
500 Returns whether the merge is complete, and the exit code."""
500 Returns whether the merge is complete, and the exit code."""
501 return self._resolve(True, dfile, wctx, labels=labels)
501 return self._resolve(True, dfile, wctx, labels=labels)
502
502
503 def resolve(self, dfile, wctx, labels=None):
503 def resolve(self, dfile, wctx, labels=None):
504 """run merge process (assuming premerge was run) for dfile
504 """run merge process (assuming premerge was run) for dfile
505
505
506 Returns the exit code of the merge."""
506 Returns the exit code of the merge."""
507 return self._resolve(False, dfile, wctx, labels=labels)[1]
507 return self._resolve(False, dfile, wctx, labels=labels)[1]
508
508
509 def counts(self):
509 def counts(self):
510 """return counts for updated, merged and removed files in this
510 """return counts for updated, merged and removed files in this
511 session"""
511 session"""
512 updated, merged, removed = 0, 0, 0
512 updated, merged, removed = 0, 0, 0
513 for r, action in self._results.itervalues():
513 for r, action in self._results.itervalues():
514 if r is None:
514 if r is None:
515 updated += 1
515 updated += 1
516 elif r == 0:
516 elif r == 0:
517 if action == 'r':
517 if action == 'r':
518 removed += 1
518 removed += 1
519 else:
519 else:
520 merged += 1
520 merged += 1
521 return updated, merged, removed
521 return updated, merged, removed
522
522
523 def unresolvedcount(self):
523 def unresolvedcount(self):
524 """get unresolved count for this merge (persistent)"""
524 """get unresolved count for this merge (persistent)"""
525 return len([True for f, entry in self._state.iteritems()
525 return len([True for f, entry in self._state.iteritems()
526 if entry[0] == 'u'])
526 if entry[0] == 'u'])
527
527
528 def actions(self):
528 def actions(self):
529 """return lists of actions to perform on the dirstate"""
529 """return lists of actions to perform on the dirstate"""
530 actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
530 actions = {'r': [], 'f': [], 'a': [], 'am': [], 'g': []}
531 for f, (r, action) in self._results.iteritems():
531 for f, (r, action) in self._results.iteritems():
532 if action is not None:
532 if action is not None:
533 actions[action].append((f, None, "merge result"))
533 actions[action].append((f, None, "merge result"))
534 return actions
534 return actions
535
535
536 def recordactions(self):
536 def recordactions(self):
537 """record remove/add/get actions in the dirstate"""
537 """record remove/add/get actions in the dirstate"""
538 branchmerge = self._repo.dirstate.p2() != nullid
538 branchmerge = self._repo.dirstate.p2() != nullid
539 recordupdates(self._repo, self.actions(), branchmerge)
539 recordupdates(self._repo, self.actions(), branchmerge)
540
540
541 def queueremove(self, f):
541 def queueremove(self, f):
542 """queues a file to be removed from the dirstate
542 """queues a file to be removed from the dirstate
543
543
544 Meant for use by custom merge drivers."""
544 Meant for use by custom merge drivers."""
545 self._results[f] = 0, 'r'
545 self._results[f] = 0, 'r'
546
546
547 def queueadd(self, f):
547 def queueadd(self, f):
548 """queues a file to be added to the dirstate
548 """queues a file to be added to the dirstate
549
549
550 Meant for use by custom merge drivers."""
550 Meant for use by custom merge drivers."""
551 self._results[f] = 0, 'a'
551 self._results[f] = 0, 'a'
552
552
553 def queueget(self, f):
553 def queueget(self, f):
554 """queues a file to be marked modified in the dirstate
554 """queues a file to be marked modified in the dirstate
555
555
556 Meant for use by custom merge drivers."""
556 Meant for use by custom merge drivers."""
557 self._results[f] = 0, 'g'
557 self._results[f] = 0, 'g'
558
558
559 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
559 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
560 if f2 is None:
560 if f2 is None:
561 f2 = f
561 f2 = f
562 return (repo.wvfs.isfileorlink(f)
562 return (repo.wvfs.isfileorlink(f)
563 and repo.wvfs.audit.check(f)
563 and repo.wvfs.audit.check(f)
564 and repo.dirstate.normalize(f) not in repo.dirstate
564 and repo.dirstate.normalize(f) not in repo.dirstate
565 and mctx[f2].cmp(wctx[f]))
565 and mctx[f2].cmp(wctx[f]))
566
566
def _checkunknownfiles(repo, wctx, mctx, force, actions):
    """
    Considers any actions that care about the presence of conflicting unknown
    files. For some actions, the result is to abort; for others, it is to
    choose a different action.

    Aborts with error.Abort when (not force) and an incoming 'c', 'dc' or
    'dg' action would overwrite an untracked file whose content differs.
    Otherwise rewrites surviving 'c'/'cm' actions into concrete 'g' (get)
    or 'm' (merge) actions, in place.
    """
    conflicts = set()
    if not force:
        # collect every file an incoming action would clobber; mutating
        # values (not keys) while iterating is safe here
        for f, (m, args, msg) in actions.iteritems():
            if m in ('c', 'dc'):
                if _checkunknownfile(repo, wctx, mctx, f):
                    conflicts.add(f)
            elif m == 'dg':
                # directory-rename get: content comes from args[0]
                if _checkunknownfile(repo, wctx, mctx, f, args[0]):
                    conflicts.add(f)

    for f in sorted(conflicts):
        repo.ui.warn(_("%s: untracked file differs\n") % f)
    if conflicts:
        raise error.Abort(_("untracked files in working directory differ "
                            "from files in requested revision"))

    # no conflicting untracked files (or force): turn the remaining create
    # actions into gets.  The second element of the 'g' args tuple is the
    # new 'backup' flag introduced by this change -- False here.
    for f, (m, args, msg) in actions.iteritems():
        if m == 'c':
            flags, = args
            actions[f] = ('g', (flags, False), msg)
        elif m == 'cm':
            fl2, anc = args
            different = _checkunknownfile(repo, wctx, mctx, f)
            if different:
                actions[f] = ('m', (f, f, None, False, anc),
                              "remote differs from untracked local")
            else:
                actions[f] = ('g', (fl2, False), "remote created")
600
601
601 def _forgetremoved(wctx, mctx, branchmerge):
602 def _forgetremoved(wctx, mctx, branchmerge):
602 """
603 """
603 Forget removed files
604 Forget removed files
604
605
605 If we're jumping between revisions (as opposed to merging), and if
606 If we're jumping between revisions (as opposed to merging), and if
606 neither the working directory nor the target rev has the file,
607 neither the working directory nor the target rev has the file,
607 then we need to remove it from the dirstate, to prevent the
608 then we need to remove it from the dirstate, to prevent the
608 dirstate from listing the file when it is no longer in the
609 dirstate from listing the file when it is no longer in the
609 manifest.
610 manifest.
610
611
611 If we're merging, and the other revision has removed a file
612 If we're merging, and the other revision has removed a file
612 that is not present in the working directory, we need to mark it
613 that is not present in the working directory, we need to mark it
613 as removed.
614 as removed.
614 """
615 """
615
616
616 actions = {}
617 actions = {}
617 m = 'f'
618 m = 'f'
618 if branchmerge:
619 if branchmerge:
619 m = 'r'
620 m = 'r'
620 for f in wctx.deleted():
621 for f in wctx.deleted():
621 if f not in mctx:
622 if f not in mctx:
622 actions[f] = m, None, "forget deleted"
623 actions[f] = m, None, "forget deleted"
623
624
624 if not branchmerge:
625 if not branchmerge:
625 for f in wctx.removed():
626 for f in wctx.removed():
626 if f not in mctx:
627 if f not in mctx:
627 actions[f] = 'f', None, "forget removed"
628 actions[f] = 'f', None, "forget removed"
628
629
629 return actions
630 return actions
630
631
def _checkcollision(repo, wmf, actions):
    """Abort when the merged result would contain a case-folding collision.

    wmf is the working-directory manifest; actions is the merge action
    dict (may be empty/None).  Raises error.Abort when two result paths
    fold to the same case, or when a file's folded path collides with a
    directory prefix.  (repo is currently unused by this function.)
    """
    # build provisional merged manifest up
    pmmf = set(wmf)

    if actions:
        # k, dr, e and rd are no-op
        for m in 'a', 'am', 'f', 'g', 'cd', 'dc':
            for f, args, msg in actions[m]:
                pmmf.add(f)
        for f, args, msg in actions['r']:
            pmmf.discard(f)
        for f, args, msg in actions['dm']:
            # directory rename - move: source disappears, dest appears
            f2, flags = args
            pmmf.discard(f2)
            pmmf.add(f)
        for f, args, msg in actions['dg']:
            pmmf.add(f)
        for f, args, msg in actions['m']:
            f1, f2, fa, move, anc = args
            if move:
                pmmf.discard(f1)
            pmmf.add(f)

    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for f in sorted(pmmf):
        fold = util.normcase(f)
        if fold in foldmap:
            raise error.Abort(_("case-folding collision between %s and %s")
                              % (f, foldmap[fold]))
        foldmap[fold] = f

    # check case-folding of directories: scan folded paths in sorted order,
    # so a directory's files immediately follow its prefix
    foldprefix = unfoldprefix = lastfull = ''
    for fold, f in sorted(foldmap.items()):
        if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
            # the folded prefix matches but actual casing is different
            raise error.Abort(_("case-folding collision between "
                                "%s and directory of %s") % (lastfull, f))
        foldprefix = fold + '/'
        unfoldprefix = f + '/'
        lastfull = f
673
674
def driverpreprocess(repo, ms, wctx, labels=None):
    """run the preprocess step of the merge driver, if any

    Stub extension point: the core implementation does no work and
    reports success."""
    return True
679
680
def driverconclude(repo, ms, wctx, labels=None):
    """run the conclude step of the merge driver, if any

    Stub extension point: the core implementation does no work and
    reports success."""
    return True
685
686
def manifestmerge(repo, wctx, p2, pa, branchmerge, force, matcher,
                  acceptremote, followcopies):
    """
    Merge p1 and p2 with ancestor pa and generate merge action list

    branchmerge and force are as passed in to update
    matcher = matcher to filter file lists
    acceptremote = accept the incoming changes without prompting

    Returns a tuple (actions, diverge, renamedelete) where actions maps
    filename -> (action type, args tuple, human-readable reason).  The
    'g' (get) action args carry (flags, backup) -- the backup boolean is
    new in this change and is False for every action built here.
    """
    if matcher is not None and matcher.always():
        # an always-matching matcher filters nothing; drop it
        matcher = None

    copy, movewithdir, diverge, renamedelete = {}, {}, {}, {}

    # manifests fetched in order are going to be faster, so prime the caches
    [x.manifest() for x in
     sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]

    if followcopies:
        ret = copies.mergecopies(repo, wctx, p2, pa)
        copy, movewithdir, diverge, renamedelete = ret

    repo.ui.note(_("resolving manifests\n"))
    repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
                  % (bool(branchmerge), bool(force), bool(matcher)))
    repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))

    m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
    copied = set(copy.values())
    copied.update(movewithdir.values())

    if '.hgsubstate' in m1:
        # check whether sub state is modified
        for s in sorted(wctx.substate):
            if wctx.sub(s).dirty():
                m1['.hgsubstate'] += '+'
                break

    # Compare manifests
    if matcher is not None:
        m1 = m1.matches(matcher)
        m2 = m2.matches(matcher)
    diff = m1.diff(m2)

    actions = {}
    for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
        if n1 and n2: # file exists on both local and remote side
            if f not in ma:
                # not in the ancestor: both sides created it
                fa = copy.get(f, None)
                if fa is not None:
                    actions[f] = ('m', (f, f, fa, False, pa.node()),
                                  "both renamed from " + fa)
                else:
                    actions[f] = ('m', (f, f, None, False, pa.node()),
                                  "both created")
            else:
                a = ma[f]
                fla = ma.flags(f)
                # nol: no symlink flag involved on any of the three sides
                nol = 'l' not in fl1 + fl2 + fla
                if n2 == a and fl2 == fla:
                    actions[f] = ('k' , (), "remote unchanged")
                elif n1 == a and fl1 == fla: # local unchanged - use remote
                    if n1 == n2: # optimization: keep local content
                        actions[f] = ('e', (fl2,), "update permissions")
                    else:
                        actions[f] = ('g', (fl2, False), "remote is newer")
                elif nol and n2 == a: # remote only changed 'x'
                    actions[f] = ('e', (fl2,), "update permissions")
                elif nol and n1 == a: # local only changed 'x'
                    actions[f] = ('g', (fl1, False), "remote is newer")
                else: # both changed something
                    actions[f] = ('m', (f, f, f, False, pa.node()),
                                  "versions differ")
        elif n1: # file exists only on local side
            if f in copied:
                pass # we'll deal with it on m2 side
            elif f in movewithdir: # directory rename, move local
                f2 = movewithdir[f]
                if f2 in m2:
                    actions[f2] = ('m', (f, f2, None, True, pa.node()),
                                   "remote directory rename, both created")
                else:
                    actions[f2] = ('dm', (f, fl1),
                                   "remote directory rename - move from " + f)
            elif f in copy:
                f2 = copy[f]
                actions[f] = ('m', (f, f2, f2, False, pa.node()),
                              "local copied/moved from " + f2)
            elif f in ma: # clean, a different, no remote
                if n1 != ma[f]:
                    if acceptremote:
                        actions[f] = ('r', None, "remote delete")
                    else:
                        actions[f] = ('cd', (f, None, f, False, pa.node()),
                                      "prompt changed/deleted")
                elif n1[20:] == 'a':
                    # This extra 'a' is added by working copy manifest to mark
                    # the file as locally added. We should forget it instead of
                    # deleting it.
                    actions[f] = ('f', None, "remote deleted")
                else:
                    actions[f] = ('r', None, "other deleted")
        elif n2: # file exists only on remote side
            if f in copied:
                pass # we'll deal with it on m1 side
            elif f in movewithdir:
                f2 = movewithdir[f]
                if f2 in m1:
                    actions[f2] = ('m', (f2, f, None, False, pa.node()),
                                   "local directory rename, both created")
                else:
                    actions[f2] = ('dg', (f, fl2),
                                   "local directory rename - get from " + f)
            elif f in copy:
                f2 = copy[f]
                if f2 in m2:
                    actions[f] = ('m', (f2, f, f2, False, pa.node()),
                                  "remote copied from " + f2)
                else:
                    actions[f] = ('m', (f2, f, f2, True, pa.node()),
                                  "remote moved from " + f2)
            elif f not in ma:
                # local unknown, remote created: the logic is described by the
                # following table:
                #
                # force  branchmerge  different  |  action
                #   n        *            *      |   create
                #   y        n            *      |   create
                #   y        y            n      |   create
                #   y        y            y      |   merge
                #
                # Checking whether the files are different is expensive, so we
                # don't do that when we can avoid it.
                if not force:
                    actions[f] = ('c', (fl2,), "remote created")
                elif not branchmerge:
                    actions[f] = ('c', (fl2,), "remote created")
                else:
                    actions[f] = ('cm', (fl2, pa.node()),
                                  "remote created, get or merge")
            elif n2 != ma[f]:
                # remote recreated a file the ancestor had and local deleted
                if acceptremote:
                    actions[f] = ('c', (fl2,), "remote recreating")
                else:
                    actions[f] = ('dc', (None, f, f, False, pa.node()),
                                  "prompt deleted/changed")

    return actions, diverge, renamedelete
834
835
835 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
836 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
836 """Resolves false conflicts where the nodeid changed but the content
837 """Resolves false conflicts where the nodeid changed but the content
837 remained the same."""
838 remained the same."""
838
839
839 for f, (m, args, msg) in actions.items():
840 for f, (m, args, msg) in actions.items():
840 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
841 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
841 # local did change but ended up with same content
842 # local did change but ended up with same content
842 actions[f] = 'r', None, "prompt same"
843 actions[f] = 'r', None, "prompt same"
843 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
844 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
844 # remote did change but ended up with same content
845 # remote did change but ended up with same content
845 del actions[f] # don't get = keep local deleted
846 del actions[f] # don't get = keep local deleted
846
847
def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force,
                     acceptremote, followcopies, matcher=None):
    """Calculate the actions needed to merge mctx into wctx using ancestors

    With a single ancestor this is a plain manifestmerge().  With several
    (merge.preferancestor=*), each ancestor produces a "bid" per file and
    an auction picks the winning action.  Returns a tuple
    (actions, diverge, renamedelete).
    """
    if len(ancestors) == 1: # default
        actions, diverge, renamedelete = manifestmerge(
            repo, wctx, mctx, ancestors[0], branchmerge, force, matcher,
            acceptremote, followcopies)
        _checkunknownfiles(repo, wctx, mctx, force, actions)

    else: # only when merge.preferancestor=* - the default
        repo.ui.note(
            _("note: merging %s and %s using bids from ancestors %s\n") %
            (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))

        # Call for bids
        fbids = {} # mapping filename to bids (action method to list af actions)
        diverge, renamedelete = None, None
        for ancestor in ancestors:
            repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
            actions, diverge1, renamedelete1 = manifestmerge(
                repo, wctx, mctx, ancestor, branchmerge, force, matcher,
                acceptremote, followcopies)
            _checkunknownfiles(repo, wctx, mctx, force, actions)

            # Track the shortest set of warning on the theory that bid
            # merge will correctly incorporate more information
            if diverge is None or len(diverge1) < len(diverge):
                diverge = diverge1
            # BUGFIX: this comparison was inverted (it kept the *longest*
            # renamedelete set), contradicting both the comment above and
            # the diverge handling; keep the shortest, as for diverge.
            if renamedelete is None or len(renamedelete1) < len(renamedelete):
                renamedelete = renamedelete1

            for f, a in sorted(actions.iteritems()):
                m, args, msg = a
                repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
                if f in fbids:
                    d = fbids[f]
                    if m in d:
                        d[m].append(a)
                    else:
                        d[m] = [a]
                else:
                    fbids[f] = {m: [a]}

        # Pick the best bid for each file
        repo.ui.note(_('\nauction for merging merge bids\n'))
        actions = {}
        for f, bids in sorted(fbids.items()):
            # bids is a mapping from action method to list af actions
            # Consensus?
            if len(bids) == 1: # all bids are the same kind of method
                m, l = bids.items()[0]
                if all(a == l[0] for a in l[1:]): # len(bids) is > 1
                    repo.ui.note(" %s: consensus for %s\n" % (f, m))
                    actions[f] = l[0]
                    continue
            # If keep is an option, just do it.
            if 'k' in bids:
                repo.ui.note(" %s: picking 'keep' action\n" % f)
                actions[f] = bids['k'][0]
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if 'g' in bids:
                ga0 = bids['g'][0]
                if all(a == ga0 for a in bids['g'][1:]):
                    repo.ui.note(" %s: picking 'get' action\n" % f)
                    actions[f] = ga0
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note(' %s -> %s\n' % (msg, m))
            # Pick random action. TODO: Instead, prompt user when resolving
            m, l = bids.items()[0]
            repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
                         (f, m))
            actions[f] = l[0]
            continue
        repo.ui.note(_('end of auction\n\n'))

    _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)

    if wctx.rev() is None:
        # working directory: also forget files removed on both sides
        fractions = _forgetremoved(wctx, mctx, branchmerge)
        actions.update(fractions)

    return actions, diverge, renamedelete
935
936
def batchremove(repo, actions):
    """apply removes to the working directory

    actions is an iterable of (filename, args, message) 'r' actions.
    yields tuples for progress updates
    """
    verbose = repo.ui.verbose
    # hoist hot-loop lookups to locals
    unlink = util.unlinkpath
    wjoin = repo.wjoin
    audit = repo.wvfs.audit
    i = 0
    for f, args, msg in actions:
        repo.ui.debug(" %s: %s -> r\n" % (f, msg))
        if verbose:
            repo.ui.note(_("removing %s\n") % f)
        # vet the path via the working vfs auditor before touching it
        audit(f)
        try:
            unlink(wjoin(f), ignoremissing=True)
        except OSError as inst:
            # a failed remove is reported but does not abort the update
            repo.ui.warn(_("update failed to remove %s: %s!\n") %
                         (f, inst.strerror))
        # yield a progress tick roughly every 100 files; the final
        # partial batch is flushed after the loop
        if i == 100:
            yield i, f
            i = 0
        i += 1
    if i > 0:
        yield i, f
962
963
def batchget(repo, mctx, actions):
    """apply gets to the working directory

    mctx is the context to get from

    actions is an iterable of (filename, args, message) 'g' actions, where
    args is (flags, backup); only the flags (args[0]) are consumed here.
    yields tuples for progress updates
    """
    verbose = repo.ui.verbose
    # hoist hot-loop lookups to locals
    fctx = mctx.filectx
    wwrite = repo.wwrite
    i = 0
    for f, args, msg in actions:
        repo.ui.debug(" %s: %s -> g\n" % (f, msg))
        if verbose:
            repo.ui.note(_("getting %s\n") % f)
        wwrite(f, fctx(f).data(), args[0])
        # yield a progress tick roughly every 100 files; the final
        # partial batch is flushed after the loop
        if i == 100:
            yield i, f
            i = 0
        i += 1
    if i > 0:
        yield i, f
985
986
986 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
987 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
987 """apply the merge action list to the working directory
988 """apply the merge action list to the working directory
988
989
989 wctx is the working copy context
990 wctx is the working copy context
990 mctx is the context to be merged into the working copy
991 mctx is the context to be merged into the working copy
991
992
992 Return a tuple of counts (updated, merged, removed, unresolved) that
993 Return a tuple of counts (updated, merged, removed, unresolved) that
993 describes how many files were affected by the update.
994 describes how many files were affected by the update.
994 """
995 """
995
996
996 updated, merged, removed = 0, 0, 0
997 updated, merged, removed = 0, 0, 0
997 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node())
998 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node())
998 moves = []
999 moves = []
999 for m, l in actions.items():
1000 for m, l in actions.items():
1000 l.sort()
1001 l.sort()
1001
1002
1002 # 'cd' and 'dc' actions are treated like other merge conflicts
1003 # 'cd' and 'dc' actions are treated like other merge conflicts
1003 mergeactions = sorted(actions['cd'])
1004 mergeactions = sorted(actions['cd'])
1004 mergeactions.extend(sorted(actions['dc']))
1005 mergeactions.extend(sorted(actions['dc']))
1005 mergeactions.extend(actions['m'])
1006 mergeactions.extend(actions['m'])
1006 for f, args, msg in mergeactions:
1007 for f, args, msg in mergeactions:
1007 f1, f2, fa, move, anc = args
1008 f1, f2, fa, move, anc = args
1008 if f == '.hgsubstate': # merged internally
1009 if f == '.hgsubstate': # merged internally
1009 continue
1010 continue
1010 if f1 is None:
1011 if f1 is None:
1011 fcl = filemerge.absentfilectx(wctx, fa)
1012 fcl = filemerge.absentfilectx(wctx, fa)
1012 else:
1013 else:
1013 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
1014 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
1014 fcl = wctx[f1]
1015 fcl = wctx[f1]
1015 if f2 is None:
1016 if f2 is None:
1016 fco = filemerge.absentfilectx(mctx, fa)
1017 fco = filemerge.absentfilectx(mctx, fa)
1017 else:
1018 else:
1018 fco = mctx[f2]
1019 fco = mctx[f2]
1019 actx = repo[anc]
1020 actx = repo[anc]
1020 if fa in actx:
1021 if fa in actx:
1021 fca = actx[fa]
1022 fca = actx[fa]
1022 else:
1023 else:
1023 # TODO: move to absentfilectx
1024 # TODO: move to absentfilectx
1024 fca = repo.filectx(f1, fileid=nullrev)
1025 fca = repo.filectx(f1, fileid=nullrev)
1025 ms.add(fcl, fco, fca, f)
1026 ms.add(fcl, fco, fca, f)
1026 if f1 != f and move:
1027 if f1 != f and move:
1027 moves.append(f1)
1028 moves.append(f1)
1028
1029
1029 audit = repo.wvfs.audit
1030 audit = repo.wvfs.audit
1030 _updating = _('updating')
1031 _updating = _('updating')
1031 _files = _('files')
1032 _files = _('files')
1032 progress = repo.ui.progress
1033 progress = repo.ui.progress
1033
1034
1034 # remove renamed files after safely stored
1035 # remove renamed files after safely stored
1035 for f in moves:
1036 for f in moves:
1036 if os.path.lexists(repo.wjoin(f)):
1037 if os.path.lexists(repo.wjoin(f)):
1037 repo.ui.debug("removing %s\n" % f)
1038 repo.ui.debug("removing %s\n" % f)
1038 audit(f)
1039 audit(f)
1039 util.unlinkpath(repo.wjoin(f))
1040 util.unlinkpath(repo.wjoin(f))
1040
1041
1041 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
1042 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
1042
1043
1043 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
1044 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
1044 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
1045 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
1045
1046
1046 # remove in parallel (must come first)
1047 # remove in parallel (must come first)
1047 z = 0
1048 z = 0
1048 prog = worker.worker(repo.ui, 0.001, batchremove, (repo,), actions['r'])
1049 prog = worker.worker(repo.ui, 0.001, batchremove, (repo,), actions['r'])
1049 for i, item in prog:
1050 for i, item in prog:
1050 z += i
1051 z += i
1051 progress(_updating, z, item=item, total=numupdates, unit=_files)
1052 progress(_updating, z, item=item, total=numupdates, unit=_files)
1052 removed = len(actions['r'])
1053 removed = len(actions['r'])
1053
1054
1054 # get in parallel
1055 # get in parallel
1055 prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx), actions['g'])
1056 prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx), actions['g'])
1056 for i, item in prog:
1057 for i, item in prog:
1057 z += i
1058 z += i
1058 progress(_updating, z, item=item, total=numupdates, unit=_files)
1059 progress(_updating, z, item=item, total=numupdates, unit=_files)
1059 updated = len(actions['g'])
1060 updated = len(actions['g'])
1060
1061
1061 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
1062 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
1062 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
1063 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
1063
1064
1064 # forget (manifest only, just log it) (must come first)
1065 # forget (manifest only, just log it) (must come first)
1065 for f, args, msg in actions['f']:
1066 for f, args, msg in actions['f']:
1066 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
1067 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
1067 z += 1
1068 z += 1
1068 progress(_updating, z, item=f, total=numupdates, unit=_files)
1069 progress(_updating, z, item=f, total=numupdates, unit=_files)
1069
1070
1070 # re-add (manifest only, just log it)
1071 # re-add (manifest only, just log it)
1071 for f, args, msg in actions['a']:
1072 for f, args, msg in actions['a']:
1072 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
1073 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
1073 z += 1
1074 z += 1
1074 progress(_updating, z, item=f, total=numupdates, unit=_files)
1075 progress(_updating, z, item=f, total=numupdates, unit=_files)
1075
1076
1076 # re-add/mark as modified (manifest only, just log it)
1077 # re-add/mark as modified (manifest only, just log it)
1077 for f, args, msg in actions['am']:
1078 for f, args, msg in actions['am']:
1078 repo.ui.debug(" %s: %s -> am\n" % (f, msg))
1079 repo.ui.debug(" %s: %s -> am\n" % (f, msg))
1079 z += 1
1080 z += 1
1080 progress(_updating, z, item=f, total=numupdates, unit=_files)
1081 progress(_updating, z, item=f, total=numupdates, unit=_files)
1081
1082
1082 # keep (noop, just log it)
1083 # keep (noop, just log it)
1083 for f, args, msg in actions['k']:
1084 for f, args, msg in actions['k']:
1084 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
1085 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
1085 # no progress
1086 # no progress
1086
1087
1087 # directory rename, move local
1088 # directory rename, move local
1088 for f, args, msg in actions['dm']:
1089 for f, args, msg in actions['dm']:
1089 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
1090 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
1090 z += 1
1091 z += 1
1091 progress(_updating, z, item=f, total=numupdates, unit=_files)
1092 progress(_updating, z, item=f, total=numupdates, unit=_files)
1092 f0, flags = args
1093 f0, flags = args
1093 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1094 repo.ui.note(_("moving %s to %s\n") % (f0, f))
1094 audit(f)
1095 audit(f)
1095 repo.wwrite(f, wctx.filectx(f0).data(), flags)
1096 repo.wwrite(f, wctx.filectx(f0).data(), flags)
1096 util.unlinkpath(repo.wjoin(f0))
1097 util.unlinkpath(repo.wjoin(f0))
1097 updated += 1
1098 updated += 1
1098
1099
1099 # local directory rename, get
1100 # local directory rename, get
1100 for f, args, msg in actions['dg']:
1101 for f, args, msg in actions['dg']:
1101 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
1102 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
1102 z += 1
1103 z += 1
1103 progress(_updating, z, item=f, total=numupdates, unit=_files)
1104 progress(_updating, z, item=f, total=numupdates, unit=_files)
1104 f0, flags = args
1105 f0, flags = args
1105 repo.ui.note(_("getting %s to %s\n") % (f0, f))
1106 repo.ui.note(_("getting %s to %s\n") % (f0, f))
1106 repo.wwrite(f, mctx.filectx(f0).data(), flags)
1107 repo.wwrite(f, mctx.filectx(f0).data(), flags)
1107 updated += 1
1108 updated += 1
1108
1109
1109 # exec
1110 # exec
1110 for f, args, msg in actions['e']:
1111 for f, args, msg in actions['e']:
1111 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
1112 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
1112 z += 1
1113 z += 1
1113 progress(_updating, z, item=f, total=numupdates, unit=_files)
1114 progress(_updating, z, item=f, total=numupdates, unit=_files)
1114 flags, = args
1115 flags, = args
1115 audit(f)
1116 audit(f)
1116 util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
1117 util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
1117 updated += 1
1118 updated += 1
1118
1119
1119 # the ordering is important here -- ms.mergedriver will raise if the merge
1120 # the ordering is important here -- ms.mergedriver will raise if the merge
1120 # driver has changed, and we want to be able to bypass it when overwrite is
1121 # driver has changed, and we want to be able to bypass it when overwrite is
1121 # True
1122 # True
1122 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1123 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1123
1124
1124 if usemergedriver:
1125 if usemergedriver:
1125 ms.commit()
1126 ms.commit()
1126 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1127 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1127 # the driver might leave some files unresolved
1128 # the driver might leave some files unresolved
1128 unresolvedf = set(ms.unresolved())
1129 unresolvedf = set(ms.unresolved())
1129 if not proceed:
1130 if not proceed:
1130 # XXX setting unresolved to at least 1 is a hack to make sure we
1131 # XXX setting unresolved to at least 1 is a hack to make sure we
1131 # error out
1132 # error out
1132 return updated, merged, removed, max(len(unresolvedf), 1)
1133 return updated, merged, removed, max(len(unresolvedf), 1)
1133 newactions = []
1134 newactions = []
1134 for f, args, msg in mergeactions:
1135 for f, args, msg in mergeactions:
1135 if f in unresolvedf:
1136 if f in unresolvedf:
1136 newactions.append((f, args, msg))
1137 newactions.append((f, args, msg))
1137 mergeactions = newactions
1138 mergeactions = newactions
1138
1139
1139 # premerge
1140 # premerge
1140 tocomplete = []
1141 tocomplete = []
1141 for f, args, msg in mergeactions:
1142 for f, args, msg in mergeactions:
1142 repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
1143 repo.ui.debug(" %s: %s -> m (premerge)\n" % (f, msg))
1143 z += 1
1144 z += 1
1144 progress(_updating, z, item=f, total=numupdates, unit=_files)
1145 progress(_updating, z, item=f, total=numupdates, unit=_files)
1145 if f == '.hgsubstate': # subrepo states need updating
1146 if f == '.hgsubstate': # subrepo states need updating
1146 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
1147 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
1147 overwrite)
1148 overwrite)
1148 continue
1149 continue
1149 audit(f)
1150 audit(f)
1150 complete, r = ms.preresolve(f, wctx, labels=labels)
1151 complete, r = ms.preresolve(f, wctx, labels=labels)
1151 if not complete:
1152 if not complete:
1152 numupdates += 1
1153 numupdates += 1
1153 tocomplete.append((f, args, msg))
1154 tocomplete.append((f, args, msg))
1154
1155
1155 # merge
1156 # merge
1156 for f, args, msg in tocomplete:
1157 for f, args, msg in tocomplete:
1157 repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
1158 repo.ui.debug(" %s: %s -> m (merge)\n" % (f, msg))
1158 z += 1
1159 z += 1
1159 progress(_updating, z, item=f, total=numupdates, unit=_files)
1160 progress(_updating, z, item=f, total=numupdates, unit=_files)
1160 ms.resolve(f, wctx, labels=labels)
1161 ms.resolve(f, wctx, labels=labels)
1161
1162
1162 ms.commit()
1163 ms.commit()
1163
1164
1164 unresolved = ms.unresolvedcount()
1165 unresolved = ms.unresolvedcount()
1165
1166
1166 if usemergedriver and not unresolved and ms.mdstate() != 's':
1167 if usemergedriver and not unresolved and ms.mdstate() != 's':
1167 if not driverconclude(repo, ms, wctx, labels=labels):
1168 if not driverconclude(repo, ms, wctx, labels=labels):
1168 # XXX setting unresolved to at least 1 is a hack to make sure we
1169 # XXX setting unresolved to at least 1 is a hack to make sure we
1169 # error out
1170 # error out
1170 unresolved = max(unresolved, 1)
1171 unresolved = max(unresolved, 1)
1171
1172
1172 ms.commit()
1173 ms.commit()
1173
1174
1174 msupdated, msmerged, msremoved = ms.counts()
1175 msupdated, msmerged, msremoved = ms.counts()
1175 updated += msupdated
1176 updated += msupdated
1176 merged += msmerged
1177 merged += msmerged
1177 removed += msremoved
1178 removed += msremoved
1178
1179
1179 extraactions = ms.actions()
1180 extraactions = ms.actions()
1180 for k, acts in extraactions.iteritems():
1181 for k, acts in extraactions.iteritems():
1181 actions[k].extend(acts)
1182 actions[k].extend(acts)
1182
1183
1183 progress(_updating, None, total=numupdates, unit=_files)
1184 progress(_updating, None, total=numupdates, unit=_files)
1184
1185
1185 return updated, merged, removed, unresolved
1186 return updated, merged, removed, unresolved
1186
1187
1187 def recordupdates(repo, actions, branchmerge):
1188 def recordupdates(repo, actions, branchmerge):
1188 "record merge actions to the dirstate"
1189 "record merge actions to the dirstate"
1189 # remove (must come first)
1190 # remove (must come first)
1190 for f, args, msg in actions.get('r', []):
1191 for f, args, msg in actions.get('r', []):
1191 if branchmerge:
1192 if branchmerge:
1192 repo.dirstate.remove(f)
1193 repo.dirstate.remove(f)
1193 else:
1194 else:
1194 repo.dirstate.drop(f)
1195 repo.dirstate.drop(f)
1195
1196
1196 # forget (must come first)
1197 # forget (must come first)
1197 for f, args, msg in actions.get('f', []):
1198 for f, args, msg in actions.get('f', []):
1198 repo.dirstate.drop(f)
1199 repo.dirstate.drop(f)
1199
1200
1200 # re-add
1201 # re-add
1201 for f, args, msg in actions.get('a', []):
1202 for f, args, msg in actions.get('a', []):
1202 repo.dirstate.add(f)
1203 repo.dirstate.add(f)
1203
1204
1204 # re-add/mark as modified
1205 # re-add/mark as modified
1205 for f, args, msg in actions.get('am', []):
1206 for f, args, msg in actions.get('am', []):
1206 if branchmerge:
1207 if branchmerge:
1207 repo.dirstate.normallookup(f)
1208 repo.dirstate.normallookup(f)
1208 else:
1209 else:
1209 repo.dirstate.add(f)
1210 repo.dirstate.add(f)
1210
1211
1211 # exec change
1212 # exec change
1212 for f, args, msg in actions.get('e', []):
1213 for f, args, msg in actions.get('e', []):
1213 repo.dirstate.normallookup(f)
1214 repo.dirstate.normallookup(f)
1214
1215
1215 # keep
1216 # keep
1216 for f, args, msg in actions.get('k', []):
1217 for f, args, msg in actions.get('k', []):
1217 pass
1218 pass
1218
1219
1219 # get
1220 # get
1220 for f, args, msg in actions.get('g', []):
1221 for f, args, msg in actions.get('g', []):
1221 if branchmerge:
1222 if branchmerge:
1222 repo.dirstate.otherparent(f)
1223 repo.dirstate.otherparent(f)
1223 else:
1224 else:
1224 repo.dirstate.normal(f)
1225 repo.dirstate.normal(f)
1225
1226
1226 # merge
1227 # merge
1227 for f, args, msg in actions.get('m', []):
1228 for f, args, msg in actions.get('m', []):
1228 f1, f2, fa, move, anc = args
1229 f1, f2, fa, move, anc = args
1229 if branchmerge:
1230 if branchmerge:
1230 # We've done a branch merge, mark this file as merged
1231 # We've done a branch merge, mark this file as merged
1231 # so that we properly record the merger later
1232 # so that we properly record the merger later
1232 repo.dirstate.merge(f)
1233 repo.dirstate.merge(f)
1233 if f1 != f2: # copy/rename
1234 if f1 != f2: # copy/rename
1234 if move:
1235 if move:
1235 repo.dirstate.remove(f1)
1236 repo.dirstate.remove(f1)
1236 if f1 != f:
1237 if f1 != f:
1237 repo.dirstate.copy(f1, f)
1238 repo.dirstate.copy(f1, f)
1238 else:
1239 else:
1239 repo.dirstate.copy(f2, f)
1240 repo.dirstate.copy(f2, f)
1240 else:
1241 else:
1241 # We've update-merged a locally modified file, so
1242 # We've update-merged a locally modified file, so
1242 # we set the dirstate to emulate a normal checkout
1243 # we set the dirstate to emulate a normal checkout
1243 # of that file some time in the past. Thus our
1244 # of that file some time in the past. Thus our
1244 # merge will appear as a normal local file
1245 # merge will appear as a normal local file
1245 # modification.
1246 # modification.
1246 if f2 == f: # file not locally copied/moved
1247 if f2 == f: # file not locally copied/moved
1247 repo.dirstate.normallookup(f)
1248 repo.dirstate.normallookup(f)
1248 if move:
1249 if move:
1249 repo.dirstate.drop(f1)
1250 repo.dirstate.drop(f1)
1250
1251
1251 # directory rename, move local
1252 # directory rename, move local
1252 for f, args, msg in actions.get('dm', []):
1253 for f, args, msg in actions.get('dm', []):
1253 f0, flag = args
1254 f0, flag = args
1254 if branchmerge:
1255 if branchmerge:
1255 repo.dirstate.add(f)
1256 repo.dirstate.add(f)
1256 repo.dirstate.remove(f0)
1257 repo.dirstate.remove(f0)
1257 repo.dirstate.copy(f0, f)
1258 repo.dirstate.copy(f0, f)
1258 else:
1259 else:
1259 repo.dirstate.normal(f)
1260 repo.dirstate.normal(f)
1260 repo.dirstate.drop(f0)
1261 repo.dirstate.drop(f0)
1261
1262
1262 # directory rename, get
1263 # directory rename, get
1263 for f, args, msg in actions.get('dg', []):
1264 for f, args, msg in actions.get('dg', []):
1264 f0, flag = args
1265 f0, flag = args
1265 if branchmerge:
1266 if branchmerge:
1266 repo.dirstate.add(f)
1267 repo.dirstate.add(f)
1267 repo.dirstate.copy(f0, f)
1268 repo.dirstate.copy(f0, f)
1268 else:
1269 else:
1269 repo.dirstate.normal(f)
1270 repo.dirstate.normal(f)
1270
1271
1271 def update(repo, node, branchmerge, force, ancestor=None,
1272 def update(repo, node, branchmerge, force, ancestor=None,
1272 mergeancestor=False, labels=None, matcher=None):
1273 mergeancestor=False, labels=None, matcher=None):
1273 """
1274 """
1274 Perform a merge between the working directory and the given node
1275 Perform a merge between the working directory and the given node
1275
1276
1276 node = the node to update to, or None if unspecified
1277 node = the node to update to, or None if unspecified
1277 branchmerge = whether to merge between branches
1278 branchmerge = whether to merge between branches
1278 force = whether to force branch merging or file overwriting
1279 force = whether to force branch merging or file overwriting
1279 matcher = a matcher to filter file lists (dirstate not updated)
1280 matcher = a matcher to filter file lists (dirstate not updated)
1280 mergeancestor = whether it is merging with an ancestor. If true,
1281 mergeancestor = whether it is merging with an ancestor. If true,
1281 we should accept the incoming changes for any prompts that occur.
1282 we should accept the incoming changes for any prompts that occur.
1282 If false, merging with an ancestor (fast-forward) is only allowed
1283 If false, merging with an ancestor (fast-forward) is only allowed
1283 between different named branches. This flag is used by rebase extension
1284 between different named branches. This flag is used by rebase extension
1284 as a temporary fix and should be avoided in general.
1285 as a temporary fix and should be avoided in general.
1285
1286
1286 The table below shows all the behaviors of the update command
1287 The table below shows all the behaviors of the update command
1287 given the -c and -C or no options, whether the working directory
1288 given the -c and -C or no options, whether the working directory
1288 is dirty, whether a revision is specified, and the relationship of
1289 is dirty, whether a revision is specified, and the relationship of
1289 the parent rev to the target rev (linear, on the same named
1290 the parent rev to the target rev (linear, on the same named
1290 branch, or on another named branch).
1291 branch, or on another named branch).
1291
1292
1292 This logic is tested by test-update-branches.t.
1293 This logic is tested by test-update-branches.t.
1293
1294
1294 -c -C dirty rev | linear same cross
1295 -c -C dirty rev | linear same cross
1295 n n n n | ok (1) x
1296 n n n n | ok (1) x
1296 n n n y | ok ok ok
1297 n n n y | ok ok ok
1297 n n y n | merge (2) (2)
1298 n n y n | merge (2) (2)
1298 n n y y | merge (3) (3)
1299 n n y y | merge (3) (3)
1299 n y * * | discard discard discard
1300 n y * * | discard discard discard
1300 y n y * | (4) (4) (4)
1301 y n y * | (4) (4) (4)
1301 y n n * | ok ok ok
1302 y n n * | ok ok ok
1302 y y * * | (5) (5) (5)
1303 y y * * | (5) (5) (5)
1303
1304
1304 x = can't happen
1305 x = can't happen
1305 * = don't-care
1306 * = don't-care
1306 1 = abort: not a linear update (merge or update --check to force update)
1307 1 = abort: not a linear update (merge or update --check to force update)
1307 2 = abort: uncommitted changes (commit and merge, or update --clean to
1308 2 = abort: uncommitted changes (commit and merge, or update --clean to
1308 discard changes)
1309 discard changes)
1309 3 = abort: uncommitted changes (commit or update --clean to discard changes)
1310 3 = abort: uncommitted changes (commit or update --clean to discard changes)
1310 4 = abort: uncommitted changes (checked in commands.py)
1311 4 = abort: uncommitted changes (checked in commands.py)
1311 5 = incompatible options (checked in commands.py)
1312 5 = incompatible options (checked in commands.py)
1312
1313
1313 Return the same tuple as applyupdates().
1314 Return the same tuple as applyupdates().
1314 """
1315 """
1315
1316
1316 onode = node
1317 onode = node
1317 wlock = repo.wlock()
1318 wlock = repo.wlock()
1318 # If we're doing a partial update, we need to skip updating
1319 # If we're doing a partial update, we need to skip updating
1319 # the dirstate, so make a note of any partial-ness to the
1320 # the dirstate, so make a note of any partial-ness to the
1320 # update here.
1321 # update here.
1321 if matcher is None or matcher.always():
1322 if matcher is None or matcher.always():
1322 partial = False
1323 partial = False
1323 else:
1324 else:
1324 partial = True
1325 partial = True
1325 try:
1326 try:
1326 wc = repo[None]
1327 wc = repo[None]
1327 pl = wc.parents()
1328 pl = wc.parents()
1328 p1 = pl[0]
1329 p1 = pl[0]
1329 pas = [None]
1330 pas = [None]
1330 if ancestor is not None:
1331 if ancestor is not None:
1331 pas = [repo[ancestor]]
1332 pas = [repo[ancestor]]
1332
1333
1333 if node is None:
1334 if node is None:
1334 if (repo.ui.configbool('devel', 'all-warnings')
1335 if (repo.ui.configbool('devel', 'all-warnings')
1335 or repo.ui.configbool('devel', 'oldapi')):
1336 or repo.ui.configbool('devel', 'oldapi')):
1336 repo.ui.develwarn('update with no target')
1337 repo.ui.develwarn('update with no target')
1337 rev, _mark, _act = destutil.destupdate(repo)
1338 rev, _mark, _act = destutil.destupdate(repo)
1338 node = repo[rev].node()
1339 node = repo[rev].node()
1339
1340
1340 overwrite = force and not branchmerge
1341 overwrite = force and not branchmerge
1341
1342
1342 p2 = repo[node]
1343 p2 = repo[node]
1343 if pas[0] is None:
1344 if pas[0] is None:
1344 if repo.ui.configlist('merge', 'preferancestor', ['*']) == ['*']:
1345 if repo.ui.configlist('merge', 'preferancestor', ['*']) == ['*']:
1345 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1346 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1346 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1347 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1347 else:
1348 else:
1348 pas = [p1.ancestor(p2, warn=branchmerge)]
1349 pas = [p1.ancestor(p2, warn=branchmerge)]
1349
1350
1350 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1351 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1351
1352
1352 ### check phase
1353 ### check phase
1353 if not overwrite:
1354 if not overwrite:
1354 if len(pl) > 1:
1355 if len(pl) > 1:
1355 raise error.Abort(_("outstanding uncommitted merge"))
1356 raise error.Abort(_("outstanding uncommitted merge"))
1356 ms = mergestate.read(repo)
1357 ms = mergestate.read(repo)
1357 if list(ms.unresolved()):
1358 if list(ms.unresolved()):
1358 raise error.Abort(_("outstanding merge conflicts"))
1359 raise error.Abort(_("outstanding merge conflicts"))
1359 if branchmerge:
1360 if branchmerge:
1360 if pas == [p2]:
1361 if pas == [p2]:
1361 raise error.Abort(_("merging with a working directory ancestor"
1362 raise error.Abort(_("merging with a working directory ancestor"
1362 " has no effect"))
1363 " has no effect"))
1363 elif pas == [p1]:
1364 elif pas == [p1]:
1364 if not mergeancestor and p1.branch() == p2.branch():
1365 if not mergeancestor and p1.branch() == p2.branch():
1365 raise error.Abort(_("nothing to merge"),
1366 raise error.Abort(_("nothing to merge"),
1366 hint=_("use 'hg update' "
1367 hint=_("use 'hg update' "
1367 "or check 'hg heads'"))
1368 "or check 'hg heads'"))
1368 if not force and (wc.files() or wc.deleted()):
1369 if not force and (wc.files() or wc.deleted()):
1369 raise error.Abort(_("uncommitted changes"),
1370 raise error.Abort(_("uncommitted changes"),
1370 hint=_("use 'hg status' to list changes"))
1371 hint=_("use 'hg status' to list changes"))
1371 for s in sorted(wc.substate):
1372 for s in sorted(wc.substate):
1372 wc.sub(s).bailifchanged()
1373 wc.sub(s).bailifchanged()
1373
1374
1374 elif not overwrite:
1375 elif not overwrite:
1375 if p1 == p2: # no-op update
1376 if p1 == p2: # no-op update
1376 # call the hooks and exit early
1377 # call the hooks and exit early
1377 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1378 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1378 repo.hook('update', parent1=xp2, parent2='', error=0)
1379 repo.hook('update', parent1=xp2, parent2='', error=0)
1379 return 0, 0, 0, 0
1380 return 0, 0, 0, 0
1380
1381
1381 if pas not in ([p1], [p2]): # nonlinear
1382 if pas not in ([p1], [p2]): # nonlinear
1382 dirty = wc.dirty(missing=True)
1383 dirty = wc.dirty(missing=True)
1383 if dirty or onode is None:
1384 if dirty or onode is None:
1384 # Branching is a bit strange to ensure we do the minimal
1385 # Branching is a bit strange to ensure we do the minimal
1385 # amount of call to obsolete.background.
1386 # amount of call to obsolete.background.
1386 foreground = obsolete.foreground(repo, [p1.node()])
1387 foreground = obsolete.foreground(repo, [p1.node()])
1387 # note: the <node> variable contains a random identifier
1388 # note: the <node> variable contains a random identifier
1388 if repo[node].node() in foreground:
1389 if repo[node].node() in foreground:
1389 pas = [p1] # allow updating to successors
1390 pas = [p1] # allow updating to successors
1390 elif dirty:
1391 elif dirty:
1391 msg = _("uncommitted changes")
1392 msg = _("uncommitted changes")
1392 if onode is None:
1393 if onode is None:
1393 hint = _("commit and merge, or update --clean to"
1394 hint = _("commit and merge, or update --clean to"
1394 " discard changes")
1395 " discard changes")
1395 else:
1396 else:
1396 hint = _("commit or update --clean to discard"
1397 hint = _("commit or update --clean to discard"
1397 " changes")
1398 " changes")
1398 raise error.Abort(msg, hint=hint)
1399 raise error.Abort(msg, hint=hint)
1399 else: # node is none
1400 else: # node is none
1400 msg = _("not a linear update")
1401 msg = _("not a linear update")
1401 hint = _("merge or update --check to force update")
1402 hint = _("merge or update --check to force update")
1402 raise error.Abort(msg, hint=hint)
1403 raise error.Abort(msg, hint=hint)
1403 else:
1404 else:
1404 # Allow jumping branches if clean and specific rev given
1405 # Allow jumping branches if clean and specific rev given
1405 pas = [p1]
1406 pas = [p1]
1406
1407
1407 # deprecated config: merge.followcopies
1408 # deprecated config: merge.followcopies
1408 followcopies = False
1409 followcopies = False
1409 if overwrite:
1410 if overwrite:
1410 pas = [wc]
1411 pas = [wc]
1411 elif pas == [p2]: # backwards
1412 elif pas == [p2]: # backwards
1412 pas = [wc.p1()]
1413 pas = [wc.p1()]
1413 elif not branchmerge and not wc.dirty(missing=True):
1414 elif not branchmerge and not wc.dirty(missing=True):
1414 pass
1415 pass
1415 elif pas[0] and repo.ui.configbool('merge', 'followcopies', True):
1416 elif pas[0] and repo.ui.configbool('merge', 'followcopies', True):
1416 followcopies = True
1417 followcopies = True
1417
1418
1418 ### calculate phase
1419 ### calculate phase
1419 actionbyfile, diverge, renamedelete = calculateupdates(
1420 actionbyfile, diverge, renamedelete = calculateupdates(
1420 repo, wc, p2, pas, branchmerge, force, mergeancestor,
1421 repo, wc, p2, pas, branchmerge, force, mergeancestor,
1421 followcopies, matcher=matcher)
1422 followcopies, matcher=matcher)
1422 # Convert to dictionary-of-lists format
1423 # Convert to dictionary-of-lists format
1423 actions = dict((m, []) for m in 'a am f g cd dc r dm dg m e k'.split())
1424 actions = dict((m, []) for m in 'a am f g cd dc r dm dg m e k'.split())
1424 for f, (m, args, msg) in actionbyfile.iteritems():
1425 for f, (m, args, msg) in actionbyfile.iteritems():
1425 if m not in actions:
1426 if m not in actions:
1426 actions[m] = []
1427 actions[m] = []
1427 actions[m].append((f, args, msg))
1428 actions[m].append((f, args, msg))
1428
1429
1429 if not util.checkcase(repo.path):
1430 if not util.checkcase(repo.path):
1430 # check collision between files only in p2 for clean update
1431 # check collision between files only in p2 for clean update
1431 if (not branchmerge and
1432 if (not branchmerge and
1432 (force or not wc.dirty(missing=True, branch=False))):
1433 (force or not wc.dirty(missing=True, branch=False))):
1433 _checkcollision(repo, p2.manifest(), None)
1434 _checkcollision(repo, p2.manifest(), None)
1434 else:
1435 else:
1435 _checkcollision(repo, wc.manifest(), actions)
1436 _checkcollision(repo, wc.manifest(), actions)
1436
1437
1437 # Prompt and create actions. Most of this is in the resolve phase
1438 # Prompt and create actions. Most of this is in the resolve phase
1438 # already, but we can't handle .hgsubstate in filemerge or
1439 # already, but we can't handle .hgsubstate in filemerge or
1439 # subrepo.submerge yet so we have to keep prompting for it.
1440 # subrepo.submerge yet so we have to keep prompting for it.
1440 for f, args, msg in sorted(actions['cd']):
1441 for f, args, msg in sorted(actions['cd']):
1441 if f != '.hgsubstate':
1442 if f != '.hgsubstate':
1442 continue
1443 continue
1443 if repo.ui.promptchoice(
1444 if repo.ui.promptchoice(
1444 _("local changed %s which remote deleted\n"
1445 _("local changed %s which remote deleted\n"
1445 "use (c)hanged version or (d)elete?"
1446 "use (c)hanged version or (d)elete?"
1446 "$$ &Changed $$ &Delete") % f, 0):
1447 "$$ &Changed $$ &Delete") % f, 0):
1447 actions['r'].append((f, None, "prompt delete"))
1448 actions['r'].append((f, None, "prompt delete"))
1448 elif f in p1:
1449 elif f in p1:
1449 actions['am'].append((f, None, "prompt keep"))
1450 actions['am'].append((f, None, "prompt keep"))
1450 else:
1451 else:
1451 actions['a'].append((f, None, "prompt keep"))
1452 actions['a'].append((f, None, "prompt keep"))
1452
1453
1453 for f, args, msg in sorted(actions['dc']):
1454 for f, args, msg in sorted(actions['dc']):
1454 if f != '.hgsubstate':
1455 if f != '.hgsubstate':
1455 continue
1456 continue
1456 f1, f2, fa, move, anc = args
1457 f1, f2, fa, move, anc = args
1457 flags = p2[f2].flags()
1458 flags = p2[f2].flags()
1458 if repo.ui.promptchoice(
1459 if repo.ui.promptchoice(
1459 _("remote changed %s which local deleted\n"
1460 _("remote changed %s which local deleted\n"
1460 "use (c)hanged version or leave (d)eleted?"
1461 "use (c)hanged version or leave (d)eleted?"
1461 "$$ &Changed $$ &Deleted") % f, 0) == 0:
1462 "$$ &Changed $$ &Deleted") % f, 0) == 0:
1462 actions['g'].append((f, (flags,), "prompt recreating"))
1463 actions['g'].append((f, (flags, False), "prompt recreating"))
1463
1464
1464 # divergent renames
1465 # divergent renames
1465 for f, fl in sorted(diverge.iteritems()):
1466 for f, fl in sorted(diverge.iteritems()):
1466 repo.ui.warn(_("note: possible conflict - %s was renamed "
1467 repo.ui.warn(_("note: possible conflict - %s was renamed "
1467 "multiple times to:\n") % f)
1468 "multiple times to:\n") % f)
1468 for nf in fl:
1469 for nf in fl:
1469 repo.ui.warn(" %s\n" % nf)
1470 repo.ui.warn(" %s\n" % nf)
1470
1471
1471 # rename and delete
1472 # rename and delete
1472 for f, fl in sorted(renamedelete.iteritems()):
1473 for f, fl in sorted(renamedelete.iteritems()):
1473 repo.ui.warn(_("note: possible conflict - %s was deleted "
1474 repo.ui.warn(_("note: possible conflict - %s was deleted "
1474 "and renamed to:\n") % f)
1475 "and renamed to:\n") % f)
1475 for nf in fl:
1476 for nf in fl:
1476 repo.ui.warn(" %s\n" % nf)
1477 repo.ui.warn(" %s\n" % nf)
1477
1478
1478 ### apply phase
1479 ### apply phase
1479 if not branchmerge: # just jump to the new rev
1480 if not branchmerge: # just jump to the new rev
1480 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1481 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1481 if not partial:
1482 if not partial:
1482 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1483 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1483 # note that we're in the middle of an update
1484 # note that we're in the middle of an update
1484 repo.vfs.write('updatestate', p2.hex())
1485 repo.vfs.write('updatestate', p2.hex())
1485
1486
1486 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1487 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1487
1488
1488 if not partial:
1489 if not partial:
1489 repo.dirstate.beginparentchange()
1490 repo.dirstate.beginparentchange()
1490 repo.setparents(fp1, fp2)
1491 repo.setparents(fp1, fp2)
1491 recordupdates(repo, actions, branchmerge)
1492 recordupdates(repo, actions, branchmerge)
1492 # update completed, clear state
1493 # update completed, clear state
1493 util.unlink(repo.join('updatestate'))
1494 util.unlink(repo.join('updatestate'))
1494
1495
1495 if not branchmerge:
1496 if not branchmerge:
1496 repo.dirstate.setbranch(p2.branch())
1497 repo.dirstate.setbranch(p2.branch())
1497 repo.dirstate.endparentchange()
1498 repo.dirstate.endparentchange()
1498 finally:
1499 finally:
1499 wlock.release()
1500 wlock.release()
1500
1501
1501 if not partial:
1502 if not partial:
1502 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
1503 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
1503 return stats
1504 return stats
1504
1505
def graft(repo, ctx, pctx, labels, keepparent=False):
    """Perform a graft-like merge of ctx onto the working directory.

    The merge ancestor is chosen so that one or more changesets get
    grafted onto the current changeset. After merging, the dirstate is
    fixed up to record a single parent (unless keepparent is True and a
    second parent applies) and any renames/copies are duplicated.

    ctx - changeset to rebase
    pctx - merge base, usually ctx.p1()
    labels - merge labels eg ['local', 'graft']
    keepparent - keep second parent if any

    """
    # Passing mergeancestor=True when grafting a descendant onto an
    # ancestor does two things: 1) it permits the merge when the
    # destination equals ctx's parent (so graft can be used to copy
    # commits), and 2) it tells update that the incoming changes are
    # newer than the destination, suppressing the "remote changed foo
    # which local deleted" prompt.
    mergeancestor = repo.changelog.isancestor(repo['.'].node(),
                                              ctx.node())
    stats = update(repo, ctx.node(), True, True, pctx.node(),
                   mergeancestor=mergeancestor, labels=labels)

    # Decide whether a second parent should be recorded in the dirstate.
    secondparent = nullid
    ctxparents = ctx.parents()
    if keepparent and len(ctxparents) == 2 and pctx in ctxparents:
        ctxparents.remove(pctx)
        secondparent = ctxparents[0].node()

    repo.dirstate.beginparentchange()
    # NOTE: repo['.'] is re-read here on purpose -- update() above moved
    # the working directory, so '.' differs from the pre-merge value.
    repo.setparents(repo['.'].node(), secondparent)
    repo.dirstate.write(repo.currenttransaction())
    # Fix up the dirstate so copies/renames from ctx are carried over.
    copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
    repo.dirstate.endparentchange()
    return stats
General Comments 0
You need to be logged in to leave comments. Login now