merge: use separate lists for each action type...
Mads Kiilerich
r21545:43eecb4e default
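This changeset adapts the largefiles override of calculateupdates to the new shape of the merge action list: instead of one flat list of (file, type, args, message) tuples, actions are now grouped into one list per action type, keyed by the type character. A minimal sketch of the two shapes and the conversion between them (the file names, flags and messages below are made-up examples, not taken from the diff):

# Sketch only: illustrates the data-shape change this commit adapts to.

# Old shape: one flat list; the action type travels inside each tuple.
oldactions = [
    ('foo.txt', 'g', ('',), 'remote created'),      # 'g' = get from the other parent
    ('.hglf/big.bin', 'r', None, 'other deleted'),   # 'r' = remove
]

# New shape: one list per action type, keyed by the type character.
newactions = {
    'g': [('foo.txt', ('',), 'remote created')],
    'r': [('.hglf/big.bin', None, 'other deleted')],
}

# Converting the old shape to the new one:
grouped = {}
for f, m, args, msg in oldactions:
    grouped.setdefault(m, []).append((f, args, msg))
assert grouped == newactions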
@@ -1,1174 +1,1175 @@
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10
10
11 import os
11 import os
12 import copy
12 import copy
13
13
14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
15 archival, merge, pathutil, revset
15 archival, merge, pathutil, revset
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17 from mercurial.node import hex
17 from mercurial.node import hex
18 from hgext import rebase
18 from hgext import rebase
19
19
20 import lfutil
20 import lfutil
21 import lfcommands
21 import lfcommands
22 import basestore
22 import basestore
23
23
24 # -- Utility functions: commonly/repeatedly needed functionality ---------------
24 # -- Utility functions: commonly/repeatedly needed functionality ---------------
25
25
26 def installnormalfilesmatchfn(manifest):
26 def installnormalfilesmatchfn(manifest):
27 '''installmatchfn with a matchfn that ignores all largefiles'''
27 '''installmatchfn with a matchfn that ignores all largefiles'''
28 def overridematch(ctx, pats=[], opts={}, globbed=False,
28 def overridematch(ctx, pats=[], opts={}, globbed=False,
29 default='relpath'):
29 default='relpath'):
30 match = oldmatch(ctx, pats, opts, globbed, default)
30 match = oldmatch(ctx, pats, opts, globbed, default)
31 m = copy.copy(match)
31 m = copy.copy(match)
32 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
32 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
33 manifest)
33 manifest)
34 m._files = filter(notlfile, m._files)
34 m._files = filter(notlfile, m._files)
35 m._fmap = set(m._files)
35 m._fmap = set(m._files)
36 m._always = False
36 m._always = False
37 origmatchfn = m.matchfn
37 origmatchfn = m.matchfn
38 m.matchfn = lambda f: notlfile(f) and origmatchfn(f) or None
38 m.matchfn = lambda f: notlfile(f) and origmatchfn(f) or None
39 return m
39 return m
40 oldmatch = installmatchfn(overridematch)
40 oldmatch = installmatchfn(overridematch)
41
41
42 def installmatchfn(f):
42 def installmatchfn(f):
43 '''monkey patch the scmutil module with a custom match function.
43 '''monkey patch the scmutil module with a custom match function.
44 Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
44 Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
45 oldmatch = scmutil.match
45 oldmatch = scmutil.match
46 setattr(f, 'oldmatch', oldmatch)
46 setattr(f, 'oldmatch', oldmatch)
47 scmutil.match = f
47 scmutil.match = f
48 return oldmatch
48 return oldmatch
49
49
50 def restorematchfn():
50 def restorematchfn():
51 '''restores scmutil.match to what it was before installmatchfn
51 '''restores scmutil.match to what it was before installmatchfn
52 was called. no-op if scmutil.match is its original function.
52 was called. no-op if scmutil.match is its original function.
53
53
54 Note that n calls to installmatchfn will require n calls to
54 Note that n calls to installmatchfn will require n calls to
55 restore matchfn to reverse'''
55 restore matchfn to reverse'''
56 scmutil.match = getattr(scmutil.match, 'oldmatch')
56 scmutil.match = getattr(scmutil.match, 'oldmatch')
57
57
58 def installmatchandpatsfn(f):
58 def installmatchandpatsfn(f):
59 oldmatchandpats = scmutil.matchandpats
59 oldmatchandpats = scmutil.matchandpats
60 setattr(f, 'oldmatchandpats', oldmatchandpats)
60 setattr(f, 'oldmatchandpats', oldmatchandpats)
61 scmutil.matchandpats = f
61 scmutil.matchandpats = f
62 return oldmatchandpats
62 return oldmatchandpats
63
63
64 def restorematchandpatsfn():
64 def restorematchandpatsfn():
65 '''restores scmutil.matchandpats to what it was before
65 '''restores scmutil.matchandpats to what it was before
66 installnormalfilesmatchandpatsfn was called. no-op if scmutil.matchandpats
66 installnormalfilesmatchandpatsfn was called. no-op if scmutil.matchandpats
67 is its original function.
67 is its original function.
68
68
69 Note that n calls to installnormalfilesmatchandpatsfn will require n calls
69 Note that n calls to installnormalfilesmatchandpatsfn will require n calls
70 to restore matchfn to reverse'''
70 to restore matchfn to reverse'''
71 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
71 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
72 scmutil.matchandpats)
72 scmutil.matchandpats)
73
73
74 def addlargefiles(ui, repo, *pats, **opts):
74 def addlargefiles(ui, repo, *pats, **opts):
75 large = opts.pop('large', None)
75 large = opts.pop('large', None)
76 lfsize = lfutil.getminsize(
76 lfsize = lfutil.getminsize(
77 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
77 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
78
78
79 lfmatcher = None
79 lfmatcher = None
80 if lfutil.islfilesrepo(repo):
80 if lfutil.islfilesrepo(repo):
81 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
81 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
82 if lfpats:
82 if lfpats:
83 lfmatcher = match_.match(repo.root, '', list(lfpats))
83 lfmatcher = match_.match(repo.root, '', list(lfpats))
84
84
85 lfnames = []
85 lfnames = []
86 m = scmutil.match(repo[None], pats, opts)
86 m = scmutil.match(repo[None], pats, opts)
87 m.bad = lambda x, y: None
87 m.bad = lambda x, y: None
88 wctx = repo[None]
88 wctx = repo[None]
89 for f in repo.walk(m):
89 for f in repo.walk(m):
90 exact = m.exact(f)
90 exact = m.exact(f)
91 lfile = lfutil.standin(f) in wctx
91 lfile = lfutil.standin(f) in wctx
92 nfile = f in wctx
92 nfile = f in wctx
93 exists = lfile or nfile
93 exists = lfile or nfile
94
94
95 # Don't warn the user when they attempt to add a normal tracked file.
95 # Don't warn the user when they attempt to add a normal tracked file.
96 # The normal add code will do that for us.
96 # The normal add code will do that for us.
97 if exact and exists:
97 if exact and exists:
98 if lfile:
98 if lfile:
99 ui.warn(_('%s already a largefile\n') % f)
99 ui.warn(_('%s already a largefile\n') % f)
100 continue
100 continue
101
101
102 if (exact or not exists) and not lfutil.isstandin(f):
102 if (exact or not exists) and not lfutil.isstandin(f):
103 wfile = repo.wjoin(f)
103 wfile = repo.wjoin(f)
104
104
105 # In case the file was removed previously, but not committed
105 # In case the file was removed previously, but not committed
106 # (issue3507)
106 # (issue3507)
107 if not os.path.exists(wfile):
107 if not os.path.exists(wfile):
108 continue
108 continue
109
109
110 abovemin = (lfsize and
110 abovemin = (lfsize and
111 os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
111 os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
112 if large or abovemin or (lfmatcher and lfmatcher(f)):
112 if large or abovemin or (lfmatcher and lfmatcher(f)):
113 lfnames.append(f)
113 lfnames.append(f)
114 if ui.verbose or not exact:
114 if ui.verbose or not exact:
115 ui.status(_('adding %s as a largefile\n') % m.rel(f))
115 ui.status(_('adding %s as a largefile\n') % m.rel(f))
116
116
117 bad = []
117 bad = []
118 standins = []
118 standins = []
119
119
120 # Need to lock, otherwise there could be a race condition between
120 # Need to lock, otherwise there could be a race condition between
121 # when standins are created and added to the repo.
121 # when standins are created and added to the repo.
122 wlock = repo.wlock()
122 wlock = repo.wlock()
123 try:
123 try:
124 if not opts.get('dry_run'):
124 if not opts.get('dry_run'):
125 lfdirstate = lfutil.openlfdirstate(ui, repo)
125 lfdirstate = lfutil.openlfdirstate(ui, repo)
126 for f in lfnames:
126 for f in lfnames:
127 standinname = lfutil.standin(f)
127 standinname = lfutil.standin(f)
128 lfutil.writestandin(repo, standinname, hash='',
128 lfutil.writestandin(repo, standinname, hash='',
129 executable=lfutil.getexecutable(repo.wjoin(f)))
129 executable=lfutil.getexecutable(repo.wjoin(f)))
130 standins.append(standinname)
130 standins.append(standinname)
131 if lfdirstate[f] == 'r':
131 if lfdirstate[f] == 'r':
132 lfdirstate.normallookup(f)
132 lfdirstate.normallookup(f)
133 else:
133 else:
134 lfdirstate.add(f)
134 lfdirstate.add(f)
135 lfdirstate.write()
135 lfdirstate.write()
136 bad += [lfutil.splitstandin(f)
136 bad += [lfutil.splitstandin(f)
137 for f in repo[None].add(standins)
137 for f in repo[None].add(standins)
138 if f in m.files()]
138 if f in m.files()]
139 finally:
139 finally:
140 wlock.release()
140 wlock.release()
141 return bad
141 return bad
142
142
143 def removelargefiles(ui, repo, *pats, **opts):
143 def removelargefiles(ui, repo, *pats, **opts):
144 after = opts.get('after')
144 after = opts.get('after')
145 if not pats and not after:
145 if not pats and not after:
146 raise util.Abort(_('no files specified'))
146 raise util.Abort(_('no files specified'))
147 m = scmutil.match(repo[None], pats, opts)
147 m = scmutil.match(repo[None], pats, opts)
148 try:
148 try:
149 repo.lfstatus = True
149 repo.lfstatus = True
150 s = repo.status(match=m, clean=True)
150 s = repo.status(match=m, clean=True)
151 finally:
151 finally:
152 repo.lfstatus = False
152 repo.lfstatus = False
153 manifest = repo[None].manifest()
153 manifest = repo[None].manifest()
154 modified, added, deleted, clean = [[f for f in list
154 modified, added, deleted, clean = [[f for f in list
155 if lfutil.standin(f) in manifest]
155 if lfutil.standin(f) in manifest]
156 for list in [s[0], s[1], s[3], s[6]]]
156 for list in [s[0], s[1], s[3], s[6]]]
157
157
158 def warn(files, msg):
158 def warn(files, msg):
159 for f in files:
159 for f in files:
160 ui.warn(msg % m.rel(f))
160 ui.warn(msg % m.rel(f))
161 return int(len(files) > 0)
161 return int(len(files) > 0)
162
162
163 result = 0
163 result = 0
164
164
165 if after:
165 if after:
166 remove, forget = deleted, []
166 remove, forget = deleted, []
167 result = warn(modified + added + clean,
167 result = warn(modified + added + clean,
168 _('not removing %s: file still exists\n'))
168 _('not removing %s: file still exists\n'))
169 else:
169 else:
170 remove, forget = deleted + clean, []
170 remove, forget = deleted + clean, []
171 result = warn(modified, _('not removing %s: file is modified (use -f'
171 result = warn(modified, _('not removing %s: file is modified (use -f'
172 ' to force removal)\n'))
172 ' to force removal)\n'))
173 result = warn(added, _('not removing %s: file has been marked for add'
173 result = warn(added, _('not removing %s: file has been marked for add'
174 ' (use forget to undo)\n')) or result
174 ' (use forget to undo)\n')) or result
175
175
176 for f in sorted(remove + forget):
176 for f in sorted(remove + forget):
177 if ui.verbose or not m.exact(f):
177 if ui.verbose or not m.exact(f):
178 ui.status(_('removing %s\n') % m.rel(f))
178 ui.status(_('removing %s\n') % m.rel(f))
179
179
180 # Need to lock because standin files are deleted then removed from the
180 # Need to lock because standin files are deleted then removed from the
181 # repository and we could race in-between.
181 # repository and we could race in-between.
182 wlock = repo.wlock()
182 wlock = repo.wlock()
183 try:
183 try:
184 lfdirstate = lfutil.openlfdirstate(ui, repo)
184 lfdirstate = lfutil.openlfdirstate(ui, repo)
185 for f in remove:
185 for f in remove:
186 if not after:
186 if not after:
187 # If this is being called by addremove, notify the user that we
187 # If this is being called by addremove, notify the user that we
188 # are removing the file.
188 # are removing the file.
189 if getattr(repo, "_isaddremove", False):
189 if getattr(repo, "_isaddremove", False):
190 ui.status(_('removing %s\n') % f)
190 ui.status(_('removing %s\n') % f)
191 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
191 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
192 lfdirstate.remove(f)
192 lfdirstate.remove(f)
193 lfdirstate.write()
193 lfdirstate.write()
194 forget = [lfutil.standin(f) for f in forget]
194 forget = [lfutil.standin(f) for f in forget]
195 remove = [lfutil.standin(f) for f in remove]
195 remove = [lfutil.standin(f) for f in remove]
196 repo[None].forget(forget)
196 repo[None].forget(forget)
197 # If this is being called by addremove, let the original addremove
197 # If this is being called by addremove, let the original addremove
198 # function handle this.
198 # function handle this.
199 if not getattr(repo, "_isaddremove", False):
199 if not getattr(repo, "_isaddremove", False):
200 for f in remove:
200 for f in remove:
201 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
201 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
202 repo[None].forget(remove)
202 repo[None].forget(remove)
203 finally:
203 finally:
204 wlock.release()
204 wlock.release()
205
205
206 return result
206 return result
207
207
208 # For overriding mercurial.hgweb.webcommands so that largefiles will
208 # For overriding mercurial.hgweb.webcommands so that largefiles will
209 # appear at their right place in the manifests.
209 # appear at their right place in the manifests.
210 def decodepath(orig, path):
210 def decodepath(orig, path):
211 return lfutil.splitstandin(path) or path
211 return lfutil.splitstandin(path) or path
212
212
213 # -- Wrappers: modify existing commands --------------------------------
213 # -- Wrappers: modify existing commands --------------------------------
214
214
215 # Add works by going through the files that the user wanted to add and
215 # Add works by going through the files that the user wanted to add and
216 # checking if they should be added as largefiles. Then it makes a new
216 # checking if they should be added as largefiles. Then it makes a new
217 # matcher which matches only the normal files and runs the original
217 # matcher which matches only the normal files and runs the original
218 # version of add.
218 # version of add.
219 def overrideadd(orig, ui, repo, *pats, **opts):
219 def overrideadd(orig, ui, repo, *pats, **opts):
220 normal = opts.pop('normal')
220 normal = opts.pop('normal')
221 if normal:
221 if normal:
222 if opts.get('large'):
222 if opts.get('large'):
223 raise util.Abort(_('--normal cannot be used with --large'))
223 raise util.Abort(_('--normal cannot be used with --large'))
224 return orig(ui, repo, *pats, **opts)
224 return orig(ui, repo, *pats, **opts)
225 bad = addlargefiles(ui, repo, *pats, **opts)
225 bad = addlargefiles(ui, repo, *pats, **opts)
226 installnormalfilesmatchfn(repo[None].manifest())
226 installnormalfilesmatchfn(repo[None].manifest())
227 result = orig(ui, repo, *pats, **opts)
227 result = orig(ui, repo, *pats, **opts)
228 restorematchfn()
228 restorematchfn()
229
229
230 return (result == 1 or bad) and 1 or 0
230 return (result == 1 or bad) and 1 or 0
231
231
232 def overrideremove(orig, ui, repo, *pats, **opts):
232 def overrideremove(orig, ui, repo, *pats, **opts):
233 installnormalfilesmatchfn(repo[None].manifest())
233 installnormalfilesmatchfn(repo[None].manifest())
234 result = orig(ui, repo, *pats, **opts)
234 result = orig(ui, repo, *pats, **opts)
235 restorematchfn()
235 restorematchfn()
236 return removelargefiles(ui, repo, *pats, **opts) or result
236 return removelargefiles(ui, repo, *pats, **opts) or result
237
237
238 def overridestatusfn(orig, repo, rev2, **opts):
238 def overridestatusfn(orig, repo, rev2, **opts):
239 try:
239 try:
240 repo._repo.lfstatus = True
240 repo._repo.lfstatus = True
241 return orig(repo, rev2, **opts)
241 return orig(repo, rev2, **opts)
242 finally:
242 finally:
243 repo._repo.lfstatus = False
243 repo._repo.lfstatus = False
244
244
245 def overridestatus(orig, ui, repo, *pats, **opts):
245 def overridestatus(orig, ui, repo, *pats, **opts):
246 try:
246 try:
247 repo.lfstatus = True
247 repo.lfstatus = True
248 return orig(ui, repo, *pats, **opts)
248 return orig(ui, repo, *pats, **opts)
249 finally:
249 finally:
250 repo.lfstatus = False
250 repo.lfstatus = False
251
251
252 def overridedirty(orig, repo, ignoreupdate=False):
252 def overridedirty(orig, repo, ignoreupdate=False):
253 try:
253 try:
254 repo._repo.lfstatus = True
254 repo._repo.lfstatus = True
255 return orig(repo, ignoreupdate)
255 return orig(repo, ignoreupdate)
256 finally:
256 finally:
257 repo._repo.lfstatus = False
257 repo._repo.lfstatus = False
258
258
259 def overridelog(orig, ui, repo, *pats, **opts):
259 def overridelog(orig, ui, repo, *pats, **opts):
260 def overridematchandpats(ctx, pats=[], opts={}, globbed=False,
260 def overridematchandpats(ctx, pats=[], opts={}, globbed=False,
261 default='relpath'):
261 default='relpath'):
262 """Matcher that merges root directory with .hglf, suitable for log.
262 """Matcher that merges root directory with .hglf, suitable for log.
263 It is still possible to match .hglf directly.
263 It is still possible to match .hglf directly.
264 For any listed files run log on the standin too.
264 For any listed files run log on the standin too.
265 matchfn tries both the given filename and with .hglf stripped.
265 matchfn tries both the given filename and with .hglf stripped.
266 """
266 """
267 matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default)
267 matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default)
268 m, p = copy.copy(matchandpats)
268 m, p = copy.copy(matchandpats)
269
269
270 pats = set(p)
270 pats = set(p)
271 # TODO: handling of patterns in both cases below
271 # TODO: handling of patterns in both cases below
272 if m._cwd:
272 if m._cwd:
273 if os.path.isabs(m._cwd):
273 if os.path.isabs(m._cwd):
274 # TODO: handle largefile magic when invoked from other cwd
274 # TODO: handle largefile magic when invoked from other cwd
275 return matchandpats
275 return matchandpats
276 back = (m._cwd.count('/') + 1) * '../'
276 back = (m._cwd.count('/') + 1) * '../'
277 pats.update(back + lfutil.standin(m._cwd + '/' + f) for f in p)
277 pats.update(back + lfutil.standin(m._cwd + '/' + f) for f in p)
278 else:
278 else:
279 pats.update(lfutil.standin(f) for f in p)
279 pats.update(lfutil.standin(f) for f in p)
280
280
281 for i in range(0, len(m._files)):
281 for i in range(0, len(m._files)):
282 standin = lfutil.standin(m._files[i])
282 standin = lfutil.standin(m._files[i])
283 if standin in repo[ctx.node()]:
283 if standin in repo[ctx.node()]:
284 m._files[i] = standin
284 m._files[i] = standin
285 elif m._files[i] not in repo[ctx.node()]:
285 elif m._files[i] not in repo[ctx.node()]:
286 m._files.append(standin)
286 m._files.append(standin)
287 pats.add(standin)
287 pats.add(standin)
288
288
289 m._fmap = set(m._files)
289 m._fmap = set(m._files)
290 m._always = False
290 m._always = False
291 origmatchfn = m.matchfn
291 origmatchfn = m.matchfn
292 def lfmatchfn(f):
292 def lfmatchfn(f):
293 lf = lfutil.splitstandin(f)
293 lf = lfutil.splitstandin(f)
294 if lf is not None and origmatchfn(lf):
294 if lf is not None and origmatchfn(lf):
295 return True
295 return True
296 r = origmatchfn(f)
296 r = origmatchfn(f)
297 return r
297 return r
298 m.matchfn = lfmatchfn
298 m.matchfn = lfmatchfn
299
299
300 return m, pats
300 return m, pats
301
301
302 oldmatchandpats = installmatchandpatsfn(overridematchandpats)
302 oldmatchandpats = installmatchandpatsfn(overridematchandpats)
303 try:
303 try:
304 repo.lfstatus = True
304 repo.lfstatus = True
305 return orig(ui, repo, *pats, **opts)
305 return orig(ui, repo, *pats, **opts)
306 finally:
306 finally:
307 repo.lfstatus = False
307 repo.lfstatus = False
308 restorematchandpatsfn()
308 restorematchandpatsfn()
309
309
310 def overrideverify(orig, ui, repo, *pats, **opts):
310 def overrideverify(orig, ui, repo, *pats, **opts):
311 large = opts.pop('large', False)
311 large = opts.pop('large', False)
312 all = opts.pop('lfa', False)
312 all = opts.pop('lfa', False)
313 contents = opts.pop('lfc', False)
313 contents = opts.pop('lfc', False)
314
314
315 result = orig(ui, repo, *pats, **opts)
315 result = orig(ui, repo, *pats, **opts)
316 if large or all or contents:
316 if large or all or contents:
317 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
317 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
318 return result
318 return result
319
319
320 def overridedebugstate(orig, ui, repo, *pats, **opts):
320 def overridedebugstate(orig, ui, repo, *pats, **opts):
321 large = opts.pop('large', False)
321 large = opts.pop('large', False)
322 if large:
322 if large:
323 class fakerepo(object):
323 class fakerepo(object):
324 dirstate = lfutil.openlfdirstate(ui, repo)
324 dirstate = lfutil.openlfdirstate(ui, repo)
325 orig(ui, fakerepo, *pats, **opts)
325 orig(ui, fakerepo, *pats, **opts)
326 else:
326 else:
327 orig(ui, repo, *pats, **opts)
327 orig(ui, repo, *pats, **opts)
328
328
329 # Override needs to refresh standins so that update's normal merge
329 # Override needs to refresh standins so that update's normal merge
330 # will go through properly. Then the other update hook (overriding repo.update)
330 # will go through properly. Then the other update hook (overriding repo.update)
331 # will get the new files. Filemerge is also overridden so that the merge
331 # will get the new files. Filemerge is also overridden so that the merge
332 # will merge standins correctly.
332 # will merge standins correctly.
333 def overrideupdate(orig, ui, repo, *pats, **opts):
333 def overrideupdate(orig, ui, repo, *pats, **opts):
334 # Need to lock between the standins getting updated and their
334 # Need to lock between the standins getting updated and their
335 # largefiles getting updated
335 # largefiles getting updated
336 wlock = repo.wlock()
336 wlock = repo.wlock()
337 try:
337 try:
338 lfdirstate = lfutil.openlfdirstate(ui, repo)
338 lfdirstate = lfutil.openlfdirstate(ui, repo)
339 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()),
339 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()),
340 [], False, False, False)
340 [], False, False, False)
341 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
341 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
342
342
343 if opts['check']:
343 if opts['check']:
344 mod = len(modified) > 0
344 mod = len(modified) > 0
345 for lfile in unsure:
345 for lfile in unsure:
346 standin = lfutil.standin(lfile)
346 standin = lfutil.standin(lfile)
347 if repo['.'][standin].data().strip() != \
347 if repo['.'][standin].data().strip() != \
348 lfutil.hashfile(repo.wjoin(lfile)):
348 lfutil.hashfile(repo.wjoin(lfile)):
349 mod = True
349 mod = True
350 else:
350 else:
351 lfdirstate.normal(lfile)
351 lfdirstate.normal(lfile)
352 lfdirstate.write()
352 lfdirstate.write()
353 if mod:
353 if mod:
354 raise util.Abort(_('uncommitted changes'))
354 raise util.Abort(_('uncommitted changes'))
355 # XXX handle removed differently
355 # XXX handle removed differently
356 if not opts['clean']:
356 if not opts['clean']:
357 for lfile in unsure + modified + added:
357 for lfile in unsure + modified + added:
358 lfutil.updatestandin(repo, lfutil.standin(lfile))
358 lfutil.updatestandin(repo, lfutil.standin(lfile))
359 return orig(ui, repo, *pats, **opts)
359 return orig(ui, repo, *pats, **opts)
360 finally:
360 finally:
361 wlock.release()
361 wlock.release()
362
362
363 # Before starting the manifest merge, merge.updates will call
363 # Before starting the manifest merge, merge.updates will call
364 # _checkunknown to check if there are any files in the merged-in
364 # _checkunknown to check if there are any files in the merged-in
365 # changeset that collide with unknown files in the working copy.
365 # changeset that collide with unknown files in the working copy.
366 #
366 #
367 # The largefiles are seen as unknown, so this prevents us from merging
367 # The largefiles are seen as unknown, so this prevents us from merging
368 # in a file 'foo' if we already have a largefile with the same name.
368 # in a file 'foo' if we already have a largefile with the same name.
369 #
369 #
370 # The overridden function filters the unknown files by removing any
370 # The overridden function filters the unknown files by removing any
371 # largefiles. This makes the merge proceed and we can then handle this
371 # largefiles. This makes the merge proceed and we can then handle this
372 # case further in the overridden manifestmerge function below.
372 # case further in the overridden manifestmerge function below.
373 def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
373 def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
374 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
374 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
375 return False
375 return False
376 return origfn(repo, wctx, mctx, f)
376 return origfn(repo, wctx, mctx, f)
377
377
378 # The manifest merge handles conflicts on the manifest level. We want
378 # The manifest merge handles conflicts on the manifest level. We want
379 # to handle changes in largefile-ness of files at this level too.
379 # to handle changes in largefile-ness of files at this level too.
380 #
380 #
381 # The strategy is to run the original manifestmerge and then process
381 # The strategy is to run the original manifestmerge and then process
382 # the action list it outputs. There are two cases we need to deal with:
382 # the action list it outputs. There are two cases we need to deal with:
383 #
383 #
384 # 1. Normal file in p1, largefile in p2. Here the largefile is
384 # 1. Normal file in p1, largefile in p2. Here the largefile is
385 # detected via its standin file, which will enter the working copy
385 # detected via its standin file, which will enter the working copy
386 # with a "get" action. It is not "merge" since the standin is all
386 # with a "get" action. It is not "merge" since the standin is all
387 # Mercurial is concerned with at this level -- the link to the
387 # Mercurial is concerned with at this level -- the link to the
388 # existing normal file is not relevant here.
388 # existing normal file is not relevant here.
389 #
389 #
390 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
390 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
391 # since the largefile will be present in the working copy and
391 # since the largefile will be present in the working copy and
392 # different from the normal file in p2. Mercurial therefore
392 # different from the normal file in p2. Mercurial therefore
393 # triggers a merge action.
393 # triggers a merge action.
394 #
394 #
395 # In both cases, we prompt the user and emit new actions to either
395 # In both cases, we prompt the user and emit new actions to either
396 # remove the standin (if the normal file was kept) or to remove the
396 # remove the standin (if the normal file was kept) or to remove the
397 # normal file and get the standin (if the largefile was kept). The
397 # normal file and get the standin (if the largefile was kept). The
398 # default prompt answer is to use the largefile version since it was
398 # default prompt answer is to use the largefile version since it was
399 # presumably changed on purpose.
399 # presumably changed on purpose.
400 #
400 #
401 # Finally, the merge.applyupdates function will then take care of
401 # Finally, the merge.applyupdates function will then take care of
402 # writing the files into the working copy and lfcommands.updatelfiles
402 # writing the files into the working copy and lfcommands.updatelfiles
403 # will update the largefiles.
403 # will update the largefiles.
404 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
404 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
405 partial, acceptremote, followcopies):
405 partial, acceptremote, followcopies):
406 overwrite = force and not branchmerge
406 overwrite = force and not branchmerge
407 actions = origfn(repo, p1, p2, pas, branchmerge, force, partial,
407 actions = origfn(repo, p1, p2, pas, branchmerge, force, partial,
408 acceptremote, followcopies)
408 acceptremote, followcopies)
409
409
410 if overwrite:
410 if overwrite:
411 return actions
411 return actions
412
412
@@ -413,40 +413,41 @@
-    removes = set(a[0] for a in actions if a[1] == 'r')
-    processed = []
+    removes = set(a[0] for a in actions['r'])
 
-    for action in actions:
-        f, m, args, msg = action
-
+    newglist = []
+    for action in actions['g']:
+        f, args, msg = action
         splitstandin = f and lfutil.splitstandin(f)
-        if (m == "g" and splitstandin is not None and
+        if (splitstandin is not None and
             splitstandin in p1 and splitstandin not in removes):
             # Case 1: normal file in the working copy, largefile in
             # the second parent
             lfile = splitstandin
             standin = f
             msg = _('remote turned local normal file %s into a largefile\n'
                     'use (l)argefile or keep (n)ormal file?'
                     '$$ &Largefile $$ &Normal file') % lfile
             if repo.ui.promptchoice(msg, 0) == 0:
-                processed.append((lfile, "r", None, msg))
-                processed.append((standin, "g", (p2.flags(standin),), msg))
+                actions['r'].append((lfile, None, msg))
+                newglist.append((standin, (p2.flags(standin),), msg))
             else:
-                processed.append((standin, "r", None, msg))
-        elif (m == "g" and
-              lfutil.standin(f) in p1 and lfutil.standin(f) not in removes):
+                actions['r'].append((standin, None, msg))
+        elif lfutil.standin(f) in p1 and lfutil.standin(f) not in removes:
             # Case 2: largefile in the working copy, normal file in
             # the second parent
             standin = lfutil.standin(f)
             lfile = f
             msg = _('remote turned local largefile %s into a normal file\n'
                     'keep (l)argefile or use (n)ormal file?'
                     '$$ &Largefile $$ &Normal file') % lfile
             if repo.ui.promptchoice(msg, 0) == 0:
-                processed.append((lfile, "r", None, msg))
+                actions['r'].append((lfile, None, msg))
             else:
-                processed.append((standin, "r", None, msg))
-                processed.append((lfile, "g", (p2.flags(lfile),), msg))
+                actions['r'].append((standin, None, msg))
+                newglist.append((lfile, (p2.flags(lfile),), msg))
         else:
-            processed.append(action)
+            newglist.append(action)
 
-    return processed
+    newglist.sort()
+    actions['g'] = newglist
+
+    return actions
 
453 # Override filemerge to prompt the user about how they wish to merge
454 # Override filemerge to prompt the user about how they wish to merge
454 # largefiles. This will handle identical edits without prompting the user.
455 # largefiles. This will handle identical edits without prompting the user.
455 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca, labels=None):
456 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca, labels=None):
456 if not lfutil.isstandin(orig):
457 if not lfutil.isstandin(orig):
457 return origfn(repo, mynode, orig, fcd, fco, fca, labels=labels)
458 return origfn(repo, mynode, orig, fcd, fco, fca, labels=labels)
458
459
459 ahash = fca.data().strip().lower()
460 ahash = fca.data().strip().lower()
460 dhash = fcd.data().strip().lower()
461 dhash = fcd.data().strip().lower()
461 ohash = fco.data().strip().lower()
462 ohash = fco.data().strip().lower()
462 if (ohash != ahash and
463 if (ohash != ahash and
463 ohash != dhash and
464 ohash != dhash and
464 (dhash == ahash or
465 (dhash == ahash or
465 repo.ui.promptchoice(
466 repo.ui.promptchoice(
466 _('largefile %s has a merge conflict\nancestor was %s\n'
467 _('largefile %s has a merge conflict\nancestor was %s\n'
467 'keep (l)ocal %s or\ntake (o)ther %s?'
468 'keep (l)ocal %s or\ntake (o)ther %s?'
468 '$$ &Local $$ &Other') %
469 '$$ &Local $$ &Other') %
469 (lfutil.splitstandin(orig), ahash, dhash, ohash),
470 (lfutil.splitstandin(orig), ahash, dhash, ohash),
470 0) == 1)):
471 0) == 1)):
471 repo.wwrite(fcd.path(), fco.data(), fco.flags())
472 repo.wwrite(fcd.path(), fco.data(), fco.flags())
472 return 0
473 return 0
473
474
474 # Copy first changes the matchers to match standins instead of
475 # Copy first changes the matchers to match standins instead of
475 # largefiles. Then it overrides util.copyfile in that function it
476 # largefiles. Then it overrides util.copyfile in that function it
476 # checks if the destination largefile already exists. It also keeps a
477 # checks if the destination largefile already exists. It also keeps a
477 # list of copied files so that the largefiles can be copied and the
478 # list of copied files so that the largefiles can be copied and the
478 # dirstate updated.
479 # dirstate updated.
479 def overridecopy(orig, ui, repo, pats, opts, rename=False):
480 def overridecopy(orig, ui, repo, pats, opts, rename=False):
480 # doesn't remove largefile on rename
481 # doesn't remove largefile on rename
481 if len(pats) < 2:
482 if len(pats) < 2:
482 # this isn't legal, let the original function deal with it
483 # this isn't legal, let the original function deal with it
483 return orig(ui, repo, pats, opts, rename)
484 return orig(ui, repo, pats, opts, rename)
484
485
485 def makestandin(relpath):
486 def makestandin(relpath):
486 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
487 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
487 return os.path.join(repo.wjoin(lfutil.standin(path)))
488 return os.path.join(repo.wjoin(lfutil.standin(path)))
488
489
489 fullpats = scmutil.expandpats(pats)
490 fullpats = scmutil.expandpats(pats)
490 dest = fullpats[-1]
491 dest = fullpats[-1]
491
492
492 if os.path.isdir(dest):
493 if os.path.isdir(dest):
493 if not os.path.isdir(makestandin(dest)):
494 if not os.path.isdir(makestandin(dest)):
494 os.makedirs(makestandin(dest))
495 os.makedirs(makestandin(dest))
495 # This could copy both lfiles and normal files in one command,
496 # This could copy both lfiles and normal files in one command,
496 # but we don't want to do that. First replace their matcher to
497 # but we don't want to do that. First replace their matcher to
497 # only match normal files and run it, then replace it to just
498 # only match normal files and run it, then replace it to just
498 # match largefiles and run it again.
499 # match largefiles and run it again.
499 nonormalfiles = False
500 nonormalfiles = False
500 nolfiles = False
501 nolfiles = False
501 installnormalfilesmatchfn(repo[None].manifest())
502 installnormalfilesmatchfn(repo[None].manifest())
502 try:
503 try:
503 try:
504 try:
504 result = orig(ui, repo, pats, opts, rename)
505 result = orig(ui, repo, pats, opts, rename)
505 except util.Abort, e:
506 except util.Abort, e:
506 if str(e) != _('no files to copy'):
507 if str(e) != _('no files to copy'):
507 raise e
508 raise e
508 else:
509 else:
509 nonormalfiles = True
510 nonormalfiles = True
510 result = 0
511 result = 0
511 finally:
512 finally:
512 restorematchfn()
513 restorematchfn()
513
514
514 # The first rename can cause our current working directory to be removed.
515 # The first rename can cause our current working directory to be removed.
515 # In that case there is nothing left to copy/rename so just quit.
516 # In that case there is nothing left to copy/rename so just quit.
516 try:
517 try:
517 repo.getcwd()
518 repo.getcwd()
518 except OSError:
519 except OSError:
519 return result
520 return result
520
521
521 try:
522 try:
522 try:
523 try:
523 # When we call orig below it creates the standins but we don't add
524 # When we call orig below it creates the standins but we don't add
524 # them to the dir state until later so lock during that time.
525 # them to the dir state until later so lock during that time.
525 wlock = repo.wlock()
526 wlock = repo.wlock()
526
527
527 manifest = repo[None].manifest()
528 manifest = repo[None].manifest()
528 def overridematch(ctx, pats=[], opts={}, globbed=False,
529 def overridematch(ctx, pats=[], opts={}, globbed=False,
529 default='relpath'):
530 default='relpath'):
530 newpats = []
531 newpats = []
531 # The patterns were previously mangled to add the standin
532 # The patterns were previously mangled to add the standin
532 # directory; we need to remove that now
533 # directory; we need to remove that now
533 for pat in pats:
534 for pat in pats:
534 if match_.patkind(pat) is None and lfutil.shortname in pat:
535 if match_.patkind(pat) is None and lfutil.shortname in pat:
535 newpats.append(pat.replace(lfutil.shortname, ''))
536 newpats.append(pat.replace(lfutil.shortname, ''))
536 else:
537 else:
537 newpats.append(pat)
538 newpats.append(pat)
538 match = oldmatch(ctx, newpats, opts, globbed, default)
539 match = oldmatch(ctx, newpats, opts, globbed, default)
539 m = copy.copy(match)
540 m = copy.copy(match)
540 lfile = lambda f: lfutil.standin(f) in manifest
541 lfile = lambda f: lfutil.standin(f) in manifest
541 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
542 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
542 m._fmap = set(m._files)
543 m._fmap = set(m._files)
543 m._always = False
544 m._always = False
544 origmatchfn = m.matchfn
545 origmatchfn = m.matchfn
545 m.matchfn = lambda f: (lfutil.isstandin(f) and
546 m.matchfn = lambda f: (lfutil.isstandin(f) and
546 (f in manifest) and
547 (f in manifest) and
547 origmatchfn(lfutil.splitstandin(f)) or
548 origmatchfn(lfutil.splitstandin(f)) or
548 None)
549 None)
549 return m
550 return m
550 oldmatch = installmatchfn(overridematch)
551 oldmatch = installmatchfn(overridematch)
551 listpats = []
552 listpats = []
552 for pat in pats:
553 for pat in pats:
553 if match_.patkind(pat) is not None:
554 if match_.patkind(pat) is not None:
554 listpats.append(pat)
555 listpats.append(pat)
555 else:
556 else:
556 listpats.append(makestandin(pat))
557 listpats.append(makestandin(pat))
557
558
558 try:
559 try:
559 origcopyfile = util.copyfile
560 origcopyfile = util.copyfile
560 copiedfiles = []
561 copiedfiles = []
561 def overridecopyfile(src, dest):
562 def overridecopyfile(src, dest):
562 if (lfutil.shortname in src and
563 if (lfutil.shortname in src and
563 dest.startswith(repo.wjoin(lfutil.shortname))):
564 dest.startswith(repo.wjoin(lfutil.shortname))):
564 destlfile = dest.replace(lfutil.shortname, '')
565 destlfile = dest.replace(lfutil.shortname, '')
565 if not opts['force'] and os.path.exists(destlfile):
566 if not opts['force'] and os.path.exists(destlfile):
566 raise IOError('',
567 raise IOError('',
567 _('destination largefile already exists'))
568 _('destination largefile already exists'))
568 copiedfiles.append((src, dest))
569 copiedfiles.append((src, dest))
569 origcopyfile(src, dest)
570 origcopyfile(src, dest)
570
571
571 util.copyfile = overridecopyfile
572 util.copyfile = overridecopyfile
572 result += orig(ui, repo, listpats, opts, rename)
573 result += orig(ui, repo, listpats, opts, rename)
573 finally:
574 finally:
574 util.copyfile = origcopyfile
575 util.copyfile = origcopyfile
575
576
576 lfdirstate = lfutil.openlfdirstate(ui, repo)
577 lfdirstate = lfutil.openlfdirstate(ui, repo)
577 for (src, dest) in copiedfiles:
578 for (src, dest) in copiedfiles:
578 if (lfutil.shortname in src and
579 if (lfutil.shortname in src and
579 dest.startswith(repo.wjoin(lfutil.shortname))):
580 dest.startswith(repo.wjoin(lfutil.shortname))):
580 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
581 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
581 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
582 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
582 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
583 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
583 if not os.path.isdir(destlfiledir):
584 if not os.path.isdir(destlfiledir):
584 os.makedirs(destlfiledir)
585 os.makedirs(destlfiledir)
585 if rename:
586 if rename:
586 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
587 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
587
588
588 # The file is gone, but this deletes any empty parent
589 # The file is gone, but this deletes any empty parent
589 # directories as a side-effect.
590 # directories as a side-effect.
590 util.unlinkpath(repo.wjoin(srclfile), True)
591 util.unlinkpath(repo.wjoin(srclfile), True)
591 lfdirstate.remove(srclfile)
592 lfdirstate.remove(srclfile)
592 else:
593 else:
593 util.copyfile(repo.wjoin(srclfile),
594 util.copyfile(repo.wjoin(srclfile),
594 repo.wjoin(destlfile))
595 repo.wjoin(destlfile))
595
596
596 lfdirstate.add(destlfile)
597 lfdirstate.add(destlfile)
597 lfdirstate.write()
598 lfdirstate.write()
598 except util.Abort, e:
599 except util.Abort, e:
599 if str(e) != _('no files to copy'):
600 if str(e) != _('no files to copy'):
600 raise e
601 raise e
601 else:
602 else:
602 nolfiles = True
603 nolfiles = True
603 finally:
604 finally:
604 restorematchfn()
605 restorematchfn()
605 wlock.release()
606 wlock.release()
606
607
607 if nolfiles and nonormalfiles:
608 if nolfiles and nonormalfiles:
608 raise util.Abort(_('no files to copy'))
609 raise util.Abort(_('no files to copy'))
609
610
610 return result
611 return result
611
612
612 # When the user calls revert, we have to be careful to not revert any
613 # When the user calls revert, we have to be careful to not revert any
613 # changes to other largefiles accidentally. This means we have to keep
614 # changes to other largefiles accidentally. This means we have to keep
614 # track of the largefiles that are being reverted so we only pull down
615 # track of the largefiles that are being reverted so we only pull down
615 # the necessary largefiles.
616 # the necessary largefiles.
616 #
617 #
617 # Standins are only updated (to match the hash of largefiles) before
618 # Standins are only updated (to match the hash of largefiles) before
618 # commits. Update the standins then run the original revert, changing
619 # commits. Update the standins then run the original revert, changing
619 # the matcher to hit standins instead of largefiles. Based on the
620 # the matcher to hit standins instead of largefiles. Based on the
620 # resulting standins update the largefiles.
621 # resulting standins update the largefiles.
621 def overriderevert(orig, ui, repo, *pats, **opts):
622 def overriderevert(orig, ui, repo, *pats, **opts):
622 # Because we put the standins in a bad state (by updating them)
623 # Because we put the standins in a bad state (by updating them)
623 # and then return them to a correct state we need to lock to
624 # and then return them to a correct state we need to lock to
624 # prevent others from changing them in their incorrect state.
625 # prevent others from changing them in their incorrect state.
625 wlock = repo.wlock()
626 wlock = repo.wlock()
626 try:
627 try:
627 lfdirstate = lfutil.openlfdirstate(ui, repo)
628 lfdirstate = lfutil.openlfdirstate(ui, repo)
628 (modified, added, removed, missing, unknown, ignored, clean) = \
629 (modified, added, removed, missing, unknown, ignored, clean) = \
629 lfutil.lfdirstatestatus(lfdirstate, repo, repo['.'].rev())
630 lfutil.lfdirstatestatus(lfdirstate, repo, repo['.'].rev())
630 lfdirstate.write()
631 lfdirstate.write()
631 for lfile in modified:
632 for lfile in modified:
632 lfutil.updatestandin(repo, lfutil.standin(lfile))
633 lfutil.updatestandin(repo, lfutil.standin(lfile))
633 for lfile in missing:
634 for lfile in missing:
634 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
635 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
635 os.unlink(repo.wjoin(lfutil.standin(lfile)))
636 os.unlink(repo.wjoin(lfutil.standin(lfile)))
636
637
637 oldstandins = lfutil.getstandinsstate(repo)
638 oldstandins = lfutil.getstandinsstate(repo)
638
639
639 def overridematch(ctx, pats=[], opts={}, globbed=False,
640 def overridematch(ctx, pats=[], opts={}, globbed=False,
640 default='relpath'):
641 default='relpath'):
641 match = oldmatch(ctx, pats, opts, globbed, default)
642 match = oldmatch(ctx, pats, opts, globbed, default)
642 m = copy.copy(match)
643 m = copy.copy(match)
643 def tostandin(f):
644 def tostandin(f):
644 if lfutil.standin(f) in ctx:
645 if lfutil.standin(f) in ctx:
645 return lfutil.standin(f)
646 return lfutil.standin(f)
646 elif lfutil.standin(f) in repo[None]:
647 elif lfutil.standin(f) in repo[None]:
647 return None
648 return None
648 return f
649 return f
649 m._files = [tostandin(f) for f in m._files]
650 m._files = [tostandin(f) for f in m._files]
650 m._files = [f for f in m._files if f is not None]
651 m._files = [f for f in m._files if f is not None]
651 m._fmap = set(m._files)
652 m._fmap = set(m._files)
652 m._always = False
653 m._always = False
653 origmatchfn = m.matchfn
654 origmatchfn = m.matchfn
654 def matchfn(f):
655 def matchfn(f):
655 if lfutil.isstandin(f):
656 if lfutil.isstandin(f):
656 return (origmatchfn(lfutil.splitstandin(f)) and
657 return (origmatchfn(lfutil.splitstandin(f)) and
657 (f in repo[None] or f in ctx))
658 (f in repo[None] or f in ctx))
658 return origmatchfn(f)
659 return origmatchfn(f)
659 m.matchfn = matchfn
660 m.matchfn = matchfn
660 return m
661 return m
661 oldmatch = installmatchfn(overridematch)
662 oldmatch = installmatchfn(overridematch)
662 try:
663 try:
663 orig(ui, repo, *pats, **opts)
664 orig(ui, repo, *pats, **opts)
664 finally:
665 finally:
665 restorematchfn()
666 restorematchfn()
666
667
667 newstandins = lfutil.getstandinsstate(repo)
668 newstandins = lfutil.getstandinsstate(repo)
668 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
669 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
669 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False)
670 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False)
670
671
671 finally:
672 finally:
672 wlock.release()
673 wlock.release()
673
674
674 def hgupdaterepo(orig, repo, node, overwrite):
675 def hgupdaterepo(orig, repo, node, overwrite):
675 if not overwrite:
676 if not overwrite:
676 # Only call updatelfiles on the standins that have changed to save time
677 # Only call updatelfiles on the standins that have changed to save time
677 oldstandins = lfutil.getstandinsstate(repo)
678 oldstandins = lfutil.getstandinsstate(repo)
678
679
679 result = orig(repo, node, overwrite)
680 result = orig(repo, node, overwrite)
680
681
681 filelist = None
682 filelist = None
682 if not overwrite:
683 if not overwrite:
683 newstandins = lfutil.getstandinsstate(repo)
684 newstandins = lfutil.getstandinsstate(repo)
684 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
685 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
685 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist)
686 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist)
686 return result
687 return result
687
688
688 def hgmerge(orig, repo, node, force=None, remind=True):
689 def hgmerge(orig, repo, node, force=None, remind=True):
689 result = orig(repo, node, force, remind)
690 result = orig(repo, node, force, remind)
690 lfcommands.updatelfiles(repo.ui, repo)
691 lfcommands.updatelfiles(repo.ui, repo)
691 return result
692 return result
692
693
693 # When we rebase a repository with remotely changed largefiles, we need to
694 # When we rebase a repository with remotely changed largefiles, we need to
694 # take some extra care so that the largefiles are correctly updated in the
695 # take some extra care so that the largefiles are correctly updated in the
695 # working copy
696 # working copy
696 def overridepull(orig, ui, repo, source=None, **opts):
697 def overridepull(orig, ui, repo, source=None, **opts):
697 revsprepull = len(repo)
698 revsprepull = len(repo)
698 if not source:
699 if not source:
699 source = 'default'
700 source = 'default'
700 repo.lfpullsource = source
701 repo.lfpullsource = source
701 if opts.get('rebase', False):
702 if opts.get('rebase', False):
702 repo._isrebasing = True
703 repo._isrebasing = True
703 try:
704 try:
704 if opts.get('update'):
705 if opts.get('update'):
705 del opts['update']
706 del opts['update']
706 ui.debug('--update and --rebase are not compatible, ignoring '
707 ui.debug('--update and --rebase are not compatible, ignoring '
707 'the update flag\n')
708 'the update flag\n')
708 del opts['rebase']
709 del opts['rebase']
709 origpostincoming = commands.postincoming
710 origpostincoming = commands.postincoming
710 def _dummy(*args, **kwargs):
711 def _dummy(*args, **kwargs):
711 pass
712 pass
712 commands.postincoming = _dummy
713 commands.postincoming = _dummy
713 try:
714 try:
714 result = commands.pull(ui, repo, source, **opts)
715 result = commands.pull(ui, repo, source, **opts)
715 finally:
716 finally:
716 commands.postincoming = origpostincoming
717 commands.postincoming = origpostincoming
717 revspostpull = len(repo)
718 revspostpull = len(repo)
718 if revspostpull > revsprepull:
719 if revspostpull > revsprepull:
719 result = result or rebase.rebase(ui, repo)
720 result = result or rebase.rebase(ui, repo)
720 finally:
721 finally:
721 repo._isrebasing = False
722 repo._isrebasing = False
722 else:
723 else:
723 result = orig(ui, repo, source, **opts)
724 result = orig(ui, repo, source, **opts)
724 revspostpull = len(repo)
725 revspostpull = len(repo)
725 lfrevs = opts.get('lfrev', [])
726 lfrevs = opts.get('lfrev', [])
726 if opts.get('all_largefiles'):
727 if opts.get('all_largefiles'):
727 lfrevs.append('pulled()')
728 lfrevs.append('pulled()')
728 if lfrevs and revspostpull > revsprepull:
729 if lfrevs and revspostpull > revsprepull:
729 numcached = 0
730 numcached = 0
730 repo.firstpulled = revsprepull # for pulled() revset expression
731 repo.firstpulled = revsprepull # for pulled() revset expression
731 try:
732 try:
732 for rev in scmutil.revrange(repo, lfrevs):
733 for rev in scmutil.revrange(repo, lfrevs):
733 ui.note(_('pulling largefiles for revision %s\n') % rev)
734 ui.note(_('pulling largefiles for revision %s\n') % rev)
734 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
735 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
735 numcached += len(cached)
736 numcached += len(cached)
736 finally:
737 finally:
737 del repo.firstpulled
738 del repo.firstpulled
738 ui.status(_("%d largefiles cached\n") % numcached)
739 ui.status(_("%d largefiles cached\n") % numcached)
739 return result
740 return result
740
741
741 def pulledrevsetsymbol(repo, subset, x):
742 def pulledrevsetsymbol(repo, subset, x):
742 """``pulled()``
743 """``pulled()``
743 Changesets that just has been pulled.
744 Changesets that just has been pulled.
744
745
745 Only available with largefiles from pull --lfrev expressions.
746 Only available with largefiles from pull --lfrev expressions.
746
747
747 .. container:: verbose
748 .. container:: verbose
748
749
749 Some examples:
750 Some examples:
750
751
751 - pull largefiles for all new changesets::
752 - pull largefiles for all new changesets::
752
753
753 hg pull -lfrev "pulled()"
754 hg pull -lfrev "pulled()"
754
755
755 - pull largefiles for all new branch heads::
756 - pull largefiles for all new branch heads::
756
757
757 hg pull -lfrev "head(pulled()) and not closed()"
758 hg pull -lfrev "head(pulled()) and not closed()"
758
759
759 """
760 """
760
761
761 try:
762 try:
762 firstpulled = repo.firstpulled
763 firstpulled = repo.firstpulled
763 except AttributeError:
764 except AttributeError:
764 raise util.Abort(_("pulled() only available in --lfrev"))
765 raise util.Abort(_("pulled() only available in --lfrev"))
765 return revset.baseset([r for r in subset if r >= firstpulled])
766 return revset.baseset([r for r in subset if r >= firstpulled])
766
767
767 def overrideclone(orig, ui, source, dest=None, **opts):
768 def overrideclone(orig, ui, source, dest=None, **opts):
768 d = dest
769 d = dest
769 if d is None:
770 if d is None:
770 d = hg.defaultdest(source)
771 d = hg.defaultdest(source)
771 if opts.get('all_largefiles') and not hg.islocal(d):
772 if opts.get('all_largefiles') and not hg.islocal(d):
772 raise util.Abort(_(
773 raise util.Abort(_(
773 '--all-largefiles is incompatible with non-local destination %s') %
774 '--all-largefiles is incompatible with non-local destination %s') %
774 d)
775 d)
775
776
776 return orig(ui, source, dest, **opts)
777 return orig(ui, source, dest, **opts)
777
778
778 def hgclone(orig, ui, opts, *args, **kwargs):
779 def hgclone(orig, ui, opts, *args, **kwargs):
779 result = orig(ui, opts, *args, **kwargs)
780 result = orig(ui, opts, *args, **kwargs)
780
781
781 if result is not None:
782 if result is not None:
782 sourcerepo, destrepo = result
783 sourcerepo, destrepo = result
783 repo = destrepo.local()
784 repo = destrepo.local()
784
785
785 # Caching is implicitly limited to 'rev' option, since the dest repo was
786 # Caching is implicitly limited to 'rev' option, since the dest repo was
786 # truncated at that point. The user may expect a download count with
787 # truncated at that point. The user may expect a download count with
787 # this option, so attempt whether or not this is a largefile repo.
788 # this option, so attempt whether or not this is a largefile repo.
788 if opts.get('all_largefiles'):
789 if opts.get('all_largefiles'):
789 success, missing = lfcommands.downloadlfiles(ui, repo, None)
790 success, missing = lfcommands.downloadlfiles(ui, repo, None)
790
791
791 if missing != 0:
792 if missing != 0:
792 return None
793 return None
793
794
794 return result
795 return result
795
796
796 def overriderebase(orig, ui, repo, **opts):
797 def overriderebase(orig, ui, repo, **opts):
797 repo._isrebasing = True
798 repo._isrebasing = True
798 try:
799 try:
799 return orig(ui, repo, **opts)
800 return orig(ui, repo, **opts)
800 finally:
801 finally:
801 repo._isrebasing = False
802 repo._isrebasing = False
802
803
803 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
804 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
804 prefix=None, mtime=None, subrepos=None):
805 prefix=None, mtime=None, subrepos=None):
805 # No need to lock because we are only reading history and
806 # No need to lock because we are only reading history and
806 # largefile caches, neither of which are modified.
807 # largefile caches, neither of which are modified.
807 lfcommands.cachelfiles(repo.ui, repo, node)
808 lfcommands.cachelfiles(repo.ui, repo, node)
808
809
809 if kind not in archival.archivers:
810 if kind not in archival.archivers:
810 raise util.Abort(_("unknown archive type '%s'") % kind)
811 raise util.Abort(_("unknown archive type '%s'") % kind)
811
812
812 ctx = repo[node]
813 ctx = repo[node]
813
814
814 if kind == 'files':
815 if kind == 'files':
815 if prefix:
816 if prefix:
816 raise util.Abort(
817 raise util.Abort(
817 _('cannot give prefix when archiving to files'))
818 _('cannot give prefix when archiving to files'))
818 else:
819 else:
819 prefix = archival.tidyprefix(dest, kind, prefix)
820 prefix = archival.tidyprefix(dest, kind, prefix)
820
821
821 def write(name, mode, islink, getdata):
822 def write(name, mode, islink, getdata):
822 if matchfn and not matchfn(name):
823 if matchfn and not matchfn(name):
823 return
824 return
824 data = getdata()
825 data = getdata()
825 if decode:
826 if decode:
826 data = repo.wwritedata(name, data)
827 data = repo.wwritedata(name, data)
827 archiver.addfile(prefix + name, mode, islink, data)
828 archiver.addfile(prefix + name, mode, islink, data)
828
829
829 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
830 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
830
831
831 if repo.ui.configbool("ui", "archivemeta", True):
832 if repo.ui.configbool("ui", "archivemeta", True):
832 def metadata():
833 def metadata():
833 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
834 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
834 hex(repo.changelog.node(0)), hex(node), ctx.branch())
835 hex(repo.changelog.node(0)), hex(node), ctx.branch())
835
836
836 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
837 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
837 if repo.tagtype(t) == 'global')
838 if repo.tagtype(t) == 'global')
838 if not tags:
839 if not tags:
839 repo.ui.pushbuffer()
840 repo.ui.pushbuffer()
840 opts = {'template': '{latesttag}\n{latesttagdistance}',
841 opts = {'template': '{latesttag}\n{latesttagdistance}',
841 'style': '', 'patch': None, 'git': None}
842 'style': '', 'patch': None, 'git': None}
842 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
843 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
843 ltags, dist = repo.ui.popbuffer().split('\n')
844 ltags, dist = repo.ui.popbuffer().split('\n')
844 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
845 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
845 tags += 'latesttagdistance: %s\n' % dist
846 tags += 'latesttagdistance: %s\n' % dist
846
847
847 return base + tags
848 return base + tags
848
849
849 write('.hg_archival.txt', 0644, False, metadata)
850 write('.hg_archival.txt', 0644, False, metadata)
850
851
851 for f in ctx:
852 for f in ctx:
852 ff = ctx.flags(f)
853 ff = ctx.flags(f)
853 getdata = ctx[f].data
854 getdata = ctx[f].data
854 if lfutil.isstandin(f):
855 if lfutil.isstandin(f):
855 path = lfutil.findfile(repo, getdata().strip())
856 path = lfutil.findfile(repo, getdata().strip())
856 if path is None:
857 if path is None:
857 raise util.Abort(
858 raise util.Abort(
858 _('largefile %s not found in repo store or system cache')
859 _('largefile %s not found in repo store or system cache')
859 % lfutil.splitstandin(f))
860 % lfutil.splitstandin(f))
860 f = lfutil.splitstandin(f)
861 f = lfutil.splitstandin(f)
861
862
862 def getdatafn():
863 def getdatafn():
863 fd = None
864 fd = None
864 try:
865 try:
865 fd = open(path, 'rb')
866 fd = open(path, 'rb')
866 return fd.read()
867 return fd.read()
867 finally:
868 finally:
868 if fd:
869 if fd:
869 fd.close()
870 fd.close()
870
871
871 getdata = getdatafn
872 getdata = getdatafn
872 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
873 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
873
874
874 if subrepos:
875 if subrepos:
875 for subpath in sorted(ctx.substate):
876 for subpath in sorted(ctx.substate):
876 sub = ctx.sub(subpath)
877 sub = ctx.sub(subpath)
877 submatch = match_.narrowmatcher(subpath, matchfn)
878 submatch = match_.narrowmatcher(subpath, matchfn)
878 sub.archive(repo.ui, archiver, prefix, submatch)
879 sub.archive(repo.ui, archiver, prefix, submatch)
879
880
880 archiver.done()
881 archiver.done()
881
882
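overridearchive and hgsubrepoarchive both rewrite standins on the fly: a standin is a small tracked placeholder whose content is just the hash of the real largefile, and lfutil.findfile resolves that hash to a file in the local store or the user cache. A condensed sketch of that lookup, using the same helpers as the code above:

    def largefilecontent(repo, ctx, standin):
        # the standin's content is the largefile's hash
        hash = ctx[standin].data().strip()
        path = lfutil.findfile(repo, hash)   # repo store or system cache
        if path is None:
            raise util.Abort(_('largefile %s not found in repo store or '
                               'system cache') % lfutil.splitstandin(standin))
        fd = open(path, 'rb')
        try:
            return fd.read()
        finally:
            fd.close()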
882 def hgsubrepoarchive(orig, repo, ui, archiver, prefix, match=None):
883 def hgsubrepoarchive(orig, repo, ui, archiver, prefix, match=None):
883 repo._get(repo._state + ('hg',))
884 repo._get(repo._state + ('hg',))
884 rev = repo._state[1]
885 rev = repo._state[1]
885 ctx = repo._repo[rev]
886 ctx = repo._repo[rev]
886
887
887 lfcommands.cachelfiles(ui, repo._repo, ctx.node())
888 lfcommands.cachelfiles(ui, repo._repo, ctx.node())
888
889
889 def write(name, mode, islink, getdata):
890 def write(name, mode, islink, getdata):
890 # At this point, the standin has been replaced with the largefile name,
891 # At this point, the standin has been replaced with the largefile name,
891 # so the normal matcher works here without the lfutil variants.
892 # so the normal matcher works here without the lfutil variants.
892 if match and not match(f):
893 if match and not match(f):
893 return
894 return
894 data = getdata()
895 data = getdata()
895
896
896 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
897 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
897
898
898 for f in ctx:
899 for f in ctx:
899 ff = ctx.flags(f)
900 ff = ctx.flags(f)
900 getdata = ctx[f].data
901 getdata = ctx[f].data
901 if lfutil.isstandin(f):
902 if lfutil.isstandin(f):
902 path = lfutil.findfile(repo._repo, getdata().strip())
903 path = lfutil.findfile(repo._repo, getdata().strip())
903 if path is None:
904 if path is None:
904 raise util.Abort(
905 raise util.Abort(
905 _('largefile %s not found in repo store or system cache')
906 _('largefile %s not found in repo store or system cache')
906 % lfutil.splitstandin(f))
907 % lfutil.splitstandin(f))
907 f = lfutil.splitstandin(f)
908 f = lfutil.splitstandin(f)
908
909
909 def getdatafn():
910 def getdatafn():
910 fd = None
911 fd = None
911 try:
912 try:
912 fd = open(os.path.join(prefix, path), 'rb')
913 fd = open(os.path.join(prefix, path), 'rb')
913 return fd.read()
914 return fd.read()
914 finally:
915 finally:
915 if fd:
916 if fd:
916 fd.close()
917 fd.close()
917
918
918 getdata = getdatafn
919 getdata = getdatafn
919
920
920 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
921 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
921
922
922 for subpath in sorted(ctx.substate):
923 for subpath in sorted(ctx.substate):
923 sub = ctx.sub(subpath)
924 sub = ctx.sub(subpath)
924 submatch = match_.narrowmatcher(subpath, match)
925 submatch = match_.narrowmatcher(subpath, match)
925 sub.archive(ui, archiver, os.path.join(prefix, repo._path) + '/',
926 sub.archive(ui, archiver, os.path.join(prefix, repo._path) + '/',
926 submatch)
927 submatch)
927
928
928 # If a largefile is modified, the change is not reflected in its
929 # If a largefile is modified, the change is not reflected in its
929 # standin until a commit. cmdutil.bailifchanged() raises an exception
930 # standin until a commit. cmdutil.bailifchanged() raises an exception
930 # if the repo has uncommitted changes. Wrap it to also check if
931 # if the repo has uncommitted changes. Wrap it to also check if
931 # largefiles were changed. This is used by bisect and backout.
932 # largefiles were changed. This is used by bisect and backout.
932 def overridebailifchanged(orig, repo):
933 def overridebailifchanged(orig, repo):
933 orig(repo)
934 orig(repo)
934 repo.lfstatus = True
935 repo.lfstatus = True
935 modified, added, removed, deleted = repo.status()[:4]
936 modified, added, removed, deleted = repo.status()[:4]
936 repo.lfstatus = False
937 repo.lfstatus = False
937 if modified or added or removed or deleted:
938 if modified or added or removed or deleted:
938 raise util.Abort(_('uncommitted changes'))
939 raise util.Abort(_('uncommitted changes'))
939
940
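Several of the wrappers here (bailifchanged, fetch, forget, summary) rely on the same idiom: setting repo.lfstatus = True makes repo.status() report largefile changes as well, and the flag is reset afterwards. Purely as an illustration, the idiom could be captured in a context manager; the extension itself keeps the explicit try/finally:

    import contextlib

    @contextlib.contextmanager
    def lfstatus(repo):
        repo.lfstatus = True
        try:
            yield
        finally:
            repo.lfstatus = False

    # usage sketch:
    #   with lfstatus(repo):
    #       modified, added, removed, deleted = repo.status()[:4]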
940 # Fetch doesn't use cmdutil.bailifchanged so override it to add the check
941 # Fetch doesn't use cmdutil.bailifchanged so override it to add the check
941 def overridefetch(orig, ui, repo, *pats, **opts):
942 def overridefetch(orig, ui, repo, *pats, **opts):
942 repo.lfstatus = True
943 repo.lfstatus = True
943 modified, added, removed, deleted = repo.status()[:4]
944 modified, added, removed, deleted = repo.status()[:4]
944 repo.lfstatus = False
945 repo.lfstatus = False
945 if modified or added or removed or deleted:
946 if modified or added or removed or deleted:
946 raise util.Abort(_('uncommitted changes'))
947 raise util.Abort(_('uncommitted changes'))
947 return orig(ui, repo, *pats, **opts)
948 return orig(ui, repo, *pats, **opts)
948
949
949 def overrideforget(orig, ui, repo, *pats, **opts):
950 def overrideforget(orig, ui, repo, *pats, **opts):
950 installnormalfilesmatchfn(repo[None].manifest())
951 installnormalfilesmatchfn(repo[None].manifest())
951 result = orig(ui, repo, *pats, **opts)
952 result = orig(ui, repo, *pats, **opts)
952 restorematchfn()
953 restorematchfn()
953 m = scmutil.match(repo[None], pats, opts)
954 m = scmutil.match(repo[None], pats, opts)
954
955
955 try:
956 try:
956 repo.lfstatus = True
957 repo.lfstatus = True
957 s = repo.status(match=m, clean=True)
958 s = repo.status(match=m, clean=True)
958 finally:
959 finally:
959 repo.lfstatus = False
960 repo.lfstatus = False
960 forget = sorted(s[0] + s[1] + s[3] + s[6])
961 forget = sorted(s[0] + s[1] + s[3] + s[6])
961 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
962 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
962
963
963 for f in forget:
964 for f in forget:
964 if lfutil.standin(f) not in repo.dirstate and not \
965 if lfutil.standin(f) not in repo.dirstate and not \
965 os.path.isdir(m.rel(lfutil.standin(f))):
966 os.path.isdir(m.rel(lfutil.standin(f))):
966 ui.warn(_('not removing %s: file is already untracked\n')
967 ui.warn(_('not removing %s: file is already untracked\n')
967 % m.rel(f))
968 % m.rel(f))
968 result = 1
969 result = 1
969
970
970 for f in forget:
971 for f in forget:
971 if ui.verbose or not m.exact(f):
972 if ui.verbose or not m.exact(f):
972 ui.status(_('removing %s\n') % m.rel(f))
973 ui.status(_('removing %s\n') % m.rel(f))
973
974
974 # Need to lock because standin files are deleted then removed from the
975 # Need to lock because standin files are deleted then removed from the
975 # repository and we could race in-between.
976 # repository and we could race in-between.
976 wlock = repo.wlock()
977 wlock = repo.wlock()
977 try:
978 try:
978 lfdirstate = lfutil.openlfdirstate(ui, repo)
979 lfdirstate = lfutil.openlfdirstate(ui, repo)
979 for f in forget:
980 for f in forget:
980 if lfdirstate[f] == 'a':
981 if lfdirstate[f] == 'a':
981 lfdirstate.drop(f)
982 lfdirstate.drop(f)
982 else:
983 else:
983 lfdirstate.remove(f)
984 lfdirstate.remove(f)
984 lfdirstate.write()
985 lfdirstate.write()
985 standins = [lfutil.standin(f) for f in forget]
986 standins = [lfutil.standin(f) for f in forget]
986 for f in standins:
987 for f in standins:
987 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
988 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
988 repo[None].forget(standins)
989 repo[None].forget(standins)
989 finally:
990 finally:
990 wlock.release()
991 wlock.release()
991
992
992 return result
993 return result
993
994
994 def outgoinghook(ui, repo, other, opts, missing):
995 def outgoinghook(ui, repo, other, opts, missing):
995 if opts.pop('large', None):
996 if opts.pop('large', None):
996 toupload = set()
997 toupload = set()
997 lfutil.getlfilestoupload(repo, missing,
998 lfutil.getlfilestoupload(repo, missing,
998 lambda fn, lfhash: toupload.add(fn))
999 lambda fn, lfhash: toupload.add(fn))
999 if not toupload:
1000 if not toupload:
1000 ui.status(_('largefiles: no files to upload\n'))
1001 ui.status(_('largefiles: no files to upload\n'))
1001 else:
1002 else:
1002 ui.status(_('largefiles to upload:\n'))
1003 ui.status(_('largefiles to upload:\n'))
1003 for file in sorted(toupload):
1004 for file in sorted(toupload):
1004 ui.status(lfutil.splitstandin(file) + '\n')
1005 ui.status(lfutil.splitstandin(file) + '\n')
1005 ui.status('\n')
1006 ui.status('\n')
1006
1007
1007 def summaryremotehook(ui, repo, opts, changes):
1008 def summaryremotehook(ui, repo, opts, changes):
1008 largeopt = opts.get('large', False)
1009 largeopt = opts.get('large', False)
1009 if changes is None:
1010 if changes is None:
1010 if largeopt:
1011 if largeopt:
1011 return (False, True) # only outgoing check is needed
1012 return (False, True) # only outgoing check is needed
1012 else:
1013 else:
1013 return (False, False)
1014 return (False, False)
1014 elif largeopt:
1015 elif largeopt:
1015 url, branch, peer, outgoing = changes[1]
1016 url, branch, peer, outgoing = changes[1]
1016 if peer is None:
1017 if peer is None:
1017 # i18n: column positioning for "hg summary"
1018 # i18n: column positioning for "hg summary"
1018 ui.status(_('largefiles: (no remote repo)\n'))
1019 ui.status(_('largefiles: (no remote repo)\n'))
1019 return
1020 return
1020
1021
1021 toupload = set()
1022 toupload = set()
1022 lfutil.getlfilestoupload(repo, outgoing.missing,
1023 lfutil.getlfilestoupload(repo, outgoing.missing,
1023 lambda fn, lfhash: toupload.add(fn))
1024 lambda fn, lfhash: toupload.add(fn))
1024 if not toupload:
1025 if not toupload:
1025 # i18n: column positioning for "hg summary"
1026 # i18n: column positioning for "hg summary"
1026 ui.status(_('largefiles: (no files to upload)\n'))
1027 ui.status(_('largefiles: (no files to upload)\n'))
1027 else:
1028 else:
1028 # i18n: column positioning for "hg summary"
1029 # i18n: column positioning for "hg summary"
1029 ui.status(_('largefiles: %d to upload\n') % len(toupload))
1030 ui.status(_('largefiles: %d to upload\n') % len(toupload))
1030
1031
1031 def overridesummary(orig, ui, repo, *pats, **opts):
1032 def overridesummary(orig, ui, repo, *pats, **opts):
1032 try:
1033 try:
1033 repo.lfstatus = True
1034 repo.lfstatus = True
1034 orig(ui, repo, *pats, **opts)
1035 orig(ui, repo, *pats, **opts)
1035 finally:
1036 finally:
1036 repo.lfstatus = False
1037 repo.lfstatus = False
1037
1038
1038 def scmutiladdremove(orig, repo, pats=[], opts={}, dry_run=None,
1039 def scmutiladdremove(orig, repo, pats=[], opts={}, dry_run=None,
1039 similarity=None):
1040 similarity=None):
1040 if not lfutil.islfilesrepo(repo):
1041 if not lfutil.islfilesrepo(repo):
1041 return orig(repo, pats, opts, dry_run, similarity)
1042 return orig(repo, pats, opts, dry_run, similarity)
1042 # Get the list of missing largefiles so we can remove them
1043 # Get the list of missing largefiles so we can remove them
1043 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1044 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1044 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
1045 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
1045 False, False)
1046 False, False)
1046 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
1047 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
1047
1048
1048 # Call into the normal remove code, but leave the removal of the standin
1049 # Call into the normal remove code, but leave the removal of the standin
1049 # to the original addremove. Monkey patching here makes sure
1050 # to the original addremove. Monkey patching here makes sure
1050 # we don't remove the standin in the largefiles code, preventing a very
1051 # we don't remove the standin in the largefiles code, preventing a very
1051 # confused state later.
1052 # confused state later.
1052 if missing:
1053 if missing:
1053 m = [repo.wjoin(f) for f in missing]
1054 m = [repo.wjoin(f) for f in missing]
1054 repo._isaddremove = True
1055 repo._isaddremove = True
1055 removelargefiles(repo.ui, repo, *m, **opts)
1056 removelargefiles(repo.ui, repo, *m, **opts)
1056 repo._isaddremove = False
1057 repo._isaddremove = False
1057 # Call into the normal add code, and any files that *should* be added as
1058 # Call into the normal add code, and any files that *should* be added as
1058 # largefiles will be
1059 # largefiles will be
1059 addlargefiles(repo.ui, repo, *pats, **opts)
1060 addlargefiles(repo.ui, repo, *pats, **opts)
1060 # Now that we've handled largefiles, hand off to the original addremove
1061 # Now that we've handled largefiles, hand off to the original addremove
1061 # function to take care of the rest. Make sure it doesn't do anything with
1062 # function to take care of the rest. Make sure it doesn't do anything with
1062 # largefiles by installing a matcher that will ignore them.
1063 # largefiles by installing a matcher that will ignore them.
1063 installnormalfilesmatchfn(repo[None].manifest())
1064 installnormalfilesmatchfn(repo[None].manifest())
1064 result = orig(repo, pats, opts, dry_run, similarity)
1065 result = orig(repo, pats, opts, dry_run, similarity)
1065 restorematchfn()
1066 restorematchfn()
1066 return result
1067 return result
1067
1068
1068 # Calling purge with --all will cause the largefiles to be deleted.
1069 # Calling purge with --all will cause the largefiles to be deleted.
1069 # Override repo.status to prevent this from happening.
1070 # Override repo.status to prevent this from happening.
1070 def overridepurge(orig, ui, repo, *dirs, **opts):
1071 def overridepurge(orig, ui, repo, *dirs, **opts):
1071 # XXX largefile status is buggy when used on a repo proxy.
1072 # XXX largefile status is buggy when used on a repo proxy.
1072 # XXX this needs to be investigated.
1073 # XXX this needs to be investigated.
1073 repo = repo.unfiltered()
1074 repo = repo.unfiltered()
1074 oldstatus = repo.status
1075 oldstatus = repo.status
1075 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1076 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1076 clean=False, unknown=False, listsubrepos=False):
1077 clean=False, unknown=False, listsubrepos=False):
1077 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1078 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1078 listsubrepos)
1079 listsubrepos)
1079 lfdirstate = lfutil.openlfdirstate(ui, repo)
1080 lfdirstate = lfutil.openlfdirstate(ui, repo)
1080 modified, added, removed, deleted, unknown, ignored, clean = r
1081 modified, added, removed, deleted, unknown, ignored, clean = r
1081 unknown = [f for f in unknown if lfdirstate[f] == '?']
1082 unknown = [f for f in unknown if lfdirstate[f] == '?']
1082 ignored = [f for f in ignored if lfdirstate[f] == '?']
1083 ignored = [f for f in ignored if lfdirstate[f] == '?']
1083 return modified, added, removed, deleted, unknown, ignored, clean
1084 return modified, added, removed, deleted, unknown, ignored, clean
1084 repo.status = overridestatus
1085 repo.status = overridestatus
1085 orig(ui, repo, *dirs, **opts)
1086 orig(ui, repo, *dirs, **opts)
1086 repo.status = oldstatus
1087 repo.status = oldstatus
1087
1088
1088 def overriderollback(orig, ui, repo, **opts):
1089 def overriderollback(orig, ui, repo, **opts):
1089 result = orig(ui, repo, **opts)
1090 result = orig(ui, repo, **opts)
1090 merge.update(repo, node=None, branchmerge=False, force=True,
1091 merge.update(repo, node=None, branchmerge=False, force=True,
1091 partial=lfutil.isstandin)
1092 partial=lfutil.isstandin)
1092 wlock = repo.wlock()
1093 wlock = repo.wlock()
1093 try:
1094 try:
1094 lfdirstate = lfutil.openlfdirstate(ui, repo)
1095 lfdirstate = lfutil.openlfdirstate(ui, repo)
1095 lfiles = lfutil.listlfiles(repo)
1096 lfiles = lfutil.listlfiles(repo)
1096 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
1097 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
1097 for file in lfiles:
1098 for file in lfiles:
1098 if file in oldlfiles:
1099 if file in oldlfiles:
1099 lfdirstate.normallookup(file)
1100 lfdirstate.normallookup(file)
1100 else:
1101 else:
1101 lfdirstate.add(file)
1102 lfdirstate.add(file)
1102 lfdirstate.write()
1103 lfdirstate.write()
1103 finally:
1104 finally:
1104 wlock.release()
1105 wlock.release()
1105 return result
1106 return result
1106
1107
1107 def overridetransplant(orig, ui, repo, *revs, **opts):
1108 def overridetransplant(orig, ui, repo, *revs, **opts):
1108 try:
1109 try:
1109 oldstandins = lfutil.getstandinsstate(repo)
1110 oldstandins = lfutil.getstandinsstate(repo)
1110 repo._istransplanting = True
1111 repo._istransplanting = True
1111 result = orig(ui, repo, *revs, **opts)
1112 result = orig(ui, repo, *revs, **opts)
1112 newstandins = lfutil.getstandinsstate(repo)
1113 newstandins = lfutil.getstandinsstate(repo)
1113 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1114 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1114 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1115 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1115 printmessage=True)
1116 printmessage=True)
1116 finally:
1117 finally:
1117 repo._istransplanting = False
1118 repo._istransplanting = False
1118 return result
1119 return result
1119
1120
1120 def overridecat(orig, ui, repo, file1, *pats, **opts):
1121 def overridecat(orig, ui, repo, file1, *pats, **opts):
1121 ctx = scmutil.revsingle(repo, opts.get('rev'))
1122 ctx = scmutil.revsingle(repo, opts.get('rev'))
1122 err = 1
1123 err = 1
1123 notbad = set()
1124 notbad = set()
1124 m = scmutil.match(ctx, (file1,) + pats, opts)
1125 m = scmutil.match(ctx, (file1,) + pats, opts)
1125 origmatchfn = m.matchfn
1126 origmatchfn = m.matchfn
1126 def lfmatchfn(f):
1127 def lfmatchfn(f):
1127 if origmatchfn(f):
1128 if origmatchfn(f):
1128 return True
1129 return True
1129 lf = lfutil.splitstandin(f)
1130 lf = lfutil.splitstandin(f)
1130 if lf is None:
1131 if lf is None:
1131 return False
1132 return False
1132 notbad.add(lf)
1133 notbad.add(lf)
1133 return origmatchfn(lf)
1134 return origmatchfn(lf)
1134 m.matchfn = lfmatchfn
1135 m.matchfn = lfmatchfn
1135 origbadfn = m.bad
1136 origbadfn = m.bad
1136 def lfbadfn(f, msg):
1137 def lfbadfn(f, msg):
1137 if not f in notbad:
1138 if not f in notbad:
1138 origbadfn(f, msg)
1139 origbadfn(f, msg)
1139 m.bad = lfbadfn
1140 m.bad = lfbadfn
1140 for f in ctx.walk(m):
1141 for f in ctx.walk(m):
1141 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1142 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1142 pathname=f)
1143 pathname=f)
1143 lf = lfutil.splitstandin(f)
1144 lf = lfutil.splitstandin(f)
1144 if lf is None or origmatchfn(f):
1145 if lf is None or origmatchfn(f):
1145 # duplicating unreachable code from commands.cat
1146 # duplicating unreachable code from commands.cat
1146 data = ctx[f].data()
1147 data = ctx[f].data()
1147 if opts.get('decode'):
1148 if opts.get('decode'):
1148 data = repo.wwritedata(f, data)
1149 data = repo.wwritedata(f, data)
1149 fp.write(data)
1150 fp.write(data)
1150 else:
1151 else:
1151 hash = lfutil.readstandin(repo, lf, ctx.rev())
1152 hash = lfutil.readstandin(repo, lf, ctx.rev())
1152 if not lfutil.inusercache(repo.ui, hash):
1153 if not lfutil.inusercache(repo.ui, hash):
1153 store = basestore._openstore(repo)
1154 store = basestore._openstore(repo)
1154 success, missing = store.get([(lf, hash)])
1155 success, missing = store.get([(lf, hash)])
1155 if len(success) != 1:
1156 if len(success) != 1:
1156 raise util.Abort(
1157 raise util.Abort(
1157 _('largefile %s is not in cache and could not be '
1158 _('largefile %s is not in cache and could not be '
1158 'downloaded') % lf)
1159 'downloaded') % lf)
1159 path = lfutil.usercachepath(repo.ui, hash)
1160 path = lfutil.usercachepath(repo.ui, hash)
1160 fpin = open(path, "rb")
1161 fpin = open(path, "rb")
1161 for chunk in util.filechunkiter(fpin, 128 * 1024):
1162 for chunk in util.filechunkiter(fpin, 128 * 1024):
1162 fp.write(chunk)
1163 fp.write(chunk)
1163 fpin.close()
1164 fpin.close()
1164 fp.close()
1165 fp.close()
1165 err = 0
1166 err = 0
1166 return err
1167 return err
1167
1168
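When the standin matches but the largefile itself is not checked out, overridecat streams the file from the user cache in 128 KiB chunks via util.filechunkiter rather than reading it into memory in one go; largefiles can easily be hundreds of megabytes. A minimal sketch of that streaming copy, assuming the lfutil/util helpers behave as used above:

    def copyfromusercache(ui, hash, outfp):
        path = lfutil.usercachepath(ui, hash)
        fpin = open(path, 'rb')
        try:
            # 128 * 1024 matches the chunk size used in overridecat
            for chunk in util.filechunkiter(fpin, 128 * 1024):
                outfp.write(chunk)
        finally:
            fpin.close()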
1168 def mercurialsinkbefore(orig, sink):
1169 def mercurialsinkbefore(orig, sink):
1169 sink.repo._isconverting = True
1170 sink.repo._isconverting = True
1170 orig(sink)
1171 orig(sink)
1171
1172
1172 def mercurialsinkafter(orig, sink):
1173 def mercurialsinkafter(orig, sink):
1173 sink.repo._isconverting = False
1174 sink.repo._isconverting = False
1174 orig(sink)
1175 orig(sink)
@@ -1,1189 +1,1156 b''
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import struct
8 import struct
9
9
10 from node import nullid, nullrev, hex, bin
10 from node import nullid, nullrev, hex, bin
11 from i18n import _
11 from i18n import _
12 from mercurial import obsolete
12 from mercurial import obsolete
13 import error, util, filemerge, copies, subrepo, worker, dicthelpers
13 import error, util, filemerge, copies, subrepo, worker, dicthelpers
14 import errno, os, shutil
14 import errno, os, shutil
15
15
16 _pack = struct.pack
16 _pack = struct.pack
17 _unpack = struct.unpack
17 _unpack = struct.unpack
18
18
19 def _droponode(data):
19 def _droponode(data):
20 # used for compatibility for v1
20 # used for compatibility for v1
21 bits = data.split("\0")
21 bits = data.split("\0")
22 bits = bits[:-2] + bits[-1:]
22 bits = bits[:-2] + bits[-1:]
23 return "\0".join(bits)
23 return "\0".join(bits)
24
24
25 class mergestate(object):
25 class mergestate(object):
26 '''track 3-way merge state of individual files
26 '''track 3-way merge state of individual files
27
27
28 it is stored on disk when needed. Two files are used: one with an old
28 it is stored on disk when needed. Two files are used: one with an old
29 format and one with a new format. Both contain similar data, but the new
29 format and one with a new format. Both contain similar data, but the new
30 format can store new kinds of fields.
30 format can store new kinds of fields.
31
31
32 The current new format is a list of arbitrary records of the form:
32 The current new format is a list of arbitrary records of the form:
33
33
34 [type][length][content]
34 [type][length][content]
35
35
36 Type is a single character, length is a 4-byte integer, and content is an
36 Type is a single character, length is a 4-byte integer, and content is an
37 arbitrary sequence of bytes of length `length`.
37 arbitrary sequence of bytes of length `length`.
38
38
39 Type should be a letter. Capital-letter records are mandatory: Mercurial
39 Type should be a letter. Capital-letter records are mandatory: Mercurial
40 should abort if they are unknown. Lower-case records can be safely ignored.
40 should abort if they are unknown. Lower-case records can be safely ignored.
41
41
42 Currently known records:
42 Currently known records:
43
43
44 L: the node of the "local" part of the merge (hexified version)
44 L: the node of the "local" part of the merge (hexified version)
45 O: the node of the "other" part of the merge (hexified version)
45 O: the node of the "other" part of the merge (hexified version)
46 F: a file to be merged entry
46 F: a file to be merged entry
47 '''
47 '''
48 statepathv1 = "merge/state"
48 statepathv1 = "merge/state"
49 statepathv2 = "merge/state2"
49 statepathv2 = "merge/state2"
50
50
51 def __init__(self, repo):
51 def __init__(self, repo):
52 self._repo = repo
52 self._repo = repo
53 self._dirty = False
53 self._dirty = False
54 self._read()
54 self._read()
55
55
56 def reset(self, node=None, other=None):
56 def reset(self, node=None, other=None):
57 self._state = {}
57 self._state = {}
58 self._local = None
58 self._local = None
59 self._other = None
59 self._other = None
60 if node:
60 if node:
61 self._local = node
61 self._local = node
62 self._other = other
62 self._other = other
63 shutil.rmtree(self._repo.join("merge"), True)
63 shutil.rmtree(self._repo.join("merge"), True)
64 self._dirty = False
64 self._dirty = False
65
65
66 def _read(self):
66 def _read(self):
67 """Analyse each record content to restore a serialized state from disk
67 """Analyse each record content to restore a serialized state from disk
68
68
69 This function processes the "record" entries produced by the de-serialization
69 This function processes the "record" entries produced by the de-serialization
70 of the on-disk file.
70 of the on-disk file.
71 """
71 """
72 self._state = {}
72 self._state = {}
73 self._local = None
73 self._local = None
74 self._other = None
74 self._other = None
75 records = self._readrecords()
75 records = self._readrecords()
76 for rtype, record in records:
76 for rtype, record in records:
77 if rtype == 'L':
77 if rtype == 'L':
78 self._local = bin(record)
78 self._local = bin(record)
79 elif rtype == 'O':
79 elif rtype == 'O':
80 self._other = bin(record)
80 self._other = bin(record)
81 elif rtype == "F":
81 elif rtype == "F":
82 bits = record.split("\0")
82 bits = record.split("\0")
83 self._state[bits[0]] = bits[1:]
83 self._state[bits[0]] = bits[1:]
84 elif not rtype.islower():
84 elif not rtype.islower():
85 raise util.Abort(_('unsupported merge state record: %s')
85 raise util.Abort(_('unsupported merge state record: %s')
86 % rtype)
86 % rtype)
87 self._dirty = False
87 self._dirty = False
88
88
89 def _readrecords(self):
89 def _readrecords(self):
90 """Read merge state from disk and return a list of record (TYPE, data)
90 """Read merge state from disk and return a list of record (TYPE, data)
91
91
92 We read data from both v1 and v2 files and decide which one to use.
92 We read data from both v1 and v2 files and decide which one to use.
93
93
94 V1 has been used by versions prior to 2.9.1 and contains less data than
94 V1 has been used by versions prior to 2.9.1 and contains less data than
95 v2. We read both versions and check if no data in v2 contradicts
95 v2. We read both versions and check if no data in v2 contradicts
96 v1. If there is no contradiction we can safely assume that both v1
96 v1. If there is no contradiction we can safely assume that both v1
97 and v2 were written at the same time and use the data in v2. If
97 and v2 were written at the same time and use the data in v2. If
98 there is a contradiction we ignore the v2 content, as we assume an old
98 there is a contradiction we ignore the v2 content, as we assume an old
99 version of Mercurial has overwritten the mergestate file and left an
99 version of Mercurial has overwritten the mergestate file and left an
100 old v2 file around.
100 old v2 file around.
101
101
102 returns list of record [(TYPE, data), ...]"""
102 returns list of record [(TYPE, data), ...]"""
103 v1records = self._readrecordsv1()
103 v1records = self._readrecordsv1()
104 v2records = self._readrecordsv2()
104 v2records = self._readrecordsv2()
105 oldv2 = set() # old format version of v2 record
105 oldv2 = set() # old format version of v2 record
106 for rec in v2records:
106 for rec in v2records:
107 if rec[0] == 'L':
107 if rec[0] == 'L':
108 oldv2.add(rec)
108 oldv2.add(rec)
109 elif rec[0] == 'F':
109 elif rec[0] == 'F':
110 # drop the onode data (not contained in v1)
110 # drop the onode data (not contained in v1)
111 oldv2.add(('F', _droponode(rec[1])))
111 oldv2.add(('F', _droponode(rec[1])))
112 for rec in v1records:
112 for rec in v1records:
113 if rec not in oldv2:
113 if rec not in oldv2:
114 # v1 file is newer than v2 file, use it
114 # v1 file is newer than v2 file, use it
115 # we have to infer the "other" changeset of the merge
115 # we have to infer the "other" changeset of the merge
116 # we cannot do better than that with v1 of the format
116 # we cannot do better than that with v1 of the format
117 mctx = self._repo[None].parents()[-1]
117 mctx = self._repo[None].parents()[-1]
118 v1records.append(('O', mctx.hex()))
118 v1records.append(('O', mctx.hex()))
119 # add placeholder "other" file node information
119 # add placeholder "other" file node information
120 # nobody is using it yet so we do not need to fetch the data
120 # nobody is using it yet so we do not need to fetch the data
121 # if mctx was wrong, `mctx[bits[-2]]` may fail.
121 # if mctx was wrong, `mctx[bits[-2]]` may fail.
122 for idx, r in enumerate(v1records):
122 for idx, r in enumerate(v1records):
123 if r[0] == 'F':
123 if r[0] == 'F':
124 bits = r[1].split("\0")
124 bits = r[1].split("\0")
125 bits.insert(-2, '')
125 bits.insert(-2, '')
126 v1records[idx] = (r[0], "\0".join(bits))
126 v1records[idx] = (r[0], "\0".join(bits))
127 return v1records
127 return v1records
128 else:
128 else:
129 return v2records
129 return v2records
130
130
131 def _readrecordsv1(self):
131 def _readrecordsv1(self):
132 """read on disk merge state for version 1 file
132 """read on disk merge state for version 1 file
133
133
134 returns list of record [(TYPE, data), ...]
134 returns list of record [(TYPE, data), ...]
135
135
136 Note: the "F" data from this file are one entry short
136 Note: the "F" data from this file are one entry short
137 (no "other file node" entry)
137 (no "other file node" entry)
138 """
138 """
139 records = []
139 records = []
140 try:
140 try:
141 f = self._repo.opener(self.statepathv1)
141 f = self._repo.opener(self.statepathv1)
142 for i, l in enumerate(f):
142 for i, l in enumerate(f):
143 if i == 0:
143 if i == 0:
144 records.append(('L', l[:-1]))
144 records.append(('L', l[:-1]))
145 else:
145 else:
146 records.append(('F', l[:-1]))
146 records.append(('F', l[:-1]))
147 f.close()
147 f.close()
148 except IOError, err:
148 except IOError, err:
149 if err.errno != errno.ENOENT:
149 if err.errno != errno.ENOENT:
150 raise
150 raise
151 return records
151 return records
152
152
153 def _readrecordsv2(self):
153 def _readrecordsv2(self):
154 """read on disk merge state for version 2 file
154 """read on disk merge state for version 2 file
155
155
156 returns list of record [(TYPE, data), ...]
156 returns list of record [(TYPE, data), ...]
157 """
157 """
158 records = []
158 records = []
159 try:
159 try:
160 f = self._repo.opener(self.statepathv2)
160 f = self._repo.opener(self.statepathv2)
161 data = f.read()
161 data = f.read()
162 off = 0
162 off = 0
163 end = len(data)
163 end = len(data)
164 while off < end:
164 while off < end:
165 rtype = data[off]
165 rtype = data[off]
166 off += 1
166 off += 1
167 length = _unpack('>I', data[off:(off + 4)])[0]
167 length = _unpack('>I', data[off:(off + 4)])[0]
168 off += 4
168 off += 4
169 record = data[off:(off + length)]
169 record = data[off:(off + length)]
170 off += length
170 off += length
171 records.append((rtype, record))
171 records.append((rtype, record))
172 f.close()
172 f.close()
173 except IOError, err:
173 except IOError, err:
174 if err.errno != errno.ENOENT:
174 if err.errno != errno.ENOENT:
175 raise
175 raise
176 return records
176 return records
177
177
178 def active(self):
178 def active(self):
179 """Whether mergestate is active.
179 """Whether mergestate is active.
180
180
181 Returns True if there appears to be mergestate. This is a rough proxy
181 Returns True if there appears to be mergestate. This is a rough proxy
182 for "is a merge in progress."
182 for "is a merge in progress."
183 """
183 """
184 # Check local variables before looking at filesystem for performance
184 # Check local variables before looking at filesystem for performance
185 # reasons.
185 # reasons.
186 return bool(self._local) or bool(self._state) or \
186 return bool(self._local) or bool(self._state) or \
187 self._repo.opener.exists(self.statepathv1) or \
187 self._repo.opener.exists(self.statepathv1) or \
188 self._repo.opener.exists(self.statepathv2)
188 self._repo.opener.exists(self.statepathv2)
189
189
190 def commit(self):
190 def commit(self):
191 """Write current state on disk (if necessary)"""
191 """Write current state on disk (if necessary)"""
192 if self._dirty:
192 if self._dirty:
193 records = []
193 records = []
194 records.append(("L", hex(self._local)))
194 records.append(("L", hex(self._local)))
195 records.append(("O", hex(self._other)))
195 records.append(("O", hex(self._other)))
196 for d, v in self._state.iteritems():
196 for d, v in self._state.iteritems():
197 records.append(("F", "\0".join([d] + v)))
197 records.append(("F", "\0".join([d] + v)))
198 self._writerecords(records)
198 self._writerecords(records)
199 self._dirty = False
199 self._dirty = False
200
200
201 def _writerecords(self, records):
201 def _writerecords(self, records):
202 """Write current state on disk (both v1 and v2)"""
202 """Write current state on disk (both v1 and v2)"""
203 self._writerecordsv1(records)
203 self._writerecordsv1(records)
204 self._writerecordsv2(records)
204 self._writerecordsv2(records)
205
205
206 def _writerecordsv1(self, records):
206 def _writerecordsv1(self, records):
207 """Write current state on disk in a version 1 file"""
207 """Write current state on disk in a version 1 file"""
208 f = self._repo.opener(self.statepathv1, "w")
208 f = self._repo.opener(self.statepathv1, "w")
209 irecords = iter(records)
209 irecords = iter(records)
210 lrecords = irecords.next()
210 lrecords = irecords.next()
211 assert lrecords[0] == 'L'
211 assert lrecords[0] == 'L'
212 f.write(hex(self._local) + "\n")
212 f.write(hex(self._local) + "\n")
213 for rtype, data in irecords:
213 for rtype, data in irecords:
214 if rtype == "F":
214 if rtype == "F":
215 f.write("%s\n" % _droponode(data))
215 f.write("%s\n" % _droponode(data))
216 f.close()
216 f.close()
217
217
218 def _writerecordsv2(self, records):
218 def _writerecordsv2(self, records):
219 """Write current state on disk in a version 2 file"""
219 """Write current state on disk in a version 2 file"""
220 f = self._repo.opener(self.statepathv2, "w")
220 f = self._repo.opener(self.statepathv2, "w")
221 for key, data in records:
221 for key, data in records:
222 assert len(key) == 1
222 assert len(key) == 1
223 format = ">sI%is" % len(data)
223 format = ">sI%is" % len(data)
224 f.write(_pack(format, key, len(data), data))
224 f.write(_pack(format, key, len(data), data))
225 f.close()
225 f.close()
226
226
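The v2 state file is a flat byte stream of the [type][length][content] records described in the class docstring: one type byte, a big-endian 4-byte length, then the payload, exactly as _writerecordsv2 packs them and _readrecordsv2 unpacks them. A standalone round-trip sketch of that encoding:

    import struct

    def packrecords(records):
        data = []
        for rtype, payload in records:
            assert len(rtype) == 1
            data.append(struct.pack('>sI%is' % len(payload),
                                    rtype, len(payload), payload))
        return ''.join(data)

    def unpackrecords(data):
        records, off = [], 0
        while off < len(data):
            rtype = data[off]
            off += 1
            length = struct.unpack('>I', data[off:off + 4])[0]
            off += 4
            records.append((rtype, data[off:off + length]))
            off += length
        return records

    # round trip:
    #   unpackrecords(packrecords([('L', '0' * 40)])) == [('L', '0' * 40)]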
227 def add(self, fcl, fco, fca, fd):
227 def add(self, fcl, fco, fca, fd):
228 """add a new (potentially?) conflicting file the merge state
228 """add a new (potentially?) conflicting file the merge state
229 fcl: file context for local,
229 fcl: file context for local,
230 fco: file context for remote,
230 fco: file context for remote,
231 fca: file context for ancestors,
231 fca: file context for ancestors,
232 fd: file path of the resulting merge.
232 fd: file path of the resulting merge.
233
233
234 note: also write the local version to the `.hg/merge` directory.
234 note: also write the local version to the `.hg/merge` directory.
235 """
235 """
236 hash = util.sha1(fcl.path()).hexdigest()
236 hash = util.sha1(fcl.path()).hexdigest()
237 self._repo.opener.write("merge/" + hash, fcl.data())
237 self._repo.opener.write("merge/" + hash, fcl.data())
238 self._state[fd] = ['u', hash, fcl.path(),
238 self._state[fd] = ['u', hash, fcl.path(),
239 fca.path(), hex(fca.filenode()),
239 fca.path(), hex(fca.filenode()),
240 fco.path(), hex(fco.filenode()),
240 fco.path(), hex(fco.filenode()),
241 fcl.flags()]
241 fcl.flags()]
242 self._dirty = True
242 self._dirty = True
243
243
244 def __contains__(self, dfile):
244 def __contains__(self, dfile):
245 return dfile in self._state
245 return dfile in self._state
246
246
247 def __getitem__(self, dfile):
247 def __getitem__(self, dfile):
248 return self._state[dfile][0]
248 return self._state[dfile][0]
249
249
250 def __iter__(self):
250 def __iter__(self):
251 return iter(sorted(self._state))
251 return iter(sorted(self._state))
252
252
253 def files(self):
253 def files(self):
254 return self._state.keys()
254 return self._state.keys()
255
255
256 def mark(self, dfile, state):
256 def mark(self, dfile, state):
257 self._state[dfile][0] = state
257 self._state[dfile][0] = state
258 self._dirty = True
258 self._dirty = True
259
259
260 def unresolved(self):
260 def unresolved(self):
261 """Obtain the paths of unresolved files."""
261 """Obtain the paths of unresolved files."""
262
262
263 for f, entry in self._state.items():
263 for f, entry in self._state.items():
264 if entry[0] == 'u':
264 if entry[0] == 'u':
265 yield f
265 yield f
266
266
267 def resolve(self, dfile, wctx, labels=None):
267 def resolve(self, dfile, wctx, labels=None):
268 """rerun merge process for file path `dfile`"""
268 """rerun merge process for file path `dfile`"""
269 if self[dfile] == 'r':
269 if self[dfile] == 'r':
270 return 0
270 return 0
271 stateentry = self._state[dfile]
271 stateentry = self._state[dfile]
272 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
272 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
273 octx = self._repo[self._other]
273 octx = self._repo[self._other]
274 fcd = wctx[dfile]
274 fcd = wctx[dfile]
275 fco = octx[ofile]
275 fco = octx[ofile]
276 fca = self._repo.filectx(afile, fileid=anode)
276 fca = self._repo.filectx(afile, fileid=anode)
277 # "premerge" x flags
277 # "premerge" x flags
278 flo = fco.flags()
278 flo = fco.flags()
279 fla = fca.flags()
279 fla = fca.flags()
280 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
280 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
281 if fca.node() == nullid:
281 if fca.node() == nullid:
282 self._repo.ui.warn(_('warning: cannot merge flags for %s\n') %
282 self._repo.ui.warn(_('warning: cannot merge flags for %s\n') %
283 afile)
283 afile)
284 elif flags == fla:
284 elif flags == fla:
285 flags = flo
285 flags = flo
286 # restore local
286 # restore local
287 f = self._repo.opener("merge/" + hash)
287 f = self._repo.opener("merge/" + hash)
288 self._repo.wwrite(dfile, f.read(), flags)
288 self._repo.wwrite(dfile, f.read(), flags)
289 f.close()
289 f.close()
290 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca,
290 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca,
291 labels=labels)
291 labels=labels)
292 if r is None:
292 if r is None:
293 # no real conflict
293 # no real conflict
294 del self._state[dfile]
294 del self._state[dfile]
295 self._dirty = True
295 self._dirty = True
296 elif not r:
296 elif not r:
297 self.mark(dfile, 'r')
297 self.mark(dfile, 'r')
298 return r
298 return r
299
299
300 def _checkunknownfile(repo, wctx, mctx, f):
300 def _checkunknownfile(repo, wctx, mctx, f):
301 return (not repo.dirstate._ignore(f)
301 return (not repo.dirstate._ignore(f)
302 and os.path.isfile(repo.wjoin(f))
302 and os.path.isfile(repo.wjoin(f))
303 and repo.wopener.audit.check(f)
303 and repo.wopener.audit.check(f)
304 and repo.dirstate.normalize(f) not in repo.dirstate
304 and repo.dirstate.normalize(f) not in repo.dirstate
305 and mctx[f].cmp(wctx[f]))
305 and mctx[f].cmp(wctx[f]))
306
306
307 def _checkunknown(repo, wctx, mctx):
307 def _checkunknown(repo, wctx, mctx):
308 "check for collisions between unknown files and files in mctx"
308 "check for collisions between unknown files and files in mctx"
309
309
310 error = False
310 error = False
311 for f in mctx:
311 for f in mctx:
312 if f not in wctx and _checkunknownfile(repo, wctx, mctx, f):
312 if f not in wctx and _checkunknownfile(repo, wctx, mctx, f):
313 error = True
313 error = True
314 wctx._repo.ui.warn(_("%s: untracked file differs\n") % f)
314 wctx._repo.ui.warn(_("%s: untracked file differs\n") % f)
315 if error:
315 if error:
316 raise util.Abort(_("untracked files in working directory differ "
316 raise util.Abort(_("untracked files in working directory differ "
317 "from files in requested revision"))
317 "from files in requested revision"))
318
318
319 def _forgetremoved(wctx, mctx, branchmerge):
319 def _forgetremoved(wctx, mctx, branchmerge):
320 """
320 """
321 Forget removed files
321 Forget removed files
322
322
323 If we're jumping between revisions (as opposed to merging), and if
323 If we're jumping between revisions (as opposed to merging), and if
324 neither the working directory nor the target rev has the file,
324 neither the working directory nor the target rev has the file,
325 then we need to remove it from the dirstate, to prevent the
325 then we need to remove it from the dirstate, to prevent the
326 dirstate from listing the file when it is no longer in the
326 dirstate from listing the file when it is no longer in the
327 manifest.
327 manifest.
328
328
329 If we're merging, and the other revision has removed a file
329 If we're merging, and the other revision has removed a file
330 that is not present in the working directory, we need to mark it
330 that is not present in the working directory, we need to mark it
331 as removed.
331 as removed.
332 """
332 """
333
333
334 actions = []
334 ractions = []
335 state = branchmerge and 'r' or 'f'
335 factions = xactions = []
336 if branchmerge:
337 xactions = ractions
336 for f in wctx.deleted():
338 for f in wctx.deleted():
337 if f not in mctx:
339 if f not in mctx:
338 actions.append((f, state, None, "forget deleted"))
340 xactions.append((f, None, "forget deleted"))
339
341
340 if not branchmerge:
342 if not branchmerge:
341 for f in wctx.removed():
343 for f in wctx.removed():
342 if f not in mctx:
344 if f not in mctx:
343 actions.append((f, "f", None, "forget removed"))
345 factions.append((f, None, "forget removed"))
344
346
345 return actions
347 return ractions, factions
346
348
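Where the old code tagged every tuple with an action letter ('r' or 'f') and returned one flat list, _forgetremoved now returns two lists whose meaning is implied by their position: files to mark as removed (only populated on branch merges) and files to forget. Illustratively, and assuming the caller simply extends the matching per-type lists:

    # shape of the new return value
    ractions = [('lost.txt', None, 'forget deleted')]    # -> actions['r']
    factions = [('gone.txt', None, 'forget removed')]    # -> actions['f']
    # a caller merging these into the per-type dict might do:
    #   actions['r'].extend(ractions)
    #   actions['f'].extend(factions)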
347 def _checkcollision(repo, wmf, actions):
349 def _checkcollision(repo, wmf, actions):
348 # build up the provisional merged manifest
350 # build up the provisional merged manifest
349 pmmf = set(wmf)
351 pmmf = set(wmf)
350
352
351 def addop(f, args):
353 if actions:
352 pmmf.add(f)
354 # k, dr, e and rd are no-op
353 def removeop(f, args):
355 for m in 'a', 'f', 'g', 'cd', 'dc':
354 pmmf.discard(f)
356 for f, args, msg in actions[m]:
355 def nop(f, args):
357 pmmf.add(f)
356 pass
358 for f, args, msg in actions['r']:
357
359 pmmf.discard(f)
358 def renamemoveop(f, args):
360 for f, args, msg in actions['dm']:
359 f2, flags = args
361 f2, flags = args
360 pmmf.discard(f2)
362 pmmf.discard(f2)
361 pmmf.add(f)
363 pmmf.add(f)
362 def renamegetop(f, args):
364 for f, args, msg in actions['dg']:
363 f2, flags = args
365 f2, flags = args
364 pmmf.add(f)
366 pmmf.add(f)
365 def mergeop(f, args):
367 for f, args, msg in actions['m']:
366 f1, f2, fa, move, anc = args
368 f1, f2, fa, move, anc = args
367 if move:
369 if move:
368 pmmf.discard(f1)
370 pmmf.discard(f1)
369 pmmf.add(f)
371 pmmf.add(f)
370
371 opmap = {
372 "a": addop,
373 "dm": renamemoveop,
374 "dg": renamegetop,
375 "dr": nop,
376 "e": nop,
377 "k": nop,
378 "f": addop, # untracked file should be kept in working directory
379 "g": addop,
380 "m": mergeop,
381 "r": removeop,
382 "rd": nop,
383 "cd": addop,
384 "dc": addop,
385 }
386 for f, m, args, msg in actions:
387 op = opmap.get(m)
388 assert op, m
389 op(f, args)
390
372
391 # check case-folding collision in provisional merged manifest
373 # check case-folding collision in provisional merged manifest
392 foldmap = {}
374 foldmap = {}
393 for f in sorted(pmmf):
375 for f in sorted(pmmf):
394 fold = util.normcase(f)
376 fold = util.normcase(f)
395 if fold in foldmap:
377 if fold in foldmap:
396 raise util.Abort(_("case-folding collision between %s and %s")
378 raise util.Abort(_("case-folding collision between %s and %s")
397 % (f, foldmap[fold]))
379 % (f, foldmap[fold]))
398 foldmap[fold] = f
380 foldmap[fold] = f
399
381
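_checkcollision now iterates the per-type lists directly to build the provisional post-merge manifest, then rejects any two paths that fold to the same case-insensitive name, which would collide on case-insensitive filesystems. The final check is easy to illustrate in isolation; util.normcase is approximated here with plain lower-casing:

    def checkfolding(paths):
        foldmap = {}
        for f in sorted(paths):
            fold = f.lower()            # stand-in for util.normcase
            if fold in foldmap:
                raise ValueError('case-folding collision between %s and %s'
                                 % (f, foldmap[fold]))
            foldmap[fold] = f

    # checkfolding(['README', 'docs/readme'])  -> passes
    # checkfolding(['README', 'readme'])       -> raises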
400 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
382 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
401 acceptremote, followcopies):
383 acceptremote, followcopies):
402 """
384 """
403 Merge p1 and p2 with ancestor pa and generate merge action list
385 Merge p1 and p2 with ancestor pa and generate merge action list
404
386
405 branchmerge and force are as passed in to update
387 branchmerge and force are as passed in to update
406 partial = function to filter file lists
388 partial = function to filter file lists
407 acceptremote = accept the incoming changes without prompting
389 acceptremote = accept the incoming changes without prompting
408 """
390 """
409
391
410 actions, copy, movewithdir = [], {}, {}
392 actions = dict((m, []) for m in 'a f g cd dc r dm dg m dr e rd k'.split())
393 copy, movewithdir = {}, {}
411
394
412 # manifests fetched in order are going to be faster, so prime the caches
395 # manifests fetched in order are going to be faster, so prime the caches
413 [x.manifest() for x in
396 [x.manifest() for x in
414 sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]
397 sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]
415
398
416 if followcopies:
399 if followcopies:
417 ret = copies.mergecopies(repo, wctx, p2, pa)
400 ret = copies.mergecopies(repo, wctx, p2, pa)
418 copy, movewithdir, diverge, renamedelete = ret
401 copy, movewithdir, diverge, renamedelete = ret
419 for of, fl in diverge.iteritems():
402 for of, fl in diverge.iteritems():
420 actions.append((of, "dr", (fl,), "divergent renames"))
403 actions['dr'].append((of, (fl,), "divergent renames"))
421 for of, fl in renamedelete.iteritems():
404 for of, fl in renamedelete.iteritems():
422 actions.append((of, "rd", (fl,), "rename and delete"))
405 actions['rd'].append((of, (fl,), "rename and delete"))
423
406
424 repo.ui.note(_("resolving manifests\n"))
407 repo.ui.note(_("resolving manifests\n"))
425 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
408 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
426 % (bool(branchmerge), bool(force), bool(partial)))
409 % (bool(branchmerge), bool(force), bool(partial)))
427 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
410 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
428
411
429 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
412 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
430 copied = set(copy.values())
413 copied = set(copy.values())
431 copied.update(movewithdir.values())
414 copied.update(movewithdir.values())
432
415
433 if '.hgsubstate' in m1:
416 if '.hgsubstate' in m1:
434 # check whether sub state is modified
417 # check whether sub state is modified
435 for s in sorted(wctx.substate):
418 for s in sorted(wctx.substate):
436 if wctx.sub(s).dirty():
419 if wctx.sub(s).dirty():
437 m1['.hgsubstate'] += "+"
420 m1['.hgsubstate'] += "+"
438 break
421 break
439
422
440 aborts = []
423 aborts = []
441 # Compare manifests
424 # Compare manifests
442 fdiff = dicthelpers.diff(m1, m2)
425 fdiff = dicthelpers.diff(m1, m2)
443 flagsdiff = m1.flagsdiff(m2)
426 flagsdiff = m1.flagsdiff(m2)
444 diff12 = dicthelpers.join(fdiff, flagsdiff)
427 diff12 = dicthelpers.join(fdiff, flagsdiff)
445
428
446 for f, (n12, fl12) in diff12.iteritems():
429 for f, (n12, fl12) in diff12.iteritems():
447 if n12:
430 if n12:
448 n1, n2 = n12
431 n1, n2 = n12
449 else: # file contents didn't change, but flags did
432 else: # file contents didn't change, but flags did
450 n1 = n2 = m1.get(f, None)
433 n1 = n2 = m1.get(f, None)
451 if n1 is None:
434 if n1 is None:
452 # Since n1 == n2, the file isn't present in m2 either. This
435 # Since n1 == n2, the file isn't present in m2 either. This
453 # means that the file was removed or deleted locally and
436 # means that the file was removed or deleted locally and
454 # removed remotely, but that residual entries remain in flags.
437 # removed remotely, but that residual entries remain in flags.
455 # This can happen in manifests generated by workingctx.
438 # This can happen in manifests generated by workingctx.
456 continue
439 continue
457 if fl12:
440 if fl12:
458 fl1, fl2 = fl12
441 fl1, fl2 = fl12
459 else: # flags didn't change, file contents did
442 else: # flags didn't change, file contents did
460 fl1 = fl2 = m1.flags(f)
443 fl1 = fl2 = m1.flags(f)
461
444
462 if partial and not partial(f):
445 if partial and not partial(f):
463 continue
446 continue
464 if n1 and n2:
447 if n1 and n2:
465 fa = f
448 fa = f
466 a = ma.get(f, nullid)
449 a = ma.get(f, nullid)
467 if a == nullid:
450 if a == nullid:
468 fa = copy.get(f, f)
451 fa = copy.get(f, f)
469 # Note: f as default is wrong - we can't really make a 3-way
452 # Note: f as default is wrong - we can't really make a 3-way
470 # merge without an ancestor file.
453 # merge without an ancestor file.
471 fla = ma.flags(fa)
454 fla = ma.flags(fa)
472 nol = 'l' not in fl1 + fl2 + fla
455 nol = 'l' not in fl1 + fl2 + fla
473 if n2 == a and fl2 == fla:
456 if n2 == a and fl2 == fla:
474 actions.append((f, "k", (), "keep")) # remote unchanged
457 actions['k'].append((f, (), "keep")) # remote unchanged
475 elif n1 == a and fl1 == fla: # local unchanged - use remote
458 elif n1 == a and fl1 == fla: # local unchanged - use remote
476 if n1 == n2: # optimization: keep local content
459 if n1 == n2: # optimization: keep local content
477 actions.append((f, "e", (fl2,), "update permissions"))
460 actions['e'].append((f, (fl2,), "update permissions"))
478 else:
461 else:
479 actions.append((f, "g", (fl2,), "remote is newer"))
462 actions['g'].append((f, (fl2,), "remote is newer"))
480 elif nol and n2 == a: # remote only changed 'x'
463 elif nol and n2 == a: # remote only changed 'x'
481 actions.append((f, "e", (fl2,), "update permissions"))
464 actions['e'].append((f, (fl2,), "update permissions"))
482 elif nol and n1 == a: # local only changed 'x'
465 elif nol and n1 == a: # local only changed 'x'
483 actions.append((f, "g", (fl1,), "remote is newer"))
466 actions['g'].append((f, (fl1,), "remote is newer"))
484 else: # both changed something
467 else: # both changed something
485 actions.append((f, "m", (f, f, fa, False, pa.node()),
468 actions['m'].append((f, (f, f, fa, False, pa.node()),
486 "versions differ"))
469 "versions differ"))
487 elif f in copied: # files we'll deal with on m2 side
470 elif f in copied: # files we'll deal with on m2 side
488 pass
471 pass
489 elif n1 and f in movewithdir: # directory rename, move local
472 elif n1 and f in movewithdir: # directory rename, move local
490 f2 = movewithdir[f]
473 f2 = movewithdir[f]
491 actions.append((f2, "dm", (f, fl1),
474 actions['dm'].append((f2, (f, fl1),
492 "remote directory rename - move from " + f))
475 "remote directory rename - move from " + f))
493 elif n1 and f in copy:
476 elif n1 and f in copy:
494 f2 = copy[f]
477 f2 = copy[f]
495 actions.append((f, "m", (f, f2, f2, False, pa.node()),
478 actions['m'].append((f, (f, f2, f2, False, pa.node()),
496 "local copied/moved from " + f2))
479 "local copied/moved from " + f2))
497 elif n1 and f in ma: # clean, a different, no remote
480 elif n1 and f in ma: # clean, a different, no remote
498 if n1 != ma[f]:
481 if n1 != ma[f]:
499 if acceptremote:
482 if acceptremote:
500 actions.append((f, "r", None, "remote delete"))
483 actions['r'].append((f, None, "remote delete"))
501 else:
484 else:
502 actions.append((f, "cd", None, "prompt changed/deleted"))
485 actions['cd'].append((f, None, "prompt changed/deleted"))
503 elif n1[20:] == "a": # added, no remote
486 elif n1[20:] == "a": # added, no remote
504 actions.append((f, "f", None, "remote deleted"))
487 actions['f'].append((f, None, "remote deleted"))
505 else:
488 else:
506 actions.append((f, "r", None, "other deleted"))
489 actions['r'].append((f, None, "other deleted"))
507 elif n2 and f in movewithdir:
490 elif n2 and f in movewithdir:
508 f2 = movewithdir[f]
491 f2 = movewithdir[f]
509 actions.append((f2, "dg", (f, fl2),
492 actions['dg'].append((f2, (f, fl2),
510 "local directory rename - get from " + f))
493 "local directory rename - get from " + f))
511 elif n2 and f in copy:
494 elif n2 and f in copy:
512 f2 = copy[f]
495 f2 = copy[f]
513 if f2 in m2:
496 if f2 in m2:
514 actions.append((f, "m", (f2, f, f2, False, pa.node()),
497 actions['m'].append((f, (f2, f, f2, False, pa.node()),
515 "remote copied from " + f2))
498 "remote copied from " + f2))
516 else:
499 else:
517 actions.append((f, "m", (f2, f, f2, True, pa.node()),
500 actions['m'].append((f, (f2, f, f2, True, pa.node()),
518 "remote moved from " + f2))
501 "remote moved from " + f2))
519 elif n2 and f not in ma:
502 elif n2 and f not in ma:
520 # local unknown, remote created: the logic is described by the
503 # local unknown, remote created: the logic is described by the
521 # following table:
504 # following table:
522 #
505 #
523 # force branchmerge different | action
506 # force branchmerge different | action
524 # n * n | get
507 # n * n | get
525 # n * y | abort
508 # n * y | abort
526 # y n * | get
509 # y n * | get
527 # y y n | get
510 # y y n | get
528 # y y y | merge
511 # y y y | merge
529 #
512 #
530 # Checking whether the files are different is expensive, so we
513 # Checking whether the files are different is expensive, so we
531 # don't do that when we can avoid it.
514 # don't do that when we can avoid it.
532 if force and not branchmerge:
515 if force and not branchmerge:
533 actions.append((f, "g", (fl2,), "remote created"))
516 actions['g'].append((f, (fl2,), "remote created"))
534 else:
517 else:
535 different = _checkunknownfile(repo, wctx, p2, f)
518 different = _checkunknownfile(repo, wctx, p2, f)
536 if force and branchmerge and different:
519 if force and branchmerge and different:
537 # FIXME: This is wrong - f is not in ma ...
520 # FIXME: This is wrong - f is not in ma ...
538 actions.append((f, "m", (f, f, f, False, pa.node()),
521 actions['m'].append((f, (f, f, f, False, pa.node()),
539 "remote differs from untracked local"))
522 "remote differs from untracked local"))
540 elif not force and different:
523 elif not force and different:
541 aborts.append((f, "ud"))
524 aborts.append((f, "ud"))
542 else:
525 else:
543 actions.append((f, "g", (fl2,), "remote created"))
526 actions['g'].append((f, (fl2,), "remote created"))
544 elif n2 and n2 != ma[f]:
527 elif n2 and n2 != ma[f]:
545 different = _checkunknownfile(repo, wctx, p2, f)
528 different = _checkunknownfile(repo, wctx, p2, f)
546 if not force and different:
529 if not force and different:
547 aborts.append((f, "ud"))
530 aborts.append((f, "ud"))
548 else:
531 else:
549 # if different: old untracked f may be overwritten and lost
532 # if different: old untracked f may be overwritten and lost
550 if acceptremote:
533 if acceptremote:
551 actions.append((f, "g", (m2.flags(f),),
534 actions['g'].append((f, (m2.flags(f),),
552 "remote recreating"))
535 "remote recreating"))
553 else:
536 else:
554 actions.append((f, "dc", (m2.flags(f),),
537 actions['dc'].append((f, (m2.flags(f),),
555 "prompt deleted/changed"))
538 "prompt deleted/changed"))
556
539
557 for f, m in sorted(aborts):
540 for f, m in sorted(aborts):
558 if m == "ud":
541 if m == "ud":
559 repo.ui.warn(_("%s: untracked file differs\n") % f)
542 repo.ui.warn(_("%s: untracked file differs\n") % f)
560 else: assert False, m
543 else: assert False, m
561 if aborts:
544 if aborts:
562 raise util.Abort(_("untracked files in working directory differ "
545 raise util.Abort(_("untracked files in working directory differ "
563 "from files in requested revision"))
546 "from files in requested revision"))
564
547
565 if not util.checkcase(repo.path):
548 if not util.checkcase(repo.path):
566 # check collision between files only in p2 for clean update
549 # check collision between files only in p2 for clean update
567 if (not branchmerge and
550 if (not branchmerge and
568 (force or not wctx.dirty(missing=True, branch=False))):
551 (force or not wctx.dirty(missing=True, branch=False))):
569 _checkcollision(repo, m2, [])
552 _checkcollision(repo, m2, None)
570 else:
553 else:
571 _checkcollision(repo, m1, actions)
554 _checkcollision(repo, m1, actions)
572
555
573 return actions
556 return actions
574
557
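
The force/branchmerge/different table in the "local unknown, remote created" branch above is compact but easy to misread. The following is a pure-function rendering of that same table, added here purely as an illustration (it is not part of the change):

def unknown_remote_created_action(force, branchmerge, different):
    # Same decision table as the comment in manifestmerge above.
    if force and not branchmerge:
        return 'get'        # y n * -> get
    if not force and different:
        return 'abort'      # n * y -> abort
    if force and branchmerge and different:
        return 'merge'      # y y y -> merge
    return 'get'            # n * n and y y n -> get
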
575 actionpriority = dict((m, p) for p, m in enumerate(
576 ['r', 'f', 'g', 'a', 'k', 'm', 'dm', 'dg', 'dr', 'cd', 'dc', 'rd', 'e']))
577
578 def actionkey(a):
579 return actionpriority[a[1]], a
580
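
The helpers deleted above (actionpriority / actionkey) existed only to interleave one flat action list into a deterministic processing order. With the per-type lists introduced by this change, each list is sorted on its own and the cross-type ordering is fixed by the order in which applyupdates walks the dict. A minimal standalone sketch of the two shapes, with made-up file names:

# Old shape: one flat list of (file, action type, args, message),
# sorted globally with actionkey so that e.g. removes ran before gets.
old_actions = [
    ('a.txt', 'r', None, 'other deleted'),
    ('b.txt', 'g', ('',), 'remote created'),
]

# New shape: one list per action type, each holding (file, args, message).
new_actions = {'r': [], 'g': [], 'm': []}
new_actions['r'].append(('a.txt', None, 'other deleted'))
new_actions['g'].append(('b.txt', ('',), 'remote created'))

# applyupdates now only sorts within each type:
for l in new_actions.values():
    l.sort()
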
581 def batchremove(repo, actions):
558 def batchremove(repo, actions):
582 """apply removes to the working directory
559 """apply removes to the working directory
583
560
584 yields tuples for progress updates
561 yields tuples for progress updates
585 """
562 """
586 verbose = repo.ui.verbose
563 verbose = repo.ui.verbose
587 unlink = util.unlinkpath
564 unlink = util.unlinkpath
588 wjoin = repo.wjoin
565 wjoin = repo.wjoin
589 audit = repo.wopener.audit
566 audit = repo.wopener.audit
590 i = 0
567 i = 0
591 for f, m, args, msg in actions:
568 for f, args, msg in actions:
592 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
569 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
593 if True:
570 if True:
594 if verbose:
571 if verbose:
595 repo.ui.note(_("removing %s\n") % f)
572 repo.ui.note(_("removing %s\n") % f)
596 audit(f)
573 audit(f)
597 try:
574 try:
598 unlink(wjoin(f), ignoremissing=True)
575 unlink(wjoin(f), ignoremissing=True)
599 except OSError, inst:
576 except OSError, inst:
600 repo.ui.warn(_("update failed to remove %s: %s!\n") %
577 repo.ui.warn(_("update failed to remove %s: %s!\n") %
601 (f, inst.strerror))
578 (f, inst.strerror))
602 if i == 100:
579 if i == 100:
603 yield i, f
580 yield i, f
604 i = 0
581 i = 0
605 i += 1
582 i += 1
606 if i > 0:
583 if i > 0:
607 yield i, f
584 yield i, f
608
585
609 def batchget(repo, mctx, actions):
586 def batchget(repo, mctx, actions):
610 """apply gets to the working directory
587 """apply gets to the working directory
611
588
612 mctx is the context to get from
589 mctx is the context to get from
613
590
614 yields tuples for progress updates
591 yields tuples for progress updates
615 """
592 """
616 verbose = repo.ui.verbose
593 verbose = repo.ui.verbose
617 fctx = mctx.filectx
594 fctx = mctx.filectx
618 wwrite = repo.wwrite
595 wwrite = repo.wwrite
619 i = 0
596 i = 0
620 for f, m, args, msg in actions:
597 for f, args, msg in actions:
621 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
598 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
622 if True:
599 if True:
623 if verbose:
600 if verbose:
624 repo.ui.note(_("getting %s\n") % f)
601 repo.ui.note(_("getting %s\n") % f)
625 wwrite(f, fctx(f).data(), args[0])
602 wwrite(f, fctx(f).data(), args[0])
626 if i == 100:
603 if i == 100:
627 yield i, f
604 yield i, f
628 i = 0
605 i = 0
629 i += 1
606 i += 1
630 if i > 0:
607 if i > 0:
631 yield i, f
608 yield i, f
632
609
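
batchremove and batchget are generators driven by worker.worker; they report progress by yielding a (count, last file) tuple roughly every hundred files instead of once per file. The same batching pattern in isolation, with hypothetical work, looks like this:

def batchprocess(items):
    i = 0
    for item in items:
        # ... the real per-item work would happen here ...
        if i == 100:
            yield i, item
            i = 0
        i += 1
    if i > 0:
        yield i, item        # remainder of the last batch

done = 0
for count, last in batchprocess(['f%d' % n for n in range(250)]):
    done += count            # the caller advances the progress bar here
# done == 250: every processed item is eventually reported
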
633 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
610 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
634 """apply the merge action list to the working directory
611 """apply the merge action list to the working directory
635
612
636 wctx is the working copy context
613 wctx is the working copy context
637 mctx is the context to be merged into the working copy
614 mctx is the context to be merged into the working copy
638
615
639 Return a tuple of counts (updated, merged, removed, unresolved) that
616 Return a tuple of counts (updated, merged, removed, unresolved) that
640 describes how many files were affected by the update.
617 describes how many files were affected by the update.
641 """
618 """
642
619
643 updated, merged, removed, unresolved = 0, 0, 0, 0
620 updated, merged, removed, unresolved = 0, 0, 0, 0
644 ms = mergestate(repo)
621 ms = mergestate(repo)
645 ms.reset(wctx.p1().node(), mctx.node())
622 ms.reset(wctx.p1().node(), mctx.node())
646 moves = []
623 moves = []
647 actions.sort(key=actionkey)
624 for m, l in actions.items():
625 l.sort()
648
626
649 # prescan for merges
627 # prescan for merges
650 for a in actions:
628 for f, args, msg in actions['m']:
651 f, m, args, msg = a
629 if True:
652 if m == "m": # merge
653 f1, f2, fa, move, anc = args
630 f1, f2, fa, move, anc = args
654 if f == '.hgsubstate': # merged internally
631 if f == '.hgsubstate': # merged internally
655 continue
632 continue
656 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
633 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
657 fcl = wctx[f1]
634 fcl = wctx[f1]
658 fco = mctx[f2]
635 fco = mctx[f2]
659 actx = repo[anc]
636 actx = repo[anc]
660 if fa in actx:
637 if fa in actx:
661 fca = actx[fa]
638 fca = actx[fa]
662 else:
639 else:
663 fca = repo.filectx(f1, fileid=nullrev)
640 fca = repo.filectx(f1, fileid=nullrev)
664 ms.add(fcl, fco, fca, f)
641 ms.add(fcl, fco, fca, f)
665 if f1 != f and move:
642 if f1 != f and move:
666 moves.append(f1)
643 moves.append(f1)
667
644
668 audit = repo.wopener.audit
645 audit = repo.wopener.audit
669 _updating = _('updating')
646 _updating = _('updating')
670 _files = _('files')
647 _files = _('files')
671 progress = repo.ui.progress
648 progress = repo.ui.progress
672
649
673 # remove renamed files after safely stored
650 # remove renamed files after safely stored
674 for f in moves:
651 for f in moves:
675 if os.path.lexists(repo.wjoin(f)):
652 if os.path.lexists(repo.wjoin(f)):
676 repo.ui.debug("removing %s\n" % f)
653 repo.ui.debug("removing %s\n" % f)
677 audit(f)
654 audit(f)
678 util.unlinkpath(repo.wjoin(f))
655 util.unlinkpath(repo.wjoin(f))
679
656
680 numupdates = len([a for a in actions if a[1] != 'k'])
657 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
681 workeractions = [a for a in actions if a[1] in 'gr']
682 updateactions = [a for a in workeractions if a[1] == 'g']
683 updated = len(updateactions)
684 removeactions = [a for a in workeractions if a[1] == 'r']
685 removed = len(removeactions)
686 actions = [a for a in actions if a[1] not in 'gr']
687
658
688 hgsub = [a[1] for a in workeractions if a[0] == '.hgsubstate']
659 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
689 if hgsub and hgsub[0] == 'r':
690 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
660 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
691
661
692 # remove in parallel (must come first)
662 # remove in parallel (must come first)
693 z = 0
663 z = 0
694 prog = worker.worker(repo.ui, 0.001, batchremove, (repo,), removeactions)
664 prog = worker.worker(repo.ui, 0.001, batchremove, (repo,), actions['r'])
695 for i, item in prog:
665 for i, item in prog:
696 z += i
666 z += i
697 progress(_updating, z, item=item, total=numupdates, unit=_files)
667 progress(_updating, z, item=item, total=numupdates, unit=_files)
668 removed = len(actions['r'])
698
669
699 # get in parallel
670 # get in parallel
700 prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx), updateactions)
671 prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx), actions['g'])
701 for i, item in prog:
672 for i, item in prog:
702 z += i
673 z += i
703 progress(_updating, z, item=item, total=numupdates, unit=_files)
674 progress(_updating, z, item=item, total=numupdates, unit=_files)
675 updated = len(actions['g'])
704
676
705 if hgsub and hgsub[0] == 'g':
677 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
706 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
678 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
707
679
708 for f, m, args, msg in actions:
680 if True:
709
681
710 # forget (manifest only, just log it) (must come first)
682 # forget (manifest only, just log it) (must come first)
711 if m == "f":
683 for f, args, msg in actions['f']:
712 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
684 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
713 z += 1
685 z += 1
714 progress(_updating, z, item=f, total=numupdates, unit=_files)
686 progress(_updating, z, item=f, total=numupdates, unit=_files)
715
687
716 # re-add (manifest only, just log it)
688 # re-add (manifest only, just log it)
717 elif m == "a":
689 for f, args, msg in actions['a']:
718 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
690 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
719 z += 1
691 z += 1
720 progress(_updating, z, item=f, total=numupdates, unit=_files)
692 progress(_updating, z, item=f, total=numupdates, unit=_files)
721
693
722 # keep (noop, just log it)
694 # keep (noop, just log it)
723 elif m == "k":
695 for f, args, msg in actions['k']:
724 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
696 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
725 # no progress
697 # no progress
726
698
727 # merge
699 # merge
728 elif m == "m":
700 for f, args, msg in actions['m']:
729 repo.ui.debug(" %s: %s -> m\n" % (f, msg))
701 repo.ui.debug(" %s: %s -> m\n" % (f, msg))
730 z += 1
702 z += 1
731 progress(_updating, z, item=f, total=numupdates, unit=_files)
703 progress(_updating, z, item=f, total=numupdates, unit=_files)
732 f1, f2, fa, move, anc = args
704 f1, f2, fa, move, anc = args
733 if f == '.hgsubstate': # subrepo states need updating
705 if f == '.hgsubstate': # subrepo states need updating
734 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
706 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
735 overwrite)
707 overwrite)
736 continue
708 continue
737 audit(f)
709 audit(f)
738 r = ms.resolve(f, wctx, labels=labels)
710 r = ms.resolve(f, wctx, labels=labels)
739 if r is not None and r > 0:
711 if r is not None and r > 0:
740 unresolved += 1
712 unresolved += 1
741 else:
713 else:
742 if r is None:
714 if r is None:
743 updated += 1
715 updated += 1
744 else:
716 else:
745 merged += 1
717 merged += 1
746
718
747 # directory rename, move local
719 # directory rename, move local
748 elif m == "dm":
720 for f, args, msg in actions['dm']:
749 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
721 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
750 z += 1
722 z += 1
751 progress(_updating, z, item=f, total=numupdates, unit=_files)
723 progress(_updating, z, item=f, total=numupdates, unit=_files)
752 f0, flags = args
724 f0, flags = args
753 repo.ui.note(_("moving %s to %s\n") % (f0, f))
725 repo.ui.note(_("moving %s to %s\n") % (f0, f))
754 audit(f)
726 audit(f)
755 repo.wwrite(f, wctx.filectx(f0).data(), flags)
727 repo.wwrite(f, wctx.filectx(f0).data(), flags)
756 util.unlinkpath(repo.wjoin(f0))
728 util.unlinkpath(repo.wjoin(f0))
757 updated += 1
729 updated += 1
758
730
759 # local directory rename, get
731 # local directory rename, get
760 elif m == "dg":
732 for f, args, msg in actions['dg']:
761 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
733 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
762 z += 1
734 z += 1
763 progress(_updating, z, item=f, total=numupdates, unit=_files)
735 progress(_updating, z, item=f, total=numupdates, unit=_files)
764 f0, flags = args
736 f0, flags = args
765 repo.ui.note(_("getting %s to %s\n") % (f0, f))
737 repo.ui.note(_("getting %s to %s\n") % (f0, f))
766 repo.wwrite(f, mctx.filectx(f0).data(), flags)
738 repo.wwrite(f, mctx.filectx(f0).data(), flags)
767 updated += 1
739 updated += 1
768
740
769 # divergent renames
741 # divergent renames
770 elif m == "dr":
742 for f, args, msg in actions['dr']:
771 repo.ui.debug(" %s: %s -> dr\n" % (f, msg))
743 repo.ui.debug(" %s: %s -> dr\n" % (f, msg))
772 z += 1
744 z += 1
773 progress(_updating, z, item=f, total=numupdates, unit=_files)
745 progress(_updating, z, item=f, total=numupdates, unit=_files)
774 fl, = args
746 fl, = args
775 repo.ui.warn(_("note: possible conflict - %s was renamed "
747 repo.ui.warn(_("note: possible conflict - %s was renamed "
776 "multiple times to:\n") % f)
748 "multiple times to:\n") % f)
777 for nf in fl:
749 for nf in fl:
778 repo.ui.warn(" %s\n" % nf)
750 repo.ui.warn(" %s\n" % nf)
779
751
780 # rename and delete
752 # rename and delete
781 elif m == "rd":
753 for f, args, msg in actions['rd']:
782 repo.ui.debug(" %s: %s -> rd\n" % (f, msg))
754 repo.ui.debug(" %s: %s -> rd\n" % (f, msg))
783 z += 1
755 z += 1
784 progress(_updating, z, item=f, total=numupdates, unit=_files)
756 progress(_updating, z, item=f, total=numupdates, unit=_files)
785 fl, = args
757 fl, = args
786 repo.ui.warn(_("note: possible conflict - %s was deleted "
758 repo.ui.warn(_("note: possible conflict - %s was deleted "
787 "and renamed to:\n") % f)
759 "and renamed to:\n") % f)
788 for nf in fl:
760 for nf in fl:
789 repo.ui.warn(" %s\n" % nf)
761 repo.ui.warn(" %s\n" % nf)
790
762
791 # exec
763 # exec
792 elif m == "e":
764 for f, args, msg in actions['e']:
793 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
765 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
794 z += 1
766 z += 1
795 progress(_updating, z, item=f, total=numupdates, unit=_files)
767 progress(_updating, z, item=f, total=numupdates, unit=_files)
796 flags, = args
768 flags, = args
797 audit(f)
769 audit(f)
798 util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
770 util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
799 updated += 1
771 updated += 1
800
772
801 ms.commit()
773 ms.commit()
802 progress(_updating, None, total=numupdates, unit=_files)
774 progress(_updating, None, total=numupdates, unit=_files)
803
775
804 return updated, merged, removed, unresolved
776 return updated, merged, removed, unresolved
805
777
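
With the dict-of-lists shape, the progress total and the removed/updated counters in applyupdates fall out of simple list lengths. An illustrative check with made-up actions:

actions = dict((m, []) for m in
               'r f g a k m e dm dg dr rd cd dc'.split())
actions['r'].append(('old.txt', None, 'other deleted'))
actions['g'].append(('new.txt', ('',), 'remote created'))
actions['k'].append(('same.txt', None, 'keep'))   # no-op, excluded from progress

numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
removed = len(actions['r'])
updated = len(actions['g'])    # merges, dm and dg add to this as they run
assert (numupdates, removed, updated) == (2, 1, 1)
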
806 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force, partial,
778 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force, partial,
807 acceptremote, followcopies):
779 acceptremote, followcopies):
808 "Calculate the actions needed to merge mctx into wctx using ancestors"
780 "Calculate the actions needed to merge mctx into wctx using ancestors"
809
781
810 if len(ancestors) == 1: # default
782 if len(ancestors) == 1: # default
811 actions = manifestmerge(repo, wctx, mctx, ancestors[0],
783 actions = manifestmerge(repo, wctx, mctx, ancestors[0],
812 branchmerge, force,
784 branchmerge, force,
813 partial, acceptremote, followcopies)
785 partial, acceptremote, followcopies)
814
786
815 else: # only when merge.preferancestor=* - experimentalish code
787 else: # only when merge.preferancestor=* - experimentalish code
816 repo.ui.status(
788 repo.ui.status(
817 _("note: merging %s and %s using bids from ancestors %s\n") %
789 _("note: merging %s and %s using bids from ancestors %s\n") %
818 (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))
790 (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))
819
791
820 # Call for bids
792 # Call for bids
821 fbids = {} # mapping filename to list of action bids
793 fbids = {} # mapping filename to bids (action method to list of actions)

822 for ancestor in ancestors:
794 for ancestor in ancestors:
823 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
795 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
824 actions = manifestmerge(repo, wctx, mctx, ancestor,
796 actions = manifestmerge(repo, wctx, mctx, ancestor,
825 branchmerge, force,
797 branchmerge, force,
826 partial, acceptremote, followcopies)
798 partial, acceptremote, followcopies)
827 for a in sorted(actions, key=lambda a: (a[1], a)):
799 for m, l in sorted(actions.items()):
828 f, m, args, msg = a
800 for a in l:
829 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
801 f, args, msg = a
830 if f in fbids:
802 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
831 fbids[f].append(a)
803 if f in fbids:
832 else:
804 d = fbids[f]
833 fbids[f] = [a]
805 if m in d:
806 d[m].append(a)
807 else:
808 d[m] = [a]
809 else:
810 fbids[f] = {m: [a]}
834
811
835 # Pick the best bid for each file
812 # Pick the best bid for each file
836 repo.ui.note(_('\nauction for merging merge bids\n'))
813 repo.ui.note(_('\nauction for merging merge bids\n'))
837 actions = []
814 actions = dict((m, []) for m in actions.keys())
838 for f, bidsl in sorted(fbids.items()):
815 for f, bids in sorted(fbids.items()):
816 # bids is a mapping from action method to list af actions
839 # Consensus?
817 # Consensus?
840 a0 = bidsl[0]
818 if len(bids) == 1: # all bids are the same kind of method
841 if util.all(a == a0 for a in bidsl[1:]): # len(bidsl) is > 1
819 m, l = bids.items()[0]
842 repo.ui.note(" %s: consensus for %s\n" % (f, a0[1]))
820 if util.all(a == l[0] for a in l[1:]): # len(bids) is > 1
843 actions.append(a0)
821 repo.ui.note(" %s: consensus for %s\n" % (f, m))
844 continue
822 actions[m].append(l[0])
845 # Group bids by kind of action
823 continue
846 bids = {}
847 for a in bidsl:
848 m = a[1]
849 if m in bids:
850 bids[m].append(a)
851 else:
852 bids[m] = [a]
853 # If keep is an option, just do it.
824 # If keep is an option, just do it.
854 if "k" in bids:
825 if "k" in bids:
855 repo.ui.note(" %s: picking 'keep' action\n" % f)
826 repo.ui.note(" %s: picking 'keep' action\n" % f)
856 actions.append(bids["k"][0])
827 actions['k'].append(bids["k"][0])
857 continue
828 continue
858 # If all gets agree [how could they not?], just do it.
829 # If there are gets and they all agree [how could they not?], do it.
859 if "g" in bids:
830 if "g" in bids:
860 ga0 = bids["g"][0]
831 ga0 = bids["g"][0]
861 if util.all(a == ga0 for a in bids["g"][1:]):
832 if util.all(a == ga0 for a in bids["g"][1:]):
862 repo.ui.note(" %s: picking 'get' action\n" % f)
833 repo.ui.note(" %s: picking 'get' action\n" % f)
863 actions.append(ga0)
834 actions['g'].append(ga0)
864 continue
835 continue
865 # TODO: Consider other simple actions such as mode changes
836 # TODO: Consider other simple actions such as mode changes
866 # Handle inefficient democrazy.
837 # Handle inefficient democrazy.
867 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
838 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
868 for _f, m, args, msg in bidsl:
839 for m, l in sorted(bids.items()):
869 repo.ui.note(' %s -> %s\n' % (msg, m))
840 for _f, args, msg in l:
841 repo.ui.note(' %s -> %s\n' % (msg, m))
870 # Pick random action. TODO: Instead, prompt user when resolving
842 # Pick random action. TODO: Instead, prompt user when resolving
871 a0 = bidsl[0]
843 m, l = bids.items()[0]
872 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
844 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
873 (f, a0[1]))
845 (f, m))
874 actions.append(a0)
846 actions[m].append(l[0])
875 continue
847 continue
876 repo.ui.note(_('end of auction\n\n'))
848 repo.ui.note(_('end of auction\n\n'))
877
849
878 # Filter out prompts.
879 newactions, prompts = [], []
880 for a in actions:
881 if a[1] in ("cd", "dc"):
882 prompts.append(a)
883 else:
884 newactions.append(a)
885 # Prompt and create actions. TODO: Move this towards resolve phase.
850 # Prompt and create actions. TODO: Move this towards resolve phase.
886 for f, m, args, msg in sorted(prompts):
851 if True:
887 if m == "cd":
852 for f, args, msg in actions['cd']:
888 if repo.ui.promptchoice(
853 if repo.ui.promptchoice(
889 _("local changed %s which remote deleted\n"
854 _("local changed %s which remote deleted\n"
890 "use (c)hanged version or (d)elete?"
855 "use (c)hanged version or (d)elete?"
891 "$$ &Changed $$ &Delete") % f, 0):
856 "$$ &Changed $$ &Delete") % f, 0):
892 newactions.append((f, "r", None, "prompt delete"))
857 actions['r'].append((f, None, "prompt delete"))
893 else:
858 else:
894 newactions.append((f, "a", None, "prompt keep"))
859 actions['a'].append((f, None, "prompt keep"))
895 elif m == "dc":
860 del actions['cd'][:]
861
862 for f, args, msg in actions['dc']:
896 flags, = args
863 flags, = args
897 if repo.ui.promptchoice(
864 if repo.ui.promptchoice(
898 _("remote changed %s which local deleted\n"
865 _("remote changed %s which local deleted\n"
899 "use (c)hanged version or leave (d)eleted?"
866 "use (c)hanged version or leave (d)eleted?"
900 "$$ &Changed $$ &Deleted") % f, 0) == 0:
867 "$$ &Changed $$ &Deleted") % f, 0) == 0:
901 newactions.append((f, "g", (flags,), "prompt recreating"))
868 actions['g'].append((f, (flags,), "prompt recreating"))
902 else: assert False, m
869 del actions['dc'][:]
903
870
904 if wctx.rev() is None:
871 if wctx.rev() is None:
905 newactions += _forgetremoved(wctx, mctx, branchmerge)
872 ractions, factions = _forgetremoved(wctx, mctx, branchmerge)
873 actions['r'].extend(ractions)
874 actions['f'].extend(factions)
906
875
907 return newactions
876 return actions
908
877
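
When merge.preferancestor=* yields several ancestors, each ancestor produces a full action dict and the per-file bids are grouped by action type before the auction picks a winner. A simplified standalone sketch of the selection rules, using hypothetical bids (the real code also logs, warns, and handles repeated identical bids):

fbids = {
    'kept.txt':    {'k': [('kept.txt', None, 'keep')]},
    'fetched.txt': {'g': [('fetched.txt', ('',), 'remote created'),
                          ('fetched.txt', ('',), 'remote created')]},
    'odd.txt':     {'r': [('odd.txt', None, 'other deleted')],
                    'm': [('odd.txt', None, 'merge')]},
}
chosen = {}
for f, bids in sorted(fbids.items()):
    if len(bids) == 1:                                   # consensus on the type
        m, l = list(bids.items())[0]
        chosen[f] = (m, l[0])
    elif 'k' in bids:                                    # keeping is always safe
        chosen[f] = ('k', bids['k'][0])
    elif 'g' in bids and all(a == bids['g'][0] for a in bids['g']):
        chosen[f] = ('g', bids['g'][0])
    else:                                                # ambiguous: pick one
        m, l = sorted(bids.items())[0]
        chosen[f] = (m, l[0])
# kept.txt -> 'k', fetched.txt -> 'g', odd.txt -> arbitrary ('m' here)
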
909 def recordupdates(repo, actions, branchmerge):
878 def recordupdates(repo, actions, branchmerge):
910 "record merge actions to the dirstate"
879 "record merge actions to the dirstate"
911
880 if True:
912 for f, m, args, msg in actions:
913
914 # remove (must come first)
881 # remove (must come first)
915 if m == "r": # remove
882 for f, args, msg in actions['r']:
916 if branchmerge:
883 if branchmerge:
917 repo.dirstate.remove(f)
884 repo.dirstate.remove(f)
918 else:
885 else:
919 repo.dirstate.drop(f)
886 repo.dirstate.drop(f)
920
887
921 # forget (must come first)
888 # forget (must come first)
922 elif m == "f":
889 for f, args, msg in actions['f']:
923 repo.dirstate.drop(f)
890 repo.dirstate.drop(f)
924
891
925 # re-add
892 # re-add
926 elif m == "a":
893 for f, args, msg in actions['a']:
927 if not branchmerge:
894 if not branchmerge:
928 repo.dirstate.add(f)
895 repo.dirstate.add(f)
929
896
930 # exec change
897 # exec change
931 elif m == "e":
898 for f, args, msg in actions['e']:
932 repo.dirstate.normallookup(f)
899 repo.dirstate.normallookup(f)
933
900
934 # keep
901 # keep
935 elif m == "k":
902 for f, args, msg in actions['k']:
936 pass
903 pass
937
904
938 # get
905 # get
939 elif m == "g":
906 for f, args, msg in actions['g']:
940 if branchmerge:
907 if branchmerge:
941 repo.dirstate.otherparent(f)
908 repo.dirstate.otherparent(f)
942 else:
909 else:
943 repo.dirstate.normal(f)
910 repo.dirstate.normal(f)
944
911
945 # merge
912 # merge
946 elif m == "m":
913 for f, args, msg in actions['m']:
947 f1, f2, fa, move, anc = args
914 f1, f2, fa, move, anc = args
948 if branchmerge:
915 if branchmerge:
949 # We've done a branch merge, mark this file as merged
916 # We've done a branch merge, mark this file as merged
950 # so that we properly record the merger later
917 # so that we properly record the merger later
951 repo.dirstate.merge(f)
918 repo.dirstate.merge(f)
952 if f1 != f2: # copy/rename
919 if f1 != f2: # copy/rename
953 if move:
920 if move:
954 repo.dirstate.remove(f1)
921 repo.dirstate.remove(f1)
955 if f1 != f:
922 if f1 != f:
956 repo.dirstate.copy(f1, f)
923 repo.dirstate.copy(f1, f)
957 else:
924 else:
958 repo.dirstate.copy(f2, f)
925 repo.dirstate.copy(f2, f)
959 else:
926 else:
960 # We've update-merged a locally modified file, so
927 # We've update-merged a locally modified file, so
961 # we set the dirstate to emulate a normal checkout
928 # we set the dirstate to emulate a normal checkout
962 # of that file some time in the past. Thus our
929 # of that file some time in the past. Thus our
963 # merge will appear as a normal local file
930 # merge will appear as a normal local file
964 # modification.
931 # modification.
965 if f2 == f: # file not locally copied/moved
932 if f2 == f: # file not locally copied/moved
966 repo.dirstate.normallookup(f)
933 repo.dirstate.normallookup(f)
967 if move:
934 if move:
968 repo.dirstate.drop(f1)
935 repo.dirstate.drop(f1)
969
936
970 # directory rename, move local
937 # directory rename, move local
971 elif m == "dm":
938 for f, args, msg in actions['dm']:
972 f0, flag = args
939 f0, flag = args
973 if f0 not in repo.dirstate:
940 if f0 not in repo.dirstate:
974 # untracked file moved
941 # untracked file moved
975 continue
942 continue
976 if branchmerge:
943 if branchmerge:
977 repo.dirstate.add(f)
944 repo.dirstate.add(f)
978 repo.dirstate.remove(f0)
945 repo.dirstate.remove(f0)
979 repo.dirstate.copy(f0, f)
946 repo.dirstate.copy(f0, f)
980 else:
947 else:
981 repo.dirstate.normal(f)
948 repo.dirstate.normal(f)
982 repo.dirstate.drop(f0)
949 repo.dirstate.drop(f0)
983
950
984 # directory rename, get
951 # directory rename, get
985 elif m == "dg":
952 for f, args, msg in actions['dg']:
986 f0, flag = args
953 f0, flag = args
987 if branchmerge:
954 if branchmerge:
988 repo.dirstate.add(f)
955 repo.dirstate.add(f)
989 repo.dirstate.copy(f0, f)
956 repo.dirstate.copy(f0, f)
990 else:
957 else:
991 repo.dirstate.normal(f)
958 repo.dirstate.normal(f)
992
959
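
For reference, the dirstate effect of each action type handled by recordupdates above, summarized as a plain mapping (derived from the code; purely descriptive):

# action type -> (effect during a branch merge, effect during a plain update)
DIRSTATE_EFFECT = {
    'r':  ('remove(f)',                      'drop(f)'),
    'f':  ('drop(f)',                        'drop(f)'),
    'a':  ('no-op',                          'add(f)'),
    'e':  ('normallookup(f)',                'normallookup(f)'),
    'k':  ('no-op',                          'no-op'),
    'g':  ('otherparent(f)',                 'normal(f)'),
    'm':  ('merge(f) + copy bookkeeping',    'normallookup(f), drop(f1) on move'),
    'dm': ('add(f), remove(f0), copy(f0,f)', 'normal(f), drop(f0)'),
    'dg': ('add(f), copy(f0,f)',             'normal(f)'),
}
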
993 def update(repo, node, branchmerge, force, partial, ancestor=None,
960 def update(repo, node, branchmerge, force, partial, ancestor=None,
994 mergeancestor=False, labels=None):
961 mergeancestor=False, labels=None):
995 """
962 """
996 Perform a merge between the working directory and the given node
963 Perform a merge between the working directory and the given node
997
964
998 node = the node to update to, or None if unspecified
965 node = the node to update to, or None if unspecified
999 branchmerge = whether to merge between branches
966 branchmerge = whether to merge between branches
1000 force = whether to force branch merging or file overwriting
967 force = whether to force branch merging or file overwriting
1001 partial = a function to filter file lists (dirstate not updated)
968 partial = a function to filter file lists (dirstate not updated)
1002 mergeancestor = whether it is merging with an ancestor. If true,
969 mergeancestor = whether it is merging with an ancestor. If true,
1003 we should accept the incoming changes for any prompts that occur.
970 we should accept the incoming changes for any prompts that occur.
1004 If false, merging with an ancestor (fast-forward) is only allowed
971 If false, merging with an ancestor (fast-forward) is only allowed
1005 between different named branches. This flag is used by the rebase extension
972 between different named branches. This flag is used by the rebase extension
1006 as a temporary fix and should be avoided in general.
973 as a temporary fix and should be avoided in general.
1007
974
1008 The table below shows all the behaviors of the update command
975 The table below shows all the behaviors of the update command
1009 given the -c and -C or no options, whether the working directory
976 given the -c and -C or no options, whether the working directory
1010 is dirty, whether a revision is specified, and the relationship of
977 is dirty, whether a revision is specified, and the relationship of
1011 the parent rev to the target rev (linear, on the same named
978 the parent rev to the target rev (linear, on the same named
1012 branch, or on another named branch).
979 branch, or on another named branch).
1013
980
1014 This logic is tested by test-update-branches.t.
981 This logic is tested by test-update-branches.t.
1015
982
1016 -c -C dirty rev | linear same cross
983 -c -C dirty rev | linear same cross
1017 n n n n | ok (1) x
984 n n n n | ok (1) x
1018 n n n y | ok ok ok
985 n n n y | ok ok ok
1019 n n y n | merge (2) (2)
986 n n y n | merge (2) (2)
1020 n n y y | merge (3) (3)
987 n n y y | merge (3) (3)
1021 n y * * | --- discard ---
988 n y * * | --- discard ---
1022 y n y * | --- (4) ---
989 y n y * | --- (4) ---
1023 y n n * | --- ok ---
990 y n n * | --- ok ---
1024 y y * * | --- (5) ---
991 y y * * | --- (5) ---
1025
992
1026 x = can't happen
993 x = can't happen
1027 * = don't-care
994 * = don't-care
1028 1 = abort: not a linear update (merge or update --check to force update)
995 1 = abort: not a linear update (merge or update --check to force update)
1029 2 = abort: uncommitted changes (commit and merge, or update --clean to
996 2 = abort: uncommitted changes (commit and merge, or update --clean to
1030 discard changes)
997 discard changes)
1031 3 = abort: uncommitted changes (commit or update --clean to discard changes)
998 3 = abort: uncommitted changes (commit or update --clean to discard changes)
1032 4 = abort: uncommitted changes (checked in commands.py)
999 4 = abort: uncommitted changes (checked in commands.py)
1033 5 = incompatible options (checked in commands.py)
1000 5 = incompatible options (checked in commands.py)
1034
1001
1035 Return the same tuple as applyupdates().
1002 Return the same tuple as applyupdates().
1036 """
1003 """
1037
1004
1038 onode = node
1005 onode = node
1039 wlock = repo.wlock()
1006 wlock = repo.wlock()
1040 try:
1007 try:
1041 wc = repo[None]
1008 wc = repo[None]
1042 pl = wc.parents()
1009 pl = wc.parents()
1043 p1 = pl[0]
1010 p1 = pl[0]
1044 pas = [None]
1011 pas = [None]
1045 if ancestor:
1012 if ancestor:
1046 pas = [repo[ancestor]]
1013 pas = [repo[ancestor]]
1047
1014
1048 if node is None:
1015 if node is None:
1049 # Here is where we should consider bookmarks, divergent bookmarks,
1016 # Here is where we should consider bookmarks, divergent bookmarks,
1050 # foreground changesets (successors), and tip of current branch;
1017 # foreground changesets (successors), and tip of current branch;
1051 # but currently we are only checking the branch tips.
1018 # but currently we are only checking the branch tips.
1052 try:
1019 try:
1053 node = repo.branchtip(wc.branch())
1020 node = repo.branchtip(wc.branch())
1054 except error.RepoLookupError:
1021 except error.RepoLookupError:
1055 if wc.branch() == "default": # no default branch!
1022 if wc.branch() == "default": # no default branch!
1056 node = repo.lookup("tip") # update to tip
1023 node = repo.lookup("tip") # update to tip
1057 else:
1024 else:
1058 raise util.Abort(_("branch %s not found") % wc.branch())
1025 raise util.Abort(_("branch %s not found") % wc.branch())
1059
1026
1060 if p1.obsolete() and not p1.children():
1027 if p1.obsolete() and not p1.children():
1061 # allow updating to successors
1028 # allow updating to successors
1062 successors = obsolete.successorssets(repo, p1.node())
1029 successors = obsolete.successorssets(repo, p1.node())
1063
1030
1064 # behavior of certain cases is as follows,
1031 # behavior of certain cases is as follows,
1065 #
1032 #
1066 # divergent changesets: update to highest rev, similar to what
1033 # divergent changesets: update to highest rev, similar to what
1067 # is currently done when there is more than one head
1034 # is currently done when there is more than one head
1068 # (i.e. 'tip')
1035 # (i.e. 'tip')
1069 #
1036 #
1070 # replaced changesets: same as divergent except we know there
1037 # replaced changesets: same as divergent except we know there
1071 # is no conflict
1038 # is no conflict
1072 #
1039 #
1073 # pruned changeset: no update is done; though, we could
1040 # pruned changeset: no update is done; though, we could
1074 # consider updating to the first non-obsolete parent,
1041 # consider updating to the first non-obsolete parent,
1075 # similar to what is currently done for 'hg prune'
1042 # similar to what is currently done for 'hg prune'
1076
1043
1077 if successors:
1044 if successors:
1078 # flatten the list here handles both divergent (len > 1)
1045 # flatten the list here handles both divergent (len > 1)
1079 # and the usual case (len = 1)
1046 # and the usual case (len = 1)
1080 successors = [n for sub in successors for n in sub]
1047 successors = [n for sub in successors for n in sub]
1081
1048
1082 # get the max revision for the given successors set,
1049 # get the max revision for the given successors set,
1083 # i.e. the 'tip' of a set
1050 # i.e. the 'tip' of a set
1084 node = repo.revs("max(%ln)", successors)[0]
1051 node = repo.revs("max(%ln)", successors)[0]
1085 pas = [p1]
1052 pas = [p1]
1086
1053
1087 overwrite = force and not branchmerge
1054 overwrite = force and not branchmerge
1088
1055
1089 p2 = repo[node]
1056 p2 = repo[node]
1090 if pas[0] is None:
1057 if pas[0] is None:
1091 if repo.ui.config("merge", "preferancestor") == '*':
1058 if repo.ui.config("merge", "preferancestor") == '*':
1092 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1059 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1093 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1060 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1094 else:
1061 else:
1095 pas = [p1.ancestor(p2, warn=True)]
1062 pas = [p1.ancestor(p2, warn=True)]
1096
1063
1097 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1064 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1098
1065
1099 ### check phase
1066 ### check phase
1100 if not overwrite and len(pl) > 1:
1067 if not overwrite and len(pl) > 1:
1101 raise util.Abort(_("outstanding uncommitted merges"))
1068 raise util.Abort(_("outstanding uncommitted merges"))
1102 if branchmerge:
1069 if branchmerge:
1103 if pas == [p2]:
1070 if pas == [p2]:
1104 raise util.Abort(_("merging with a working directory ancestor"
1071 raise util.Abort(_("merging with a working directory ancestor"
1105 " has no effect"))
1072 " has no effect"))
1106 elif pas == [p1]:
1073 elif pas == [p1]:
1107 if not mergeancestor and p1.branch() == p2.branch():
1074 if not mergeancestor and p1.branch() == p2.branch():
1108 raise util.Abort(_("nothing to merge"),
1075 raise util.Abort(_("nothing to merge"),
1109 hint=_("use 'hg update' "
1076 hint=_("use 'hg update' "
1110 "or check 'hg heads'"))
1077 "or check 'hg heads'"))
1111 if not force and (wc.files() or wc.deleted()):
1078 if not force and (wc.files() or wc.deleted()):
1112 raise util.Abort(_("uncommitted changes"),
1079 raise util.Abort(_("uncommitted changes"),
1113 hint=_("use 'hg status' to list changes"))
1080 hint=_("use 'hg status' to list changes"))
1114 for s in sorted(wc.substate):
1081 for s in sorted(wc.substate):
1115 if wc.sub(s).dirty():
1082 if wc.sub(s).dirty():
1116 raise util.Abort(_("uncommitted changes in "
1083 raise util.Abort(_("uncommitted changes in "
1117 "subrepository '%s'") % s)
1084 "subrepository '%s'") % s)
1118
1085
1119 elif not overwrite:
1086 elif not overwrite:
1120 if p1 == p2: # no-op update
1087 if p1 == p2: # no-op update
1121 # call the hooks and exit early
1088 # call the hooks and exit early
1122 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1089 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1123 repo.hook('update', parent1=xp2, parent2='', error=0)
1090 repo.hook('update', parent1=xp2, parent2='', error=0)
1124 return 0, 0, 0, 0
1091 return 0, 0, 0, 0
1125
1092
1126 if pas not in ([p1], [p2]): # nonlinear
1093 if pas not in ([p1], [p2]): # nonlinear
1127 dirty = wc.dirty(missing=True)
1094 dirty = wc.dirty(missing=True)
1128 if dirty or onode is None:
1095 if dirty or onode is None:
1129 # The branching here is a bit strange, to keep the number of
1096 # The branching here is a bit strange, to keep the number of
1130 # calls to obsolete.background to a minimum.
1097 # calls to obsolete.background to a minimum.
1131 foreground = obsolete.foreground(repo, [p1.node()])
1098 foreground = obsolete.foreground(repo, [p1.node()])
1132 # note: the <node> variable contains a random identifier
1099 # note: the <node> variable contains a random identifier
1133 if repo[node].node() in foreground:
1100 if repo[node].node() in foreground:
1134 pas = [p1] # allow updating to successors
1101 pas = [p1] # allow updating to successors
1135 elif dirty:
1102 elif dirty:
1136 msg = _("uncommitted changes")
1103 msg = _("uncommitted changes")
1137 if onode is None:
1104 if onode is None:
1138 hint = _("commit and merge, or update --clean to"
1105 hint = _("commit and merge, or update --clean to"
1139 " discard changes")
1106 " discard changes")
1140 else:
1107 else:
1141 hint = _("commit or update --clean to discard"
1108 hint = _("commit or update --clean to discard"
1142 " changes")
1109 " changes")
1143 raise util.Abort(msg, hint=hint)
1110 raise util.Abort(msg, hint=hint)
1144 else: # node is none
1111 else: # node is none
1145 msg = _("not a linear update")
1112 msg = _("not a linear update")
1146 hint = _("merge or update --check to force update")
1113 hint = _("merge or update --check to force update")
1147 raise util.Abort(msg, hint=hint)
1114 raise util.Abort(msg, hint=hint)
1148 else:
1115 else:
1149 # Allow jumping branches if clean and specific rev given
1116 # Allow jumping branches if clean and specific rev given
1150 pas = [p1]
1117 pas = [p1]
1151
1118
1152 followcopies = False
1119 followcopies = False
1153 if overwrite:
1120 if overwrite:
1154 pas = [wc]
1121 pas = [wc]
1155 elif pas == [p2]: # backwards
1122 elif pas == [p2]: # backwards
1156 pas = [wc.p1()]
1123 pas = [wc.p1()]
1157 elif not branchmerge and not wc.dirty(missing=True):
1124 elif not branchmerge and not wc.dirty(missing=True):
1158 pass
1125 pass
1159 elif pas[0] and repo.ui.configbool("merge", "followcopies", True):
1126 elif pas[0] and repo.ui.configbool("merge", "followcopies", True):
1160 followcopies = True
1127 followcopies = True
1161
1128
1162 ### calculate phase
1129 ### calculate phase
1163 actions = calculateupdates(repo, wc, p2, pas, branchmerge, force,
1130 actions = calculateupdates(repo, wc, p2, pas, branchmerge, force,
1164 partial, mergeancestor, followcopies)
1131 partial, mergeancestor, followcopies)
1165
1132
1166 ### apply phase
1133 ### apply phase
1167 if not branchmerge: # just jump to the new rev
1134 if not branchmerge: # just jump to the new rev
1168 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1135 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1169 if not partial:
1136 if not partial:
1170 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1137 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1171 # note that we're in the middle of an update
1138 # note that we're in the middle of an update
1172 repo.vfs.write('updatestate', p2.hex())
1139 repo.vfs.write('updatestate', p2.hex())
1173
1140
1174 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1141 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1175
1142
1176 if not partial:
1143 if not partial:
1177 repo.setparents(fp1, fp2)
1144 repo.setparents(fp1, fp2)
1178 recordupdates(repo, actions, branchmerge)
1145 recordupdates(repo, actions, branchmerge)
1179 # update completed, clear state
1146 # update completed, clear state
1180 util.unlink(repo.join('updatestate'))
1147 util.unlink(repo.join('updatestate'))
1181
1148
1182 if not branchmerge:
1149 if not branchmerge:
1183 repo.dirstate.setbranch(p2.branch())
1150 repo.dirstate.setbranch(p2.branch())
1184 finally:
1151 finally:
1185 wlock.release()
1152 wlock.release()
1186
1153
1187 if not partial:
1154 if not partial:
1188 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
1155 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
1189 return stats
1156 return stats
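
The signature and the returned tuple are the only surface a caller needs here. As a hedged sketch (not part of the change, and assuming the current directory is a repository; error handling omitted), an extension might drive a plain, non-merging update with the parameters documented above like this:

from mercurial import hg, merge, ui as uimod

repo = hg.repository(uimod.ui(), '.')
node = repo.lookup('tip')
stats = merge.update(repo, node, branchmerge=False, force=False, partial=None)
updated, merged, removed, unresolved = stats   # same tuple applyupdates returns
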