merge: pass merge ancestor to calculateupdates as a list...
Mads Kiilerich
r21081:ffd7b6ce default
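
The change below updates the largefiles override to match merge.calculateupdates, which now receives its merge ancestors as a list ("pas") instead of a single ancestor ("pa"). A minimal sketch of the new call shape, assuming a single common ancestor; apart from the parameter names visible in the hunk, the variable names are illustrative only:

    # p1 is the working-directory parent, p2 the changeset being merged in
    pa = p1.ancestor(p2)                    # one common ancestor changectx
    actions = merge.calculateupdates(repo, p1, p2, [pa],   # ancestors now passed as a list
                                     branchmerge, force, partial,
                                     acceptremote, followcopies)
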
@@ -1,1164 +1,1164 @@
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10
10
11 import os
11 import os
12 import copy
12 import copy
13
13
14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
15 archival, merge, pathutil, revset
15 archival, merge, pathutil, revset
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17 from mercurial.node import hex
17 from mercurial.node import hex
18 from hgext import rebase
18 from hgext import rebase
19
19
20 import lfutil
20 import lfutil
21 import lfcommands
21 import lfcommands
22 import basestore
22 import basestore
23
23
24 # -- Utility functions: commonly/repeatedly needed functionality ---------------
24 # -- Utility functions: commonly/repeatedly needed functionality ---------------
25
25
26 def installnormalfilesmatchfn(manifest):
26 def installnormalfilesmatchfn(manifest):
27 '''overrides scmutil.match so that the matcher it returns will ignore all
27 '''overrides scmutil.match so that the matcher it returns will ignore all
28 largefiles'''
28 largefiles'''
29 oldmatch = None # for the closure
29 oldmatch = None # for the closure
30 def overridematch(ctx, pats=[], opts={}, globbed=False,
30 def overridematch(ctx, pats=[], opts={}, globbed=False,
31 default='relpath'):
31 default='relpath'):
32 match = oldmatch(ctx, pats, opts, globbed, default)
32 match = oldmatch(ctx, pats, opts, globbed, default)
33 m = copy.copy(match)
33 m = copy.copy(match)
34 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
34 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
35 manifest)
35 manifest)
36 m._files = filter(notlfile, m._files)
36 m._files = filter(notlfile, m._files)
37 m._fmap = set(m._files)
37 m._fmap = set(m._files)
38 m._always = False
38 m._always = False
39 origmatchfn = m.matchfn
39 origmatchfn = m.matchfn
40 m.matchfn = lambda f: notlfile(f) and origmatchfn(f) or None
40 m.matchfn = lambda f: notlfile(f) and origmatchfn(f) or None
41 return m
41 return m
42 oldmatch = installmatchfn(overridematch)
42 oldmatch = installmatchfn(overridematch)
43
43
44 def installmatchfn(f):
44 def installmatchfn(f):
45 oldmatch = scmutil.match
45 oldmatch = scmutil.match
46 setattr(f, 'oldmatch', oldmatch)
46 setattr(f, 'oldmatch', oldmatch)
47 scmutil.match = f
47 scmutil.match = f
48 return oldmatch
48 return oldmatch
49
49
50 def restorematchfn():
50 def restorematchfn():
51 '''restores scmutil.match to what it was before installnormalfilesmatchfn
51 '''restores scmutil.match to what it was before installnormalfilesmatchfn
52 was called. no-op if scmutil.match is its original function.
52 was called. no-op if scmutil.match is its original function.
53
53
54 Note that n calls to installnormalfilesmatchfn will require n calls to
54 Note that n calls to installnormalfilesmatchfn will require n calls to
55 restore matchfn to reverse'''
55 restore matchfn to reverse'''
56 scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
56 scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
57
57
58 def addlargefiles(ui, repo, *pats, **opts):
58 def addlargefiles(ui, repo, *pats, **opts):
59 large = opts.pop('large', None)
59 large = opts.pop('large', None)
60 lfsize = lfutil.getminsize(
60 lfsize = lfutil.getminsize(
61 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
61 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
62
62
63 lfmatcher = None
63 lfmatcher = None
64 if lfutil.islfilesrepo(repo):
64 if lfutil.islfilesrepo(repo):
65 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
65 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
66 if lfpats:
66 if lfpats:
67 lfmatcher = match_.match(repo.root, '', list(lfpats))
67 lfmatcher = match_.match(repo.root, '', list(lfpats))
68
68
69 lfnames = []
69 lfnames = []
70 m = scmutil.match(repo[None], pats, opts)
70 m = scmutil.match(repo[None], pats, opts)
71 m.bad = lambda x, y: None
71 m.bad = lambda x, y: None
72 wctx = repo[None]
72 wctx = repo[None]
73 for f in repo.walk(m):
73 for f in repo.walk(m):
74 exact = m.exact(f)
74 exact = m.exact(f)
75 lfile = lfutil.standin(f) in wctx
75 lfile = lfutil.standin(f) in wctx
76 nfile = f in wctx
76 nfile = f in wctx
77 exists = lfile or nfile
77 exists = lfile or nfile
78
78
79 # Don't warn the user when they attempt to add a normal tracked file.
79 # Don't warn the user when they attempt to add a normal tracked file.
80 # The normal add code will do that for us.
80 # The normal add code will do that for us.
81 if exact and exists:
81 if exact and exists:
82 if lfile:
82 if lfile:
83 ui.warn(_('%s already a largefile\n') % f)
83 ui.warn(_('%s already a largefile\n') % f)
84 continue
84 continue
85
85
86 if (exact or not exists) and not lfutil.isstandin(f):
86 if (exact or not exists) and not lfutil.isstandin(f):
87 wfile = repo.wjoin(f)
87 wfile = repo.wjoin(f)
88
88
89 # In case the file was removed previously, but not committed
89 # In case the file was removed previously, but not committed
90 # (issue3507)
90 # (issue3507)
91 if not os.path.exists(wfile):
91 if not os.path.exists(wfile):
92 continue
92 continue
93
93
94 abovemin = (lfsize and
94 abovemin = (lfsize and
95 os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
95 os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
96 if large or abovemin or (lfmatcher and lfmatcher(f)):
96 if large or abovemin or (lfmatcher and lfmatcher(f)):
97 lfnames.append(f)
97 lfnames.append(f)
98 if ui.verbose or not exact:
98 if ui.verbose or not exact:
99 ui.status(_('adding %s as a largefile\n') % m.rel(f))
99 ui.status(_('adding %s as a largefile\n') % m.rel(f))
100
100
101 bad = []
101 bad = []
102 standins = []
102 standins = []
103
103
104 # Need to lock, otherwise there could be a race condition between
104 # Need to lock, otherwise there could be a race condition between
105 # when standins are created and added to the repo.
105 # when standins are created and added to the repo.
106 wlock = repo.wlock()
106 wlock = repo.wlock()
107 try:
107 try:
108 if not opts.get('dry_run'):
108 if not opts.get('dry_run'):
109 lfdirstate = lfutil.openlfdirstate(ui, repo)
109 lfdirstate = lfutil.openlfdirstate(ui, repo)
110 for f in lfnames:
110 for f in lfnames:
111 standinname = lfutil.standin(f)
111 standinname = lfutil.standin(f)
112 lfutil.writestandin(repo, standinname, hash='',
112 lfutil.writestandin(repo, standinname, hash='',
113 executable=lfutil.getexecutable(repo.wjoin(f)))
113 executable=lfutil.getexecutable(repo.wjoin(f)))
114 standins.append(standinname)
114 standins.append(standinname)
115 if lfdirstate[f] == 'r':
115 if lfdirstate[f] == 'r':
116 lfdirstate.normallookup(f)
116 lfdirstate.normallookup(f)
117 else:
117 else:
118 lfdirstate.add(f)
118 lfdirstate.add(f)
119 lfdirstate.write()
119 lfdirstate.write()
120 bad += [lfutil.splitstandin(f)
120 bad += [lfutil.splitstandin(f)
121 for f in repo[None].add(standins)
121 for f in repo[None].add(standins)
122 if f in m.files()]
122 if f in m.files()]
123 finally:
123 finally:
124 wlock.release()
124 wlock.release()
125 return bad
125 return bad
126
126
127 def removelargefiles(ui, repo, *pats, **opts):
127 def removelargefiles(ui, repo, *pats, **opts):
128 after = opts.get('after')
128 after = opts.get('after')
129 if not pats and not after:
129 if not pats and not after:
130 raise util.Abort(_('no files specified'))
130 raise util.Abort(_('no files specified'))
131 m = scmutil.match(repo[None], pats, opts)
131 m = scmutil.match(repo[None], pats, opts)
132 try:
132 try:
133 repo.lfstatus = True
133 repo.lfstatus = True
134 s = repo.status(match=m, clean=True)
134 s = repo.status(match=m, clean=True)
135 finally:
135 finally:
136 repo.lfstatus = False
136 repo.lfstatus = False
137 manifest = repo[None].manifest()
137 manifest = repo[None].manifest()
138 modified, added, deleted, clean = [[f for f in list
138 modified, added, deleted, clean = [[f for f in list
139 if lfutil.standin(f) in manifest]
139 if lfutil.standin(f) in manifest]
140 for list in [s[0], s[1], s[3], s[6]]]
140 for list in [s[0], s[1], s[3], s[6]]]
141
141
142 def warn(files, msg):
142 def warn(files, msg):
143 for f in files:
143 for f in files:
144 ui.warn(msg % m.rel(f))
144 ui.warn(msg % m.rel(f))
145 return int(len(files) > 0)
145 return int(len(files) > 0)
146
146
147 result = 0
147 result = 0
148
148
149 if after:
149 if after:
150 remove, forget = deleted, []
150 remove, forget = deleted, []
151 result = warn(modified + added + clean,
151 result = warn(modified + added + clean,
152 _('not removing %s: file still exists\n'))
152 _('not removing %s: file still exists\n'))
153 else:
153 else:
154 remove, forget = deleted + clean, []
154 remove, forget = deleted + clean, []
155 result = warn(modified, _('not removing %s: file is modified (use -f'
155 result = warn(modified, _('not removing %s: file is modified (use -f'
156 ' to force removal)\n'))
156 ' to force removal)\n'))
157 result = warn(added, _('not removing %s: file has been marked for add'
157 result = warn(added, _('not removing %s: file has been marked for add'
158 ' (use forget to undo)\n')) or result
158 ' (use forget to undo)\n')) or result
159
159
160 for f in sorted(remove + forget):
160 for f in sorted(remove + forget):
161 if ui.verbose or not m.exact(f):
161 if ui.verbose or not m.exact(f):
162 ui.status(_('removing %s\n') % m.rel(f))
162 ui.status(_('removing %s\n') % m.rel(f))
163
163
164 # Need to lock because standin files are deleted then removed from the
164 # Need to lock because standin files are deleted then removed from the
165 # repository and we could race in-between.
165 # repository and we could race in-between.
166 wlock = repo.wlock()
166 wlock = repo.wlock()
167 try:
167 try:
168 lfdirstate = lfutil.openlfdirstate(ui, repo)
168 lfdirstate = lfutil.openlfdirstate(ui, repo)
169 for f in remove:
169 for f in remove:
170 if not after:
170 if not after:
171 # If this is being called by addremove, notify the user that we
171 # If this is being called by addremove, notify the user that we
172 # are removing the file.
172 # are removing the file.
173 if getattr(repo, "_isaddremove", False):
173 if getattr(repo, "_isaddremove", False):
174 ui.status(_('removing %s\n') % f)
174 ui.status(_('removing %s\n') % f)
175 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
175 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
176 lfdirstate.remove(f)
176 lfdirstate.remove(f)
177 lfdirstate.write()
177 lfdirstate.write()
178 forget = [lfutil.standin(f) for f in forget]
178 forget = [lfutil.standin(f) for f in forget]
179 remove = [lfutil.standin(f) for f in remove]
179 remove = [lfutil.standin(f) for f in remove]
180 repo[None].forget(forget)
180 repo[None].forget(forget)
181 # If this is being called by addremove, let the original addremove
181 # If this is being called by addremove, let the original addremove
182 # function handle this.
182 # function handle this.
183 if not getattr(repo, "_isaddremove", False):
183 if not getattr(repo, "_isaddremove", False):
184 for f in remove:
184 for f in remove:
185 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
185 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
186 repo[None].forget(remove)
186 repo[None].forget(remove)
187 finally:
187 finally:
188 wlock.release()
188 wlock.release()
189
189
190 return result
190 return result
191
191
192 # For overriding mercurial.hgweb.webcommands so that largefiles will
192 # For overriding mercurial.hgweb.webcommands so that largefiles will
193 # appear at their right place in the manifests.
193 # appear at their right place in the manifests.
194 def decodepath(orig, path):
194 def decodepath(orig, path):
195 return lfutil.splitstandin(path) or path
195 return lfutil.splitstandin(path) or path
196
196
197 # -- Wrappers: modify existing commands --------------------------------
197 # -- Wrappers: modify existing commands --------------------------------
198
198
199 # Add works by going through the files that the user wanted to add and
199 # Add works by going through the files that the user wanted to add and
200 # checking if they should be added as largefiles. Then it makes a new
200 # checking if they should be added as largefiles. Then it makes a new
201 # matcher which matches only the normal files and runs the original
201 # matcher which matches only the normal files and runs the original
202 # version of add.
202 # version of add.
203 def overrideadd(orig, ui, repo, *pats, **opts):
203 def overrideadd(orig, ui, repo, *pats, **opts):
204 normal = opts.pop('normal')
204 normal = opts.pop('normal')
205 if normal:
205 if normal:
206 if opts.get('large'):
206 if opts.get('large'):
207 raise util.Abort(_('--normal cannot be used with --large'))
207 raise util.Abort(_('--normal cannot be used with --large'))
208 return orig(ui, repo, *pats, **opts)
208 return orig(ui, repo, *pats, **opts)
209 bad = addlargefiles(ui, repo, *pats, **opts)
209 bad = addlargefiles(ui, repo, *pats, **opts)
210 installnormalfilesmatchfn(repo[None].manifest())
210 installnormalfilesmatchfn(repo[None].manifest())
211 result = orig(ui, repo, *pats, **opts)
211 result = orig(ui, repo, *pats, **opts)
212 restorematchfn()
212 restorematchfn()
213
213
214 return (result == 1 or bad) and 1 or 0
214 return (result == 1 or bad) and 1 or 0
215
215
216 def overrideremove(orig, ui, repo, *pats, **opts):
216 def overrideremove(orig, ui, repo, *pats, **opts):
217 installnormalfilesmatchfn(repo[None].manifest())
217 installnormalfilesmatchfn(repo[None].manifest())
218 result = orig(ui, repo, *pats, **opts)
218 result = orig(ui, repo, *pats, **opts)
219 restorematchfn()
219 restorematchfn()
220 return removelargefiles(ui, repo, *pats, **opts) or result
220 return removelargefiles(ui, repo, *pats, **opts) or result
221
221
222 def overridestatusfn(orig, repo, rev2, **opts):
222 def overridestatusfn(orig, repo, rev2, **opts):
223 try:
223 try:
224 repo._repo.lfstatus = True
224 repo._repo.lfstatus = True
225 return orig(repo, rev2, **opts)
225 return orig(repo, rev2, **opts)
226 finally:
226 finally:
227 repo._repo.lfstatus = False
227 repo._repo.lfstatus = False
228
228
229 def overridestatus(orig, ui, repo, *pats, **opts):
229 def overridestatus(orig, ui, repo, *pats, **opts):
230 try:
230 try:
231 repo.lfstatus = True
231 repo.lfstatus = True
232 return orig(ui, repo, *pats, **opts)
232 return orig(ui, repo, *pats, **opts)
233 finally:
233 finally:
234 repo.lfstatus = False
234 repo.lfstatus = False
235
235
236 def overridedirty(orig, repo, ignoreupdate=False):
236 def overridedirty(orig, repo, ignoreupdate=False):
237 try:
237 try:
238 repo._repo.lfstatus = True
238 repo._repo.lfstatus = True
239 return orig(repo, ignoreupdate)
239 return orig(repo, ignoreupdate)
240 finally:
240 finally:
241 repo._repo.lfstatus = False
241 repo._repo.lfstatus = False
242
242
243 def overridelog(orig, ui, repo, *pats, **opts):
243 def overridelog(orig, ui, repo, *pats, **opts):
244 def overridematch(ctx, pats=[], opts={}, globbed=False,
244 def overridematch(ctx, pats=[], opts={}, globbed=False,
245 default='relpath'):
245 default='relpath'):
246 """Matcher that merges root directory with .hglf, suitable for log.
246 """Matcher that merges root directory with .hglf, suitable for log.
247 It is still possible to match .hglf directly.
247 It is still possible to match .hglf directly.
248 For any listed files run log on the standin too.
248 For any listed files run log on the standin too.
249 matchfn tries both the given filename and with .hglf stripped.
249 matchfn tries both the given filename and with .hglf stripped.
250 """
250 """
251 match = oldmatch(ctx, pats, opts, globbed, default)
251 match = oldmatch(ctx, pats, opts, globbed, default)
252 m = copy.copy(match)
252 m = copy.copy(match)
253 for i in range(0, len(m._files)):
253 for i in range(0, len(m._files)):
254 standin = lfutil.standin(m._files[i])
254 standin = lfutil.standin(m._files[i])
255 if standin in repo[ctx.node()]:
255 if standin in repo[ctx.node()]:
256 m._files[i] = standin
256 m._files[i] = standin
257 m._fmap = set(m._files)
257 m._fmap = set(m._files)
258 m._always = False
258 m._always = False
259 origmatchfn = m.matchfn
259 origmatchfn = m.matchfn
260 def lfmatchfn(f):
260 def lfmatchfn(f):
261 lf = lfutil.splitstandin(f)
261 lf = lfutil.splitstandin(f)
262 if lf is not None and origmatchfn(lf):
262 if lf is not None and origmatchfn(lf):
263 return True
263 return True
264 r = origmatchfn(f)
264 r = origmatchfn(f)
265 return r
265 return r
266 m.matchfn = lfmatchfn
266 m.matchfn = lfmatchfn
267 return m
267 return m
268 oldmatch = installmatchfn(overridematch)
268 oldmatch = installmatchfn(overridematch)
269 try:
269 try:
270 repo.lfstatus = True
270 repo.lfstatus = True
271 return orig(ui, repo, *pats, **opts)
271 return orig(ui, repo, *pats, **opts)
272 finally:
272 finally:
273 repo.lfstatus = False
273 repo.lfstatus = False
274 restorematchfn()
274 restorematchfn()
275
275
276 def overrideverify(orig, ui, repo, *pats, **opts):
276 def overrideverify(orig, ui, repo, *pats, **opts):
277 large = opts.pop('large', False)
277 large = opts.pop('large', False)
278 all = opts.pop('lfa', False)
278 all = opts.pop('lfa', False)
279 contents = opts.pop('lfc', False)
279 contents = opts.pop('lfc', False)
280
280
281 result = orig(ui, repo, *pats, **opts)
281 result = orig(ui, repo, *pats, **opts)
282 if large or all or contents:
282 if large or all or contents:
283 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
283 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
284 return result
284 return result
285
285
286 def overridedebugstate(orig, ui, repo, *pats, **opts):
286 def overridedebugstate(orig, ui, repo, *pats, **opts):
287 large = opts.pop('large', False)
287 large = opts.pop('large', False)
288 if large:
288 if large:
289 lfcommands.debugdirstate(ui, repo)
289 lfcommands.debugdirstate(ui, repo)
290 else:
290 else:
291 orig(ui, repo, *pats, **opts)
291 orig(ui, repo, *pats, **opts)
292
292
293 # Override needs to refresh standins so that update's normal merge
293 # Override needs to refresh standins so that update's normal merge
294 # will go through properly. Then the other update hook (overriding repo.update)
294 # will go through properly. Then the other update hook (overriding repo.update)
295 # will get the new files. Filemerge is also overridden so that the merge
295 # will get the new files. Filemerge is also overridden so that the merge
296 # will merge standins correctly.
296 # will merge standins correctly.
297 def overrideupdate(orig, ui, repo, *pats, **opts):
297 def overrideupdate(orig, ui, repo, *pats, **opts):
298 lfdirstate = lfutil.openlfdirstate(ui, repo)
298 lfdirstate = lfutil.openlfdirstate(ui, repo)
299 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
299 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
300 False, False)
300 False, False)
301 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
301 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
302
302
303 # Need to lock between the standins getting updated and their
303 # Need to lock between the standins getting updated and their
304 # largefiles getting updated
304 # largefiles getting updated
305 wlock = repo.wlock()
305 wlock = repo.wlock()
306 try:
306 try:
307 if opts['check']:
307 if opts['check']:
308 mod = len(modified) > 0
308 mod = len(modified) > 0
309 for lfile in unsure:
309 for lfile in unsure:
310 standin = lfutil.standin(lfile)
310 standin = lfutil.standin(lfile)
311 if repo['.'][standin].data().strip() != \
311 if repo['.'][standin].data().strip() != \
312 lfutil.hashfile(repo.wjoin(lfile)):
312 lfutil.hashfile(repo.wjoin(lfile)):
313 mod = True
313 mod = True
314 else:
314 else:
315 lfdirstate.normal(lfile)
315 lfdirstate.normal(lfile)
316 lfdirstate.write()
316 lfdirstate.write()
317 if mod:
317 if mod:
318 raise util.Abort(_('uncommitted changes'))
318 raise util.Abort(_('uncommitted changes'))
319 # XXX handle removed differently
319 # XXX handle removed differently
320 if not opts['clean']:
320 if not opts['clean']:
321 for lfile in unsure + modified + added:
321 for lfile in unsure + modified + added:
322 lfutil.updatestandin(repo, lfutil.standin(lfile))
322 lfutil.updatestandin(repo, lfutil.standin(lfile))
323 finally:
323 finally:
324 wlock.release()
324 wlock.release()
325 return orig(ui, repo, *pats, **opts)
325 return orig(ui, repo, *pats, **opts)
326
326
327 # Before starting the manifest merge, merge.updates will call
327 # Before starting the manifest merge, merge.updates will call
328 # _checkunknown to check if there are any files in the merged-in
328 # _checkunknown to check if there are any files in the merged-in
329 # changeset that collide with unknown files in the working copy.
329 # changeset that collide with unknown files in the working copy.
330 #
330 #
331 # The largefiles are seen as unknown, so this prevents us from merging
331 # The largefiles are seen as unknown, so this prevents us from merging
332 # in a file 'foo' if we already have a largefile with the same name.
332 # in a file 'foo' if we already have a largefile with the same name.
333 #
333 #
334 # The overridden function filters the unknown files by removing any
334 # The overridden function filters the unknown files by removing any
335 # largefiles. This makes the merge proceed and we can then handle this
335 # largefiles. This makes the merge proceed and we can then handle this
336 # case further in the overridden manifestmerge function below.
336 # case further in the overridden manifestmerge function below.
337 def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
337 def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
338 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
338 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
339 return False
339 return False
340 return origfn(repo, wctx, mctx, f)
340 return origfn(repo, wctx, mctx, f)
341
341
342 # The manifest merge handles conflicts on the manifest level. We want
342 # The manifest merge handles conflicts on the manifest level. We want
343 # to handle changes in largefile-ness of files at this level too.
343 # to handle changes in largefile-ness of files at this level too.
344 #
344 #
345 # The strategy is to run the original manifestmerge and then process
345 # The strategy is to run the original manifestmerge and then process
346 # the action list it outputs. There are two cases we need to deal with:
346 # the action list it outputs. There are two cases we need to deal with:
347 #
347 #
348 # 1. Normal file in p1, largefile in p2. Here the largefile is
348 # 1. Normal file in p1, largefile in p2. Here the largefile is
349 # detected via its standin file, which will enter the working copy
349 # detected via its standin file, which will enter the working copy
350 # with a "get" action. It is not "merge" since the standin is all
350 # with a "get" action. It is not "merge" since the standin is all
351 # Mercurial is concerned with at this level -- the link to the
351 # Mercurial is concerned with at this level -- the link to the
352 # existing normal file is not relevant here.
352 # existing normal file is not relevant here.
353 #
353 #
354 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
354 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
355 # since the largefile will be present in the working copy and
355 # since the largefile will be present in the working copy and
356 # different from the normal file in p2. Mercurial therefore
356 # different from the normal file in p2. Mercurial therefore
357 # triggers a merge action.
357 # triggers a merge action.
358 #
358 #
359 # In both cases, we prompt the user and emit new actions to either
359 # In both cases, we prompt the user and emit new actions to either
360 # remove the standin (if the normal file was kept) or to remove the
360 # remove the standin (if the normal file was kept) or to remove the
361 # normal file and get the standin (if the largefile was kept). The
361 # normal file and get the standin (if the largefile was kept). The
362 # default prompt answer is to use the largefile version since it was
362 # default prompt answer is to use the largefile version since it was
363 # presumably changed on purpose.
363 # presumably changed on purpose.
364 #
364 #
365 # Finally, the merge.applyupdates function will then take care of
365 # Finally, the merge.applyupdates function will then take care of
366 # writing the files into the working copy and lfcommands.updatelfiles
366 # writing the files into the working copy and lfcommands.updatelfiles
367 # will update the largefiles.
367 # will update the largefiles.
368 -def overridecalculateupdates(origfn, repo, p1, p2, pa, branchmerge, force,
368 +def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
369 partial, acceptremote, followcopies):
369 partial, acceptremote, followcopies):
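# 'pas' is the merge ancestor(s), received as a list of changectxs per the change described above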
370 overwrite = force and not branchmerge
370 overwrite = force and not branchmerge
371 -actions = origfn(repo, p1, p2, pa, branchmerge, force, partial,
371 +actions = origfn(repo, p1, p2, pas, branchmerge, force, partial,
372 acceptremote, followcopies)
372 acceptremote, followcopies)
373
373
374 if overwrite:
374 if overwrite:
375 return actions
375 return actions
376
376
377 removes = set(a[0] for a in actions if a[1] == 'r')
377 removes = set(a[0] for a in actions if a[1] == 'r')
378 processed = []
378 processed = []
379
379
380 for action in actions:
380 for action in actions:
381 f, m, args, msg = action
381 f, m, args, msg = action
382
382
383 splitstandin = f and lfutil.splitstandin(f)
383 splitstandin = f and lfutil.splitstandin(f)
384 if (m == "g" and splitstandin is not None and
384 if (m == "g" and splitstandin is not None and
385 splitstandin in p1 and splitstandin not in removes):
385 splitstandin in p1 and splitstandin not in removes):
386 # Case 1: normal file in the working copy, largefile in
386 # Case 1: normal file in the working copy, largefile in
387 # the second parent
387 # the second parent
388 lfile = splitstandin
388 lfile = splitstandin
389 standin = f
389 standin = f
390 msg = _('remote turned local normal file %s into a largefile\n'
390 msg = _('remote turned local normal file %s into a largefile\n'
391 'use (l)argefile or keep (n)ormal file?'
391 'use (l)argefile or keep (n)ormal file?'
392 '$$ &Largefile $$ &Normal file') % lfile
392 '$$ &Largefile $$ &Normal file') % lfile
393 if repo.ui.promptchoice(msg, 0) == 0:
393 if repo.ui.promptchoice(msg, 0) == 0:
394 processed.append((lfile, "r", None, msg))
394 processed.append((lfile, "r", None, msg))
395 processed.append((standin, "g", (p2.flags(standin),), msg))
395 processed.append((standin, "g", (p2.flags(standin),), msg))
396 else:
396 else:
397 processed.append((standin, "r", None, msg))
397 processed.append((standin, "r", None, msg))
398 elif (m == "g" and
398 elif (m == "g" and
399 lfutil.standin(f) in p1 and lfutil.standin(f) not in removes):
399 lfutil.standin(f) in p1 and lfutil.standin(f) not in removes):
400 # Case 2: largefile in the working copy, normal file in
400 # Case 2: largefile in the working copy, normal file in
401 # the second parent
401 # the second parent
402 standin = lfutil.standin(f)
402 standin = lfutil.standin(f)
403 lfile = f
403 lfile = f
404 msg = _('remote turned local largefile %s into a normal file\n'
404 msg = _('remote turned local largefile %s into a normal file\n'
405 'keep (l)argefile or use (n)ormal file?'
405 'keep (l)argefile or use (n)ormal file?'
406 '$$ &Largefile $$ &Normal file') % lfile
406 '$$ &Largefile $$ &Normal file') % lfile
407 if repo.ui.promptchoice(msg, 0) == 0:
407 if repo.ui.promptchoice(msg, 0) == 0:
408 processed.append((lfile, "r", None, msg))
408 processed.append((lfile, "r", None, msg))
409 else:
409 else:
410 processed.append((standin, "r", None, msg))
410 processed.append((standin, "r", None, msg))
411 processed.append((lfile, "g", (p2.flags(lfile),), msg))
411 processed.append((lfile, "g", (p2.flags(lfile),), msg))
412 else:
412 else:
413 processed.append(action)
413 processed.append(action)
414
414
415 return processed
415 return processed
416
416
417 # Override filemerge to prompt the user about how they wish to merge
417 # Override filemerge to prompt the user about how they wish to merge
418 # largefiles. This will handle identical edits without prompting the user.
418 # largefiles. This will handle identical edits without prompting the user.
419 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca):
419 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca):
420 if not lfutil.isstandin(orig):
420 if not lfutil.isstandin(orig):
421 return origfn(repo, mynode, orig, fcd, fco, fca)
421 return origfn(repo, mynode, orig, fcd, fco, fca)
422
422
423 ahash = fca.data().strip().lower()
423 ahash = fca.data().strip().lower()
424 dhash = fcd.data().strip().lower()
424 dhash = fcd.data().strip().lower()
425 ohash = fco.data().strip().lower()
425 ohash = fco.data().strip().lower()
426 if (ohash != ahash and
426 if (ohash != ahash and
427 ohash != dhash and
427 ohash != dhash and
428 (dhash == ahash or
428 (dhash == ahash or
429 repo.ui.promptchoice(
429 repo.ui.promptchoice(
430 _('largefile %s has a merge conflict\nancestor was %s\n'
430 _('largefile %s has a merge conflict\nancestor was %s\n'
431 'keep (l)ocal %s or\ntake (o)ther %s?'
431 'keep (l)ocal %s or\ntake (o)ther %s?'
432 '$$ &Local $$ &Other') %
432 '$$ &Local $$ &Other') %
433 (lfutil.splitstandin(orig), ahash, dhash, ohash),
433 (lfutil.splitstandin(orig), ahash, dhash, ohash),
434 0) == 1)):
434 0) == 1)):
435 repo.wwrite(fcd.path(), fco.data(), fco.flags())
435 repo.wwrite(fcd.path(), fco.data(), fco.flags())
436 return 0
436 return 0
437
437
438 # Copy first changes the matchers to match standins instead of
438 # Copy first changes the matchers to match standins instead of
439 # largefiles. Then it overrides util.copyfile in that function it
439 # largefiles. Then it overrides util.copyfile in that function it
440 # checks if the destination largefile already exists. It also keeps a
440 # checks if the destination largefile already exists. It also keeps a
441 # list of copied files so that the largefiles can be copied and the
441 # list of copied files so that the largefiles can be copied and the
442 # dirstate updated.
442 # dirstate updated.
443 def overridecopy(orig, ui, repo, pats, opts, rename=False):
443 def overridecopy(orig, ui, repo, pats, opts, rename=False):
444 # doesn't remove largefile on rename
444 # doesn't remove largefile on rename
445 if len(pats) < 2:
445 if len(pats) < 2:
446 # this isn't legal, let the original function deal with it
446 # this isn't legal, let the original function deal with it
447 return orig(ui, repo, pats, opts, rename)
447 return orig(ui, repo, pats, opts, rename)
448
448
449 def makestandin(relpath):
449 def makestandin(relpath):
450 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
450 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
451 return os.path.join(repo.wjoin(lfutil.standin(path)))
451 return os.path.join(repo.wjoin(lfutil.standin(path)))
452
452
453 fullpats = scmutil.expandpats(pats)
453 fullpats = scmutil.expandpats(pats)
454 dest = fullpats[-1]
454 dest = fullpats[-1]
455
455
456 if os.path.isdir(dest):
456 if os.path.isdir(dest):
457 if not os.path.isdir(makestandin(dest)):
457 if not os.path.isdir(makestandin(dest)):
458 os.makedirs(makestandin(dest))
458 os.makedirs(makestandin(dest))
459 # This could copy both lfiles and normal files in one command,
459 # This could copy both lfiles and normal files in one command,
460 # but we don't want to do that. First replace their matcher to
460 # but we don't want to do that. First replace their matcher to
461 # only match normal files and run it, then replace it to just
461 # only match normal files and run it, then replace it to just
462 # match largefiles and run it again.
462 # match largefiles and run it again.
463 nonormalfiles = False
463 nonormalfiles = False
464 nolfiles = False
464 nolfiles = False
465 try:
465 try:
466 try:
466 try:
467 installnormalfilesmatchfn(repo[None].manifest())
467 installnormalfilesmatchfn(repo[None].manifest())
468 result = orig(ui, repo, pats, opts, rename)
468 result = orig(ui, repo, pats, opts, rename)
469 except util.Abort, e:
469 except util.Abort, e:
470 if str(e) != _('no files to copy'):
470 if str(e) != _('no files to copy'):
471 raise e
471 raise e
472 else:
472 else:
473 nonormalfiles = True
473 nonormalfiles = True
474 result = 0
474 result = 0
475 finally:
475 finally:
476 restorematchfn()
476 restorematchfn()
477
477
478 # The first rename can cause our current working directory to be removed.
478 # The first rename can cause our current working directory to be removed.
479 # In that case there is nothing left to copy/rename so just quit.
479 # In that case there is nothing left to copy/rename so just quit.
480 try:
480 try:
481 repo.getcwd()
481 repo.getcwd()
482 except OSError:
482 except OSError:
483 return result
483 return result
484
484
485 try:
485 try:
486 try:
486 try:
487 # When we call orig below it creates the standins but we don't add
487 # When we call orig below it creates the standins but we don't add
488 # them to the dir state until later so lock during that time.
488 # them to the dir state until later so lock during that time.
489 wlock = repo.wlock()
489 wlock = repo.wlock()
490
490
491 manifest = repo[None].manifest()
491 manifest = repo[None].manifest()
492 oldmatch = None # for the closure
492 oldmatch = None # for the closure
493 def overridematch(ctx, pats=[], opts={}, globbed=False,
493 def overridematch(ctx, pats=[], opts={}, globbed=False,
494 default='relpath'):
494 default='relpath'):
495 newpats = []
495 newpats = []
496 # The patterns were previously mangled to add the standin
496 # The patterns were previously mangled to add the standin
497 # directory; we need to remove that now
497 # directory; we need to remove that now
498 for pat in pats:
498 for pat in pats:
499 if match_.patkind(pat) is None and lfutil.shortname in pat:
499 if match_.patkind(pat) is None and lfutil.shortname in pat:
500 newpats.append(pat.replace(lfutil.shortname, ''))
500 newpats.append(pat.replace(lfutil.shortname, ''))
501 else:
501 else:
502 newpats.append(pat)
502 newpats.append(pat)
503 match = oldmatch(ctx, newpats, opts, globbed, default)
503 match = oldmatch(ctx, newpats, opts, globbed, default)
504 m = copy.copy(match)
504 m = copy.copy(match)
505 lfile = lambda f: lfutil.standin(f) in manifest
505 lfile = lambda f: lfutil.standin(f) in manifest
506 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
506 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
507 m._fmap = set(m._files)
507 m._fmap = set(m._files)
508 m._always = False
508 m._always = False
509 origmatchfn = m.matchfn
509 origmatchfn = m.matchfn
510 m.matchfn = lambda f: (lfutil.isstandin(f) and
510 m.matchfn = lambda f: (lfutil.isstandin(f) and
511 (f in manifest) and
511 (f in manifest) and
512 origmatchfn(lfutil.splitstandin(f)) or
512 origmatchfn(lfutil.splitstandin(f)) or
513 None)
513 None)
514 return m
514 return m
515 oldmatch = installmatchfn(overridematch)
515 oldmatch = installmatchfn(overridematch)
516 listpats = []
516 listpats = []
517 for pat in pats:
517 for pat in pats:
518 if match_.patkind(pat) is not None:
518 if match_.patkind(pat) is not None:
519 listpats.append(pat)
519 listpats.append(pat)
520 else:
520 else:
521 listpats.append(makestandin(pat))
521 listpats.append(makestandin(pat))
522
522
523 try:
523 try:
524 origcopyfile = util.copyfile
524 origcopyfile = util.copyfile
525 copiedfiles = []
525 copiedfiles = []
526 def overridecopyfile(src, dest):
526 def overridecopyfile(src, dest):
527 if (lfutil.shortname in src and
527 if (lfutil.shortname in src and
528 dest.startswith(repo.wjoin(lfutil.shortname))):
528 dest.startswith(repo.wjoin(lfutil.shortname))):
529 destlfile = dest.replace(lfutil.shortname, '')
529 destlfile = dest.replace(lfutil.shortname, '')
530 if not opts['force'] and os.path.exists(destlfile):
530 if not opts['force'] and os.path.exists(destlfile):
531 raise IOError('',
531 raise IOError('',
532 _('destination largefile already exists'))
532 _('destination largefile already exists'))
533 copiedfiles.append((src, dest))
533 copiedfiles.append((src, dest))
534 origcopyfile(src, dest)
534 origcopyfile(src, dest)
535
535
536 util.copyfile = overridecopyfile
536 util.copyfile = overridecopyfile
537 result += orig(ui, repo, listpats, opts, rename)
537 result += orig(ui, repo, listpats, opts, rename)
538 finally:
538 finally:
539 util.copyfile = origcopyfile
539 util.copyfile = origcopyfile
540
540
541 lfdirstate = lfutil.openlfdirstate(ui, repo)
541 lfdirstate = lfutil.openlfdirstate(ui, repo)
542 for (src, dest) in copiedfiles:
542 for (src, dest) in copiedfiles:
543 if (lfutil.shortname in src and
543 if (lfutil.shortname in src and
544 dest.startswith(repo.wjoin(lfutil.shortname))):
544 dest.startswith(repo.wjoin(lfutil.shortname))):
545 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
545 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
546 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
546 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
547 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
547 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
548 if not os.path.isdir(destlfiledir):
548 if not os.path.isdir(destlfiledir):
549 os.makedirs(destlfiledir)
549 os.makedirs(destlfiledir)
550 if rename:
550 if rename:
551 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
551 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
552 lfdirstate.remove(srclfile)
552 lfdirstate.remove(srclfile)
553 else:
553 else:
554 util.copyfile(repo.wjoin(srclfile),
554 util.copyfile(repo.wjoin(srclfile),
555 repo.wjoin(destlfile))
555 repo.wjoin(destlfile))
556
556
557 lfdirstate.add(destlfile)
557 lfdirstate.add(destlfile)
558 lfdirstate.write()
558 lfdirstate.write()
559 except util.Abort, e:
559 except util.Abort, e:
560 if str(e) != _('no files to copy'):
560 if str(e) != _('no files to copy'):
561 raise e
561 raise e
562 else:
562 else:
563 nolfiles = True
563 nolfiles = True
564 finally:
564 finally:
565 restorematchfn()
565 restorematchfn()
566 wlock.release()
566 wlock.release()
567
567
568 if nolfiles and nonormalfiles:
568 if nolfiles and nonormalfiles:
569 raise util.Abort(_('no files to copy'))
569 raise util.Abort(_('no files to copy'))
570
570
571 return result
571 return result
572
572
573 # When the user calls revert, we have to be careful to not revert any
573 # When the user calls revert, we have to be careful to not revert any
574 # changes to other largefiles accidentally. This means we have to keep
574 # changes to other largefiles accidentally. This means we have to keep
575 # track of the largefiles that are being reverted so we only pull down
575 # track of the largefiles that are being reverted so we only pull down
576 # the necessary largefiles.
576 # the necessary largefiles.
577 #
577 #
578 # Standins are only updated (to match the hash of largefiles) before
578 # Standins are only updated (to match the hash of largefiles) before
579 # commits. Update the standins then run the original revert, changing
579 # commits. Update the standins then run the original revert, changing
580 # the matcher to hit standins instead of largefiles. Based on the
580 # the matcher to hit standins instead of largefiles. Based on the
581 # resulting standins update the largefiles. Then return the standins
581 # resulting standins update the largefiles. Then return the standins
582 # to their proper state
582 # to their proper state
583 def overriderevert(orig, ui, repo, *pats, **opts):
583 def overriderevert(orig, ui, repo, *pats, **opts):
584 # Because we put the standins in a bad state (by updating them)
584 # Because we put the standins in a bad state (by updating them)
585 # and then return them to a correct state we need to lock to
585 # and then return them to a correct state we need to lock to
586 # prevent others from changing them in their incorrect state.
586 # prevent others from changing them in their incorrect state.
587 wlock = repo.wlock()
587 wlock = repo.wlock()
588 try:
588 try:
589 lfdirstate = lfutil.openlfdirstate(ui, repo)
589 lfdirstate = lfutil.openlfdirstate(ui, repo)
590 (modified, added, removed, missing, unknown, ignored, clean) = \
590 (modified, added, removed, missing, unknown, ignored, clean) = \
591 lfutil.lfdirstatestatus(lfdirstate, repo, repo['.'].rev())
591 lfutil.lfdirstatestatus(lfdirstate, repo, repo['.'].rev())
592 lfdirstate.write()
592 lfdirstate.write()
593 for lfile in modified:
593 for lfile in modified:
594 lfutil.updatestandin(repo, lfutil.standin(lfile))
594 lfutil.updatestandin(repo, lfutil.standin(lfile))
595 for lfile in missing:
595 for lfile in missing:
596 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
596 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
597 os.unlink(repo.wjoin(lfutil.standin(lfile)))
597 os.unlink(repo.wjoin(lfutil.standin(lfile)))
598
598
599 try:
599 try:
600 ctx = scmutil.revsingle(repo, opts.get('rev'))
600 ctx = scmutil.revsingle(repo, opts.get('rev'))
601 oldmatch = None # for the closure
601 oldmatch = None # for the closure
602 def overridematch(ctx, pats=[], opts={}, globbed=False,
602 def overridematch(ctx, pats=[], opts={}, globbed=False,
603 default='relpath'):
603 default='relpath'):
604 match = oldmatch(ctx, pats, opts, globbed, default)
604 match = oldmatch(ctx, pats, opts, globbed, default)
605 m = copy.copy(match)
605 m = copy.copy(match)
606 def tostandin(f):
606 def tostandin(f):
607 if lfutil.standin(f) in ctx:
607 if lfutil.standin(f) in ctx:
608 return lfutil.standin(f)
608 return lfutil.standin(f)
609 elif lfutil.standin(f) in repo[None]:
609 elif lfutil.standin(f) in repo[None]:
610 return None
610 return None
611 return f
611 return f
612 m._files = [tostandin(f) for f in m._files]
612 m._files = [tostandin(f) for f in m._files]
613 m._files = [f for f in m._files if f is not None]
613 m._files = [f for f in m._files if f is not None]
614 m._fmap = set(m._files)
614 m._fmap = set(m._files)
615 m._always = False
615 m._always = False
616 origmatchfn = m.matchfn
616 origmatchfn = m.matchfn
617 def matchfn(f):
617 def matchfn(f):
618 if lfutil.isstandin(f):
618 if lfutil.isstandin(f):
619 # We need to keep track of what largefiles are being
619 # We need to keep track of what largefiles are being
620 # matched so we know which ones to update later --
620 # matched so we know which ones to update later --
621 # otherwise we accidentally revert changes to other
621 # otherwise we accidentally revert changes to other
622 # largefiles. This is repo-specific, so duckpunch the
622 # largefiles. This is repo-specific, so duckpunch the
623 # repo object to keep the list of largefiles for us
623 # repo object to keep the list of largefiles for us
624 # later.
624 # later.
625 if origmatchfn(lfutil.splitstandin(f)) and \
625 if origmatchfn(lfutil.splitstandin(f)) and \
626 (f in repo[None] or f in ctx):
626 (f in repo[None] or f in ctx):
627 lfileslist = getattr(repo, '_lfilestoupdate', [])
627 lfileslist = getattr(repo, '_lfilestoupdate', [])
628 lfileslist.append(lfutil.splitstandin(f))
628 lfileslist.append(lfutil.splitstandin(f))
629 repo._lfilestoupdate = lfileslist
629 repo._lfilestoupdate = lfileslist
630 return True
630 return True
631 else:
631 else:
632 return False
632 return False
633 return origmatchfn(f)
633 return origmatchfn(f)
634 m.matchfn = matchfn
634 m.matchfn = matchfn
635 return m
635 return m
636 oldmatch = installmatchfn(overridematch)
636 oldmatch = installmatchfn(overridematch)
637 scmutil.match
637 scmutil.match
638 matches = overridematch(repo[None], pats, opts)
638 matches = overridematch(repo[None], pats, opts)
639 orig(ui, repo, *pats, **opts)
639 orig(ui, repo, *pats, **opts)
640 finally:
640 finally:
641 restorematchfn()
641 restorematchfn()
642 lfileslist = getattr(repo, '_lfilestoupdate', [])
642 lfileslist = getattr(repo, '_lfilestoupdate', [])
643 lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
643 lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
644 printmessage=False)
644 printmessage=False)
645
645
646 # empty out the largefiles list so we start fresh next time
646 # empty out the largefiles list so we start fresh next time
647 repo._lfilestoupdate = []
647 repo._lfilestoupdate = []
648 for lfile in modified:
648 for lfile in modified:
649 if lfile in lfileslist:
649 if lfile in lfileslist:
650 if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
650 if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
651 in repo['.']:
651 in repo['.']:
652 lfutil.writestandin(repo, lfutil.standin(lfile),
652 lfutil.writestandin(repo, lfutil.standin(lfile),
653 repo['.'][lfile].data().strip(),
653 repo['.'][lfile].data().strip(),
654 'x' in repo['.'][lfile].flags())
654 'x' in repo['.'][lfile].flags())
655 lfdirstate = lfutil.openlfdirstate(ui, repo)
655 lfdirstate = lfutil.openlfdirstate(ui, repo)
656 for lfile in added:
656 for lfile in added:
657 standin = lfutil.standin(lfile)
657 standin = lfutil.standin(lfile)
658 if standin not in ctx and (standin in matches or opts.get('all')):
658 if standin not in ctx and (standin in matches or opts.get('all')):
659 if lfile in lfdirstate:
659 if lfile in lfdirstate:
660 lfdirstate.drop(lfile)
660 lfdirstate.drop(lfile)
661 util.unlinkpath(repo.wjoin(standin))
661 util.unlinkpath(repo.wjoin(standin))
662 lfdirstate.write()
662 lfdirstate.write()
663 finally:
663 finally:
664 wlock.release()
664 wlock.release()
665
665
666 def hgupdaterepo(orig, repo, node, overwrite):
666 def hgupdaterepo(orig, repo, node, overwrite):
667 if not overwrite:
667 if not overwrite:
668 # Only call updatelfiles on the standins that have changed to save time
668 # Only call updatelfiles on the standins that have changed to save time
669 oldstandins = lfutil.getstandinsstate(repo)
669 oldstandins = lfutil.getstandinsstate(repo)
670
670
671 result = orig(repo, node, overwrite)
671 result = orig(repo, node, overwrite)
672
672
673 filelist = None
673 filelist = None
674 if not overwrite:
674 if not overwrite:
675 newstandins = lfutil.getstandinsstate(repo)
675 newstandins = lfutil.getstandinsstate(repo)
676 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
676 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
677 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist)
677 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist)
678 return result
678 return result
679
679
680 def hgmerge(orig, repo, node, force=None, remind=True):
680 def hgmerge(orig, repo, node, force=None, remind=True):
681 result = orig(repo, node, force, remind)
681 result = orig(repo, node, force, remind)
682 lfcommands.updatelfiles(repo.ui, repo)
682 lfcommands.updatelfiles(repo.ui, repo)
683 return result
683 return result
684
684
685 # When we rebase a repository with remotely changed largefiles, we need to
685 # When we rebase a repository with remotely changed largefiles, we need to
686 # take some extra care so that the largefiles are correctly updated in the
686 # take some extra care so that the largefiles are correctly updated in the
687 # working copy
687 # working copy
688 def overridepull(orig, ui, repo, source=None, **opts):
688 def overridepull(orig, ui, repo, source=None, **opts):
689 revsprepull = len(repo)
689 revsprepull = len(repo)
690 if not source:
690 if not source:
691 source = 'default'
691 source = 'default'
692 repo.lfpullsource = source
692 repo.lfpullsource = source
693 if opts.get('rebase', False):
693 if opts.get('rebase', False):
694 repo._isrebasing = True
694 repo._isrebasing = True
695 try:
695 try:
696 if opts.get('update'):
696 if opts.get('update'):
697 del opts['update']
697 del opts['update']
698 ui.debug('--update and --rebase are not compatible, ignoring '
698 ui.debug('--update and --rebase are not compatible, ignoring '
699 'the update flag\n')
699 'the update flag\n')
700 del opts['rebase']
700 del opts['rebase']
701 origpostincoming = commands.postincoming
701 origpostincoming = commands.postincoming
702 def _dummy(*args, **kwargs):
702 def _dummy(*args, **kwargs):
703 pass
703 pass
704 commands.postincoming = _dummy
704 commands.postincoming = _dummy
705 try:
705 try:
706 result = commands.pull(ui, repo, source, **opts)
706 result = commands.pull(ui, repo, source, **opts)
707 finally:
707 finally:
708 commands.postincoming = origpostincoming
708 commands.postincoming = origpostincoming
709 revspostpull = len(repo)
709 revspostpull = len(repo)
710 if revspostpull > revsprepull:
710 if revspostpull > revsprepull:
711 result = result or rebase.rebase(ui, repo)
711 result = result or rebase.rebase(ui, repo)
712 finally:
712 finally:
713 repo._isrebasing = False
713 repo._isrebasing = False
714 else:
714 else:
715 result = orig(ui, repo, source, **opts)
715 result = orig(ui, repo, source, **opts)
716 revspostpull = len(repo)
716 revspostpull = len(repo)
717 lfrevs = opts.get('lfrev', [])
717 lfrevs = opts.get('lfrev', [])
718 if opts.get('all_largefiles'):
718 if opts.get('all_largefiles'):
719 lfrevs.append('pulled()')
719 lfrevs.append('pulled()')
720 if lfrevs and revspostpull > revsprepull:
720 if lfrevs and revspostpull > revsprepull:
721 numcached = 0
721 numcached = 0
722 repo.firstpulled = revsprepull # for pulled() revset expression
722 repo.firstpulled = revsprepull # for pulled() revset expression
723 try:
723 try:
724 for rev in scmutil.revrange(repo, lfrevs):
724 for rev in scmutil.revrange(repo, lfrevs):
725 ui.note(_('pulling largefiles for revision %s\n') % rev)
725 ui.note(_('pulling largefiles for revision %s\n') % rev)
726 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
726 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
727 numcached += len(cached)
727 numcached += len(cached)
728 finally:
728 finally:
729 del repo.firstpulled
729 del repo.firstpulled
730 ui.status(_("%d largefiles cached\n") % numcached)
730 ui.status(_("%d largefiles cached\n") % numcached)
731 return result
731 return result
732
732
733 def pulledrevsetsymbol(repo, subset, x):
733 def pulledrevsetsymbol(repo, subset, x):
734 """``pulled()``
734 """``pulled()``
735 Changesets that just has been pulled.
735 Changesets that just has been pulled.
736
736
737 Only available with largefiles from pull --lfrev expressions.
737 Only available with largefiles from pull --lfrev expressions.
738
738
739 .. container:: verbose
739 .. container:: verbose
740
740
741 Some examples:
741 Some examples:
742
742
743 - pull largefiles for all new changesets::
743 - pull largefiles for all new changesets::
744
744
745 hg pull -lfrev "pulled()"
745 hg pull -lfrev "pulled()"
746
746
747 - pull largefiles for all new branch heads::
747 - pull largefiles for all new branch heads::
748
748
749 hg pull -lfrev "head(pulled()) and not closed()"
749 hg pull -lfrev "head(pulled()) and not closed()"
750
750
751 """
751 """
752
752
753 try:
753 try:
754 firstpulled = repo.firstpulled
754 firstpulled = repo.firstpulled
755 except AttributeError:
755 except AttributeError:
756 raise util.Abort(_("pulled() only available in --lfrev"))
756 raise util.Abort(_("pulled() only available in --lfrev"))
757 return revset.baseset([r for r in subset if r >= firstpulled])
757 return revset.baseset([r for r in subset if r >= firstpulled])
758
758
759 def overrideclone(orig, ui, source, dest=None, **opts):
759 def overrideclone(orig, ui, source, dest=None, **opts):
760 d = dest
760 d = dest
761 if d is None:
761 if d is None:
762 d = hg.defaultdest(source)
762 d = hg.defaultdest(source)
763 if opts.get('all_largefiles') and not hg.islocal(d):
763 if opts.get('all_largefiles') and not hg.islocal(d):
764 raise util.Abort(_(
764 raise util.Abort(_(
765 '--all-largefiles is incompatible with non-local destination %s' %
765 '--all-largefiles is incompatible with non-local destination %s' %
766 d))
766 d))
767
767
768 return orig(ui, source, dest, **opts)
768 return orig(ui, source, dest, **opts)
769
769
770 def hgclone(orig, ui, opts, *args, **kwargs):
770 def hgclone(orig, ui, opts, *args, **kwargs):
771 result = orig(ui, opts, *args, **kwargs)
771 result = orig(ui, opts, *args, **kwargs)
772
772
773 if result is not None:
773 if result is not None:
774 sourcerepo, destrepo = result
774 sourcerepo, destrepo = result
775 repo = destrepo.local()
775 repo = destrepo.local()
776
776
777 # Caching is implicitly limited to 'rev' option, since the dest repo was
777 # Caching is implicitly limited to 'rev' option, since the dest repo was
778 # truncated at that point. The user may expect a download count with
778 # truncated at that point. The user may expect a download count with
779 # this option, so attempt whether or not this is a largefile repo.
779 # this option, so attempt whether or not this is a largefile repo.
780 if opts.get('all_largefiles'):
780 if opts.get('all_largefiles'):
781 success, missing = lfcommands.downloadlfiles(ui, repo, None)
781 success, missing = lfcommands.downloadlfiles(ui, repo, None)
782
782
783 if missing != 0:
783 if missing != 0:
784 return None
784 return None
785
785
786 return result
786 return result
787
787
788 def overriderebase(orig, ui, repo, **opts):
788 def overriderebase(orig, ui, repo, **opts):
789 repo._isrebasing = True
789 repo._isrebasing = True
790 try:
790 try:
791 return orig(ui, repo, **opts)
791 return orig(ui, repo, **opts)
792 finally:
792 finally:
793 repo._isrebasing = False
793 repo._isrebasing = False
794
794
795 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
795 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
796 prefix=None, mtime=None, subrepos=None):
796 prefix=None, mtime=None, subrepos=None):
797 # No need to lock because we are only reading history and
797 # No need to lock because we are only reading history and
798 # largefile caches, neither of which are modified.
798 # largefile caches, neither of which are modified.
799 lfcommands.cachelfiles(repo.ui, repo, node)
799 lfcommands.cachelfiles(repo.ui, repo, node)
800
800
801 if kind not in archival.archivers:
801 if kind not in archival.archivers:
802 raise util.Abort(_("unknown archive type '%s'") % kind)
802 raise util.Abort(_("unknown archive type '%s'") % kind)
803
803
804 ctx = repo[node]
804 ctx = repo[node]
805
805
806 if kind == 'files':
806 if kind == 'files':
807 if prefix:
807 if prefix:
808 raise util.Abort(
808 raise util.Abort(
809 _('cannot give prefix when archiving to files'))
809 _('cannot give prefix when archiving to files'))
810 else:
810 else:
811 prefix = archival.tidyprefix(dest, kind, prefix)
811 prefix = archival.tidyprefix(dest, kind, prefix)
812
812
813 def write(name, mode, islink, getdata):
813 def write(name, mode, islink, getdata):
814 if matchfn and not matchfn(name):
814 if matchfn and not matchfn(name):
815 return
815 return
816 data = getdata()
816 data = getdata()
817 if decode:
817 if decode:
818 data = repo.wwritedata(name, data)
818 data = repo.wwritedata(name, data)
819 archiver.addfile(prefix + name, mode, islink, data)
819 archiver.addfile(prefix + name, mode, islink, data)
820
820
821 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
821 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
822
822
823 if repo.ui.configbool("ui", "archivemeta", True):
823 if repo.ui.configbool("ui", "archivemeta", True):
824 def metadata():
824 def metadata():
825 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
825 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
826 hex(repo.changelog.node(0)), hex(node), ctx.branch())
826 hex(repo.changelog.node(0)), hex(node), ctx.branch())
827
827
828 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
828 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
829 if repo.tagtype(t) == 'global')
829 if repo.tagtype(t) == 'global')
830 if not tags:
830 if not tags:
831 repo.ui.pushbuffer()
831 repo.ui.pushbuffer()
832 opts = {'template': '{latesttag}\n{latesttagdistance}',
832 opts = {'template': '{latesttag}\n{latesttagdistance}',
833 'style': '', 'patch': None, 'git': None}
833 'style': '', 'patch': None, 'git': None}
834 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
834 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
835 ltags, dist = repo.ui.popbuffer().split('\n')
835 ltags, dist = repo.ui.popbuffer().split('\n')
836 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
836 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
837 tags += 'latesttagdistance: %s\n' % dist
837 tags += 'latesttagdistance: %s\n' % dist
838
838
839 return base + tags
839 return base + tags
840
840
841 write('.hg_archival.txt', 0644, False, metadata)
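For orientation, the metadata() helper above emits a small text file. With the fallback branch taken (no global tags on the archived changeset) it would look roughly like this; every value below is a placeholder for illustration, not taken from any real repository:

repo: <40-hex hash of revision 0>
node: <40-hex hash of the archived node>
branch: default
latesttag: 1.0
latesttagdistance: 12

When the archived changeset does carry global tags, the last two lines are replaced by one 'tag: <name>' line per tag, as the code above shows.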
841 write('.hg_archival.txt', 0644, False, metadata)
842
842
843 for f in ctx:
843 for f in ctx:
844 ff = ctx.flags(f)
844 ff = ctx.flags(f)
845 getdata = ctx[f].data
845 getdata = ctx[f].data
846 if lfutil.isstandin(f):
846 if lfutil.isstandin(f):
847 path = lfutil.findfile(repo, getdata().strip())
847 path = lfutil.findfile(repo, getdata().strip())
848 if path is None:
848 if path is None:
849 raise util.Abort(
849 raise util.Abort(
850 _('largefile %s not found in repo store or system cache')
850 _('largefile %s not found in repo store or system cache')
851 % lfutil.splitstandin(f))
851 % lfutil.splitstandin(f))
852 f = lfutil.splitstandin(f)
852 f = lfutil.splitstandin(f)
853
853
854 def getdatafn():
854 def getdatafn():
855 fd = None
855 fd = None
856 try:
856 try:
857 fd = open(path, 'rb')
857 fd = open(path, 'rb')
858 return fd.read()
858 return fd.read()
859 finally:
859 finally:
860 if fd:
860 if fd:
861 fd.close()
861 fd.close()
862
862
863 getdata = getdatafn
863 getdata = getdatafn
864 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
864 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
865
865
866 if subrepos:
866 if subrepos:
867 for subpath in sorted(ctx.substate):
867 for subpath in sorted(ctx.substate):
868 sub = ctx.sub(subpath)
868 sub = ctx.sub(subpath)
869 submatch = match_.narrowmatcher(subpath, matchfn)
869 submatch = match_.narrowmatcher(subpath, matchfn)
870 sub.archive(repo.ui, archiver, prefix, submatch)
870 sub.archive(repo.ui, archiver, prefix, submatch)
871
871
872 archiver.done()
872 archiver.done()
873
873
874 def hgsubrepoarchive(orig, repo, ui, archiver, prefix, match=None):
874 def hgsubrepoarchive(orig, repo, ui, archiver, prefix, match=None):
875 repo._get(repo._state + ('hg',))
875 repo._get(repo._state + ('hg',))
876 rev = repo._state[1]
876 rev = repo._state[1]
877 ctx = repo._repo[rev]
877 ctx = repo._repo[rev]
878
878
879 lfcommands.cachelfiles(ui, repo._repo, ctx.node())
879 lfcommands.cachelfiles(ui, repo._repo, ctx.node())
880
880
881 def write(name, mode, islink, getdata):
881 def write(name, mode, islink, getdata):
882 # At this point, the standin has been replaced with the largefile name,
882 # At this point, the standin has been replaced with the largefile name,
883 # so the normal matcher works here without the lfutil variants.
883 # so the normal matcher works here without the lfutil variants.
884 if match and not match(f):
884 if match and not match(f):
885 return
885 return
886 data = getdata()
886 data = getdata()
887
887
888 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
888 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
889
889
890 for f in ctx:
890 for f in ctx:
891 ff = ctx.flags(f)
891 ff = ctx.flags(f)
892 getdata = ctx[f].data
892 getdata = ctx[f].data
893 if lfutil.isstandin(f):
893 if lfutil.isstandin(f):
894 path = lfutil.findfile(repo._repo, getdata().strip())
894 path = lfutil.findfile(repo._repo, getdata().strip())
895 if path is None:
895 if path is None:
896 raise util.Abort(
896 raise util.Abort(
897 _('largefile %s not found in repo store or system cache')
897 _('largefile %s not found in repo store or system cache')
898 % lfutil.splitstandin(f))
898 % lfutil.splitstandin(f))
899 f = lfutil.splitstandin(f)
899 f = lfutil.splitstandin(f)
900
900
901 def getdatafn():
901 def getdatafn():
902 fd = None
902 fd = None
903 try:
903 try:
904 fd = open(os.path.join(prefix, path), 'rb')
904 fd = open(os.path.join(prefix, path), 'rb')
905 return fd.read()
905 return fd.read()
906 finally:
906 finally:
907 if fd:
907 if fd:
908 fd.close()
908 fd.close()
909
909
910 getdata = getdatafn
910 getdata = getdatafn
911
911
912 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
912 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
913
913
914 for subpath in sorted(ctx.substate):
914 for subpath in sorted(ctx.substate):
915 sub = ctx.sub(subpath)
915 sub = ctx.sub(subpath)
916 submatch = match_.narrowmatcher(subpath, match)
916 submatch = match_.narrowmatcher(subpath, match)
917 sub.archive(ui, archiver, os.path.join(prefix, repo._path) + '/',
917 sub.archive(ui, archiver, os.path.join(prefix, repo._path) + '/',
918 submatch)
918 submatch)
919
919
920 # If a largefile is modified, the change is not reflected in its
920 # If a largefile is modified, the change is not reflected in its
921 # standin until a commit. cmdutil.bailifchanged() raises an exception
921 # standin until a commit. cmdutil.bailifchanged() raises an exception
922 # if the repo has uncommitted changes. Wrap it to also check if
922 # if the repo has uncommitted changes. Wrap it to also check if
923 # largefiles were changed. This is used by bisect and backout.
923 # largefiles were changed. This is used by bisect and backout.
924 def overridebailifchanged(orig, repo):
924 def overridebailifchanged(orig, repo):
925 orig(repo)
925 orig(repo)
926 repo.lfstatus = True
926 repo.lfstatus = True
927 modified, added, removed, deleted = repo.status()[:4]
927 modified, added, removed, deleted = repo.status()[:4]
928 repo.lfstatus = False
928 repo.lfstatus = False
929 if modified or added or removed or deleted:
929 if modified or added or removed or deleted:
930 raise util.Abort(_('uncommitted changes'))
930 raise util.Abort(_('uncommitted changes'))
931
931
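As a rough sketch of how a wrapper like overridebailifchanged gets activated: the actual wiring lives in the extension's uisetup module, which is not part of this hunk, but it would typically use extensions.wrapfunction, whose wrapper receives the original function as its first argument.

# Minimal sketch, assuming the standard extensions.wrapfunction(container,
# funcname, wrapper) API; the real largefiles setup code is not shown here.
from mercurial import cmdutil, extensions

def uisetup(ui):
    # After this call, every cmdutil.bailifchanged(repo) invocation first
    # goes through overridebailifchanged(orig, repo) defined above.
    extensions.wrapfunction(cmdutil, 'bailifchanged', overridebailifchanged)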
932 # Fetch doesn't use cmdutil.bailifchanged so override it to add the check
932 # Fetch doesn't use cmdutil.bailifchanged so override it to add the check
933 def overridefetch(orig, ui, repo, *pats, **opts):
933 def overridefetch(orig, ui, repo, *pats, **opts):
934 repo.lfstatus = True
934 repo.lfstatus = True
935 modified, added, removed, deleted = repo.status()[:4]
935 modified, added, removed, deleted = repo.status()[:4]
936 repo.lfstatus = False
936 repo.lfstatus = False
937 if modified or added or removed or deleted:
937 if modified or added or removed or deleted:
938 raise util.Abort(_('uncommitted changes'))
938 raise util.Abort(_('uncommitted changes'))
939 return orig(ui, repo, *pats, **opts)
939 return orig(ui, repo, *pats, **opts)
940
940
941 def overrideforget(orig, ui, repo, *pats, **opts):
941 def overrideforget(orig, ui, repo, *pats, **opts):
942 installnormalfilesmatchfn(repo[None].manifest())
942 installnormalfilesmatchfn(repo[None].manifest())
943 result = orig(ui, repo, *pats, **opts)
943 result = orig(ui, repo, *pats, **opts)
944 restorematchfn()
944 restorematchfn()
945 m = scmutil.match(repo[None], pats, opts)
945 m = scmutil.match(repo[None], pats, opts)
946
946
947 try:
947 try:
948 repo.lfstatus = True
948 repo.lfstatus = True
949 s = repo.status(match=m, clean=True)
949 s = repo.status(match=m, clean=True)
950 finally:
950 finally:
951 repo.lfstatus = False
951 repo.lfstatus = False
952 forget = sorted(s[0] + s[1] + s[3] + s[6])
952 forget = sorted(s[0] + s[1] + s[3] + s[6])
953 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
953 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
954
954
955 for f in forget:
955 for f in forget:
956 if lfutil.standin(f) not in repo.dirstate and not \
956 if lfutil.standin(f) not in repo.dirstate and not \
957 os.path.isdir(m.rel(lfutil.standin(f))):
957 os.path.isdir(m.rel(lfutil.standin(f))):
958 ui.warn(_('not removing %s: file is already untracked\n')
958 ui.warn(_('not removing %s: file is already untracked\n')
959 % m.rel(f))
959 % m.rel(f))
960 result = 1
960 result = 1
961
961
962 for f in forget:
962 for f in forget:
963 if ui.verbose or not m.exact(f):
963 if ui.verbose or not m.exact(f):
964 ui.status(_('removing %s\n') % m.rel(f))
964 ui.status(_('removing %s\n') % m.rel(f))
965
965
966 # Need to lock because standin files are deleted then removed from the
966 # Need to lock because standin files are deleted then removed from the
967 # repository and we could race in-between.
967 # repository and we could race in-between.
968 wlock = repo.wlock()
968 wlock = repo.wlock()
969 try:
969 try:
970 lfdirstate = lfutil.openlfdirstate(ui, repo)
970 lfdirstate = lfutil.openlfdirstate(ui, repo)
971 for f in forget:
971 for f in forget:
972 if lfdirstate[f] == 'a':
972 if lfdirstate[f] == 'a':
973 lfdirstate.drop(f)
973 lfdirstate.drop(f)
974 else:
974 else:
975 lfdirstate.remove(f)
975 lfdirstate.remove(f)
976 lfdirstate.write()
976 lfdirstate.write()
977 standins = [lfutil.standin(f) for f in forget]
977 standins = [lfutil.standin(f) for f in forget]
978 for f in standins:
978 for f in standins:
979 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
979 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
980 repo[None].forget(standins)
980 repo[None].forget(standins)
981 finally:
981 finally:
982 wlock.release()
982 wlock.release()
983
983
984 return result
984 return result
985
985
986 def outgoinghook(ui, repo, other, opts, missing):
986 def outgoinghook(ui, repo, other, opts, missing):
987 if opts.pop('large', None):
987 if opts.pop('large', None):
988 toupload = set()
988 toupload = set()
989 lfutil.getlfilestoupload(repo, missing,
989 lfutil.getlfilestoupload(repo, missing,
990 lambda fn, lfhash: toupload.add(fn))
990 lambda fn, lfhash: toupload.add(fn))
991 if not toupload:
991 if not toupload:
992 ui.status(_('largefiles: no files to upload\n'))
992 ui.status(_('largefiles: no files to upload\n'))
993 else:
993 else:
994 ui.status(_('largefiles to upload:\n'))
994 ui.status(_('largefiles to upload:\n'))
995 for file in sorted(toupload):
995 for file in sorted(toupload):
996 ui.status(lfutil.splitstandin(file) + '\n')
996 ui.status(lfutil.splitstandin(file) + '\n')
997 ui.status('\n')
997 ui.status('\n')
998
998
999 def summaryremotehook(ui, repo, opts, changes):
999 def summaryremotehook(ui, repo, opts, changes):
1000 largeopt = opts.get('large', False)
1000 largeopt = opts.get('large', False)
1001 if changes is None:
1001 if changes is None:
1002 if largeopt:
1002 if largeopt:
1003 return (False, True) # only outgoing check is needed
1003 return (False, True) # only outgoing check is needed
1004 else:
1004 else:
1005 return (False, False)
1005 return (False, False)
1006 elif largeopt:
1006 elif largeopt:
1007 url, branch, peer, outgoing = changes[1]
1007 url, branch, peer, outgoing = changes[1]
1008 if peer is None:
1008 if peer is None:
1009 # i18n: column positioning for "hg summary"
1009 # i18n: column positioning for "hg summary"
1010 ui.status(_('largefiles: (no remote repo)\n'))
1010 ui.status(_('largefiles: (no remote repo)\n'))
1011 return
1011 return
1012
1012
1013 toupload = set()
1013 toupload = set()
1014 lfutil.getlfilestoupload(repo, outgoing.missing,
1014 lfutil.getlfilestoupload(repo, outgoing.missing,
1015 lambda fn, lfhash: toupload.add(fn))
1015 lambda fn, lfhash: toupload.add(fn))
1016 if not toupload:
1016 if not toupload:
1017 # i18n: column positioning for "hg summary"
1017 # i18n: column positioning for "hg summary"
1018 ui.status(_('largefiles: (no files to upload)\n'))
1018 ui.status(_('largefiles: (no files to upload)\n'))
1019 else:
1019 else:
1020 # i18n: column positioning for "hg summary"
1020 # i18n: column positioning for "hg summary"
1021 ui.status(_('largefiles: %d to upload\n') % len(toupload))
1021 ui.status(_('largefiles: %d to upload\n') % len(toupload))
1022
1022
1023 def overridesummary(orig, ui, repo, *pats, **opts):
1023 def overridesummary(orig, ui, repo, *pats, **opts):
1024 try:
1024 try:
1025 repo.lfstatus = True
1025 repo.lfstatus = True
1026 orig(ui, repo, *pats, **opts)
1026 orig(ui, repo, *pats, **opts)
1027 finally:
1027 finally:
1028 repo.lfstatus = False
1028 repo.lfstatus = False
1029
1029
1030 def scmutiladdremove(orig, repo, pats=[], opts={}, dry_run=None,
1030 def scmutiladdremove(orig, repo, pats=[], opts={}, dry_run=None,
1031 similarity=None):
1031 similarity=None):
1032 if not lfutil.islfilesrepo(repo):
1032 if not lfutil.islfilesrepo(repo):
1033 return orig(repo, pats, opts, dry_run, similarity)
1033 return orig(repo, pats, opts, dry_run, similarity)
1034 # Get the list of missing largefiles so we can remove them
1034 # Get the list of missing largefiles so we can remove them
1035 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1035 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1036 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
1036 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
1037 False, False)
1037 False, False)
1038 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
1038 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
1039
1039
1040 # Call into the normal remove code, but leave the removal of the standin
1040 # Call into the normal remove code, but leave the removal of the standin
1041 # itself to the original addremove. Monkey patching here makes sure we
1041 # itself to the original addremove. Monkey patching here makes sure we
1042 # don't remove the standin in the largefiles code, preventing a very
1042 # don't remove the standin in the largefiles code, preventing a very
1043 # confused state later.
1043 # confused state later.
1044 if missing:
1044 if missing:
1045 m = [repo.wjoin(f) for f in missing]
1045 m = [repo.wjoin(f) for f in missing]
1046 repo._isaddremove = True
1046 repo._isaddremove = True
1047 removelargefiles(repo.ui, repo, *m, **opts)
1047 removelargefiles(repo.ui, repo, *m, **opts)
1048 repo._isaddremove = False
1048 repo._isaddremove = False
1049 # Call into the normal add code, and any files that *should* be added as
1049 # Call into the normal add code, and any files that *should* be added as
1050 # largefiles will be
1050 # largefiles will be
1051 addlargefiles(repo.ui, repo, *pats, **opts)
1051 addlargefiles(repo.ui, repo, *pats, **opts)
1052 # Now that we've handled largefiles, hand off to the original addremove
1052 # Now that we've handled largefiles, hand off to the original addremove
1053 # function to take care of the rest. Make sure it doesn't do anything with
1053 # function to take care of the rest. Make sure it doesn't do anything with
1054 # largefiles by installing a matcher that will ignore them.
1054 # largefiles by installing a matcher that will ignore them.
1055 installnormalfilesmatchfn(repo[None].manifest())
1055 installnormalfilesmatchfn(repo[None].manifest())
1056 result = orig(repo, pats, opts, dry_run, similarity)
1056 result = orig(repo, pats, opts, dry_run, similarity)
1057 restorematchfn()
1057 restorematchfn()
1058 return result
1058 return result
1059
1059
1060 # Calling purge with --all will cause the largefiles to be deleted.
1060 # Calling purge with --all will cause the largefiles to be deleted.
1061 # Override repo.status to prevent this from happening.
1061 # Override repo.status to prevent this from happening.
1062 def overridepurge(orig, ui, repo, *dirs, **opts):
1062 def overridepurge(orig, ui, repo, *dirs, **opts):
1063 # XXX large file status is buggy when used on a repo proxy.
1063 # XXX large file status is buggy when used on a repo proxy.
1064 # XXX this needs to be investigated.
1064 # XXX this needs to be investigated.
1065 repo = repo.unfiltered()
1065 repo = repo.unfiltered()
1066 oldstatus = repo.status
1066 oldstatus = repo.status
1067 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1067 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1068 clean=False, unknown=False, listsubrepos=False):
1068 clean=False, unknown=False, listsubrepos=False):
1069 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1069 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1070 listsubrepos)
1070 listsubrepos)
1071 lfdirstate = lfutil.openlfdirstate(ui, repo)
1071 lfdirstate = lfutil.openlfdirstate(ui, repo)
1072 modified, added, removed, deleted, unknown, ignored, clean = r
1072 modified, added, removed, deleted, unknown, ignored, clean = r
1073 unknown = [f for f in unknown if lfdirstate[f] == '?']
1073 unknown = [f for f in unknown if lfdirstate[f] == '?']
1074 ignored = [f for f in ignored if lfdirstate[f] == '?']
1074 ignored = [f for f in ignored if lfdirstate[f] == '?']
1075 return modified, added, removed, deleted, unknown, ignored, clean
1075 return modified, added, removed, deleted, unknown, ignored, clean
1076 repo.status = overridestatus
1076 repo.status = overridestatus
1077 orig(ui, repo, *dirs, **opts)
1077 orig(ui, repo, *dirs, **opts)
1078 repo.status = oldstatus
1078 repo.status = oldstatus
1079
1079
1080 def overriderollback(orig, ui, repo, **opts):
1080 def overriderollback(orig, ui, repo, **opts):
1081 result = orig(ui, repo, **opts)
1081 result = orig(ui, repo, **opts)
1082 merge.update(repo, node=None, branchmerge=False, force=True,
1082 merge.update(repo, node=None, branchmerge=False, force=True,
1083 partial=lfutil.isstandin)
1083 partial=lfutil.isstandin)
1084 wlock = repo.wlock()
1084 wlock = repo.wlock()
1085 try:
1085 try:
1086 lfdirstate = lfutil.openlfdirstate(ui, repo)
1086 lfdirstate = lfutil.openlfdirstate(ui, repo)
1087 lfiles = lfutil.listlfiles(repo)
1087 lfiles = lfutil.listlfiles(repo)
1088 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
1088 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
1089 for file in lfiles:
1089 for file in lfiles:
1090 if file in oldlfiles:
1090 if file in oldlfiles:
1091 lfdirstate.normallookup(file)
1091 lfdirstate.normallookup(file)
1092 else:
1092 else:
1093 lfdirstate.add(file)
1093 lfdirstate.add(file)
1094 lfdirstate.write()
1094 lfdirstate.write()
1095 finally:
1095 finally:
1096 wlock.release()
1096 wlock.release()
1097 return result
1097 return result
1098
1098
1099 def overridetransplant(orig, ui, repo, *revs, **opts):
1099 def overridetransplant(orig, ui, repo, *revs, **opts):
1100 try:
1100 try:
1101 oldstandins = lfutil.getstandinsstate(repo)
1101 oldstandins = lfutil.getstandinsstate(repo)
1102 repo._istransplanting = True
1102 repo._istransplanting = True
1103 result = orig(ui, repo, *revs, **opts)
1103 result = orig(ui, repo, *revs, **opts)
1104 newstandins = lfutil.getstandinsstate(repo)
1104 newstandins = lfutil.getstandinsstate(repo)
1105 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1105 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1106 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1106 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1107 printmessage=True)
1107 printmessage=True)
1108 finally:
1108 finally:
1109 repo._istransplanting = False
1109 repo._istransplanting = False
1110 return result
1110 return result
1111
1111
1112 def overridecat(orig, ui, repo, file1, *pats, **opts):
1112 def overridecat(orig, ui, repo, file1, *pats, **opts):
1113 ctx = scmutil.revsingle(repo, opts.get('rev'))
1113 ctx = scmutil.revsingle(repo, opts.get('rev'))
1114 err = 1
1114 err = 1
1115 notbad = set()
1115 notbad = set()
1116 m = scmutil.match(ctx, (file1,) + pats, opts)
1116 m = scmutil.match(ctx, (file1,) + pats, opts)
1117 origmatchfn = m.matchfn
1117 origmatchfn = m.matchfn
1118 def lfmatchfn(f):
1118 def lfmatchfn(f):
1119 lf = lfutil.splitstandin(f)
1119 lf = lfutil.splitstandin(f)
1120 if lf is None:
1120 if lf is None:
1121 return origmatchfn(f)
1121 return origmatchfn(f)
1122 notbad.add(lf)
1122 notbad.add(lf)
1123 return origmatchfn(lf)
1123 return origmatchfn(lf)
1124 m.matchfn = lfmatchfn
1124 m.matchfn = lfmatchfn
1125 origbadfn = m.bad
1125 origbadfn = m.bad
1126 def lfbadfn(f, msg):
1126 def lfbadfn(f, msg):
1127 if f not in notbad:
1127 if f not in notbad:
1128 return origbadfn(f, msg)
1128 return origbadfn(f, msg)
1129 m.bad = lfbadfn
1129 m.bad = lfbadfn
1130 for f in ctx.walk(m):
1130 for f in ctx.walk(m):
1131 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1131 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1132 pathname=f)
1132 pathname=f)
1133 lf = lfutil.splitstandin(f)
1133 lf = lfutil.splitstandin(f)
1134 if lf is None:
1134 if lf is None:
1135 # duplicating unreachable code from commands.cat
1135 # duplicating unreachable code from commands.cat
1136 data = ctx[f].data()
1136 data = ctx[f].data()
1137 if opts.get('decode'):
1137 if opts.get('decode'):
1138 data = repo.wwritedata(f, data)
1138 data = repo.wwritedata(f, data)
1139 fp.write(data)
1139 fp.write(data)
1140 else:
1140 else:
1141 hash = lfutil.readstandin(repo, lf, ctx.rev())
1141 hash = lfutil.readstandin(repo, lf, ctx.rev())
1142 if not lfutil.inusercache(repo.ui, hash):
1142 if not lfutil.inusercache(repo.ui, hash):
1143 store = basestore._openstore(repo)
1143 store = basestore._openstore(repo)
1144 success, missing = store.get([(lf, hash)])
1144 success, missing = store.get([(lf, hash)])
1145 if len(success) != 1:
1145 if len(success) != 1:
1146 raise util.Abort(
1146 raise util.Abort(
1147 _('largefile %s is not in cache and could not be '
1147 _('largefile %s is not in cache and could not be '
1148 'downloaded') % lf)
1148 'downloaded') % lf)
1149 path = lfutil.usercachepath(repo.ui, hash)
1149 path = lfutil.usercachepath(repo.ui, hash)
1150 fpin = open(path, "rb")
1150 fpin = open(path, "rb")
1151 for chunk in util.filechunkiter(fpin, 128 * 1024):
1151 for chunk in util.filechunkiter(fpin, 128 * 1024):
1152 fp.write(chunk)
1152 fp.write(chunk)
1153 fpin.close()
1153 fpin.close()
1154 fp.close()
1154 fp.close()
1155 err = 0
1155 err = 0
1156 return err
1156 return err
1157
1157
1158 def mercurialsinkbefore(orig, sink):
1158 def mercurialsinkbefore(orig, sink):
1159 sink.repo._isconverting = True
1159 sink.repo._isconverting = True
1160 orig(sink)
1160 orig(sink)
1161
1161
1162 def mercurialsinkafter(orig, sink):
1162 def mercurialsinkafter(orig, sink):
1163 sink.repo._isconverting = False
1163 sink.repo._isconverting = False
1164 orig(sink)
1164 orig(sink)
@@ -1,1019 +1,1021
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import struct
8 import struct
9
9
10 from node import nullid, nullrev, hex, bin
10 from node import nullid, nullrev, hex, bin
11 from i18n import _
11 from i18n import _
12 from mercurial import obsolete
12 from mercurial import obsolete
13 import error, util, filemerge, copies, subrepo, worker, dicthelpers
13 import error, util, filemerge, copies, subrepo, worker, dicthelpers
14 import errno, os, shutil
14 import errno, os, shutil
15
15
16 _pack = struct.pack
16 _pack = struct.pack
17 _unpack = struct.unpack
17 _unpack = struct.unpack
18
18
19 def _droponode(data):
19 def _droponode(data):
20 # used for compatibility with v1
20 # used for compatibility with v1
21 bits = data.split("\0")
21 bits = data.split("\0")
22 bits = bits[:-2] + bits[-1:]
22 bits = bits[:-2] + bits[-1:]
23 return "\0".join(bits)
23 return "\0".join(bits)
24
24
25 class mergestate(object):
25 class mergestate(object):
26 '''track 3-way merge state of individual files
26 '''track 3-way merge state of individual files
27
27
28 it is stored on disk when needed. Two files are used, one with an old
28 it is stored on disk when needed. Two files are used, one with an old
29 format, one with a new format. Both contain similar data, but the new
29 format, one with a new format. Both contain similar data, but the new
30 format can store new kinds of fields.
30 format can store new kinds of fields.
31
31
32 The current new format is a list of arbitrary records of the form:
32 The current new format is a list of arbitrary records of the form:
33
33
34 [type][length][content]
34 [type][length][content]
35
35
36 Type is a single character, length is a 4-byte integer, content is an
36 Type is a single character, length is a 4-byte integer, content is an
37 arbitrary sequence of bytes of length `length`.
37 arbitrary sequence of bytes of length `length`.
38
38
39 Type should be a letter. Capital letters are mandatory records; Mercurial
39 Type should be a letter. Capital letters are mandatory records; Mercurial
40 should abort if they are unknown. Lower case records can be safely ignored.
40 should abort if they are unknown. Lower case records can be safely ignored.
41
41
42 Currently known records:
42 Currently known records:
43
43
44 L: the node of the "local" part of the merge (hexified version)
44 L: the node of the "local" part of the merge (hexified version)
45 O: the node of the "other" part of the merge (hexified version)
45 O: the node of the "other" part of the merge (hexified version)
46 F: a file to be merged entry
46 F: a file to be merged entry
47 '''
47 '''
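A minimal standalone sketch of the [type][length][content] record encoding described in the docstring above. It mirrors the _writerecordsv2/_readrecordsv2 methods defined later in this class, but uses plain struct calls on in-memory strings and is illustrative only.

import struct

def pack_record(rtype, data):
    # 1-byte type, 4-byte big-endian length, then the payload bytes
    assert len(rtype) == 1
    return struct.pack('>sI%is' % len(data), rtype, len(data), data)

def unpack_records(blob):
    records, off = [], 0
    while off < len(blob):
        rtype = blob[off]
        length = struct.unpack('>I', blob[off + 1:off + 5])[0]
        records.append((rtype, blob[off + 5:off + 5 + length]))
        off += 5 + length
    return records

# round trip: an 'L' (local node) record and a null-separated 'F' record
blob = pack_record('L', '0' * 40) + pack_record('F', 'foo\0u\0')
assert unpack_records(blob) == [('L', '0' * 40), ('F', 'foo\0u\0')]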
48 statepathv1 = "merge/state"
48 statepathv1 = "merge/state"
49 statepathv2 = "merge/state2"
49 statepathv2 = "merge/state2"
50
50
51 def __init__(self, repo):
51 def __init__(self, repo):
52 self._repo = repo
52 self._repo = repo
53 self._dirty = False
53 self._dirty = False
54 self._read()
54 self._read()
55
55
56 def reset(self, node=None, other=None):
56 def reset(self, node=None, other=None):
57 self._state = {}
57 self._state = {}
58 if node:
58 if node:
59 self._local = node
59 self._local = node
60 self._other = other
60 self._other = other
61 shutil.rmtree(self._repo.join("merge"), True)
61 shutil.rmtree(self._repo.join("merge"), True)
62 self._dirty = False
62 self._dirty = False
63
63
64 def _read(self):
64 def _read(self):
65 """Analyse each record content to restore a serialized state from disk
65 """Analyse each record content to restore a serialized state from disk
66
66
67 This function process "record" entry produced by the de-serialization
67 This function process "record" entry produced by the de-serialization
68 of on disk file.
68 of on disk file.
69 """
69 """
70 self._state = {}
70 self._state = {}
71 records = self._readrecords()
71 records = self._readrecords()
72 for rtype, record in records:
72 for rtype, record in records:
73 if rtype == 'L':
73 if rtype == 'L':
74 self._local = bin(record)
74 self._local = bin(record)
75 elif rtype == 'O':
75 elif rtype == 'O':
76 self._other = bin(record)
76 self._other = bin(record)
77 elif rtype == "F":
77 elif rtype == "F":
78 bits = record.split("\0")
78 bits = record.split("\0")
79 self._state[bits[0]] = bits[1:]
79 self._state[bits[0]] = bits[1:]
80 elif not rtype.islower():
80 elif not rtype.islower():
81 raise util.Abort(_('unsupported merge state record: %s')
81 raise util.Abort(_('unsupported merge state record: %s')
82 % rtype)
82 % rtype)
83 self._dirty = False
83 self._dirty = False
84
84
85 def _readrecords(self):
85 def _readrecords(self):
86 """Read merge state from disk and return a list of record (TYPE, data)
86 """Read merge state from disk and return a list of record (TYPE, data)
87
87
88 We read data from both v1 and v2 files and decide which one to use.
88 We read data from both v1 and v2 files and decide which one to use.
89
89
90 V1 has been used by version prior to 2.9.1 and contains less data than
90 V1 has been used by version prior to 2.9.1 and contains less data than
91 v2. We read both versions and check if no data in v2 contradicts
91 v2. We read both versions and check if no data in v2 contradicts
92 v1. If there is not contradiction we can safely assume that both v1
92 v1. If there is not contradiction we can safely assume that both v1
93 and v2 were written at the same time and use the extract data in v2. If
93 and v2 were written at the same time and use the extract data in v2. If
94 there is contradiction we ignore v2 content as we assume an old version
94 there is contradiction we ignore v2 content as we assume an old version
95 of Mercurial has overwritten the mergestate file and left an old v2
95 of Mercurial has overwritten the mergestate file and left an old v2
96 file around.
96 file around.
97
97
98 returns list of record [(TYPE, data), ...]"""
98 returns list of record [(TYPE, data), ...]"""
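The consistency check described above boils down to a subset test. A stripped-down sketch of that decision follows; record payloads here are plain strings, and the fix-ups the real code performs when falling back to v1 (inferring an 'O' record and inserting placeholder "other node" fields) are only noted in comments.

def pickrecords(v1records, v2records):
    # Build the set of v2 records as they would appear in v1: 'F' records
    # lose their "other node" field, as _droponode does above.
    oldv2 = set()
    for rtype, data in v2records:
        if rtype == 'F':
            bits = data.split('\0')
            data = '\0'.join(bits[:-2] + bits[-1:])
        oldv2.add((rtype, data))
    if all(rec in oldv2 for rec in v1records):
        return v2records   # consistent: trust the richer v2 data
    # inconsistent: v1 is newer, so fall back to it (the real code also
    # appends an inferred 'O' record before returning)
    return v1records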
99 v1records = self._readrecordsv1()
99 v1records = self._readrecordsv1()
100 v2records = self._readrecordsv2()
100 v2records = self._readrecordsv2()
101 oldv2 = set() # old format version of v2 record
101 oldv2 = set() # old format version of v2 record
102 for rec in v2records:
102 for rec in v2records:
103 if rec[0] == 'L':
103 if rec[0] == 'L':
104 oldv2.add(rec)
104 oldv2.add(rec)
105 elif rec[0] == 'F':
105 elif rec[0] == 'F':
106 # drop the onode data (not contained in v1)
106 # drop the onode data (not contained in v1)
107 oldv2.add(('F', _droponode(rec[1])))
107 oldv2.add(('F', _droponode(rec[1])))
108 for rec in v1records:
108 for rec in v1records:
109 if rec not in oldv2:
109 if rec not in oldv2:
110 # v1 file is newer than v2 file, use it
110 # v1 file is newer than v2 file, use it
111 # we have to infer the "other" changeset of the merge
111 # we have to infer the "other" changeset of the merge
112 # we cannot do better than that with v1 of the format
112 # we cannot do better than that with v1 of the format
113 mctx = self._repo[None].parents()[-1]
113 mctx = self._repo[None].parents()[-1]
114 v1records.append(('O', mctx.hex()))
114 v1records.append(('O', mctx.hex()))
115 # add placeholder "other" file node information
115 # add placeholder "other" file node information
116 # nobody is using it yet so we do not need to fetch the data
116 # nobody is using it yet so we do not need to fetch the data
117 # if mctx was wrong `mctx[bits[-2]]` may fail.
117 # if mctx was wrong `mctx[bits[-2]]` may fail.
118 for idx, r in enumerate(v1records):
118 for idx, r in enumerate(v1records):
119 if r[0] == 'F':
119 if r[0] == 'F':
120 bits = r[1].split("\0")
120 bits = r[1].split("\0")
121 bits.insert(-2, '')
121 bits.insert(-2, '')
122 v1records[idx] = (r[0], "\0".join(bits))
122 v1records[idx] = (r[0], "\0".join(bits))
123 return v1records
123 return v1records
124 else:
124 else:
125 return v2records
125 return v2records
126
126
127 def _readrecordsv1(self):
127 def _readrecordsv1(self):
128 """read on disk merge state for version 1 file
128 """read on disk merge state for version 1 file
129
129
130 returns list of record [(TYPE, data), ...]
130 returns list of record [(TYPE, data), ...]
131
131
132 Note: the "F" data from this file are one entry short
132 Note: the "F" data from this file are one entry short
133 (no "other file node" entry)
133 (no "other file node" entry)
134 """
134 """
135 records = []
135 records = []
136 try:
136 try:
137 f = self._repo.opener(self.statepathv1)
137 f = self._repo.opener(self.statepathv1)
138 for i, l in enumerate(f):
138 for i, l in enumerate(f):
139 if i == 0:
139 if i == 0:
140 records.append(('L', l[:-1]))
140 records.append(('L', l[:-1]))
141 else:
141 else:
142 records.append(('F', l[:-1]))
142 records.append(('F', l[:-1]))
143 f.close()
143 f.close()
144 except IOError, err:
144 except IOError, err:
145 if err.errno != errno.ENOENT:
145 if err.errno != errno.ENOENT:
146 raise
146 raise
147 return records
147 return records
148
148
149 def _readrecordsv2(self):
149 def _readrecordsv2(self):
150 """read on disk merge state for version 2 file
150 """read on disk merge state for version 2 file
151
151
152 returns list of record [(TYPE, data), ...]
152 returns list of record [(TYPE, data), ...]
153 """
153 """
154 records = []
154 records = []
155 try:
155 try:
156 f = self._repo.opener(self.statepathv2)
156 f = self._repo.opener(self.statepathv2)
157 data = f.read()
157 data = f.read()
158 off = 0
158 off = 0
159 end = len(data)
159 end = len(data)
160 while off < end:
160 while off < end:
161 rtype = data[off]
161 rtype = data[off]
162 off += 1
162 off += 1
163 length = _unpack('>I', data[off:(off + 4)])[0]
163 length = _unpack('>I', data[off:(off + 4)])[0]
164 off += 4
164 off += 4
165 record = data[off:(off + length)]
165 record = data[off:(off + length)]
166 off += length
166 off += length
167 records.append((rtype, record))
167 records.append((rtype, record))
168 f.close()
168 f.close()
169 except IOError, err:
169 except IOError, err:
170 if err.errno != errno.ENOENT:
170 if err.errno != errno.ENOENT:
171 raise
171 raise
172 return records
172 return records
173
173
174 def commit(self):
174 def commit(self):
175 """Write current state on disk (if necessary)"""
175 """Write current state on disk (if necessary)"""
176 if self._dirty:
176 if self._dirty:
177 records = []
177 records = []
178 records.append(("L", hex(self._local)))
178 records.append(("L", hex(self._local)))
179 records.append(("O", hex(self._other)))
179 records.append(("O", hex(self._other)))
180 for d, v in self._state.iteritems():
180 for d, v in self._state.iteritems():
181 records.append(("F", "\0".join([d] + v)))
181 records.append(("F", "\0".join([d] + v)))
182 self._writerecords(records)
182 self._writerecords(records)
183 self._dirty = False
183 self._dirty = False
184
184
185 def _writerecords(self, records):
185 def _writerecords(self, records):
186 """Write current state on disk (both v1 and v2)"""
186 """Write current state on disk (both v1 and v2)"""
187 self._writerecordsv1(records)
187 self._writerecordsv1(records)
188 self._writerecordsv2(records)
188 self._writerecordsv2(records)
189
189
190 def _writerecordsv1(self, records):
190 def _writerecordsv1(self, records):
191 """Write current state on disk in a version 1 file"""
191 """Write current state on disk in a version 1 file"""
192 f = self._repo.opener(self.statepathv1, "w")
192 f = self._repo.opener(self.statepathv1, "w")
193 irecords = iter(records)
193 irecords = iter(records)
194 lrecords = irecords.next()
194 lrecords = irecords.next()
195 assert lrecords[0] == 'L'
195 assert lrecords[0] == 'L'
196 f.write(hex(self._local) + "\n")
196 f.write(hex(self._local) + "\n")
197 for rtype, data in irecords:
197 for rtype, data in irecords:
198 if rtype == "F":
198 if rtype == "F":
199 f.write("%s\n" % _droponode(data))
199 f.write("%s\n" % _droponode(data))
200 f.close()
200 f.close()
201
201
202 def _writerecordsv2(self, records):
202 def _writerecordsv2(self, records):
203 """Write current state on disk in a version 2 file"""
203 """Write current state on disk in a version 2 file"""
204 f = self._repo.opener(self.statepathv2, "w")
204 f = self._repo.opener(self.statepathv2, "w")
205 for key, data in records:
205 for key, data in records:
206 assert len(key) == 1
206 assert len(key) == 1
207 format = ">sI%is" % len(data)
207 format = ">sI%is" % len(data)
208 f.write(_pack(format, key, len(data), data))
208 f.write(_pack(format, key, len(data), data))
209 f.close()
209 f.close()
210
210
211 def add(self, fcl, fco, fca, fd):
211 def add(self, fcl, fco, fca, fd):
212 """add a new (potentially?) conflicting file the merge state
212 """add a new (potentially?) conflicting file the merge state
213 fcl: file context for local,
213 fcl: file context for local,
214 fco: file context for remote,
214 fco: file context for remote,
215 fca: file context for ancestors,
215 fca: file context for ancestors,
216 fd: file path of the resulting merge.
216 fd: file path of the resulting merge.
217
217
218 note: also write the local version to the `.hg/merge` directory.
218 note: also write the local version to the `.hg/merge` directory.
219 """
219 """
220 hash = util.sha1(fcl.path()).hexdigest()
220 hash = util.sha1(fcl.path()).hexdigest()
221 self._repo.opener.write("merge/" + hash, fcl.data())
221 self._repo.opener.write("merge/" + hash, fcl.data())
222 self._state[fd] = ['u', hash, fcl.path(),
222 self._state[fd] = ['u', hash, fcl.path(),
223 fca.path(), hex(fca.filenode()),
223 fca.path(), hex(fca.filenode()),
224 fco.path(), hex(fco.filenode()),
224 fco.path(), hex(fco.filenode()),
225 fcl.flags()]
225 fcl.flags()]
226 self._dirty = True
226 self._dirty = True
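The backup written by add() above lives under .hg/merge/<sha1 of the file path>. A quick illustration of the naming, assuming util.sha1 is the usual thin wrapper around hashlib.sha1 and using a hypothetical path:

import hashlib

path = 'src/foo.c'  # hypothetical path, for illustration only
backupname = 'merge/' + hashlib.sha1(path).hexdigest()
# the pre-merge local version of src/foo.c is stashed at
# .hg/merge/<40 hex digits>, which is what resolve() reads back later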
227
227
228 def __contains__(self, dfile):
228 def __contains__(self, dfile):
229 return dfile in self._state
229 return dfile in self._state
230
230
231 def __getitem__(self, dfile):
231 def __getitem__(self, dfile):
232 return self._state[dfile][0]
232 return self._state[dfile][0]
233
233
234 def __iter__(self):
234 def __iter__(self):
235 l = self._state.keys()
235 l = self._state.keys()
236 l.sort()
236 l.sort()
237 for f in l:
237 for f in l:
238 yield f
238 yield f
239
239
240 def files(self):
240 def files(self):
241 return self._state.keys()
241 return self._state.keys()
242
242
243 def mark(self, dfile, state):
243 def mark(self, dfile, state):
244 self._state[dfile][0] = state
244 self._state[dfile][0] = state
245 self._dirty = True
245 self._dirty = True
246
246
247 def resolve(self, dfile, wctx):
247 def resolve(self, dfile, wctx):
248 """rerun merge process for file path `dfile`"""
248 """rerun merge process for file path `dfile`"""
249 if self[dfile] == 'r':
249 if self[dfile] == 'r':
250 return 0
250 return 0
251 stateentry = self._state[dfile]
251 stateentry = self._state[dfile]
252 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
252 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
253 octx = self._repo[self._other]
253 octx = self._repo[self._other]
254 fcd = wctx[dfile]
254 fcd = wctx[dfile]
255 fco = octx[ofile]
255 fco = octx[ofile]
256 fca = self._repo.filectx(afile, fileid=anode)
256 fca = self._repo.filectx(afile, fileid=anode)
257 # "premerge" x flags
257 # "premerge" x flags
258 flo = fco.flags()
258 flo = fco.flags()
259 fla = fca.flags()
259 fla = fca.flags()
260 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
260 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
261 if fca.node() == nullid:
261 if fca.node() == nullid:
262 self._repo.ui.warn(_('warning: cannot merge flags for %s\n') %
262 self._repo.ui.warn(_('warning: cannot merge flags for %s\n') %
263 afile)
263 afile)
264 elif flags == fla:
264 elif flags == fla:
265 flags = flo
265 flags = flo
266 # restore local
266 # restore local
267 f = self._repo.opener("merge/" + hash)
267 f = self._repo.opener("merge/" + hash)
268 self._repo.wwrite(dfile, f.read(), flags)
268 self._repo.wwrite(dfile, f.read(), flags)
269 f.close()
269 f.close()
270 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca)
270 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca)
271 if r is None:
271 if r is None:
272 # no real conflict
272 # no real conflict
273 del self._state[dfile]
273 del self._state[dfile]
274 self._dirty = True
274 self._dirty = True
275 elif not r:
275 elif not r:
276 self.mark(dfile, 'r')
276 self.mark(dfile, 'r')
277 return r
277 return r
278
278
279 def _checkunknownfile(repo, wctx, mctx, f):
279 def _checkunknownfile(repo, wctx, mctx, f):
280 return (not repo.dirstate._ignore(f)
280 return (not repo.dirstate._ignore(f)
281 and os.path.isfile(repo.wjoin(f))
281 and os.path.isfile(repo.wjoin(f))
282 and repo.wopener.audit.check(f)
282 and repo.wopener.audit.check(f)
283 and repo.dirstate.normalize(f) not in repo.dirstate
283 and repo.dirstate.normalize(f) not in repo.dirstate
284 and mctx[f].cmp(wctx[f]))
284 and mctx[f].cmp(wctx[f]))
285
285
286 def _checkunknown(repo, wctx, mctx):
286 def _checkunknown(repo, wctx, mctx):
287 "check for collisions between unknown files and files in mctx"
287 "check for collisions between unknown files and files in mctx"
288
288
289 error = False
289 error = False
290 for f in mctx:
290 for f in mctx:
291 if f not in wctx and _checkunknownfile(repo, wctx, mctx, f):
291 if f not in wctx and _checkunknownfile(repo, wctx, mctx, f):
292 error = True
292 error = True
293 wctx._repo.ui.warn(_("%s: untracked file differs\n") % f)
293 wctx._repo.ui.warn(_("%s: untracked file differs\n") % f)
294 if error:
294 if error:
295 raise util.Abort(_("untracked files in working directory differ "
295 raise util.Abort(_("untracked files in working directory differ "
296 "from files in requested revision"))
296 "from files in requested revision"))
297
297
298 def _forgetremoved(wctx, mctx, branchmerge):
298 def _forgetremoved(wctx, mctx, branchmerge):
299 """
299 """
300 Forget removed files
300 Forget removed files
301
301
302 If we're jumping between revisions (as opposed to merging), and if
302 If we're jumping between revisions (as opposed to merging), and if
303 neither the working directory nor the target rev has the file,
303 neither the working directory nor the target rev has the file,
304 then we need to remove it from the dirstate, to prevent the
304 then we need to remove it from the dirstate, to prevent the
305 dirstate from listing the file when it is no longer in the
305 dirstate from listing the file when it is no longer in the
306 manifest.
306 manifest.
307
307
308 If we're merging, and the other revision has removed a file
308 If we're merging, and the other revision has removed a file
309 that is not present in the working directory, we need to mark it
309 that is not present in the working directory, we need to mark it
310 as removed.
310 as removed.
311 """
311 """
312
312
313 actions = []
313 actions = []
314 state = branchmerge and 'r' or 'f'
314 state = branchmerge and 'r' or 'f'
315 for f in wctx.deleted():
315 for f in wctx.deleted():
316 if f not in mctx:
316 if f not in mctx:
317 actions.append((f, state, None, "forget deleted"))
317 actions.append((f, state, None, "forget deleted"))
318
318
319 if not branchmerge:
319 if not branchmerge:
320 for f in wctx.removed():
320 for f in wctx.removed():
321 if f not in mctx:
321 if f not in mctx:
322 actions.append((f, "f", None, "forget removed"))
322 actions.append((f, "f", None, "forget removed"))
323
323
324 return actions
324 return actions
325
325
326 def _checkcollision(repo, wmf, actions):
326 def _checkcollision(repo, wmf, actions):
327 # build up the provisional merged manifest
327 # build up the provisional merged manifest
328 pmmf = set(wmf)
328 pmmf = set(wmf)
329
329
330 def addop(f, args):
330 def addop(f, args):
331 pmmf.add(f)
331 pmmf.add(f)
332 def removeop(f, args):
332 def removeop(f, args):
333 pmmf.discard(f)
333 pmmf.discard(f)
334 def nop(f, args):
334 def nop(f, args):
335 pass
335 pass
336
336
337 def renamemoveop(f, args):
337 def renamemoveop(f, args):
338 f2, flags = args
338 f2, flags = args
339 pmmf.discard(f2)
339 pmmf.discard(f2)
340 pmmf.add(f)
340 pmmf.add(f)
341 def renamegetop(f, args):
341 def renamegetop(f, args):
342 f2, flags = args
342 f2, flags = args
343 pmmf.add(f)
343 pmmf.add(f)
344 def mergeop(f, args):
344 def mergeop(f, args):
345 f1, f2, fa, move, anc = args
345 f1, f2, fa, move, anc = args
346 if move:
346 if move:
347 pmmf.discard(f1)
347 pmmf.discard(f1)
348 pmmf.add(f)
348 pmmf.add(f)
349
349
350 opmap = {
350 opmap = {
351 "a": addop,
351 "a": addop,
352 "dm": renamemoveop,
352 "dm": renamemoveop,
353 "dg": renamegetop,
353 "dg": renamegetop,
354 "dr": nop,
354 "dr": nop,
355 "e": nop,
355 "e": nop,
356 "f": addop, # untracked file should be kept in working directory
356 "f": addop, # untracked file should be kept in working directory
357 "g": addop,
357 "g": addop,
358 "m": mergeop,
358 "m": mergeop,
359 "r": removeop,
359 "r": removeop,
360 "rd": nop,
360 "rd": nop,
361 "cd": addop,
361 "cd": addop,
362 "dc": addop,
362 "dc": addop,
363 }
363 }
364 for f, m, args, msg in actions:
364 for f, m, args, msg in actions:
365 op = opmap.get(m)
365 op = opmap.get(m)
366 assert op, m
366 assert op, m
367 op(f, args)
367 op(f, args)
368
368
369 # check case-folding collision in provisional merged manifest
369 # check case-folding collision in provisional merged manifest
370 foldmap = {}
370 foldmap = {}
371 for f in sorted(pmmf):
371 for f in sorted(pmmf):
372 fold = util.normcase(f)
372 fold = util.normcase(f)
373 if fold in foldmap:
373 if fold in foldmap:
374 raise util.Abort(_("case-folding collision between %s and %s")
374 raise util.Abort(_("case-folding collision between %s and %s")
375 % (f, foldmap[fold]))
375 % (f, foldmap[fold]))
376 foldmap[fold] = f
376 foldmap[fold] = f
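The collision check above amounts to detecting two distinct paths that normalize to the same case. A self-contained sketch of the same idea, using str.lower() as a stand-in for util.normcase:

def find_case_collisions(paths):
    # return (path, earlier path) pairs that would collide on a
    # case-insensitive filesystem
    seen, collisions = {}, []
    for p in sorted(paths):
        fold = p.lower()           # stand-in for util.normcase
        if fold in seen:
            collisions.append((p, seen[fold]))
        else:
            seen[fold] = p
    return collisions

assert find_case_collisions(['README', 'readme', 'setup.py']) == \
    [('readme', 'README')]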
377
377
378 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
378 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
379 acceptremote, followcopies):
379 acceptremote, followcopies):
380 """
380 """
381 Merge p1 and p2 with ancestor pa and generate merge action list
381 Merge p1 and p2 with ancestor pa and generate merge action list
382
382
383 branchmerge and force are as passed in to update
383 branchmerge and force are as passed in to update
384 partial = function to filter file lists
384 partial = function to filter file lists
385 acceptremote = accept the incoming changes without prompting
385 acceptremote = accept the incoming changes without prompting
386 """
386 """
387
387
388 actions, copy, movewithdir = [], {}, {}
388 actions, copy, movewithdir = [], {}, {}
389
389
390 # manifests fetched in order are going to be faster, so prime the caches
390 # manifests fetched in order are going to be faster, so prime the caches
391 [x.manifest() for x in
391 [x.manifest() for x in
392 sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]
392 sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]
393
393
394 if followcopies:
394 if followcopies:
395 ret = copies.mergecopies(repo, wctx, p2, pa)
395 ret = copies.mergecopies(repo, wctx, p2, pa)
396 copy, movewithdir, diverge, renamedelete = ret
396 copy, movewithdir, diverge, renamedelete = ret
397 for of, fl in diverge.iteritems():
397 for of, fl in diverge.iteritems():
398 actions.append((of, "dr", (fl,), "divergent renames"))
398 actions.append((of, "dr", (fl,), "divergent renames"))
399 for of, fl in renamedelete.iteritems():
399 for of, fl in renamedelete.iteritems():
400 actions.append((of, "rd", (fl,), "rename and delete"))
400 actions.append((of, "rd", (fl,), "rename and delete"))
401
401
402 repo.ui.note(_("resolving manifests\n"))
402 repo.ui.note(_("resolving manifests\n"))
403 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
403 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
404 % (bool(branchmerge), bool(force), bool(partial)))
404 % (bool(branchmerge), bool(force), bool(partial)))
405 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
405 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
406
406
407 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
407 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
408 copied = set(copy.values())
408 copied = set(copy.values())
409 copied.update(movewithdir.values())
409 copied.update(movewithdir.values())
410
410
411 if '.hgsubstate' in m1:
411 if '.hgsubstate' in m1:
412 # check whether sub state is modified
412 # check whether sub state is modified
413 for s in sorted(wctx.substate):
413 for s in sorted(wctx.substate):
414 if wctx.sub(s).dirty():
414 if wctx.sub(s).dirty():
415 m1['.hgsubstate'] += "+"
415 m1['.hgsubstate'] += "+"
416 break
416 break
417
417
418 aborts = []
418 aborts = []
419 # Compare manifests
419 # Compare manifests
420 fdiff = dicthelpers.diff(m1, m2)
420 fdiff = dicthelpers.diff(m1, m2)
421 flagsdiff = m1.flagsdiff(m2)
421 flagsdiff = m1.flagsdiff(m2)
422 diff12 = dicthelpers.join(fdiff, flagsdiff)
422 diff12 = dicthelpers.join(fdiff, flagsdiff)
423
423
424 for f, (n12, fl12) in diff12.iteritems():
424 for f, (n12, fl12) in diff12.iteritems():
425 if n12:
425 if n12:
426 n1, n2 = n12
426 n1, n2 = n12
427 else: # file contents didn't change, but flags did
427 else: # file contents didn't change, but flags did
428 n1 = n2 = m1.get(f, None)
428 n1 = n2 = m1.get(f, None)
429 if n1 is None:
429 if n1 is None:
430 # Since n1 == n2, the file isn't present in m2 either. This
430 # Since n1 == n2, the file isn't present in m2 either. This
431 # means that the file was removed or deleted locally and
431 # means that the file was removed or deleted locally and
432 # removed remotely, but that residual entries remain in flags.
432 # removed remotely, but that residual entries remain in flags.
433 # This can happen in manifests generated by workingctx.
433 # This can happen in manifests generated by workingctx.
434 continue
434 continue
435 if fl12:
435 if fl12:
436 fl1, fl2 = fl12
436 fl1, fl2 = fl12
437 else: # flags didn't change, file contents did
437 else: # flags didn't change, file contents did
438 fl1 = fl2 = m1.flags(f)
438 fl1 = fl2 = m1.flags(f)
439
439
440 if partial and not partial(f):
440 if partial and not partial(f):
441 continue
441 continue
442 if n1 and n2:
442 if n1 and n2:
443 fa = f
443 fa = f
444 a = ma.get(f, nullid)
444 a = ma.get(f, nullid)
445 if a == nullid:
445 if a == nullid:
446 fa = copy.get(f, f)
446 fa = copy.get(f, f)
447 # Note: f as default is wrong - we can't really make a 3-way
447 # Note: f as default is wrong - we can't really make a 3-way
448 # merge without an ancestor file.
448 # merge without an ancestor file.
449 fla = ma.flags(fa)
449 fla = ma.flags(fa)
450 nol = 'l' not in fl1 + fl2 + fla
450 nol = 'l' not in fl1 + fl2 + fla
451 if n2 == a and fl2 == fla:
451 if n2 == a and fl2 == fla:
452 pass # remote unchanged - keep local
452 pass # remote unchanged - keep local
453 elif n1 == a and fl1 == fla: # local unchanged - use remote
453 elif n1 == a and fl1 == fla: # local unchanged - use remote
454 if n1 == n2: # optimization: keep local content
454 if n1 == n2: # optimization: keep local content
455 actions.append((f, "e", (fl2,), "update permissions"))
455 actions.append((f, "e", (fl2,), "update permissions"))
456 else:
456 else:
457 actions.append((f, "g", (fl2,), "remote is newer"))
457 actions.append((f, "g", (fl2,), "remote is newer"))
458 elif nol and n2 == a: # remote only changed 'x'
458 elif nol and n2 == a: # remote only changed 'x'
459 actions.append((f, "e", (fl2,), "update permissions"))
459 actions.append((f, "e", (fl2,), "update permissions"))
460 elif nol and n1 == a: # local only changed 'x'
460 elif nol and n1 == a: # local only changed 'x'
461 actions.append((f, "g", (fl1,), "remote is newer"))
461 actions.append((f, "g", (fl1,), "remote is newer"))
462 else: # both changed something
462 else: # both changed something
463 actions.append((f, "m", (f, f, fa, False, pa.node()),
463 actions.append((f, "m", (f, f, fa, False, pa.node()),
464 "versions differ"))
464 "versions differ"))
465 elif f in copied: # files we'll deal with on m2 side
465 elif f in copied: # files we'll deal with on m2 side
466 pass
466 pass
467 elif n1 and f in movewithdir: # directory rename, move local
467 elif n1 and f in movewithdir: # directory rename, move local
468 f2 = movewithdir[f]
468 f2 = movewithdir[f]
469 actions.append((f2, "dm", (f, fl1),
469 actions.append((f2, "dm", (f, fl1),
470 "remote directory rename - move from " + f))
470 "remote directory rename - move from " + f))
471 elif n1 and f in copy:
471 elif n1 and f in copy:
472 f2 = copy[f]
472 f2 = copy[f]
473 actions.append((f, "m", (f, f2, f2, False, pa.node()),
473 actions.append((f, "m", (f, f2, f2, False, pa.node()),
474 "local copied/moved from " + f2))
474 "local copied/moved from " + f2))
475 elif n1 and f in ma: # clean, a different, no remote
475 elif n1 and f in ma: # clean, a different, no remote
476 if n1 != ma[f]:
476 if n1 != ma[f]:
477 if acceptremote:
477 if acceptremote:
478 actions.append((f, "r", None, "remote delete"))
478 actions.append((f, "r", None, "remote delete"))
479 else:
479 else:
480 actions.append((f, "cd", None, "prompt changed/deleted"))
480 actions.append((f, "cd", None, "prompt changed/deleted"))
481 elif n1[20:] == "a": # added, no remote
481 elif n1[20:] == "a": # added, no remote
482 actions.append((f, "f", None, "remote deleted"))
482 actions.append((f, "f", None, "remote deleted"))
483 else:
483 else:
484 actions.append((f, "r", None, "other deleted"))
484 actions.append((f, "r", None, "other deleted"))
485 elif n2 and f in movewithdir:
485 elif n2 and f in movewithdir:
486 f2 = movewithdir[f]
486 f2 = movewithdir[f]
487 actions.append((f2, "dg", (f, fl2),
487 actions.append((f2, "dg", (f, fl2),
488 "local directory rename - get from " + f))
488 "local directory rename - get from " + f))
489 elif n2 and f in copy:
489 elif n2 and f in copy:
490 f2 = copy[f]
490 f2 = copy[f]
491 if f2 in m2:
491 if f2 in m2:
492 actions.append((f, "m", (f2, f, f2, False, pa.node()),
492 actions.append((f, "m", (f2, f, f2, False, pa.node()),
493 "remote copied from " + f2))
493 "remote copied from " + f2))
494 else:
494 else:
495 actions.append((f, "m", (f2, f, f2, True, pa.node()),
495 actions.append((f, "m", (f2, f, f2, True, pa.node()),
496 "remote moved from " + f2))
496 "remote moved from " + f2))
497 elif n2 and f not in ma:
497 elif n2 and f not in ma:
498 # local unknown, remote created: the logic is described by the
498 # local unknown, remote created: the logic is described by the
499 # following table:
499 # following table:
500 #
500 #
501 # force branchmerge different | action
501 # force branchmerge different | action
502 # n * n | get
502 # n * n | get
503 # n * y | abort
503 # n * y | abort
504 # y n * | get
504 # y n * | get
505 # y y n | get
505 # y y n | get
506 # y y y | merge
506 # y y y | merge
507 #
507 #
508 # Checking whether the files are different is expensive, so we
508 # Checking whether the files are different is expensive, so we
509 # don't do that when we can avoid it.
509 # don't do that when we can avoid it.
510 if force and not branchmerge:
510 if force and not branchmerge:
511 actions.append((f, "g", (fl2,), "remote created"))
511 actions.append((f, "g", (fl2,), "remote created"))
512 else:
512 else:
513 different = _checkunknownfile(repo, wctx, p2, f)
513 different = _checkunknownfile(repo, wctx, p2, f)
514 if force and branchmerge and different:
514 if force and branchmerge and different:
515 # FIXME: This is wrong - f is not in ma ...
515 # FIXME: This is wrong - f is not in ma ...
516 actions.append((f, "m", (f, f, f, False, pa.node()),
516 actions.append((f, "m", (f, f, f, False, pa.node()),
517 "remote differs from untracked local"))
517 "remote differs from untracked local"))
518 elif not force and different:
518 elif not force and different:
519 aborts.append((f, "ud"))
519 aborts.append((f, "ud"))
520 else:
520 else:
521 actions.append((f, "g", (fl2,), "remote created"))
521 actions.append((f, "g", (fl2,), "remote created"))
522 elif n2 and n2 != ma[f]:
522 elif n2 and n2 != ma[f]:
523 different = _checkunknownfile(repo, wctx, p2, f)
523 different = _checkunknownfile(repo, wctx, p2, f)
524 if not force and different:
524 if not force and different:
525 aborts.append((f, "ud"))
525 aborts.append((f, "ud"))
526 else:
526 else:
527 # if different: old untracked f may be overwritten and lost
527 # if different: old untracked f may be overwritten and lost
528 if acceptremote:
528 if acceptremote:
529 actions.append((f, "g", (m2.flags(f),),
529 actions.append((f, "g", (m2.flags(f),),
530 "remote recreating"))
530 "remote recreating"))
531 else:
531 else:
532 actions.append((f, "dc", (m2.flags(f),),
532 actions.append((f, "dc", (m2.flags(f),),
533 "prompt deleted/changed"))
533 "prompt deleted/changed"))
534
534
535 for f, m in sorted(aborts):
535 for f, m in sorted(aborts):
536 if m == "ud":
536 if m == "ud":
537 repo.ui.warn(_("%s: untracked file differs\n") % f)
537 repo.ui.warn(_("%s: untracked file differs\n") % f)
538 else: assert False, m
538 else: assert False, m
539 if aborts:
539 if aborts:
540 raise util.Abort(_("untracked files in working directory differ "
540 raise util.Abort(_("untracked files in working directory differ "
541 "from files in requested revision"))
541 "from files in requested revision"))
542
542
543 if not util.checkcase(repo.path):
543 if not util.checkcase(repo.path):
544 # check collision between files only in p2 for clean update
544 # check collision between files only in p2 for clean update
545 if (not branchmerge and
545 if (not branchmerge and
546 (force or not wctx.dirty(missing=True, branch=False))):
546 (force or not wctx.dirty(missing=True, branch=False))):
547 _checkcollision(repo, m2, [])
547 _checkcollision(repo, m2, [])
548 else:
548 else:
549 _checkcollision(repo, m1, actions)
549 _checkcollision(repo, m1, actions)
550
550
551 return actions
551 return actions
552
552
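The force/branchmerge/different table in the "local unknown, remote created" branch of manifestmerge above can be read as a small decision function. A minimal sketch, using a hypothetical standalone helper (not part of merge.py), with a few rows of the table checked explicitly:

    def remote_created_action(force, branchmerge, different):
        # Encodes the table: without force, an unknown local file that
        # differs from the remote one aborts; otherwise the file is fetched,
        # except in the forced branch-merge case where differing files merge.
        if not force:
            return 'abort' if different else 'get'
        if not branchmerge:
            return 'get'
        return 'merge' if different else 'get'

    assert remote_created_action(force=False, branchmerge=True, different=True) == 'abort'
    assert remote_created_action(force=True, branchmerge=False, different=True) == 'get'
    assert remote_created_action(force=True, branchmerge=True, different=True) == 'merge'
    assert remote_created_action(force=True, branchmerge=True, different=False) == 'get'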
553 def actionkey(a):
553 def actionkey(a):
554 return a[1] in "rf" and -1 or 0, a
554 return a[1] in "rf" and -1 or 0, a
555
555
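actionkey sorts remove/forget actions ('r'/'f') ahead of everything else, presumably so removals happen before new content is written. A quick self-contained check (the sample action list is made up; actionkey is copied from above):

    def actionkey(a):
        return a[1] in "rf" and -1 or 0, a

    sample = [('b.txt', 'g', ('',), 'remote created'),
              ('a.txt', 'r', None, 'remote deleted'),
              ('c.txt', 'm', None, 'versions differ')]
    sample.sort(key=actionkey)
    print([m for _, m, _, _ in sample])   # ['r', 'g', 'm'] - removal first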
556 def getremove(repo, mctx, overwrite, args):
556 def getremove(repo, mctx, overwrite, args):
557 """apply usually-non-interactive updates to the working directory
557 """apply usually-non-interactive updates to the working directory
558
558
559 mctx is the context to be merged into the working copy
559 mctx is the context to be merged into the working copy
560
560
561 yields tuples for progress updates
561 yields tuples for progress updates
562 """
562 """
563 verbose = repo.ui.verbose
563 verbose = repo.ui.verbose
564 unlink = util.unlinkpath
564 unlink = util.unlinkpath
565 wjoin = repo.wjoin
565 wjoin = repo.wjoin
566 fctx = mctx.filectx
566 fctx = mctx.filectx
567 wwrite = repo.wwrite
567 wwrite = repo.wwrite
568 audit = repo.wopener.audit
568 audit = repo.wopener.audit
569 i = 0
569 i = 0
570 for arg in args:
570 for arg in args:
571 f = arg[0]
571 f = arg[0]
572 if arg[1] == 'r':
572 if arg[1] == 'r':
573 if verbose:
573 if verbose:
574 repo.ui.note(_("removing %s\n") % f)
574 repo.ui.note(_("removing %s\n") % f)
575 audit(f)
575 audit(f)
576 try:
576 try:
577 unlink(wjoin(f), ignoremissing=True)
577 unlink(wjoin(f), ignoremissing=True)
578 except OSError, inst:
578 except OSError, inst:
579 repo.ui.warn(_("update failed to remove %s: %s!\n") %
579 repo.ui.warn(_("update failed to remove %s: %s!\n") %
580 (f, inst.strerror))
580 (f, inst.strerror))
581 else:
581 else:
582 if verbose:
582 if verbose:
583 repo.ui.note(_("getting %s\n") % f)
583 repo.ui.note(_("getting %s\n") % f)
584 wwrite(f, fctx(f).data(), arg[2][0])
584 wwrite(f, fctx(f).data(), arg[2][0])
585 if i == 100:
585 if i == 100:
586 yield i, f
586 yield i, f
587 i = 0
587 i = 0
588 i += 1
588 i += 1
589 if i > 0:
589 if i > 0:
590 yield i, f
590 yield i, f
591
591
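Both getremove and applyupdates below consume (f, m, args, msg) tuples, and the shape of args depends on the action code. A hypothetical example list (file names and messages invented; the args shapes are taken from the handlers in this file):

    example_actions = [
        ('old.txt', 'r', None, 'remote deleted'),                  # remove from working dir
        ('new.txt', 'g', ('',), 'remote created'),                 # get; args = (flags,)
        ('both.txt', 'm', ('both.txt', 'both.txt', 'both.txt',
                           False, '<ancestor node>'),              # merge; args =
         'versions differ'),                                       # (f1, f2, fa, move, anc)
        ('dst/x.txt', 'dg', ('src/x.txt', ''),                     # dir rename, get;
         'local directory rename - get from src/x.txt'),           # args = (f0, flags)
        ('script.sh', 'e', ('x',), 'update permissions'),          # exec-bit change only
    ]

    for f, m, args, msg in example_actions:
        print('%s: %s -> %s' % (f, msg, m))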
592 def applyupdates(repo, actions, wctx, mctx, overwrite):
592 def applyupdates(repo, actions, wctx, mctx, overwrite):
593 """apply the merge action list to the working directory
593 """apply the merge action list to the working directory
594
594
595 wctx is the working copy context
595 wctx is the working copy context
596 mctx is the context to be merged into the working copy
596 mctx is the context to be merged into the working copy
597
597
598 Return a tuple of counts (updated, merged, removed, unresolved) that
598 Return a tuple of counts (updated, merged, removed, unresolved) that
599 describes how many files were affected by the update.
599 describes how many files were affected by the update.
600 """
600 """
601
601
602 updated, merged, removed, unresolved = 0, 0, 0, 0
602 updated, merged, removed, unresolved = 0, 0, 0, 0
603 ms = mergestate(repo)
603 ms = mergestate(repo)
604 ms.reset(wctx.p1().node(), mctx.node())
604 ms.reset(wctx.p1().node(), mctx.node())
605 moves = []
605 moves = []
606 actions.sort(key=actionkey)
606 actions.sort(key=actionkey)
607
607
608 # prescan for merges
608 # prescan for merges
609 for a in actions:
609 for a in actions:
610 f, m, args, msg = a
610 f, m, args, msg = a
611 repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
611 repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
612 if m == "m": # merge
612 if m == "m": # merge
613 f1, f2, fa, move, anc = args
613 f1, f2, fa, move, anc = args
614 if f == '.hgsubstate': # merged internally
614 if f == '.hgsubstate': # merged internally
615 continue
615 continue
616 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
616 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
617 fcl = wctx[f1]
617 fcl = wctx[f1]
618 fco = mctx[f2]
618 fco = mctx[f2]
619 actx = repo[anc]
619 actx = repo[anc]
620 if fa in actx:
620 if fa in actx:
621 fca = actx[fa]
621 fca = actx[fa]
622 else:
622 else:
623 fca = repo.filectx(f1, fileid=nullrev)
623 fca = repo.filectx(f1, fileid=nullrev)
624 ms.add(fcl, fco, fca, f)
624 ms.add(fcl, fco, fca, f)
625 if f1 != f and move:
625 if f1 != f and move:
626 moves.append(f1)
626 moves.append(f1)
627
627
628 audit = repo.wopener.audit
628 audit = repo.wopener.audit
629
629
630 # remove renamed files after safely stored
630 # remove renamed files after safely stored
631 for f in moves:
631 for f in moves:
632 if os.path.lexists(repo.wjoin(f)):
632 if os.path.lexists(repo.wjoin(f)):
633 repo.ui.debug("removing %s\n" % f)
633 repo.ui.debug("removing %s\n" % f)
634 audit(f)
634 audit(f)
635 util.unlinkpath(repo.wjoin(f))
635 util.unlinkpath(repo.wjoin(f))
636
636
637 numupdates = len(actions)
637 numupdates = len(actions)
638 workeractions = [a for a in actions if a[1] in 'gr']
638 workeractions = [a for a in actions if a[1] in 'gr']
639 updateactions = [a for a in workeractions if a[1] == 'g']
639 updateactions = [a for a in workeractions if a[1] == 'g']
640 updated = len(updateactions)
640 updated = len(updateactions)
641 removeactions = [a for a in workeractions if a[1] == 'r']
641 removeactions = [a for a in workeractions if a[1] == 'r']
642 removed = len(removeactions)
642 removed = len(removeactions)
643 actions = [a for a in actions if a[1] not in 'gr']
643 actions = [a for a in actions if a[1] not in 'gr']
644
644
645 hgsub = [a[1] for a in workeractions if a[0] == '.hgsubstate']
645 hgsub = [a[1] for a in workeractions if a[0] == '.hgsubstate']
646 if hgsub and hgsub[0] == 'r':
646 if hgsub and hgsub[0] == 'r':
647 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
647 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
648
648
649 z = 0
649 z = 0
650 prog = worker.worker(repo.ui, 0.001, getremove, (repo, mctx, overwrite),
650 prog = worker.worker(repo.ui, 0.001, getremove, (repo, mctx, overwrite),
651 removeactions)
651 removeactions)
652 for i, item in prog:
652 for i, item in prog:
653 z += i
653 z += i
654 repo.ui.progress(_('updating'), z, item=item, total=numupdates,
654 repo.ui.progress(_('updating'), z, item=item, total=numupdates,
655 unit=_('files'))
655 unit=_('files'))
656 prog = worker.worker(repo.ui, 0.001, getremove, (repo, mctx, overwrite),
656 prog = worker.worker(repo.ui, 0.001, getremove, (repo, mctx, overwrite),
657 updateactions)
657 updateactions)
658 for i, item in prog:
658 for i, item in prog:
659 z += i
659 z += i
660 repo.ui.progress(_('updating'), z, item=item, total=numupdates,
660 repo.ui.progress(_('updating'), z, item=item, total=numupdates,
661 unit=_('files'))
661 unit=_('files'))
662
662
663 if hgsub and hgsub[0] == 'g':
663 if hgsub and hgsub[0] == 'g':
664 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
664 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
665
665
666 _updating = _('updating')
666 _updating = _('updating')
667 _files = _('files')
667 _files = _('files')
668 progress = repo.ui.progress
668 progress = repo.ui.progress
669
669
670 for i, a in enumerate(actions):
670 for i, a in enumerate(actions):
671 f, m, args, msg = a
671 f, m, args, msg = a
672 progress(_updating, z + i + 1, item=f, total=numupdates, unit=_files)
672 progress(_updating, z + i + 1, item=f, total=numupdates, unit=_files)
673 if m == "m": # merge
673 if m == "m": # merge
674 f1, f2, fa, move, anc = args
674 f1, f2, fa, move, anc = args
675 if f == '.hgsubstate': # subrepo states need updating
675 if f == '.hgsubstate': # subrepo states need updating
676 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
676 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
677 overwrite)
677 overwrite)
678 continue
678 continue
679 audit(f)
679 audit(f)
680 r = ms.resolve(f, wctx)
680 r = ms.resolve(f, wctx)
681 if r is not None and r > 0:
681 if r is not None and r > 0:
682 unresolved += 1
682 unresolved += 1
683 else:
683 else:
684 if r is None:
684 if r is None:
685 updated += 1
685 updated += 1
686 else:
686 else:
687 merged += 1
687 merged += 1
688 elif m == "dm": # directory rename, move local
688 elif m == "dm": # directory rename, move local
689 f0, flags = args
689 f0, flags = args
690 repo.ui.note(_("moving %s to %s\n") % (f0, f))
690 repo.ui.note(_("moving %s to %s\n") % (f0, f))
691 audit(f)
691 audit(f)
692 repo.wwrite(f, wctx.filectx(f0).data(), flags)
692 repo.wwrite(f, wctx.filectx(f0).data(), flags)
693 util.unlinkpath(repo.wjoin(f0))
693 util.unlinkpath(repo.wjoin(f0))
694 updated += 1
694 updated += 1
695 elif m == "dg": # local directory rename, get
695 elif m == "dg": # local directory rename, get
696 f0, flags = args
696 f0, flags = args
697 repo.ui.note(_("getting %s to %s\n") % (f0, f))
697 repo.ui.note(_("getting %s to %s\n") % (f0, f))
698 repo.wwrite(f, mctx.filectx(f0).data(), flags)
698 repo.wwrite(f, mctx.filectx(f0).data(), flags)
699 updated += 1
699 updated += 1
700 elif m == "dr": # divergent renames
700 elif m == "dr": # divergent renames
701 fl, = args
701 fl, = args
702 repo.ui.warn(_("note: possible conflict - %s was renamed "
702 repo.ui.warn(_("note: possible conflict - %s was renamed "
703 "multiple times to:\n") % f)
703 "multiple times to:\n") % f)
704 for nf in fl:
704 for nf in fl:
705 repo.ui.warn(" %s\n" % nf)
705 repo.ui.warn(" %s\n" % nf)
706 elif m == "rd": # rename and delete
706 elif m == "rd": # rename and delete
707 fl, = args
707 fl, = args
708 repo.ui.warn(_("note: possible conflict - %s was deleted "
708 repo.ui.warn(_("note: possible conflict - %s was deleted "
709 "and renamed to:\n") % f)
709 "and renamed to:\n") % f)
710 for nf in fl:
710 for nf in fl:
711 repo.ui.warn(" %s\n" % nf)
711 repo.ui.warn(" %s\n" % nf)
712 elif m == "e": # exec
712 elif m == "e": # exec
713 flags, = args
713 flags, = args
714 audit(f)
714 audit(f)
715 util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
715 util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
716 updated += 1
716 updated += 1
717 ms.commit()
717 ms.commit()
718 progress(_updating, None, total=numupdates, unit=_files)
718 progress(_updating, None, total=numupdates, unit=_files)
719
719
720 return updated, merged, removed, unresolved
720 return updated, merged, removed, unresolved
721
721
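applyupdates returns the counts (updated, merged, removed, unresolved) that back the familiar summary line printed after an update or merge. A small illustrative sketch with made-up numbers:

    updated, merged, removed, unresolved = 3, 1, 0, 2   # e.g. the tuple from applyupdates()
    print('%d files updated, %d files merged, %d files removed, %d files unresolved'
          % (updated, merged, removed, unresolved))
    if unresolved:
        print('some files need manual resolution')      # illustrative only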
722 def calculateupdates(repo, wctx, mctx, ancestor, branchmerge, force, partial,
722 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force, partial,
723 acceptremote, followcopies):
723 acceptremote, followcopies):
724 "Calculate the actions needed to merge mctx into wctx using ancestor"
724 "Calculate the actions needed to merge mctx into wctx using ancestors"
725
726 ancestor = ancestors[0]
725
727
726 actions = manifestmerge(repo, wctx, mctx,
728 actions = manifestmerge(repo, wctx, mctx,
727 ancestor,
729 ancestor,
728 branchmerge, force,
730 branchmerge, force,
729 partial, acceptremote, followcopies)
731 partial, acceptremote, followcopies)
730
732
731 # Filter out prompts.
733 # Filter out prompts.
732 newactions, prompts = [], []
734 newactions, prompts = [], []
733 for a in actions:
735 for a in actions:
734 if a[1] in ("cd", "dc"):
736 if a[1] in ("cd", "dc"):
735 prompts.append(a)
737 prompts.append(a)
736 else:
738 else:
737 newactions.append(a)
739 newactions.append(a)
738 # Prompt and create actions. TODO: Move this towards resolve phase.
740 # Prompt and create actions. TODO: Move this towards resolve phase.
739 for f, m, args, msg in sorted(prompts):
741 for f, m, args, msg in sorted(prompts):
740 if m == "cd":
742 if m == "cd":
741 if repo.ui.promptchoice(
743 if repo.ui.promptchoice(
742 _("local changed %s which remote deleted\n"
744 _("local changed %s which remote deleted\n"
743 "use (c)hanged version or (d)elete?"
745 "use (c)hanged version or (d)elete?"
744 "$$ &Changed $$ &Delete") % f, 0):
746 "$$ &Changed $$ &Delete") % f, 0):
745 newactions.append((f, "r", None, "prompt delete"))
747 newactions.append((f, "r", None, "prompt delete"))
746 else:
748 else:
747 newactions.append((f, "a", None, "prompt keep"))
749 newactions.append((f, "a", None, "prompt keep"))
748 elif m == "dc":
750 elif m == "dc":
749 flags, = args
751 flags, = args
750 if repo.ui.promptchoice(
752 if repo.ui.promptchoice(
751 _("remote changed %s which local deleted\n"
753 _("remote changed %s which local deleted\n"
752 "use (c)hanged version or leave (d)eleted?"
754 "use (c)hanged version or leave (d)eleted?"
753 "$$ &Changed $$ &Deleted") % f, 0) == 0:
755 "$$ &Changed $$ &Deleted") % f, 0) == 0:
754 newactions.append((f, "g", (flags,), "prompt recreating"))
756 newactions.append((f, "g", (flags,), "prompt recreating"))
755 else: assert False, m
757 else: assert False, m
756
758
757 if wctx.rev() is None:
759 if wctx.rev() is None:
758 newactions += _forgetremoved(wctx, mctx, branchmerge)
760 newactions += _forgetremoved(wctx, mctx, branchmerge)
759
761
760 return newactions
762 return newactions
761
763
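The change in this commit makes calculateupdates accept the ancestor as a list, even though only ancestors[0] is consulted for now; update() below always builds a one-element pas list before calling it. A toy sketch of that calling convention, with hypothetical stand-in strings instead of real changectx objects:

    def calculateupdates_stub(wctx, mctx, ancestors):
        # mirrors the new signature: a list is accepted, only the first
        # ancestor is used so far
        ancestor = ancestors[0]
        return 'merge %s into %s using ancestor %s' % (mctx, wctx, ancestor)

    pas = ['rev-1a2b3c']                      # e.g. [p1.ancestor(p2)] in update()
    print(calculateupdates_stub('working-dir', 'rev-9f8e7d', pas))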
762 def recordupdates(repo, actions, branchmerge):
764 def recordupdates(repo, actions, branchmerge):
763 "record merge actions to the dirstate"
765 "record merge actions to the dirstate"
764
766
765 for a in actions:
767 for a in actions:
766 f, m, args, msg = a
768 f, m, args, msg = a
767 if m == "r": # remove
769 if m == "r": # remove
768 if branchmerge:
770 if branchmerge:
769 repo.dirstate.remove(f)
771 repo.dirstate.remove(f)
770 else:
772 else:
771 repo.dirstate.drop(f)
773 repo.dirstate.drop(f)
772 elif m == "a": # re-add
774 elif m == "a": # re-add
773 if not branchmerge:
775 if not branchmerge:
774 repo.dirstate.add(f)
776 repo.dirstate.add(f)
775 elif m == "f": # forget
777 elif m == "f": # forget
776 repo.dirstate.drop(f)
778 repo.dirstate.drop(f)
777 elif m == "e": # exec change
779 elif m == "e": # exec change
778 repo.dirstate.normallookup(f)
780 repo.dirstate.normallookup(f)
779 elif m == "g": # get
781 elif m == "g": # get
780 if branchmerge:
782 if branchmerge:
781 repo.dirstate.otherparent(f)
783 repo.dirstate.otherparent(f)
782 else:
784 else:
783 repo.dirstate.normal(f)
785 repo.dirstate.normal(f)
784 elif m == "m": # merge
786 elif m == "m": # merge
785 f1, f2, fa, move, anc = args
787 f1, f2, fa, move, anc = args
786 if branchmerge:
788 if branchmerge:
787 # We've done a branch merge, mark this file as merged
789 # We've done a branch merge, mark this file as merged
788 # so that we properly record the merger later
790 # so that we properly record the merger later
789 repo.dirstate.merge(f)
791 repo.dirstate.merge(f)
790 if f1 != f2: # copy/rename
792 if f1 != f2: # copy/rename
791 if move:
793 if move:
792 repo.dirstate.remove(f1)
794 repo.dirstate.remove(f1)
793 if f1 != f:
795 if f1 != f:
794 repo.dirstate.copy(f1, f)
796 repo.dirstate.copy(f1, f)
795 else:
797 else:
796 repo.dirstate.copy(f2, f)
798 repo.dirstate.copy(f2, f)
797 else:
799 else:
798 # We've update-merged a locally modified file, so
800 # We've update-merged a locally modified file, so
799 # we set the dirstate to emulate a normal checkout
801 # we set the dirstate to emulate a normal checkout
800 # of that file some time in the past. Thus our
802 # of that file some time in the past. Thus our
801 # merge will appear as a normal local file
803 # merge will appear as a normal local file
802 # modification.
804 # modification.
803 if f2 == f: # file not locally copied/moved
805 if f2 == f: # file not locally copied/moved
804 repo.dirstate.normallookup(f)
806 repo.dirstate.normallookup(f)
805 if move:
807 if move:
806 repo.dirstate.drop(f1)
808 repo.dirstate.drop(f1)
807 elif m == "dm": # directory rename, move local
809 elif m == "dm": # directory rename, move local
808 f0, flag = args
810 f0, flag = args
809 if f0 not in repo.dirstate:
811 if f0 not in repo.dirstate:
810 # untracked file moved
812 # untracked file moved
811 continue
813 continue
812 if branchmerge:
814 if branchmerge:
813 repo.dirstate.add(f)
815 repo.dirstate.add(f)
814 repo.dirstate.remove(f0)
816 repo.dirstate.remove(f0)
815 repo.dirstate.copy(f0, f)
817 repo.dirstate.copy(f0, f)
816 else:
818 else:
817 repo.dirstate.normal(f)
819 repo.dirstate.normal(f)
818 repo.dirstate.drop(f0)
820 repo.dirstate.drop(f0)
819 elif m == "dg": # directory rename, get
821 elif m == "dg": # directory rename, get
820 f0, flag = args
822 f0, flag = args
821 if branchmerge:
823 if branchmerge:
822 repo.dirstate.add(f)
824 repo.dirstate.add(f)
823 repo.dirstate.copy(f0, f)
825 repo.dirstate.copy(f0, f)
824 else:
826 else:
825 repo.dirstate.normal(f)
827 repo.dirstate.normal(f)
826
828
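recordupdates is essentially a dispatch from action codes to dirstate calls. A hypothetical summary table (branch-merge effect on the left, plain-update effect on the right), condensed from the branches above:

    dirstate_effect = {
        'r':  ('remove(f)', 'drop(f)'),
        'a':  ('(nothing)', 'add(f)'),
        'f':  ('drop(f)', 'drop(f)'),
        'e':  ('normallookup(f)', 'normallookup(f)'),
        'g':  ('otherparent(f)', 'normal(f)'),
        'm':  ('merge(f) + copy bookkeeping', 'normallookup(f) (+ drop moved source)'),
        'dm': ('add(f), remove(f0), copy(f0, f)', 'normal(f), drop(f0)'),
        'dg': ('add(f), copy(f0, f)', 'normal(f)'),
    }
    for code in sorted(dirstate_effect):
        on_merge, on_update = dirstate_effect[code]
        print('%-2s  branchmerge: %-34s  plain update: %s' % (code, on_merge, on_update))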
827 def update(repo, node, branchmerge, force, partial, ancestor=None,
829 def update(repo, node, branchmerge, force, partial, ancestor=None,
828 mergeancestor=False):
830 mergeancestor=False):
829 """
831 """
830 Perform a merge between the working directory and the given node
832 Perform a merge between the working directory and the given node
831
833
832 node = the node to update to, or None if unspecified
834 node = the node to update to, or None if unspecified
833 branchmerge = whether to merge between branches
835 branchmerge = whether to merge between branches
834 force = whether to force branch merging or file overwriting
836 force = whether to force branch merging or file overwriting
835 partial = a function to filter file lists (dirstate not updated)
837 partial = a function to filter file lists (dirstate not updated)
836 mergeancestor = whether it is merging with an ancestor. If true,
838 mergeancestor = whether it is merging with an ancestor. If true,
837 we should accept the incoming changes for any prompts that occur.
839 we should accept the incoming changes for any prompts that occur.
838 If false, merging with an ancestor (fast-forward) is only allowed
840 If false, merging with an ancestor (fast-forward) is only allowed
839 between different named branches. This flag is used by rebase extension
841 between different named branches. This flag is used by rebase extension
840 as a temporary fix and should be avoided in general.
842 as a temporary fix and should be avoided in general.
841
843
842 The table below shows all the behaviors of the update command
844 The table below shows all the behaviors of the update command
843 given the -c and -C or no options, whether the working directory
845 given the -c and -C or no options, whether the working directory
844 is dirty, whether a revision is specified, and the relationship of
846 is dirty, whether a revision is specified, and the relationship of
845 the parent rev to the target rev (linear, on the same named
847 the parent rev to the target rev (linear, on the same named
846 branch, or on another named branch).
848 branch, or on another named branch).
847
849
848 This logic is tested by test-update-branches.t.
850 This logic is tested by test-update-branches.t.
849
851
850 -c -C dirty rev | linear same cross
852 -c -C dirty rev | linear same cross
851 n n n n | ok (1) x
853 n n n n | ok (1) x
852 n n n y | ok ok ok
854 n n n y | ok ok ok
853 n n y n | merge (2) (2)
855 n n y n | merge (2) (2)
854 n n y y | merge (3) (3)
856 n n y y | merge (3) (3)
855 n y * * | --- discard ---
857 n y * * | --- discard ---
856 y n y * | --- (4) ---
858 y n y * | --- (4) ---
857 y n n * | --- ok ---
859 y n n * | --- ok ---
858 y y * * | --- (5) ---
860 y y * * | --- (5) ---
859
861
860 x = can't happen
862 x = can't happen
861 * = don't-care
863 * = don't-care
862 1 = abort: not a linear update (merge or update --check to force update)
864 1 = abort: not a linear update (merge or update --check to force update)
863 2 = abort: uncommitted changes (commit and merge, or update --clean to
865 2 = abort: uncommitted changes (commit and merge, or update --clean to
864 discard changes)
866 discard changes)
865 3 = abort: uncommitted changes (commit or update --clean to discard changes)
867 3 = abort: uncommitted changes (commit or update --clean to discard changes)
866 4 = abort: uncommitted changes (checked in commands.py)
868 4 = abort: uncommitted changes (checked in commands.py)
867 5 = incompatible options (checked in commands.py)
869 5 = incompatible options (checked in commands.py)
868
870
869 Return the same tuple as applyupdates().
871 Return the same tuple as applyupdates().
870 """
872 """
871
873
872 onode = node
874 onode = node
873 wlock = repo.wlock()
875 wlock = repo.wlock()
874 try:
876 try:
875 wc = repo[None]
877 wc = repo[None]
876 pl = wc.parents()
878 pl = wc.parents()
877 p1 = pl[0]
879 p1 = pl[0]
878 pa = None
880 pas = [None]
879 if ancestor:
881 if ancestor:
880 pa = repo[ancestor]
882 pas = [repo[ancestor]]
881
883
882 if node is None:
884 if node is None:
883 # Here is where we should consider bookmarks, divergent bookmarks,
885 # Here is where we should consider bookmarks, divergent bookmarks,
884 # foreground changesets (successors), and tip of current branch;
886 # foreground changesets (successors), and tip of current branch;
885 # but currently we are only checking the branch tips.
887 # but currently we are only checking the branch tips.
886 try:
888 try:
887 node = repo.branchtip(wc.branch())
889 node = repo.branchtip(wc.branch())
888 except error.RepoLookupError:
890 except error.RepoLookupError:
889 if wc.branch() == "default": # no default branch!
891 if wc.branch() == "default": # no default branch!
890 node = repo.lookup("tip") # update to tip
892 node = repo.lookup("tip") # update to tip
891 else:
893 else:
892 raise util.Abort(_("branch %s not found") % wc.branch())
894 raise util.Abort(_("branch %s not found") % wc.branch())
893
895
894 if p1.obsolete() and not p1.children():
896 if p1.obsolete() and not p1.children():
895 # allow updating to successors
897 # allow updating to successors
896 successors = obsolete.successorssets(repo, p1.node())
898 successors = obsolete.successorssets(repo, p1.node())
897
899
898 # behavior of certain cases is as follows,
900 # behavior of certain cases is as follows,
899 #
901 #
900 # divergent changesets: update to highest rev, similar to what
902 # divergent changesets: update to highest rev, similar to what
901 # is currently done when there are more than one head
903 # is currently done when there are more than one head
902 # (i.e. 'tip')
904 # (i.e. 'tip')
903 #
905 #
904 # replaced changesets: same as divergent except we know there
906 # replaced changesets: same as divergent except we know there
905 # is no conflict
907 # is no conflict
906 #
908 #
907 # pruned changeset: no update is done; though, we could
909 # pruned changeset: no update is done; though, we could
908 # consider updating to the first non-obsolete parent,
910 # consider updating to the first non-obsolete parent,
909             # similar to what is currently done for 'hg prune'
911             # similar to what is currently done for 'hg prune'
910
912
911 if successors:
913 if successors:
912 # flatten the list here handles both divergent (len > 1)
914 # flatten the list here handles both divergent (len > 1)
913 # and the usual case (len = 1)
915 # and the usual case (len = 1)
914 successors = [n for sub in successors for n in sub]
916 successors = [n for sub in successors for n in sub]
915
917
916 # get the max revision for the given successors set,
918 # get the max revision for the given successors set,
917 # i.e. the 'tip' of a set
919 # i.e. the 'tip' of a set
918 node = repo.revs("max(%ln)", successors)[0]
920 node = repo.revs("max(%ln)", successors)[0]
919 pa = p1
921 pas = [p1]
920
922
921 overwrite = force and not branchmerge
923 overwrite = force and not branchmerge
922
924
923 p2 = repo[node]
925 p2 = repo[node]
924 if pa is None:
926 if pas[0] is None:
925 pa = p1.ancestor(p2)
927 pas = [p1.ancestor(p2)]
926
928
927 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
929 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
928
930
929 ### check phase
931 ### check phase
930 if not overwrite and len(pl) > 1:
932 if not overwrite and len(pl) > 1:
931 raise util.Abort(_("outstanding uncommitted merges"))
933 raise util.Abort(_("outstanding uncommitted merges"))
932 if branchmerge:
934 if branchmerge:
933 if pa == p2:
935 if pas == [p2]:
934 raise util.Abort(_("merging with a working directory ancestor"
936 raise util.Abort(_("merging with a working directory ancestor"
935 " has no effect"))
937 " has no effect"))
936 elif pa == p1:
938 elif pas == [p1]:
937 if not mergeancestor and p1.branch() == p2.branch():
939 if not mergeancestor and p1.branch() == p2.branch():
938 raise util.Abort(_("nothing to merge"),
940 raise util.Abort(_("nothing to merge"),
939 hint=_("use 'hg update' "
941 hint=_("use 'hg update' "
940 "or check 'hg heads'"))
942 "or check 'hg heads'"))
941 if not force and (wc.files() or wc.deleted()):
943 if not force and (wc.files() or wc.deleted()):
942 raise util.Abort(_("uncommitted changes"),
944 raise util.Abort(_("uncommitted changes"),
943 hint=_("use 'hg status' to list changes"))
945 hint=_("use 'hg status' to list changes"))
944 for s in sorted(wc.substate):
946 for s in sorted(wc.substate):
945 if wc.sub(s).dirty():
947 if wc.sub(s).dirty():
946 raise util.Abort(_("uncommitted changes in "
948 raise util.Abort(_("uncommitted changes in "
947 "subrepository '%s'") % s)
949 "subrepository '%s'") % s)
948
950
949 elif not overwrite:
951 elif not overwrite:
950 if p1 == p2: # no-op update
952 if p1 == p2: # no-op update
951 # call the hooks and exit early
953 # call the hooks and exit early
952 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
954 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
953 repo.hook('update', parent1=xp2, parent2='', error=0)
955 repo.hook('update', parent1=xp2, parent2='', error=0)
954 return 0, 0, 0, 0
956 return 0, 0, 0, 0
955
957
956 if pa not in (p1, p2): # nonlinear
958 if pas not in ([p1], [p2]): # nonlinear
957 dirty = wc.dirty(missing=True)
959 dirty = wc.dirty(missing=True)
958 if dirty or onode is None:
960 if dirty or onode is None:
959 # Branching is a bit strange to ensure we do the minimal
961 # Branching is a bit strange to ensure we do the minimal
960 # amount of call to obsolete.background.
962 # amount of call to obsolete.background.
961 foreground = obsolete.foreground(repo, [p1.node()])
963 foreground = obsolete.foreground(repo, [p1.node()])
962 # note: the <node> variable contains a random identifier
964 # note: the <node> variable contains a random identifier
963 if repo[node].node() in foreground:
965 if repo[node].node() in foreground:
964 pa = p1 # allow updating to successors
966 pas = [p1] # allow updating to successors
965 elif dirty:
967 elif dirty:
966 msg = _("uncommitted changes")
968 msg = _("uncommitted changes")
967 if onode is None:
969 if onode is None:
968 hint = _("commit and merge, or update --clean to"
970 hint = _("commit and merge, or update --clean to"
969 " discard changes")
971 " discard changes")
970 else:
972 else:
971 hint = _("commit or update --clean to discard"
973 hint = _("commit or update --clean to discard"
972 " changes")
974 " changes")
973 raise util.Abort(msg, hint=hint)
975 raise util.Abort(msg, hint=hint)
974 else: # node is none
976 else: # node is none
975 msg = _("not a linear update")
977 msg = _("not a linear update")
976 hint = _("merge or update --check to force update")
978 hint = _("merge or update --check to force update")
977 raise util.Abort(msg, hint=hint)
979 raise util.Abort(msg, hint=hint)
978 else:
980 else:
979 # Allow jumping branches if clean and specific rev given
981 # Allow jumping branches if clean and specific rev given
980 pa = p1
982 pas = [p1]
981
983
982 followcopies = False
984 followcopies = False
983 if overwrite:
985 if overwrite:
984 pa = wc
986 pas = [wc]
985 elif pa == p2: # backwards
987 elif pas == [p2]: # backwards
986 pa = wc.p1()
988 pas = [wc.p1()]
987 elif not branchmerge and not wc.dirty(missing=True):
989 elif not branchmerge and not wc.dirty(missing=True):
988 pass
990 pass
989 elif pa and repo.ui.configbool("merge", "followcopies", True):
991 elif pas[0] and repo.ui.configbool("merge", "followcopies", True):
990 followcopies = True
992 followcopies = True
991
993
992 ### calculate phase
994 ### calculate phase
993 actions = calculateupdates(repo, wc, p2, pa, branchmerge, force,
995 actions = calculateupdates(repo, wc, p2, pas, branchmerge, force,
994 partial, mergeancestor, followcopies)
996 partial, mergeancestor, followcopies)
995
997
996 ### apply phase
998 ### apply phase
997 if not branchmerge: # just jump to the new rev
999 if not branchmerge: # just jump to the new rev
998 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1000 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
999 if not partial:
1001 if not partial:
1000 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1002 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1001 # note that we're in the middle of an update
1003 # note that we're in the middle of an update
1002 repo.vfs.write('updatestate', p2.hex())
1004 repo.vfs.write('updatestate', p2.hex())
1003
1005
1004 stats = applyupdates(repo, actions, wc, p2, overwrite)
1006 stats = applyupdates(repo, actions, wc, p2, overwrite)
1005
1007
1006 if not partial:
1008 if not partial:
1007 repo.setparents(fp1, fp2)
1009 repo.setparents(fp1, fp2)
1008 recordupdates(repo, actions, branchmerge)
1010 recordupdates(repo, actions, branchmerge)
1009 # update completed, clear state
1011 # update completed, clear state
1010 util.unlink(repo.join('updatestate'))
1012 util.unlink(repo.join('updatestate'))
1011
1013
1012 if not branchmerge:
1014 if not branchmerge:
1013 repo.dirstate.setbranch(p2.branch())
1015 repo.dirstate.setbranch(p2.branch())
1014 finally:
1016 finally:
1015 wlock.release()
1017 wlock.release()
1016
1018
1017 if not partial:
1019 if not partial:
1018 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
1020 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
1019 return stats
1021 return stats
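The -c/-C behaviour table in update()'s docstring is easier to check mechanically than to read. A rough standalone sketch of the same decision (update_behaviour is a hypothetical helper; relation stands for the linear/same/cross column, and the abort numbers follow the table's footnotes):

    def update_behaviour(check, clean, dirty, rev, relation):
        """relation is 'linear', 'same' or 'cross'; returns the table's outcome."""
        if check and clean:
            return 'abort (5): incompatible options'
        if clean:
            return 'discard'
        if check:
            return 'abort (4): uncommitted changes' if dirty else 'ok'
        if not dirty:
            if rev or relation == 'linear':
                return 'ok'
            if relation == 'same':
                return 'abort (1): not a linear update'
            return "can't happen"        # cross branch without an explicit rev
        if relation == 'linear':
            return 'merge'
        return 'abort (%s): uncommitted changes' % ('3' if rev else '2')

    print(update_behaviour(check=False, clean=False, dirty=True, rev=False,
                           relation='same'))   # -> abort (2): uncommitted changes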