merge: make calculateupdates() return file->action dict...
Martin von Zweigbergk
r23641:a7a0f32a default
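
For context, this changeset switches calculateupdates() from returning actions grouped by action code to returning a mapping keyed by filename, which is why the conversion code in the largefiles override below disappears. The following is a minimal sketch of the two shapes and of the regrouping the old override performed; the action codes 'g' (get), 'r' (remove) and 'k' (keep) are the merge codes that appear in the diff, while the variable and helper names here are made up for illustration:

    # Sketch only: the two shapes of calculateupdates() output.

    # Before: action code -> list of (file, args, message)
    actions_by_type = {
        'g': [('foo', ('flags',), 'remote is newer')],
        'r': [('bar', None, 'other deleted')],
        'k': [],
    }

    # After: file -> (action code, args, message)
    actions_by_file = {
        'foo': ('g', ('flags',), 'remote is newer'),
        'bar': ('r', None, 'other deleted'),
    }

    # The old override regrouped between the two shapes before and after its
    # largefile-specific processing; this regrouping is what the diff removes.
    def to_by_file(by_type):
        return dict((f, (m, args, msg))
                    for m, entries in by_type.items()
                    for f, args, msg in entries)

    def to_by_type(by_file):
        by_type = {}
        for f, (m, args, msg) in by_file.items():
            by_type.setdefault(m, []).append((f, args, msg))
        return by_type

With the per-file shape, the override can read and rewrite the entries for a largefile and its standin with plain dictionary lookups instead of rebuilding lists of actions.
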
@@ -1,1305 +1,1296 @@
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10
10
11 import os
11 import os
12 import copy
12 import copy
13
13
14 from mercurial import hg, util, cmdutil, scmutil, match as match_, \
14 from mercurial import hg, util, cmdutil, scmutil, match as match_, \
15 archival, pathutil, revset
15 archival, pathutil, revset
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17 from mercurial.node import hex
17 from mercurial.node import hex
18
18
19 import lfutil
19 import lfutil
20 import lfcommands
20 import lfcommands
21 import basestore
21 import basestore
22
22
23 # -- Utility functions: commonly/repeatedly needed functionality ---------------
23 # -- Utility functions: commonly/repeatedly needed functionality ---------------
24
24
25 def composenormalfilematcher(match, manifest):
25 def composenormalfilematcher(match, manifest):
26 m = copy.copy(match)
26 m = copy.copy(match)
27 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
27 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
28 manifest)
28 manifest)
29 m._files = filter(notlfile, m._files)
29 m._files = filter(notlfile, m._files)
30 m._fmap = set(m._files)
30 m._fmap = set(m._files)
31 m._always = False
31 m._always = False
32 origmatchfn = m.matchfn
32 origmatchfn = m.matchfn
33 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
33 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
34 return m
34 return m
35
35
36 def installnormalfilesmatchfn(manifest):
36 def installnormalfilesmatchfn(manifest):
37 '''installmatchfn with a matchfn that ignores all largefiles'''
37 '''installmatchfn with a matchfn that ignores all largefiles'''
38 def overridematch(ctx, pats=[], opts={}, globbed=False,
38 def overridematch(ctx, pats=[], opts={}, globbed=False,
39 default='relpath'):
39 default='relpath'):
40 match = oldmatch(ctx, pats, opts, globbed, default)
40 match = oldmatch(ctx, pats, opts, globbed, default)
41 return composenormalfilematcher(match, manifest)
41 return composenormalfilematcher(match, manifest)
42 oldmatch = installmatchfn(overridematch)
42 oldmatch = installmatchfn(overridematch)
43
43
44 def installmatchfn(f):
44 def installmatchfn(f):
45 '''monkey patch the scmutil module with a custom match function.
45 '''monkey patch the scmutil module with a custom match function.
46 Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
46 Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
47 oldmatch = scmutil.match
47 oldmatch = scmutil.match
48 setattr(f, 'oldmatch', oldmatch)
48 setattr(f, 'oldmatch', oldmatch)
49 scmutil.match = f
49 scmutil.match = f
50 return oldmatch
50 return oldmatch
51
51
52 def restorematchfn():
52 def restorematchfn():
53 '''restores scmutil.match to what it was before installmatchfn
53 '''restores scmutil.match to what it was before installmatchfn
54 was called. no-op if scmutil.match is its original function.
54 was called. no-op if scmutil.match is its original function.
55
55
56 Note that n calls to installmatchfn will require n calls to
56 Note that n calls to installmatchfn will require n calls to
57 restore the original matchfn.'''
57 restore the original matchfn.'''
58 scmutil.match = getattr(scmutil.match, 'oldmatch')
58 scmutil.match = getattr(scmutil.match, 'oldmatch')
59
59
60 def installmatchandpatsfn(f):
60 def installmatchandpatsfn(f):
61 oldmatchandpats = scmutil.matchandpats
61 oldmatchandpats = scmutil.matchandpats
62 setattr(f, 'oldmatchandpats', oldmatchandpats)
62 setattr(f, 'oldmatchandpats', oldmatchandpats)
63 scmutil.matchandpats = f
63 scmutil.matchandpats = f
64 return oldmatchandpats
64 return oldmatchandpats
65
65
66 def restorematchandpatsfn():
66 def restorematchandpatsfn():
67 '''restores scmutil.matchandpats to what it was before
67 '''restores scmutil.matchandpats to what it was before
68 installmatchandpatsfn was called. No-op if scmutil.matchandpats
68 installmatchandpatsfn was called. No-op if scmutil.matchandpats
69 is its original function.
69 is its original function.
70
70
71 Note that n calls to installmatchandpatsfn will require n calls
71 Note that n calls to installmatchandpatsfn will require n calls
72 to restore the original matchfn.'''
72 to restore the original matchfn.'''
73 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
73 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
74 scmutil.matchandpats)
74 scmutil.matchandpats)
75
75
76 def addlargefiles(ui, repo, matcher, **opts):
76 def addlargefiles(ui, repo, matcher, **opts):
77 large = opts.pop('large', None)
77 large = opts.pop('large', None)
78 lfsize = lfutil.getminsize(
78 lfsize = lfutil.getminsize(
79 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
79 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
80
80
81 lfmatcher = None
81 lfmatcher = None
82 if lfutil.islfilesrepo(repo):
82 if lfutil.islfilesrepo(repo):
83 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
83 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
84 if lfpats:
84 if lfpats:
85 lfmatcher = match_.match(repo.root, '', list(lfpats))
85 lfmatcher = match_.match(repo.root, '', list(lfpats))
86
86
87 lfnames = []
87 lfnames = []
88 m = copy.copy(matcher)
88 m = copy.copy(matcher)
89 m.bad = lambda x, y: None
89 m.bad = lambda x, y: None
90 wctx = repo[None]
90 wctx = repo[None]
91 for f in repo.walk(m):
91 for f in repo.walk(m):
92 exact = m.exact(f)
92 exact = m.exact(f)
93 lfile = lfutil.standin(f) in wctx
93 lfile = lfutil.standin(f) in wctx
94 nfile = f in wctx
94 nfile = f in wctx
95 exists = lfile or nfile
95 exists = lfile or nfile
96
96
97 # Don't warn the user when they attempt to add a normal tracked file.
97 # Don't warn the user when they attempt to add a normal tracked file.
98 # The normal add code will do that for us.
98 # The normal add code will do that for us.
99 if exact and exists:
99 if exact and exists:
100 if lfile:
100 if lfile:
101 ui.warn(_('%s already a largefile\n') % f)
101 ui.warn(_('%s already a largefile\n') % f)
102 continue
102 continue
103
103
104 if (exact or not exists) and not lfutil.isstandin(f):
104 if (exact or not exists) and not lfutil.isstandin(f):
105 wfile = repo.wjoin(f)
105 wfile = repo.wjoin(f)
106
106
107 # In case the file was removed previously, but not committed
107 # In case the file was removed previously, but not committed
108 # (issue3507)
108 # (issue3507)
109 if not os.path.exists(wfile):
109 if not os.path.exists(wfile):
110 continue
110 continue
111
111
112 abovemin = (lfsize and
112 abovemin = (lfsize and
113 os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
113 os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
114 if large or abovemin or (lfmatcher and lfmatcher(f)):
114 if large or abovemin or (lfmatcher and lfmatcher(f)):
115 lfnames.append(f)
115 lfnames.append(f)
116 if ui.verbose or not exact:
116 if ui.verbose or not exact:
117 ui.status(_('adding %s as a largefile\n') % m.rel(f))
117 ui.status(_('adding %s as a largefile\n') % m.rel(f))
118
118
119 bad = []
119 bad = []
120
120
121 # Need to lock, otherwise there could be a race condition between
121 # Need to lock, otherwise there could be a race condition between
122 # when standins are created and added to the repo.
122 # when standins are created and added to the repo.
123 wlock = repo.wlock()
123 wlock = repo.wlock()
124 try:
124 try:
125 if not opts.get('dry_run'):
125 if not opts.get('dry_run'):
126 standins = []
126 standins = []
127 lfdirstate = lfutil.openlfdirstate(ui, repo)
127 lfdirstate = lfutil.openlfdirstate(ui, repo)
128 for f in lfnames:
128 for f in lfnames:
129 standinname = lfutil.standin(f)
129 standinname = lfutil.standin(f)
130 lfutil.writestandin(repo, standinname, hash='',
130 lfutil.writestandin(repo, standinname, hash='',
131 executable=lfutil.getexecutable(repo.wjoin(f)))
131 executable=lfutil.getexecutable(repo.wjoin(f)))
132 standins.append(standinname)
132 standins.append(standinname)
133 if lfdirstate[f] == 'r':
133 if lfdirstate[f] == 'r':
134 lfdirstate.normallookup(f)
134 lfdirstate.normallookup(f)
135 else:
135 else:
136 lfdirstate.add(f)
136 lfdirstate.add(f)
137 lfdirstate.write()
137 lfdirstate.write()
138 bad += [lfutil.splitstandin(f)
138 bad += [lfutil.splitstandin(f)
139 for f in repo[None].add(standins)
139 for f in repo[None].add(standins)
140 if f in m.files()]
140 if f in m.files()]
141 finally:
141 finally:
142 wlock.release()
142 wlock.release()
143 return bad
143 return bad
144
144
145 def removelargefiles(ui, repo, isaddremove, *pats, **opts):
145 def removelargefiles(ui, repo, isaddremove, *pats, **opts):
146 after = opts.get('after')
146 after = opts.get('after')
147 if not pats and not after:
147 if not pats and not after:
148 raise util.Abort(_('no files specified'))
148 raise util.Abort(_('no files specified'))
149 m = scmutil.match(repo[None], pats, opts)
149 m = scmutil.match(repo[None], pats, opts)
150 try:
150 try:
151 repo.lfstatus = True
151 repo.lfstatus = True
152 s = repo.status(match=m, clean=True)
152 s = repo.status(match=m, clean=True)
153 finally:
153 finally:
154 repo.lfstatus = False
154 repo.lfstatus = False
155 manifest = repo[None].manifest()
155 manifest = repo[None].manifest()
156 modified, added, deleted, clean = [[f for f in list
156 modified, added, deleted, clean = [[f for f in list
157 if lfutil.standin(f) in manifest]
157 if lfutil.standin(f) in manifest]
158 for list in (s.modified, s.added,
158 for list in (s.modified, s.added,
159 s.deleted, s.clean)]
159 s.deleted, s.clean)]
160
160
161 def warn(files, msg):
161 def warn(files, msg):
162 for f in files:
162 for f in files:
163 ui.warn(msg % m.rel(f))
163 ui.warn(msg % m.rel(f))
164 return int(len(files) > 0)
164 return int(len(files) > 0)
165
165
166 result = 0
166 result = 0
167
167
168 if after:
168 if after:
169 remove = deleted
169 remove = deleted
170 result = warn(modified + added + clean,
170 result = warn(modified + added + clean,
171 _('not removing %s: file still exists\n'))
171 _('not removing %s: file still exists\n'))
172 else:
172 else:
173 remove = deleted + clean
173 remove = deleted + clean
174 result = warn(modified, _('not removing %s: file is modified (use -f'
174 result = warn(modified, _('not removing %s: file is modified (use -f'
175 ' to force removal)\n'))
175 ' to force removal)\n'))
176 result = warn(added, _('not removing %s: file has been marked for add'
176 result = warn(added, _('not removing %s: file has been marked for add'
177 ' (use forget to undo)\n')) or result
177 ' (use forget to undo)\n')) or result
178
178
179 for f in sorted(remove):
179 for f in sorted(remove):
180 if ui.verbose or not m.exact(f):
180 if ui.verbose or not m.exact(f):
181 ui.status(_('removing %s\n') % m.rel(f))
181 ui.status(_('removing %s\n') % m.rel(f))
182
182
183 # Need to lock because standin files are deleted then removed from the
183 # Need to lock because standin files are deleted then removed from the
184 # repository and we could race in-between.
184 # repository and we could race in-between.
185 wlock = repo.wlock()
185 wlock = repo.wlock()
186 try:
186 try:
187 lfdirstate = lfutil.openlfdirstate(ui, repo)
187 lfdirstate = lfutil.openlfdirstate(ui, repo)
188 for f in remove:
188 for f in remove:
189 if not after:
189 if not after:
190 # If this is being called by addremove, notify the user that we
190 # If this is being called by addremove, notify the user that we
191 # are removing the file.
191 # are removing the file.
192 if isaddremove:
192 if isaddremove:
193 ui.status(_('removing %s\n') % f)
193 ui.status(_('removing %s\n') % f)
194
194
195 if not opts.get('dry_run'):
195 if not opts.get('dry_run'):
196 if not after:
196 if not after:
197 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
197 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
198 lfdirstate.remove(f)
198 lfdirstate.remove(f)
199
199
200 if opts.get('dry_run'):
200 if opts.get('dry_run'):
201 return result
201 return result
202
202
203 lfdirstate.write()
203 lfdirstate.write()
204 remove = [lfutil.standin(f) for f in remove]
204 remove = [lfutil.standin(f) for f in remove]
205 # If this is being called by addremove, let the original addremove
205 # If this is being called by addremove, let the original addremove
206 # function handle this.
206 # function handle this.
207 if not isaddremove:
207 if not isaddremove:
208 for f in remove:
208 for f in remove:
209 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
209 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
210 repo[None].forget(remove)
210 repo[None].forget(remove)
211 finally:
211 finally:
212 wlock.release()
212 wlock.release()
213
213
214 return result
214 return result
215
215
216 # For overriding mercurial.hgweb.webcommands so that largefiles will
216 # For overriding mercurial.hgweb.webcommands so that largefiles will
217 # appear at their right place in the manifests.
217 # appear at their right place in the manifests.
218 def decodepath(orig, path):
218 def decodepath(orig, path):
219 return lfutil.splitstandin(path) or path
219 return lfutil.splitstandin(path) or path
220
220
221 # -- Wrappers: modify existing commands --------------------------------
221 # -- Wrappers: modify existing commands --------------------------------
222
222
223 # Add works by going through the files that the user wanted to add and
223 # Add works by going through the files that the user wanted to add and
224 # checking if they should be added as largefiles. Then it makes a new
224 # checking if they should be added as largefiles. Then it makes a new
225 # matcher which matches only the normal files and runs the original
225 # matcher which matches only the normal files and runs the original
226 # version of add.
226 # version of add.
227 def overrideadd(orig, ui, repo, *pats, **opts):
227 def overrideadd(orig, ui, repo, *pats, **opts):
228 normal = opts.pop('normal')
228 normal = opts.pop('normal')
229 if normal:
229 if normal:
230 if opts.get('large'):
230 if opts.get('large'):
231 raise util.Abort(_('--normal cannot be used with --large'))
231 raise util.Abort(_('--normal cannot be used with --large'))
232 return orig(ui, repo, *pats, **opts)
232 return orig(ui, repo, *pats, **opts)
233 matcher = scmutil.match(repo[None], pats, opts)
233 matcher = scmutil.match(repo[None], pats, opts)
234 bad = addlargefiles(ui, repo, matcher, **opts)
234 bad = addlargefiles(ui, repo, matcher, **opts)
235 installnormalfilesmatchfn(repo[None].manifest())
235 installnormalfilesmatchfn(repo[None].manifest())
236 result = orig(ui, repo, *pats, **opts)
236 result = orig(ui, repo, *pats, **opts)
237 restorematchfn()
237 restorematchfn()
238
238
239 return (result == 1 or bad) and 1 or 0
239 return (result == 1 or bad) and 1 or 0
240
240
241 def overrideremove(orig, ui, repo, *pats, **opts):
241 def overrideremove(orig, ui, repo, *pats, **opts):
242 installnormalfilesmatchfn(repo[None].manifest())
242 installnormalfilesmatchfn(repo[None].manifest())
243 result = orig(ui, repo, *pats, **opts)
243 result = orig(ui, repo, *pats, **opts)
244 restorematchfn()
244 restorematchfn()
245 return removelargefiles(ui, repo, False, *pats, **opts) or result
245 return removelargefiles(ui, repo, False, *pats, **opts) or result
246
246
247 def overridestatusfn(orig, repo, rev2, **opts):
247 def overridestatusfn(orig, repo, rev2, **opts):
248 try:
248 try:
249 repo._repo.lfstatus = True
249 repo._repo.lfstatus = True
250 return orig(repo, rev2, **opts)
250 return orig(repo, rev2, **opts)
251 finally:
251 finally:
252 repo._repo.lfstatus = False
252 repo._repo.lfstatus = False
253
253
254 def overridestatus(orig, ui, repo, *pats, **opts):
254 def overridestatus(orig, ui, repo, *pats, **opts):
255 try:
255 try:
256 repo.lfstatus = True
256 repo.lfstatus = True
257 return orig(ui, repo, *pats, **opts)
257 return orig(ui, repo, *pats, **opts)
258 finally:
258 finally:
259 repo.lfstatus = False
259 repo.lfstatus = False
260
260
261 def overridedirty(orig, repo, ignoreupdate=False):
261 def overridedirty(orig, repo, ignoreupdate=False):
262 try:
262 try:
263 repo._repo.lfstatus = True
263 repo._repo.lfstatus = True
264 return orig(repo, ignoreupdate)
264 return orig(repo, ignoreupdate)
265 finally:
265 finally:
266 repo._repo.lfstatus = False
266 repo._repo.lfstatus = False
267
267
268 def overridelog(orig, ui, repo, *pats, **opts):
268 def overridelog(orig, ui, repo, *pats, **opts):
269 def overridematchandpats(ctx, pats=[], opts={}, globbed=False,
269 def overridematchandpats(ctx, pats=[], opts={}, globbed=False,
270 default='relpath'):
270 default='relpath'):
271 """Matcher that merges root directory with .hglf, suitable for log.
271 """Matcher that merges root directory with .hglf, suitable for log.
272 It is still possible to match .hglf directly.
272 It is still possible to match .hglf directly.
273 For any listed files run log on the standin too.
273 For any listed files run log on the standin too.
274 matchfn tries both the given filename and with .hglf stripped.
274 matchfn tries both the given filename and with .hglf stripped.
275 """
275 """
276 matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default)
276 matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default)
277 m, p = copy.copy(matchandpats)
277 m, p = copy.copy(matchandpats)
278
278
279 if m.always():
279 if m.always():
280 # We want to match everything anyway, so there's no benefit trying
280 # We want to match everything anyway, so there's no benefit trying
281 # to add standins.
281 # to add standins.
282 return matchandpats
282 return matchandpats
283
283
284 pats = set(p)
284 pats = set(p)
285 # TODO: handling of patterns in both cases below
285 # TODO: handling of patterns in both cases below
286 if m._cwd:
286 if m._cwd:
287 if os.path.isabs(m._cwd):
287 if os.path.isabs(m._cwd):
288 # TODO: handle largefile magic when invoked from other cwd
288 # TODO: handle largefile magic when invoked from other cwd
289 return matchandpats
289 return matchandpats
290 back = (m._cwd.count('/') + 1) * '../'
290 back = (m._cwd.count('/') + 1) * '../'
291 pats.update(back + lfutil.standin(m._cwd + '/' + f) for f in p)
291 pats.update(back + lfutil.standin(m._cwd + '/' + f) for f in p)
292 else:
292 else:
293 pats.update(lfutil.standin(f) for f in p)
293 pats.update(lfutil.standin(f) for f in p)
294
294
295 for i in range(0, len(m._files)):
295 for i in range(0, len(m._files)):
296 standin = lfutil.standin(m._files[i])
296 standin = lfutil.standin(m._files[i])
297 if standin in repo[ctx.node()]:
297 if standin in repo[ctx.node()]:
298 m._files[i] = standin
298 m._files[i] = standin
299 elif m._files[i] not in repo[ctx.node()]:
299 elif m._files[i] not in repo[ctx.node()]:
300 m._files.append(standin)
300 m._files.append(standin)
301 pats.add(standin)
301 pats.add(standin)
302
302
303 m._fmap = set(m._files)
303 m._fmap = set(m._files)
304 m._always = False
304 m._always = False
305 origmatchfn = m.matchfn
305 origmatchfn = m.matchfn
306 def lfmatchfn(f):
306 def lfmatchfn(f):
307 lf = lfutil.splitstandin(f)
307 lf = lfutil.splitstandin(f)
308 if lf is not None and origmatchfn(lf):
308 if lf is not None and origmatchfn(lf):
309 return True
309 return True
310 r = origmatchfn(f)
310 r = origmatchfn(f)
311 return r
311 return r
312 m.matchfn = lfmatchfn
312 m.matchfn = lfmatchfn
313
313
314 return m, pats
314 return m, pats
315
315
316 # For hg log --patch, the match object is used in two different senses:
316 # For hg log --patch, the match object is used in two different senses:
317 # (1) to determine what revisions should be printed out, and
317 # (1) to determine what revisions should be printed out, and
318 # (2) to determine what files to print out diffs for.
318 # (2) to determine what files to print out diffs for.
319 # The magic matchandpats override should be used for case (1) but not for
319 # The magic matchandpats override should be used for case (1) but not for
320 # case (2).
320 # case (2).
321 def overridemakelogfilematcher(repo, pats, opts):
321 def overridemakelogfilematcher(repo, pats, opts):
322 pctx = repo[None]
322 pctx = repo[None]
323 match, pats = oldmatchandpats(pctx, pats, opts)
323 match, pats = oldmatchandpats(pctx, pats, opts)
324 return lambda rev: match
324 return lambda rev: match
325
325
326 oldmatchandpats = installmatchandpatsfn(overridematchandpats)
326 oldmatchandpats = installmatchandpatsfn(overridematchandpats)
327 oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
327 oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
328 setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)
328 setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)
329
329
330 try:
330 try:
331 return orig(ui, repo, *pats, **opts)
331 return orig(ui, repo, *pats, **opts)
332 finally:
332 finally:
333 restorematchandpatsfn()
333 restorematchandpatsfn()
334 setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
334 setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
335
335
336 def overrideverify(orig, ui, repo, *pats, **opts):
336 def overrideverify(orig, ui, repo, *pats, **opts):
337 large = opts.pop('large', False)
337 large = opts.pop('large', False)
338 all = opts.pop('lfa', False)
338 all = opts.pop('lfa', False)
339 contents = opts.pop('lfc', False)
339 contents = opts.pop('lfc', False)
340
340
341 result = orig(ui, repo, *pats, **opts)
341 result = orig(ui, repo, *pats, **opts)
342 if large or all or contents:
342 if large or all or contents:
343 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
343 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
344 return result
344 return result
345
345
346 def overridedebugstate(orig, ui, repo, *pats, **opts):
346 def overridedebugstate(orig, ui, repo, *pats, **opts):
347 large = opts.pop('large', False)
347 large = opts.pop('large', False)
348 if large:
348 if large:
349 class fakerepo(object):
349 class fakerepo(object):
350 dirstate = lfutil.openlfdirstate(ui, repo)
350 dirstate = lfutil.openlfdirstate(ui, repo)
351 orig(ui, fakerepo, *pats, **opts)
351 orig(ui, fakerepo, *pats, **opts)
352 else:
352 else:
353 orig(ui, repo, *pats, **opts)
353 orig(ui, repo, *pats, **opts)
354
354
355 # Override needs to refresh standins so that update's normal merge
355 # Override needs to refresh standins so that update's normal merge
356 # will go through properly. Then the other update hook (overriding repo.update)
356 # will go through properly. Then the other update hook (overriding repo.update)
357 # will get the new files. Filemerge is also overridden so that the merge
357 # will get the new files. Filemerge is also overridden so that the merge
358 # will merge standins correctly.
358 # will merge standins correctly.
359 def overrideupdate(orig, ui, repo, *pats, **opts):
359 def overrideupdate(orig, ui, repo, *pats, **opts):
360 # Need to lock between the standins getting updated and their
360 # Need to lock between the standins getting updated and their
361 # largefiles getting updated
361 # largefiles getting updated
362 wlock = repo.wlock()
362 wlock = repo.wlock()
363 try:
363 try:
364 if opts['check']:
364 if opts['check']:
365 lfdirstate = lfutil.openlfdirstate(ui, repo)
365 lfdirstate = lfutil.openlfdirstate(ui, repo)
366 unsure, s = lfdirstate.status(
366 unsure, s = lfdirstate.status(
367 match_.always(repo.root, repo.getcwd()),
367 match_.always(repo.root, repo.getcwd()),
368 [], False, False, False)
368 [], False, False, False)
369
369
370 mod = len(s.modified) > 0
370 mod = len(s.modified) > 0
371 for lfile in unsure:
371 for lfile in unsure:
372 standin = lfutil.standin(lfile)
372 standin = lfutil.standin(lfile)
373 if repo['.'][standin].data().strip() != \
373 if repo['.'][standin].data().strip() != \
374 lfutil.hashfile(repo.wjoin(lfile)):
374 lfutil.hashfile(repo.wjoin(lfile)):
375 mod = True
375 mod = True
376 else:
376 else:
377 lfdirstate.normal(lfile)
377 lfdirstate.normal(lfile)
378 lfdirstate.write()
378 lfdirstate.write()
379 if mod:
379 if mod:
380 raise util.Abort(_('uncommitted changes'))
380 raise util.Abort(_('uncommitted changes'))
381 return orig(ui, repo, *pats, **opts)
381 return orig(ui, repo, *pats, **opts)
382 finally:
382 finally:
383 wlock.release()
383 wlock.release()
384
384
385 # Before starting the manifest merge, merge.updates will call
385 # Before starting the manifest merge, merge.updates will call
386 # _checkunknownfile to check if there are any files in the merged-in
386 # _checkunknownfile to check if there are any files in the merged-in
387 # changeset that collide with unknown files in the working copy.
387 # changeset that collide with unknown files in the working copy.
388 #
388 #
389 # The largefiles are seen as unknown, so this prevents us from merging
389 # The largefiles are seen as unknown, so this prevents us from merging
390 # in a file 'foo' if we already have a largefile with the same name.
390 # in a file 'foo' if we already have a largefile with the same name.
391 #
391 #
392 # The overridden function filters the unknown files by removing any
392 # The overridden function filters the unknown files by removing any
393 # largefiles. This makes the merge proceed and we can then handle this
393 # largefiles. This makes the merge proceed and we can then handle this
394 # case further in the overridden calculateupdates function below.
394 # case further in the overridden calculateupdates function below.
395 def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
395 def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
396 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
396 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
397 return False
397 return False
398 return origfn(repo, wctx, mctx, f)
398 return origfn(repo, wctx, mctx, f)
399
399
# The manifest merge handles conflicts on the manifest level. We want
# to handle changes in largefile-ness of files at this level too.
#
# The strategy is to run the original calculateupdates and then process
# the action list it outputs. There are two cases we need to deal with:
#
# 1. Normal file in p1, largefile in p2. Here the largefile is
#    detected via its standin file, which will enter the working copy
#    with a "get" action. It is not "merge" since the standin is all
#    Mercurial is concerned with at this level -- the link to the
#    existing normal file is not relevant here.
#
# 2. Largefile in p1, normal file in p2. Here we get a "merge" action
#    since the largefile will be present in the working copy and
#    different from the normal file in p2. Mercurial therefore
#    triggers a merge action.
#
# In both cases, we prompt the user and emit new actions to either
# remove the standin (if the normal file was kept) or to remove the
# normal file and get the standin (if the largefile was kept). The
# default prompt answer is to use the largefile version since it was
# presumably changed on purpose.
#
# Finally, the merge.applyupdates function will then take care of
# writing the files into the working copy and lfcommands.updatelfiles
# will update the largefiles.
def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
                             partial, acceptremote, followcopies):
    overwrite = force and not branchmerge
    actions, diverge, renamedelete = origfn(
        repo, p1, p2, pas, branchmerge, force, partial, acceptremote,
        followcopies)

    if overwrite:
        return actions, diverge, renamedelete

    # Convert to dictionary with filename as key and action as value.
    lfiles = set()
-    actionbyfile = {}
-    for m, l in actions.iteritems():
-        for f, args, msg in l:
-            actionbyfile[f] = m, args, msg
-            splitstandin = f and lfutil.splitstandin(f)
-            if splitstandin in p1:
-                lfiles.add(splitstandin)
-            elif lfutil.standin(f) in p1:
-                lfiles.add(f)
+    actionbyfile = actions
+    for f in actionbyfile:
+        splitstandin = f and lfutil.splitstandin(f)
+        if splitstandin in p1:
+            lfiles.add(splitstandin)
+        elif lfutil.standin(f) in p1:
+            lfiles.add(f)

    for lfile in lfiles:
        standin = lfutil.standin(lfile)
        (lm, largs, lmsg) = actionbyfile.get(lfile, (None, None, None))
        (sm, sargs, smsg) = actionbyfile.get(standin, (None, None, None))
        if sm in ('g', 'dc') and lm != 'r':
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            usermsg = _('remote turned local normal file %s into a largefile\n'
                        'use (l)argefile or keep (n)ormal file?'
                        '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
                actionbyfile[lfile] = ('r', None, 'replaced by standin')
                actionbyfile[standin] = ('g', sargs, 'replaces standin')
            else: # keep local normal file
                actionbyfile[lfile] = ('k', None, 'replaces standin')
                if branchmerge:
                    actionbyfile[standin] = ('k', None,
                                             'replaced by non-standin')
                else:
                    actionbyfile[standin] = ('r', None,
                                             'replaced by non-standin')
        elif lm in ('g', 'dc') and sm != 'r':
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            usermsg = _('remote turned local largefile %s into a normal file\n'
                        'keep (l)argefile or use (n)ormal file?'
                        '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
                if branchmerge:
                    # largefile can be restored from standin safely
                    actionbyfile[lfile] = ('k', None, 'replaced by standin')
                    actionbyfile[standin] = ('k', None, 'replaces standin')
                else:
                    # "lfile" should be marked as "removed" without
                    # removal of itself
                    actionbyfile[lfile] = ('lfmr', None,
                                           'forget non-standin largefile')

                    # linear-merge should treat this largefile as 're-added'
                    actionbyfile[standin] = ('a', None, 'keep standin')
            else: # pick remote normal file
                actionbyfile[lfile] = ('g', largs, 'replaces standin')
                actionbyfile[standin] = ('r', None, 'replaced by non-standin')

-    # Convert back to dictionary-of-lists format
-    for l in actions.itervalues():
-        l[:] = []
-    actions['lfmr'] = []
-    for f, (m, args, msg) in actionbyfile.iteritems():
-        actions[m].append((f, args, msg))
-
-    return actions, diverge, renamedelete
+    return actionbyfile, diverge, renamedelete

501 def mergerecordupdates(orig, repo, actions, branchmerge):
492 def mergerecordupdates(orig, repo, actions, branchmerge):
502 if 'lfmr' in actions:
493 if 'lfmr' in actions:
503 # this should be executed before 'orig', to execute 'remove'
494 # this should be executed before 'orig', to execute 'remove'
504 # before all other actions
495 # before all other actions
505 for lfile, args, msg in actions['lfmr']:
496 for lfile, args, msg in actions['lfmr']:
506 repo.dirstate.remove(lfile)
497 repo.dirstate.remove(lfile)
507
498
508 return orig(repo, actions, branchmerge)
499 return orig(repo, actions, branchmerge)
509
500
510
501
511 # Override filemerge to prompt the user about how they wish to merge
502 # Override filemerge to prompt the user about how they wish to merge
512 # largefiles. This will handle identical edits without prompting the user.
503 # largefiles. This will handle identical edits without prompting the user.
513 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca, labels=None):
504 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca, labels=None):
514 if not lfutil.isstandin(orig):
505 if not lfutil.isstandin(orig):
515 return origfn(repo, mynode, orig, fcd, fco, fca, labels=labels)
506 return origfn(repo, mynode, orig, fcd, fco, fca, labels=labels)
516
507
517 ahash = fca.data().strip().lower()
508 ahash = fca.data().strip().lower()
518 dhash = fcd.data().strip().lower()
509 dhash = fcd.data().strip().lower()
519 ohash = fco.data().strip().lower()
510 ohash = fco.data().strip().lower()
520 if (ohash != ahash and
511 if (ohash != ahash and
521 ohash != dhash and
512 ohash != dhash and
522 (dhash == ahash or
513 (dhash == ahash or
523 repo.ui.promptchoice(
514 repo.ui.promptchoice(
524 _('largefile %s has a merge conflict\nancestor was %s\n'
515 _('largefile %s has a merge conflict\nancestor was %s\n'
525 'keep (l)ocal %s or\ntake (o)ther %s?'
516 'keep (l)ocal %s or\ntake (o)ther %s?'
526 '$$ &Local $$ &Other') %
517 '$$ &Local $$ &Other') %
527 (lfutil.splitstandin(orig), ahash, dhash, ohash),
518 (lfutil.splitstandin(orig), ahash, dhash, ohash),
528 0) == 1)):
519 0) == 1)):
529 repo.wwrite(fcd.path(), fco.data(), fco.flags())
520 repo.wwrite(fcd.path(), fco.data(), fco.flags())
530 return 0
521 return 0
531
522
532 # Copy first changes the matchers to match standins instead of
523 # Copy first changes the matchers to match standins instead of
533 # largefiles. Then it overrides util.copyfile in that function it
524 # largefiles. Then it overrides util.copyfile in that function it
534 # checks if the destination largefile already exists. It also keeps a
525 # checks if the destination largefile already exists. It also keeps a
535 # list of copied files so that the largefiles can be copied and the
526 # list of copied files so that the largefiles can be copied and the
536 # dirstate updated.
527 # dirstate updated.
537 def overridecopy(orig, ui, repo, pats, opts, rename=False):
528 def overridecopy(orig, ui, repo, pats, opts, rename=False):
538 # doesn't remove largefile on rename
529 # doesn't remove largefile on rename
539 if len(pats) < 2:
530 if len(pats) < 2:
540 # this isn't legal, let the original function deal with it
531 # this isn't legal, let the original function deal with it
541 return orig(ui, repo, pats, opts, rename)
532 return orig(ui, repo, pats, opts, rename)
542
533
543 def makestandin(relpath):
534 def makestandin(relpath):
544 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
535 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
545 return os.path.join(repo.wjoin(lfutil.standin(path)))
536 return os.path.join(repo.wjoin(lfutil.standin(path)))
546
537
547 fullpats = scmutil.expandpats(pats)
538 fullpats = scmutil.expandpats(pats)
548 dest = fullpats[-1]
539 dest = fullpats[-1]
549
540
550 if os.path.isdir(dest):
541 if os.path.isdir(dest):
551 if not os.path.isdir(makestandin(dest)):
542 if not os.path.isdir(makestandin(dest)):
552 os.makedirs(makestandin(dest))
543 os.makedirs(makestandin(dest))
553 # This could copy both lfiles and normal files in one command,
544 # This could copy both lfiles and normal files in one command,
554 # but we don't want to do that. First replace their matcher to
545 # but we don't want to do that. First replace their matcher to
555 # only match normal files and run it, then replace it to just
546 # only match normal files and run it, then replace it to just
556 # match largefiles and run it again.
547 # match largefiles and run it again.
557 nonormalfiles = False
548 nonormalfiles = False
558 nolfiles = False
549 nolfiles = False
559 installnormalfilesmatchfn(repo[None].manifest())
550 installnormalfilesmatchfn(repo[None].manifest())
560 try:
551 try:
561 try:
552 try:
562 result = orig(ui, repo, pats, opts, rename)
553 result = orig(ui, repo, pats, opts, rename)
563 except util.Abort, e:
554 except util.Abort, e:
564 if str(e) != _('no files to copy'):
555 if str(e) != _('no files to copy'):
565 raise e
556 raise e
566 else:
557 else:
567 nonormalfiles = True
558 nonormalfiles = True
568 result = 0
559 result = 0
569 finally:
560 finally:
570 restorematchfn()
561 restorematchfn()
571
562
572 # The first rename can cause our current working directory to be removed.
563 # The first rename can cause our current working directory to be removed.
573 # In that case there is nothing left to copy/rename so just quit.
564 # In that case there is nothing left to copy/rename so just quit.
574 try:
565 try:
575 repo.getcwd()
566 repo.getcwd()
576 except OSError:
567 except OSError:
577 return result
568 return result
578
569
579 try:
570 try:
580 try:
571 try:
581 # When we call orig below it creates the standins but we don't add
572 # When we call orig below it creates the standins but we don't add
582 # them to the dir state until later so lock during that time.
573 # them to the dir state until later so lock during that time.
583 wlock = repo.wlock()
574 wlock = repo.wlock()
584
575
585 manifest = repo[None].manifest()
576 manifest = repo[None].manifest()
586 def overridematch(ctx, pats=[], opts={}, globbed=False,
577 def overridematch(ctx, pats=[], opts={}, globbed=False,
587 default='relpath'):
578 default='relpath'):
588 newpats = []
579 newpats = []
589 # The patterns were previously mangled to add the standin
580 # The patterns were previously mangled to add the standin
590 # directory; we need to remove that now
581 # directory; we need to remove that now
591 for pat in pats:
582 for pat in pats:
592 if match_.patkind(pat) is None and lfutil.shortname in pat:
583 if match_.patkind(pat) is None and lfutil.shortname in pat:
593 newpats.append(pat.replace(lfutil.shortname, ''))
584 newpats.append(pat.replace(lfutil.shortname, ''))
594 else:
585 else:
595 newpats.append(pat)
586 newpats.append(pat)
596 match = oldmatch(ctx, newpats, opts, globbed, default)
587 match = oldmatch(ctx, newpats, opts, globbed, default)
597 m = copy.copy(match)
588 m = copy.copy(match)
598 lfile = lambda f: lfutil.standin(f) in manifest
589 lfile = lambda f: lfutil.standin(f) in manifest
599 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
590 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
600 m._fmap = set(m._files)
591 m._fmap = set(m._files)
601 origmatchfn = m.matchfn
592 origmatchfn = m.matchfn
602 m.matchfn = lambda f: (lfutil.isstandin(f) and
593 m.matchfn = lambda f: (lfutil.isstandin(f) and
603 (f in manifest) and
594 (f in manifest) and
604 origmatchfn(lfutil.splitstandin(f)) or
595 origmatchfn(lfutil.splitstandin(f)) or
605 None)
596 None)
606 return m
597 return m
607 oldmatch = installmatchfn(overridematch)
598 oldmatch = installmatchfn(overridematch)
608 listpats = []
599 listpats = []
609 for pat in pats:
600 for pat in pats:
610 if match_.patkind(pat) is not None:
601 if match_.patkind(pat) is not None:
611 listpats.append(pat)
602 listpats.append(pat)
612 else:
603 else:
613 listpats.append(makestandin(pat))
604 listpats.append(makestandin(pat))
614
605
615 try:
606 try:
616 origcopyfile = util.copyfile
607 origcopyfile = util.copyfile
617 copiedfiles = []
608 copiedfiles = []
618 def overridecopyfile(src, dest):
609 def overridecopyfile(src, dest):
619 if (lfutil.shortname in src and
610 if (lfutil.shortname in src and
620 dest.startswith(repo.wjoin(lfutil.shortname))):
611 dest.startswith(repo.wjoin(lfutil.shortname))):
621 destlfile = dest.replace(lfutil.shortname, '')
612 destlfile = dest.replace(lfutil.shortname, '')
622 if not opts['force'] and os.path.exists(destlfile):
613 if not opts['force'] and os.path.exists(destlfile):
623 raise IOError('',
614 raise IOError('',
624 _('destination largefile already exists'))
615 _('destination largefile already exists'))
625 copiedfiles.append((src, dest))
616 copiedfiles.append((src, dest))
626 origcopyfile(src, dest)
617 origcopyfile(src, dest)
627
618
628 util.copyfile = overridecopyfile
619 util.copyfile = overridecopyfile
629 result += orig(ui, repo, listpats, opts, rename)
620 result += orig(ui, repo, listpats, opts, rename)
630 finally:
621 finally:
631 util.copyfile = origcopyfile
622 util.copyfile = origcopyfile
632
623
633 lfdirstate = lfutil.openlfdirstate(ui, repo)
624 lfdirstate = lfutil.openlfdirstate(ui, repo)
634 for (src, dest) in copiedfiles:
625 for (src, dest) in copiedfiles:
635 if (lfutil.shortname in src and
626 if (lfutil.shortname in src and
636 dest.startswith(repo.wjoin(lfutil.shortname))):
627 dest.startswith(repo.wjoin(lfutil.shortname))):
637 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
628 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
638 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
629 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
639 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
630 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
640 if not os.path.isdir(destlfiledir):
631 if not os.path.isdir(destlfiledir):
641 os.makedirs(destlfiledir)
632 os.makedirs(destlfiledir)
642 if rename:
633 if rename:
643 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
634 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
644
635
645 # The file is gone, but this deletes any empty parent
636 # The file is gone, but this deletes any empty parent
646 # directories as a side-effect.
637 # directories as a side-effect.
647 util.unlinkpath(repo.wjoin(srclfile), True)
638 util.unlinkpath(repo.wjoin(srclfile), True)
648 lfdirstate.remove(srclfile)
639 lfdirstate.remove(srclfile)
649 else:
640 else:
650 util.copyfile(repo.wjoin(srclfile),
641 util.copyfile(repo.wjoin(srclfile),
651 repo.wjoin(destlfile))
642 repo.wjoin(destlfile))
652
643
653 lfdirstate.add(destlfile)
644 lfdirstate.add(destlfile)
654 lfdirstate.write()
645 lfdirstate.write()
655 except util.Abort, e:
646 except util.Abort, e:
656 if str(e) != _('no files to copy'):
647 if str(e) != _('no files to copy'):
657 raise e
648 raise e
658 else:
649 else:
659 nolfiles = True
650 nolfiles = True
660 finally:
651 finally:
661 restorematchfn()
652 restorematchfn()
662 wlock.release()
653 wlock.release()
663
654
664 if nolfiles and nonormalfiles:
655 if nolfiles and nonormalfiles:
665 raise util.Abort(_('no files to copy'))
656 raise util.Abort(_('no files to copy'))
666
657
667 return result
658 return result
668
659
669 # When the user calls revert, we have to be careful to not revert any
660 # When the user calls revert, we have to be careful to not revert any
670 # changes to other largefiles accidentally. This means we have to keep
661 # changes to other largefiles accidentally. This means we have to keep
671 # track of the largefiles that are being reverted so we only pull down
662 # track of the largefiles that are being reverted so we only pull down
672 # the necessary largefiles.
663 # the necessary largefiles.
673 #
664 #
674 # Standins are only updated (to match the hash of largefiles) before
665 # Standins are only updated (to match the hash of largefiles) before
675 # commits. Update the standins then run the original revert, changing
666 # commits. Update the standins then run the original revert, changing
676 # the matcher to hit standins instead of largefiles. Based on the
667 # the matcher to hit standins instead of largefiles. Based on the
677 # resulting standins update the largefiles.
668 # resulting standins update the largefiles.
678 def overriderevert(orig, ui, repo, *pats, **opts):
669 def overriderevert(orig, ui, repo, *pats, **opts):
679 # Because we put the standins in a bad state (by updating them)
670 # Because we put the standins in a bad state (by updating them)
680 # and then return them to a correct state we need to lock to
671 # and then return them to a correct state we need to lock to
681 # prevent others from changing them in their incorrect state.
672 # prevent others from changing them in their incorrect state.
682 wlock = repo.wlock()
673 wlock = repo.wlock()
683 try:
674 try:
684 lfdirstate = lfutil.openlfdirstate(ui, repo)
675 lfdirstate = lfutil.openlfdirstate(ui, repo)
685 s = lfutil.lfdirstatestatus(lfdirstate, repo)
676 s = lfutil.lfdirstatestatus(lfdirstate, repo)
686 lfdirstate.write()
677 lfdirstate.write()
687 for lfile in s.modified:
678 for lfile in s.modified:
688 lfutil.updatestandin(repo, lfutil.standin(lfile))
679 lfutil.updatestandin(repo, lfutil.standin(lfile))
689 for lfile in s.deleted:
680 for lfile in s.deleted:
690 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
681 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
691 os.unlink(repo.wjoin(lfutil.standin(lfile)))
682 os.unlink(repo.wjoin(lfutil.standin(lfile)))
692
683
693 oldstandins = lfutil.getstandinsstate(repo)
684 oldstandins = lfutil.getstandinsstate(repo)
694
685
695 def overridematch(ctx, pats=[], opts={}, globbed=False,
686 def overridematch(ctx, pats=[], opts={}, globbed=False,
696 default='relpath'):
687 default='relpath'):
697 match = oldmatch(ctx, pats, opts, globbed, default)
688 match = oldmatch(ctx, pats, opts, globbed, default)
698 m = copy.copy(match)
689 m = copy.copy(match)
699 def tostandin(f):
690 def tostandin(f):
700 if lfutil.standin(f) in ctx:
691 if lfutil.standin(f) in ctx:
701 return lfutil.standin(f)
692 return lfutil.standin(f)
702 elif lfutil.standin(f) in repo[None]:
693 elif lfutil.standin(f) in repo[None]:
703 return None
694 return None
704 return f
695 return f
705 m._files = [tostandin(f) for f in m._files]
696 m._files = [tostandin(f) for f in m._files]
706 m._files = [f for f in m._files if f is not None]
697 m._files = [f for f in m._files if f is not None]
707 m._fmap = set(m._files)
698 m._fmap = set(m._files)
708 origmatchfn = m.matchfn
699 origmatchfn = m.matchfn
709 def matchfn(f):
700 def matchfn(f):
710 if lfutil.isstandin(f):
701 if lfutil.isstandin(f):
711 return (origmatchfn(lfutil.splitstandin(f)) and
702 return (origmatchfn(lfutil.splitstandin(f)) and
712 (f in repo[None] or f in ctx))
703 (f in repo[None] or f in ctx))
713 return origmatchfn(f)
704 return origmatchfn(f)
714 m.matchfn = matchfn
705 m.matchfn = matchfn
715 return m
706 return m
716 oldmatch = installmatchfn(overridematch)
707 oldmatch = installmatchfn(overridematch)
717 try:
708 try:
718 orig(ui, repo, *pats, **opts)
709 orig(ui, repo, *pats, **opts)
719 finally:
710 finally:
720 restorematchfn()
711 restorematchfn()
721
712
722 newstandins = lfutil.getstandinsstate(repo)
713 newstandins = lfutil.getstandinsstate(repo)
723 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
714 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
724 # lfdirstate should be 'normallookup'-ed for updated files,
715 # lfdirstate should be 'normallookup'-ed for updated files,
725 # because reverting doesn't touch dirstate for 'normal' files
716 # because reverting doesn't touch dirstate for 'normal' files
726 # when target revision is explicitly specified: in such case,
717 # when target revision is explicitly specified: in such case,
727 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
718 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
728 # of target (standin) file.
719 # of target (standin) file.
729 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
720 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
730 normallookup=True)
721 normallookup=True)
731
722
732 finally:
723 finally:
733 wlock.release()
724 wlock.release()
734
725
735 # after pulling changesets, we need to take some extra care to get
726 # after pulling changesets, we need to take some extra care to get
736 # largefiles updated remotely
727 # largefiles updated remotely
737 def overridepull(orig, ui, repo, source=None, **opts):
728 def overridepull(orig, ui, repo, source=None, **opts):
738 revsprepull = len(repo)
729 revsprepull = len(repo)
739 if not source:
730 if not source:
740 source = 'default'
731 source = 'default'
741 repo.lfpullsource = source
732 repo.lfpullsource = source
742 result = orig(ui, repo, source, **opts)
733 result = orig(ui, repo, source, **opts)
743 revspostpull = len(repo)
734 revspostpull = len(repo)
744 lfrevs = opts.get('lfrev', [])
735 lfrevs = opts.get('lfrev', [])
745 if opts.get('all_largefiles'):
736 if opts.get('all_largefiles'):
746 lfrevs.append('pulled()')
737 lfrevs.append('pulled()')
747 if lfrevs and revspostpull > revsprepull:
738 if lfrevs and revspostpull > revsprepull:
748 numcached = 0
739 numcached = 0
749 repo.firstpulled = revsprepull # for pulled() revset expression
740 repo.firstpulled = revsprepull # for pulled() revset expression
750 try:
741 try:
751 for rev in scmutil.revrange(repo, lfrevs):
742 for rev in scmutil.revrange(repo, lfrevs):
752 ui.note(_('pulling largefiles for revision %s\n') % rev)
743 ui.note(_('pulling largefiles for revision %s\n') % rev)
753 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
744 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
754 numcached += len(cached)
745 numcached += len(cached)
755 finally:
746 finally:
756 del repo.firstpulled
747 del repo.firstpulled
757 ui.status(_("%d largefiles cached\n") % numcached)
748 ui.status(_("%d largefiles cached\n") % numcached)
758 return result
749 return result
759
750
760 def pulledrevsetsymbol(repo, subset, x):
751 def pulledrevsetsymbol(repo, subset, x):
761 """``pulled()``
752 """``pulled()``
762 Changesets that just has been pulled.
753 Changesets that just has been pulled.
763
754
764 Only available with largefiles from pull --lfrev expressions.
755 Only available with largefiles from pull --lfrev expressions.
765
756
766 .. container:: verbose
757 .. container:: verbose
767
758
768 Some examples:
759 Some examples:
769
760
770 - pull largefiles for all new changesets::
761 - pull largefiles for all new changesets::
771
762
772 hg pull -lfrev "pulled()"
763 hg pull -lfrev "pulled()"
773
764
774 - pull largefiles for all new branch heads::
765 - pull largefiles for all new branch heads::
775
766
776 hg pull -lfrev "head(pulled()) and not closed()"
767 hg pull -lfrev "head(pulled()) and not closed()"
777
768
778 """
769 """
779
770
780 try:
771 try:
781 firstpulled = repo.firstpulled
772 firstpulled = repo.firstpulled
782 except AttributeError:
773 except AttributeError:
783 raise util.Abort(_("pulled() only available in --lfrev"))
774 raise util.Abort(_("pulled() only available in --lfrev"))
784 return revset.baseset([r for r in subset if r >= firstpulled])
775 return revset.baseset([r for r in subset if r >= firstpulled])
785
776
786 def overrideclone(orig, ui, source, dest=None, **opts):
777 def overrideclone(orig, ui, source, dest=None, **opts):
787 d = dest
778 d = dest
788 if d is None:
779 if d is None:
789 d = hg.defaultdest(source)
780 d = hg.defaultdest(source)
790 if opts.get('all_largefiles') and not hg.islocal(d):
781 if opts.get('all_largefiles') and not hg.islocal(d):
791 raise util.Abort(_(
782 raise util.Abort(_(
792 '--all-largefiles is incompatible with non-local destination %s') %
783 '--all-largefiles is incompatible with non-local destination %s') %
793 d)
784 d)
794
785
795 return orig(ui, source, dest, **opts)
786 return orig(ui, source, dest, **opts)
796
787
797 def hgclone(orig, ui, opts, *args, **kwargs):
788 def hgclone(orig, ui, opts, *args, **kwargs):
798 result = orig(ui, opts, *args, **kwargs)
789 result = orig(ui, opts, *args, **kwargs)
799
790
800 if result is not None:
791 if result is not None:
801 sourcerepo, destrepo = result
792 sourcerepo, destrepo = result
802 repo = destrepo.local()
793 repo = destrepo.local()
803
794
804 # Caching is implicitly limited to 'rev' option, since the dest repo was
795 # Caching is implicitly limited to 'rev' option, since the dest repo was
805 # truncated at that point. The user may expect a download count with
796 # truncated at that point. The user may expect a download count with
806 # this option, so attempt whether or not this is a largefile repo.
797 # this option, so attempt whether or not this is a largefile repo.
798 if opts.get('all_largefiles'):
799 success, missing = lfcommands.downloadlfiles(ui, repo, None)
800
801 if missing != 0:
802 return None
803
804 return result
805
806 def overriderebase(orig, ui, repo, **opts):
807 resuming = opts.get('continue')
808 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
809 repo._lfstatuswriters.append(lambda *msg, **opts: None)
810 try:
811 return orig(ui, repo, **opts)
812 finally:
813 repo._lfstatuswriters.pop()
814 repo._lfcommithooks.pop()
815
816 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
817 prefix=None, mtime=None, subrepos=None):
818 # No need to lock because we are only reading history and
819 # largefile caches, neither of which are modified.
820 lfcommands.cachelfiles(repo.ui, repo, node)
821
822 if kind not in archival.archivers:
823 raise util.Abort(_("unknown archive type '%s'") % kind)
824
825 ctx = repo[node]
826
827 if kind == 'files':
828 if prefix:
829 raise util.Abort(
830 _('cannot give prefix when archiving to files'))
831 else:
832 prefix = archival.tidyprefix(dest, kind, prefix)
833
834 def write(name, mode, islink, getdata):
835 if matchfn and not matchfn(name):
836 return
837 data = getdata()
838 if decode:
839 data = repo.wwritedata(name, data)
840 archiver.addfile(prefix + name, mode, islink, data)
841
842 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
843
844 if repo.ui.configbool("ui", "archivemeta", True):
845 def metadata():
846 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
847 hex(repo.changelog.node(0)), hex(node), ctx.branch())
848
849 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
850 if repo.tagtype(t) == 'global')
851 if not tags:
852 repo.ui.pushbuffer()
853 opts = {'template': '{latesttag}\n{latesttagdistance}',
854 'style': '', 'patch': None, 'git': None}
855 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
856 ltags, dist = repo.ui.popbuffer().split('\n')
857 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
858 tags += 'latesttagdistance: %s\n' % dist
859
860 return base + tags
861
862 write('.hg_archival.txt', 0644, False, metadata)
863
864 for f in ctx:
865 ff = ctx.flags(f)
866 getdata = ctx[f].data
867 if lfutil.isstandin(f):
868 path = lfutil.findfile(repo, getdata().strip())
869 if path is None:
870 raise util.Abort(
871 _('largefile %s not found in repo store or system cache')
872 % lfutil.splitstandin(f))
873 f = lfutil.splitstandin(f)
874
875 def getdatafn():
876 fd = None
877 try:
878 fd = open(path, 'rb')
879 return fd.read()
880 finally:
881 if fd:
882 fd.close()
883
884 getdata = getdatafn
885 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
886
887 if subrepos:
888 for subpath in sorted(ctx.substate):
889 sub = ctx.sub(subpath)
890 submatch = match_.narrowmatcher(subpath, matchfn)
891 sub.archive(archiver, prefix, submatch)
892
893 archiver.done()
894
895 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
896 repo._get(repo._state + ('hg',))
897 rev = repo._state[1]
898 ctx = repo._repo[rev]
899
900 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
901
902 def write(name, mode, islink, getdata):
903 # At this point, the standin has been replaced with the largefile name,
904 # so the normal matcher works here without the lfutil variants.
905 if match and not match(f):
906 return
907 data = getdata()
908
909 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
910
911 for f in ctx:
912 ff = ctx.flags(f)
913 getdata = ctx[f].data
914 if lfutil.isstandin(f):
915 path = lfutil.findfile(repo._repo, getdata().strip())
916 if path is None:
917 raise util.Abort(
918 _('largefile %s not found in repo store or system cache')
919 % lfutil.splitstandin(f))
920 f = lfutil.splitstandin(f)
921
922 def getdatafn():
923 fd = None
924 try:
925 fd = open(os.path.join(prefix, path), 'rb')
926 return fd.read()
927 finally:
928 if fd:
929 fd.close()
930
931 getdata = getdatafn
932
933 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
934
935 for subpath in sorted(ctx.substate):
936 sub = ctx.sub(subpath)
937 submatch = match_.narrowmatcher(subpath, match)
938 sub.archive(archiver, os.path.join(prefix, repo._path) + '/', submatch)
939
940 # If a largefile is modified, the change is not reflected in its
941 # standin until a commit. cmdutil.bailifchanged() raises an exception
942 # if the repo has uncommitted changes. Wrap it to also check if
943 # largefiles were changed. This is used by bisect, backout and fetch.
944 def overridebailifchanged(orig, repo):
945 orig(repo)
946 repo.lfstatus = True
947 s = repo.status()
948 repo.lfstatus = False
949 if s.modified or s.added or s.removed or s.deleted:
950 raise util.Abort(_('uncommitted changes'))
951
952 def overrideforget(orig, ui, repo, *pats, **opts):
953 installnormalfilesmatchfn(repo[None].manifest())
954 result = orig(ui, repo, *pats, **opts)
955 restorematchfn()
956 m = scmutil.match(repo[None], pats, opts)
957
958 try:
959 repo.lfstatus = True
960 s = repo.status(match=m, clean=True)
961 finally:
962 repo.lfstatus = False
963 forget = sorted(s.modified + s.added + s.deleted + s.clean)
964 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
965
966 for f in forget:
967 if lfutil.standin(f) not in repo.dirstate and not \
968 os.path.isdir(m.rel(lfutil.standin(f))):
969 ui.warn(_('not removing %s: file is already untracked\n')
970 % m.rel(f))
971 result = 1
972
973 for f in forget:
974 if ui.verbose or not m.exact(f):
975 ui.status(_('removing %s\n') % m.rel(f))
976
977 # Need to lock because standin files are deleted then removed from the
978 # repository and we could race in-between.
979 wlock = repo.wlock()
980 try:
981 lfdirstate = lfutil.openlfdirstate(ui, repo)
982 for f in forget:
983 if lfdirstate[f] == 'a':
984 lfdirstate.drop(f)
985 else:
986 lfdirstate.remove(f)
987 lfdirstate.write()
988 standins = [lfutil.standin(f) for f in forget]
989 for f in standins:
990 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
991 repo[None].forget(standins)
992 finally:
993 wlock.release()
994
995 return result
996
997 def _getoutgoings(repo, other, missing, addfunc):
998 """get pairs of filename and largefile hash in outgoing revisions
999 in 'missing'.
1000
1001 largefiles already existing on 'other' repository are ignored.
1002
1003 'addfunc' is invoked with each unique pair of filename and
1004 largefile hash value.
1005 """
1006 knowns = set()
1007 lfhashes = set()
1008 def dedup(fn, lfhash):
1009 k = (fn, lfhash)
1010 if k not in knowns:
1011 knowns.add(k)
1012 lfhashes.add(lfhash)
1013 lfutil.getlfilestoupload(repo, missing, dedup)
1014 if lfhashes:
1015 lfexists = basestore._openstore(repo, other).exists(lfhashes)
1016 for fn, lfhash in knowns:
1017 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1018 addfunc(fn, lfhash)
1019
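# Illustrative sketch, not part of the original changeset: _getoutgoings()
# above feeds each unique (filename, hash) pair to a caller-supplied
# callback. The hypothetical helper below shows the same
# dedup-and-callback pattern in isolation.
def _report_unique_pairs_sketch(pairs, addfunc):
    # invoke addfunc once per unique (filename, hash) pair, like the
    # dedup() closure in _getoutgoings() above
    seen = set()
    for fn, lfhash in pairs:
        if (fn, lfhash) not in seen:
            seen.add((fn, lfhash))
            addfunc(fn, lfhash)

# usage: collect results into a dict keyed by filename, as outgoinghook()
# below does in its debug branch
collected = {}
_report_unique_pairs_sketch(
    [('a.bin', 'deadbeef'), ('a.bin', 'deadbeef'), ('b.bin', 'cafe')],
    lambda fn, h: collected.setdefault(fn, []).append(h))
# collected == {'a.bin': ['deadbeef'], 'b.bin': ['cafe']}
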
1020 def outgoinghook(ui, repo, other, opts, missing):
1021 if opts.pop('large', None):
1022 lfhashes = set()
1023 if ui.debugflag:
1024 toupload = {}
1025 def addfunc(fn, lfhash):
1026 if fn not in toupload:
1027 toupload[fn] = []
1028 toupload[fn].append(lfhash)
1029 lfhashes.add(lfhash)
1030 def showhashes(fn):
1031 for lfhash in sorted(toupload[fn]):
1032 ui.debug(' %s\n' % (lfhash))
1033 else:
1034 toupload = set()
1035 def addfunc(fn, lfhash):
1036 toupload.add(fn)
1037 lfhashes.add(lfhash)
1038 def showhashes(fn):
1039 pass
1040 _getoutgoings(repo, other, missing, addfunc)
1041
1042 if not toupload:
1043 ui.status(_('largefiles: no files to upload\n'))
1044 else:
1045 ui.status(_('largefiles to upload (%d entities):\n')
1046 % (len(lfhashes)))
1047 for file in sorted(toupload):
1048 ui.status(lfutil.splitstandin(file) + '\n')
1049 showhashes(file)
1050 ui.status('\n')
1051
1052 def summaryremotehook(ui, repo, opts, changes):
1053 largeopt = opts.get('large', False)
1054 if changes is None:
1055 if largeopt:
1056 return (False, True) # only outgoing check is needed
1057 else:
1058 return (False, False)
1059 elif largeopt:
1060 url, branch, peer, outgoing = changes[1]
1061 if peer is None:
1062 # i18n: column positioning for "hg summary"
1063 ui.status(_('largefiles: (no remote repo)\n'))
1064 return
1065
1066 toupload = set()
1067 lfhashes = set()
1068 def addfunc(fn, lfhash):
1069 toupload.add(fn)
1070 lfhashes.add(lfhash)
1071 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1072
1073 if not toupload:
1074 # i18n: column positioning for "hg summary"
1075 ui.status(_('largefiles: (no files to upload)\n'))
1076 else:
1077 # i18n: column positioning for "hg summary"
1078 ui.status(_('largefiles: %d entities for %d files to upload\n')
1079 % (len(lfhashes), len(toupload)))
1080
1081 def overridesummary(orig, ui, repo, *pats, **opts):
1082 try:
1083 repo.lfstatus = True
1084 orig(ui, repo, *pats, **opts)
1085 finally:
1086 repo.lfstatus = False
1087
1088 def scmutiladdremove(orig, repo, matcher, prefix, opts={}, dry_run=None,
1089 similarity=None):
1090 if not lfutil.islfilesrepo(repo):
1091 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1092 # Get the list of missing largefiles so we can remove them
1093 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1094 unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
1095 False, False, False)
1096
1097 # Call into the normal remove code, but leave the removal of the standin to
1098 # be handled by the original addremove. Monkey patching here makes sure
1099 # we don't remove the standin in the largefiles code, preventing a very
1100 # confused state later.
1101 if s.deleted:
1102 m = [repo.wjoin(f) for f in s.deleted]
1103 removelargefiles(repo.ui, repo, True, *m, **opts)
1104 # Call into the normal add code, and any files that *should* be added as
1105 # largefiles will be
1106 addlargefiles(repo.ui, repo, matcher, **opts)
1107 # Now that we've handled largefiles, hand off to the original addremove
1108 # function to take care of the rest. Make sure it doesn't do anything with
1109 # largefiles by passing a matcher that will ignore them.
1110 matcher = composenormalfilematcher(matcher, repo[None].manifest())
1111 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1112
1113 # Calling purge with --all will cause the largefiles to be deleted.
1114 # Override repo.status to prevent this from happening.
1115 def overridepurge(orig, ui, repo, *dirs, **opts):
1116 # XXX Monkey patching a repoview will not work. The assigned attribute will
1117 # be set on the unfiltered repo, but we will only lookup attributes in the
1118 # unfiltered repo if the lookup in the repoview object itself fails. As the
1119 # monkey patched method exists on the repoview class the lookup will not
1120 # fail. As a result, the original version will shadow the monkey patched
1121 # one, defeating the monkey patch.
1122 #
1123 # As a work around we use an unfiltered repo here. We should do something
1124 # cleaner instead.
1125 repo = repo.unfiltered()
1126 oldstatus = repo.status
1127 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1128 clean=False, unknown=False, listsubrepos=False):
1129 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1130 listsubrepos)
1131 lfdirstate = lfutil.openlfdirstate(ui, repo)
1132 unknown = [f for f in r.unknown if lfdirstate[f] == '?']
1133 ignored = [f for f in r.ignored if lfdirstate[f] == '?']
1134 return scmutil.status(r.modified, r.added, r.removed, r.deleted,
1135 unknown, ignored, r.clean)
1136 repo.status = overridestatus
1137 orig(ui, repo, *dirs, **opts)
1138 repo.status = oldstatus
1139 def overriderollback(orig, ui, repo, **opts):
1140 wlock = repo.wlock()
1141 try:
1142 before = repo.dirstate.parents()
1143 orphans = set(f for f in repo.dirstate
1144 if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
1145 result = orig(ui, repo, **opts)
1146 after = repo.dirstate.parents()
1147 if before == after:
1148 return result # no need to restore standins
1149
1150 pctx = repo['.']
1151 for f in repo.dirstate:
1152 if lfutil.isstandin(f):
1153 orphans.discard(f)
1154 if repo.dirstate[f] == 'r':
1155 repo.wvfs.unlinkpath(f, ignoremissing=True)
1156 elif f in pctx:
1157 fctx = pctx[f]
1158 repo.wwrite(f, fctx.data(), fctx.flags())
1159 else:
1160 # content of standin is not so important in 'a',
1161 # 'm' or 'n' (coming from the 2nd parent) cases
1162 lfutil.writestandin(repo, f, '', False)
1163 for standin in orphans:
1164 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1165
1166 lfdirstate = lfutil.openlfdirstate(ui, repo)
1167 orphans = set(lfdirstate)
1168 lfiles = lfutil.listlfiles(repo)
1169 for file in lfiles:
1170 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1171 orphans.discard(file)
1172 for lfile in orphans:
1173 lfdirstate.drop(lfile)
1174 lfdirstate.write()
1175 finally:
1176 wlock.release()
1177 return result
1178
1179 def overridetransplant(orig, ui, repo, *revs, **opts):
1180 resuming = opts.get('continue')
1181 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1182 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1183 try:
1184 result = orig(ui, repo, *revs, **opts)
1185 finally:
1186 repo._lfstatuswriters.pop()
1187 repo._lfcommithooks.pop()
1188 return result
1189
1190 def overridecat(orig, ui, repo, file1, *pats, **opts):
1191 ctx = scmutil.revsingle(repo, opts.get('rev'))
1192 err = 1
1193 notbad = set()
1194 m = scmutil.match(ctx, (file1,) + pats, opts)
1195 origmatchfn = m.matchfn
1196 def lfmatchfn(f):
1197 if origmatchfn(f):
1198 return True
1199 lf = lfutil.splitstandin(f)
1200 if lf is None:
1201 return False
1202 notbad.add(lf)
1203 return origmatchfn(lf)
1204 m.matchfn = lfmatchfn
1205 origbadfn = m.bad
1206 def lfbadfn(f, msg):
1207 if not f in notbad:
1208 origbadfn(f, msg)
1209 m.bad = lfbadfn
1210 for f in ctx.walk(m):
1211 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1212 pathname=f)
1213 lf = lfutil.splitstandin(f)
1214 if lf is None or origmatchfn(f):
1215 # duplicating unreachable code from commands.cat
1216 data = ctx[f].data()
1217 if opts.get('decode'):
1218 data = repo.wwritedata(f, data)
1219 fp.write(data)
1220 else:
1221 hash = lfutil.readstandin(repo, lf, ctx.rev())
1222 if not lfutil.inusercache(repo.ui, hash):
1223 store = basestore._openstore(repo)
1224 success, missing = store.get([(lf, hash)])
1225 if len(success) != 1:
1226 raise util.Abort(
1227 _('largefile %s is not in cache and could not be '
1228 'downloaded') % lf)
1229 path = lfutil.usercachepath(repo.ui, hash)
1230 fpin = open(path, "rb")
1231 for chunk in util.filechunkiter(fpin, 128 * 1024):
1232 fp.write(chunk)
1233 fpin.close()
1234 fp.close()
1235 err = 0
1236 return err
1237
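# Illustrative sketch, not part of the original changeset: overridecat()
# above streams a cached largefile to the output in 128 KB chunks via
# util.filechunkiter. The hypothetical helper below shows the same pattern
# using only the standard library.
def _copy_in_chunks_sketch(srcpath, dstfile, chunksize=128 * 1024):
    # read the source in fixed-size chunks and write them out, so a large
    # file never has to be loaded into memory at once
    src = open(srcpath, 'rb')
    try:
        while True:
            chunk = src.read(chunksize)
            if not chunk:
                break
            dstfile.write(chunk)
    finally:
        src.close()
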
1238 def mergeupdate(orig, repo, node, branchmerge, force, partial,
1239 *args, **kwargs):
1240 wlock = repo.wlock()
1241 try:
1242 # branch | | |
1243 # merge | force | partial | action
1244 # -------+-------+---------+--------------
1245 # x | x | x | linear-merge
1246 # o | x | x | branch-merge
1247 # x | o | x | overwrite (as clean update)
1248 # o | o | x | force-branch-merge (*1)
1249 # x | x | o | (*)
1250 # o | x | o | (*)
1251 # x | o | o | overwrite (as revert)
1252 # o | o | o | (*)
1253 #
1254 # (*) don't care
1255 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1256
1257 linearmerge = not branchmerge and not force and not partial
1258
1259 if linearmerge or (branchmerge and force and not partial):
1260 # update standins for linear-merge or force-branch-merge,
1261 # because largefiles in the working directory may be modified
1262 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1263 unsure, s = lfdirstate.status(match_.always(repo.root,
1264 repo.getcwd()),
1265 [], False, False, False)
1266 for lfile in unsure + s.modified + s.added:
1267 lfutil.updatestandin(repo, lfutil.standin(lfile))
1268
1269 if linearmerge:
1270 # Only call updatelfiles on the standins that have changed
1271 # to save time
1272 oldstandins = lfutil.getstandinsstate(repo)
1273
1274 result = orig(repo, node, branchmerge, force, partial, *args, **kwargs)
1275
1276 filelist = None
1277 if linearmerge:
1278 newstandins = lfutil.getstandinsstate(repo)
1279 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1280
1281 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1282 normallookup=partial)
1283
1284 return result
1285 finally:
1286 wlock.release()
1287
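# Illustrative sketch, not part of the original changeset: the table inside
# mergeupdate() above maps the (branchmerge, force, partial) flags to an
# update mode. A hypothetical helper encoding just the rows that matter to
# this wrapper; rows marked "(*)" are "don't care" and map to None.
def _updatemode_sketch(branchmerge, force, partial):
    if partial:
        return 'overwrite (as revert)' if (not branchmerge and force) else None
    if branchmerge and force:
        return 'force-branch-merge'
    if branchmerge:
        return 'branch-merge'
    if force:
        return 'overwrite (as clean update)'
    return 'linear-merge'

# e.g. _updatemode_sketch(False, False, False) == 'linear-merge'
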
1288 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1289 result = orig(repo, files, *args, **kwargs)
1290
1291 filelist = [lfutil.splitstandin(f) for f in files if lfutil.isstandin(f)]
1292 if filelist:
1293 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1294 printmessage=False, normallookup=True)
1295
1296 return result
@@ -1,1169 +1,1169 b''
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 import struct
9
10 from node import nullid, nullrev, hex, bin
11 from i18n import _
12 from mercurial import obsolete
13 import error as errormod, util, filemerge, copies, subrepo, worker
14 import errno, os, shutil
15
16 _pack = struct.pack
17 _unpack = struct.unpack
18
19 def _droponode(data):
20 # used for compatibility for v1
21 bits = data.split('\0')
22 bits = bits[:-2] + bits[-1:]
23 return '\0'.join(bits)
24
25 class mergestate(object):
26 '''track 3-way merge state of individual files
27
28 it is stored on disk when needed. Two files are used, one with an old
29 format, one with a new format. Both contain similar data, but the new
30 format can store new kinds of fields.
31
32 The current new format is a list of arbitrary records of the form:
33
34 [type][length][content]
35
36 Type is a single character, length is a 4-byte integer, content is an
37 arbitrary sequence of bytes of length `length`.
38
39 Type should be a letter. Capital letters are mandatory records; Mercurial
40 should abort if they are unknown. Lower case records can be safely ignored.
41
42 Currently known records:
43
44 L: the node of the "local" part of the merge (hexified version)
45 O: the node of the "other" part of the merge (hexified version)
46 F: a file-to-be-merged entry
47 '''
48 statepathv1 = 'merge/state'
49 statepathv2 = 'merge/state2'
50
51 def __init__(self, repo):
52 self._repo = repo
53 self._dirty = False
54 self._read()
55
56 def reset(self, node=None, other=None):
57 self._state = {}
58 self._local = None
59 self._other = None
60 if node:
61 self._local = node
62 self._other = other
63 shutil.rmtree(self._repo.join('merge'), True)
64 self._dirty = False
65
66 def _read(self):
67 """Analyse each record content to restore a serialized state from disk
68
69 This function processes "record" entries produced by the de-serialization
70 of the on-disk file.
71 """
72 self._state = {}
73 self._local = None
74 self._other = None
75 records = self._readrecords()
76 for rtype, record in records:
77 if rtype == 'L':
78 self._local = bin(record)
79 elif rtype == 'O':
80 self._other = bin(record)
81 elif rtype == 'F':
82 bits = record.split('\0')
83 self._state[bits[0]] = bits[1:]
84 elif not rtype.islower():
85 raise util.Abort(_('unsupported merge state record: %s')
86 % rtype)
87 self._dirty = False
88
89 def _readrecords(self):
90 """Read merge state from disk and return a list of record (TYPE, data)
91
92 We read data from both v1 and v2 files and decide which one to use.
93
94 V1 has been used by versions prior to 2.9.1 and contains less data than
95 v2. We read both versions and check whether any data in v2 contradicts
96 v1. If there is no contradiction, we can safely assume that both v1
97 and v2 were written at the same time and use the extra data in v2. If
98 there is a contradiction, we ignore the v2 content, as we assume an old
99 version of Mercurial has overwritten the mergestate file and left an old
100 v2 file around.
101
102 returns list of record [(TYPE, data), ...]"""
103 v1records = self._readrecordsv1()
104 v2records = self._readrecordsv2()
105 oldv2 = set() # old format version of v2 record
106 for rec in v2records:
107 if rec[0] == 'L':
108 oldv2.add(rec)
109 elif rec[0] == 'F':
110 # drop the onode data (not contained in v1)
111 oldv2.add(('F', _droponode(rec[1])))
112 for rec in v1records:
113 if rec not in oldv2:
114 # v1 file is newer than v2 file, use it
115 # we have to infer the "other" changeset of the merge
116 # we cannot do better than that with v1 of the format
117 mctx = self._repo[None].parents()[-1]
118 v1records.append(('O', mctx.hex()))
119 # add placeholder "other" file node information
120 # nobody is using it yet so we do not need to fetch the data
121 # if mctx was wrong, `mctx[bits[-2]]` may fail.
122 for idx, r in enumerate(v1records):
123 if r[0] == 'F':
124 bits = r[1].split('\0')
125 bits.insert(-2, '')
126 v1records[idx] = (r[0], '\0'.join(bits))
127 return v1records
128 else:
129 return v2records
130
131 def _readrecordsv1(self):
132 """read on disk merge state for version 1 file
133
134 returns list of record [(TYPE, data), ...]
135
136 Note: the "F" data from this file are one entry short
137 (no "other file node" entry)
138 """
139 records = []
140 try:
141 f = self._repo.opener(self.statepathv1)
142 for i, l in enumerate(f):
143 if i == 0:
144 records.append(('L', l[:-1]))
145 else:
146 records.append(('F', l[:-1]))
147 f.close()
148 except IOError, err:
149 if err.errno != errno.ENOENT:
150 raise
151 return records
152
153 def _readrecordsv2(self):
154 """read on disk merge state for version 2 file
155
156 returns list of record [(TYPE, data), ...]
157 """
158 records = []
159 try:
160 f = self._repo.opener(self.statepathv2)
161 data = f.read()
162 off = 0
163 end = len(data)
164 while off < end:
165 rtype = data[off]
166 off += 1
167 length = _unpack('>I', data[off:(off + 4)])[0]
168 off += 4
169 record = data[off:(off + length)]
170 off += length
171 records.append((rtype, record))
172 f.close()
173 except IOError, err:
174 if err.errno != errno.ENOENT:
175 raise
176 return records
177
178 def active(self):
179 """Whether mergestate is active.
180
181 Returns True if there appears to be mergestate. This is a rough proxy
182 for "is a merge in progress."
183 """
184 # Check local variables before looking at filesystem for performance
185 # reasons.
186 return bool(self._local) or bool(self._state) or \
187 self._repo.opener.exists(self.statepathv1) or \
188 self._repo.opener.exists(self.statepathv2)
189
190 def commit(self):
191 """Write current state on disk (if necessary)"""
192 if self._dirty:
193 records = []
194 records.append(('L', hex(self._local)))
195 records.append(('O', hex(self._other)))
196 for d, v in self._state.iteritems():
197 records.append(('F', '\0'.join([d] + v)))
198 self._writerecords(records)
199 self._dirty = False
200
201 def _writerecords(self, records):
202 """Write current state on disk (both v1 and v2)"""
203 self._writerecordsv1(records)
204 self._writerecordsv2(records)
205
206 def _writerecordsv1(self, records):
207 """Write current state on disk in a version 1 file"""
208 f = self._repo.opener(self.statepathv1, 'w')
209 irecords = iter(records)
210 lrecords = irecords.next()
211 assert lrecords[0] == 'L'
212 f.write(hex(self._local) + '\n')
213 for rtype, data in irecords:
214 if rtype == 'F':
215 f.write('%s\n' % _droponode(data))
216 f.close()
217
218 def _writerecordsv2(self, records):
219 """Write current state on disk in a version 2 file"""
220 f = self._repo.opener(self.statepathv2, 'w')
221 for key, data in records:
222 assert len(key) == 1
223 format = '>sI%is' % len(data)
224 f.write(_pack(format, key, len(data), data))
225 f.close()
226
227 def add(self, fcl, fco, fca, fd):
228 """add a new (potentially?) conflicting file to the merge state
229 fcl: file context for local,
230 fco: file context for remote,
231 fca: file context for ancestors,
232 fd: file path of the resulting merge.
233
234 note: also write the local version to the `.hg/merge` directory.
235 """
236 hash = util.sha1(fcl.path()).hexdigest()
237 self._repo.opener.write('merge/' + hash, fcl.data())
238 self._state[fd] = ['u', hash, fcl.path(),
239 fca.path(), hex(fca.filenode()),
240 fco.path(), hex(fco.filenode()),
241 fcl.flags()]
242 self._dirty = True
243
244 def __contains__(self, dfile):
245 return dfile in self._state
246
247 def __getitem__(self, dfile):
248 return self._state[dfile][0]
249
250 def __iter__(self):
251 return iter(sorted(self._state))
252
253 def files(self):
254 return self._state.keys()
255
256 def mark(self, dfile, state):
257 self._state[dfile][0] = state
258 self._dirty = True
259
260 def unresolved(self):
261 """Obtain the paths of unresolved files."""
262
263 for f, entry in self._state.items():
264 if entry[0] == 'u':
265 yield f
266
267 def resolve(self, dfile, wctx, labels=None):
268 """rerun merge process for file path `dfile`"""
269 if self[dfile] == 'r':
270 return 0
271 stateentry = self._state[dfile]
272 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
273 octx = self._repo[self._other]
274 fcd = wctx[dfile]
275 fco = octx[ofile]
276 fca = self._repo.filectx(afile, fileid=anode)
277 # "premerge" x flags
278 flo = fco.flags()
279 fla = fca.flags()
280 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
281 if fca.node() == nullid:
282 self._repo.ui.warn(_('warning: cannot merge flags for %s\n') %
283 afile)
284 elif flags == fla:
285 flags = flo
286 # restore local
287 f = self._repo.opener('merge/' + hash)
288 self._repo.wwrite(dfile, f.read(), flags)
289 f.close()
290 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca,
291 labels=labels)
292 if r is None:
293 # no real conflict
294 del self._state[dfile]
295 self._dirty = True
296 elif not r:
297 self.mark(dfile, 'r')
298 return r
299
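# Illustrative sketch, not part of the original changeset: a v2 merge-state
# record, as described in the mergestate docstring and written by
# _writerecordsv2() above, is a one-byte type, a big-endian 4-byte length,
# and the payload. Hypothetical helpers that round-trip one record:
import struct

def _packrecord_sketch(rtype, data):
    # same [type][length][content] layout as _writerecordsv2()
    return struct.pack('>sI%is' % len(data), rtype, len(data), data)

def _unpackrecord_sketch(buf, off=0):
    # inverse of the read loop in _readrecordsv2(); returns the record and
    # the offset where the next record starts
    rtype = buf[off]
    length = struct.unpack('>I', buf[off + 1:off + 5])[0]
    data = buf[off + 5:off + 5 + length]
    return (rtype, data), off + 5 + length

# round trip:
# _unpackrecord_sketch(_packrecord_sketch('L', '0' * 40))[0] == ('L', '0' * 40)
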
300 def _checkunknownfile(repo, wctx, mctx, f):
301 return (os.path.isfile(repo.wjoin(f))
302 and repo.wopener.audit.check(f)
303 and repo.dirstate.normalize(f) not in repo.dirstate
304 and mctx[f].cmp(wctx[f]))
305
306 def _forgetremoved(wctx, mctx, branchmerge):
307 """
308 Forget removed files
309
310 If we're jumping between revisions (as opposed to merging), and if
311 neither the working directory nor the target rev has the file,
312 then we need to remove it from the dirstate, to prevent the
313 dirstate from listing the file when it is no longer in the
314 manifest.
315
316 If we're merging, and the other revision has removed a file
317 that is not present in the working directory, we need to mark it
318 as removed.
319 """
320
321 actions = {}
322 m = 'f'
323 if branchmerge:
324 m = 'r'
325 for f in wctx.deleted():
326 if f not in mctx:
327 actions[f] = m, None, "forget deleted"
328
329 if not branchmerge:
330 for f in wctx.removed():
331 if f not in mctx:
332 actions[f] = 'f', None, "forget removed"
333
334 return actions
335
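# Illustrative sketch, not part of the original changeset: _forgetremoved()
# above emits 'f' (forget) actions for a plain update and 'r' (remove)
# actions for a branch merge. A hypothetical, context-free version working
# on plain sets of paths:
def _forgetremoved_sketch(deleted, removed, target_files, branchmerge):
    # deleted files missing from the target get 'r' on a branch merge and
    # 'f' otherwise; removed files missing from the target are only
    # forgotten on a plain update
    actions = {}
    m = 'r' if branchmerge else 'f'
    for f in deleted:
        if f not in target_files:
            actions[f] = (m, None, "forget deleted")
    if not branchmerge:
        for f in removed:
            if f not in target_files:
                actions[f] = ('f', None, "forget removed")
    return actions

# e.g. _forgetremoved_sketch({'gone.txt'}, set(), set(), branchmerge=False)
#      == {'gone.txt': ('f', None, 'forget deleted')}
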
336 def _checkcollision(repo, wmf, actions):
337 # build provisional merged manifest up
338 pmmf = set(wmf)
339
340 if actions:
341 # k, dr, e and rd are no-op
342 for m in 'a', 'f', 'g', 'cd', 'dc':
343 for f, args, msg in actions[m]:
344 pmmf.add(f)
345 for f, args, msg in actions['r']:
346 pmmf.discard(f)
347 for f, args, msg in actions['dm']:
348 f2, flags = args
349 pmmf.discard(f2)
350 pmmf.add(f)
351 for f, args, msg in actions['dg']:
352 pmmf.add(f)
353 for f, args, msg in actions['m']:
354 f1, f2, fa, move, anc = args
355 if move:
356 pmmf.discard(f1)
357 pmmf.add(f)
358
359 # check case-folding collision in provisional merged manifest
360 foldmap = {}
361 for f in sorted(pmmf):
362 fold = util.normcase(f)
363 if fold in foldmap:
364 raise util.Abort(_("case-folding collision between %s and %s")
365 % (f, foldmap[fold]))
366 foldmap[fold] = f
367
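# Illustrative sketch, not part of the original changeset: _checkcollision()
# above detects case-folding collisions by mapping every path through
# util.normcase. The hypothetical helper below does the same with
# str.lower() standing in for util.normcase.
def _findcasecollision_sketch(paths):
    # return the first pair of paths that would collide on a
    # case-insensitive filesystem, or None if there is no collision
    foldmap = {}
    for f in sorted(paths):
        fold = f.lower()
        if fold in foldmap:
            return (f, foldmap[fold])
        foldmap[fold] = f
    return None

# e.g. _findcasecollision_sketch(['README', 'readme', 'setup.py'])
#      == ('readme', 'README')
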
368 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
369 acceptremote, followcopies):
370 """
371 Merge p1 and p2 with ancestor pa and generate merge action list
372
373 branchmerge and force are as passed in to update
374 partial = function to filter file lists
375 acceptremote = accept the incoming changes without prompting
376 """
377
378 copy, movewithdir, diverge, renamedelete = {}, {}, {}, {}
379
380 # manifests fetched in order are going to be faster, so prime the caches
381 [x.manifest() for x in
382 sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]
383
384 if followcopies:
385 ret = copies.mergecopies(repo, wctx, p2, pa)
386 copy, movewithdir, diverge, renamedelete = ret
387
388 repo.ui.note(_("resolving manifests\n"))
389 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
390 % (bool(branchmerge), bool(force), bool(partial)))
391 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
392
393 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
394 copied = set(copy.values())
395 copied.update(movewithdir.values())
396
397 if '.hgsubstate' in m1:
398 # check whether sub state is modified
399 for s in sorted(wctx.substate):
400 if wctx.sub(s).dirty():
401 m1['.hgsubstate'] += '+'
402 break
403
404 aborts = []
405 # Compare manifests
406 diff = m1.diff(m2)
407
408 actions = {}
409 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
410 if partial and not partial(f):
411 continue
412 if n1 and n2: # file exists on both local and remote side
413 if f not in ma:
414 fa = copy.get(f, None)
415 if fa is not None:
416 actions[f] = ('m', (f, f, fa, False, pa.node()),
417 "both renamed from " + fa)
418 else:
419 actions[f] = ('m', (f, f, None, False, pa.node()),
420 "both created")
421 else:
422 a = ma[f]
423 fla = ma.flags(f)
424 nol = 'l' not in fl1 + fl2 + fla
425 if n2 == a and fl2 == fla:
426 actions[f] = ('k' , (), "remote unchanged")
427 elif n1 == a and fl1 == fla: # local unchanged - use remote
428 if n1 == n2: # optimization: keep local content
429 actions[f] = ('e', (fl2,), "update permissions")
430 else:
431 actions[f] = ('g', (fl2,), "remote is newer")
432 elif nol and n2 == a: # remote only changed 'x'
433 actions[f] = ('e', (fl2,), "update permissions")
434 elif nol and n1 == a: # local only changed 'x'
435 actions[f] = ('g', (fl1,), "remote is newer")
436 else: # both changed something
437 actions[f] = ('m', (f, f, f, False, pa.node()),
438 "versions differ")
439 elif n1: # file exists only on local side
440 if f in copied:
441 pass # we'll deal with it on m2 side
442 elif f in movewithdir: # directory rename, move local
443 f2 = movewithdir[f]
444 if f2 in m2:
445 actions[f2] = ('m', (f, f2, None, True, pa.node()),
446 "remote directory rename, both created")
446 "remote directory rename, both created")
447 else:
447 else:
448 actions[f2] = ('dm', (f, fl1),
448 actions[f2] = ('dm', (f, fl1),
449 "remote directory rename - move from " + f)
449 "remote directory rename - move from " + f)
450 elif f in copy:
450 elif f in copy:
451 f2 = copy[f]
451 f2 = copy[f]
452 actions[f] = ('m', (f, f2, f2, False, pa.node()),
452 actions[f] = ('m', (f, f2, f2, False, pa.node()),
453 "local copied/moved from " + f2)
453 "local copied/moved from " + f2)
454 elif f in ma: # clean, a different, no remote
454 elif f in ma: # clean, a different, no remote
455 if n1 != ma[f]:
455 if n1 != ma[f]:
456 if acceptremote:
456 if acceptremote:
457 actions[f] = ('r', None, "remote delete")
457 actions[f] = ('r', None, "remote delete")
458 else:
458 else:
459 actions[f] = ('cd', None, "prompt changed/deleted")
459 actions[f] = ('cd', None, "prompt changed/deleted")
460 elif n1[20:] == 'a':
460 elif n1[20:] == 'a':
461 # This extra 'a' is added by working copy manifest to mark
461 # This extra 'a' is added by working copy manifest to mark
462 # the file as locally added. We should forget it instead of
462 # the file as locally added. We should forget it instead of
463 # deleting it.
463 # deleting it.
464 actions[f] = ('f', None, "remote deleted")
464 actions[f] = ('f', None, "remote deleted")
465 else:
465 else:
466 actions[f] = ('r', None, "other deleted")
466 actions[f] = ('r', None, "other deleted")
467 elif n2: # file exists only on remote side
467 elif n2: # file exists only on remote side
468 if f in copied:
468 if f in copied:
469 pass # we'll deal with it on m1 side
469 pass # we'll deal with it on m1 side
470 elif f in movewithdir:
470 elif f in movewithdir:
471 f2 = movewithdir[f]
471 f2 = movewithdir[f]
472 if f2 in m1:
472 if f2 in m1:
473 actions[f2] = ('m', (f2, f, None, False, pa.node()),
473 actions[f2] = ('m', (f2, f, None, False, pa.node()),
474 "local directory rename, both created")
474 "local directory rename, both created")
475 else:
475 else:
476 actions[f2] = ('dg', (f, fl2),
476 actions[f2] = ('dg', (f, fl2),
477 "local directory rename - get from " + f)
477 "local directory rename - get from " + f)
478 elif f in copy:
478 elif f in copy:
479 f2 = copy[f]
479 f2 = copy[f]
480 if f2 in m2:
480 if f2 in m2:
481 actions[f] = ('m', (f2, f, f2, False, pa.node()),
481 actions[f] = ('m', (f2, f, f2, False, pa.node()),
482 "remote copied from " + f2)
482 "remote copied from " + f2)
483 else:
483 else:
484 actions[f] = ('m', (f2, f, f2, True, pa.node()),
484 actions[f] = ('m', (f2, f, f2, True, pa.node()),
485 "remote moved from " + f2)
485 "remote moved from " + f2)
486 elif f not in ma:
486 elif f not in ma:
487 # local unknown, remote created: the logic is described by the
487 # local unknown, remote created: the logic is described by the
488 # following table:
488 # following table:
489 #
489 #
490 # force branchmerge different | action
490 # force branchmerge different | action
491 # n * n | get
491 # n * n | get
492 # n * y | abort
492 # n * y | abort
493 # y n * | get
493 # y n * | get
494 # y y n | get
494 # y y n | get
495 # y y y | merge
495 # y y y | merge
496 #
496 #
497 # Checking whether the files are different is expensive, so we
497 # Checking whether the files are different is expensive, so we
498 # don't do that when we can avoid it.
498 # don't do that when we can avoid it.
499 if force and not branchmerge:
499 if force and not branchmerge:
500 actions[f] = ('g', (fl2,), "remote created")
500 actions[f] = ('g', (fl2,), "remote created")
501 else:
501 else:
502 different = _checkunknownfile(repo, wctx, p2, f)
502 different = _checkunknownfile(repo, wctx, p2, f)
503 if force and branchmerge and different:
503 if force and branchmerge and different:
504 actions[f] = ('m', (f, f, None, False, pa.node()),
504 actions[f] = ('m', (f, f, None, False, pa.node()),
505 "remote differs from untracked local")
505 "remote differs from untracked local")
506 elif not force and different:
506 elif not force and different:
507 aborts.append((f, 'ud'))
507 aborts.append((f, 'ud'))
508 else:
508 else:
509 actions[f] = ('g', (fl2,), "remote created")
509 actions[f] = ('g', (fl2,), "remote created")
510 elif n2 != ma[f]:
510 elif n2 != ma[f]:
511 different = _checkunknownfile(repo, wctx, p2, f)
511 different = _checkunknownfile(repo, wctx, p2, f)
512 if not force and different:
512 if not force and different:
513 aborts.append((f, 'ud'))
513 aborts.append((f, 'ud'))
514 else:
514 else:
515 if acceptremote:
515 if acceptremote:
516 actions[f] = ('g', (fl2,), "remote recreating")
516 actions[f] = ('g', (fl2,), "remote recreating")
517 else:
517 else:
518 actions[f] = ('dc', (fl2,), "prompt deleted/changed")
518 actions[f] = ('dc', (fl2,), "prompt deleted/changed")
519
519
520 for f, m in sorted(aborts):
520 for f, m in sorted(aborts):
521 if m == 'ud':
521 if m == 'ud':
522 repo.ui.warn(_("%s: untracked file differs\n") % f)
522 repo.ui.warn(_("%s: untracked file differs\n") % f)
523 else: assert False, m
523 else: assert False, m
524 if aborts:
524 if aborts:
525 raise util.Abort(_("untracked files in working directory differ "
525 raise util.Abort(_("untracked files in working directory differ "
526 "from files in requested revision"))
526 "from files in requested revision"))
527
527
528 return actions, diverge, renamedelete
528 return actions, diverge, renamedelete
529
529
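The dict built by manifestmerge() maps each file name to a (action code, args, message) triple, and the shape of args depends on the code ('g' carries a flags string, 'm' the (f1, f2, fa, move, anc) merge spec, 'r' nothing, and so on). A minimal, made-up illustration of that shape (the file names and the ancestor-node placeholder are invented; this is not part of merge.py):

example_actions = {
    'a.txt': ('g', ('',), "remote created"),           # get: args is the flags string
    'b.txt': ('m', ('b.txt', 'b.txt', 'b.txt', False, '<ancestor node>'),
              "versions differ"),                      # merge: (f1, f2, fa, move, anc)
    'c.txt': ('r', None, "other deleted"),             # remove from the working directory
    'd.txt': ('k', (), "remote unchanged"),            # keep: no-op
}
for f, (m, args, msg) in sorted(example_actions.iteritems()):
    print ' %s: %s -> %s' % (f, msg, m)                # same form as the debug output below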
530 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
530 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
531 """Resolves false conflicts where the nodeid changed but the content
531 """Resolves false conflicts where the nodeid changed but the content
532 remained the same."""
532 remained the same."""
533
533
534 for f, (m, args, msg) in actions.items():
534 for f, (m, args, msg) in actions.items():
535 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
535 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
536 # local did change but ended up with same content
536 # local did change but ended up with same content
537 actions[f] = 'r', None, "prompt same"
537 actions[f] = 'r', None, "prompt same"
538 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
538 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
539 # remote did change but ended up with same content
539 # remote did change but ended up with same content
540 del actions[f] # don't get = keep local deleted
540 del actions[f] # don't get = keep local deleted
541
541
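A self-contained toy model of the 'cd' branch above (fakefilectx, the file name, and the contents are invented stand-ins for real file contexts; not part of merge.py): when the locally changed file still matches the ancestor's content, the apparent change/delete conflict is downgraded to a plain remove.

class fakefilectx(object):
    '''Stub exposing only the cmp() method _resolvetrivial relies on.'''
    def __init__(self, data):
        self._data = data
    def cmp(self, other):                   # True when contents differ, like filectx.cmp
        return self._data != other._data

wctx = {'a.txt': fakefilectx('unchanged text\n')}
ancestor = {'a.txt': fakefilectx('unchanged text\n')}
actions = {'a.txt': ('cd', None, "prompt changed/deleted")}

for f, (m, args, msg) in actions.items():
    if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
        actions[f] = 'r', None, "prompt same"
print actions['a.txt']                      # ('r', None, 'prompt same')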
542 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force, partial,
542 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force, partial,
543 acceptremote, followcopies):
543 acceptremote, followcopies):
544 "Calculate the actions needed to merge mctx into wctx using ancestors"
544 "Calculate the actions needed to merge mctx into wctx using ancestors"
545
545
546 if len(ancestors) == 1: # default
546 if len(ancestors) == 1: # default
547 actions, diverge, renamedelete = manifestmerge(
547 actions, diverge, renamedelete = manifestmerge(
548 repo, wctx, mctx, ancestors[0], branchmerge, force, partial,
548 repo, wctx, mctx, ancestors[0], branchmerge, force, partial,
549 acceptremote, followcopies)
549 acceptremote, followcopies)
550
550
551 else: # only when merge.preferancestor=* - the default
551 else: # only when merge.preferancestor=* - the default
552 repo.ui.note(
552 repo.ui.note(
553 _("note: merging %s and %s using bids from ancestors %s\n") %
553 _("note: merging %s and %s using bids from ancestors %s\n") %
554 (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))
554 (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))
555
555
556 # Call for bids
556 # Call for bids
557 fbids = {} # mapping filename to bids (action method to list of actions)
557 fbids = {} # mapping filename to bids (action method to list of actions)
558 diverge, renamedelete = None, None
558 diverge, renamedelete = None, None
559 for ancestor in ancestors:
559 for ancestor in ancestors:
560 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
560 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
561 actions, diverge1, renamedelete1 = manifestmerge(
561 actions, diverge1, renamedelete1 = manifestmerge(
562 repo, wctx, mctx, ancestor, branchmerge, force, partial,
562 repo, wctx, mctx, ancestor, branchmerge, force, partial,
563 acceptremote, followcopies)
563 acceptremote, followcopies)
564 if diverge is None: # and renamedelete is None.
564 if diverge is None: # and renamedelete is None.
565 # Arbitrarily pick warnings from first iteration
565 # Arbitrarily pick warnings from first iteration
566 diverge = diverge1
566 diverge = diverge1
567 renamedelete = renamedelete1
567 renamedelete = renamedelete1
568 for f, a in sorted(actions.iteritems()):
568 for f, a in sorted(actions.iteritems()):
569 m, args, msg = a
569 m, args, msg = a
570 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
570 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
571 if f in fbids:
571 if f in fbids:
572 d = fbids[f]
572 d = fbids[f]
573 if m in d:
573 if m in d:
574 d[m].append(a)
574 d[m].append(a)
575 else:
575 else:
576 d[m] = [a]
576 d[m] = [a]
577 else:
577 else:
578 fbids[f] = {m: [a]}
578 fbids[f] = {m: [a]}
579
579
580 # Pick the best bid for each file
580 # Pick the best bid for each file
581 repo.ui.note(_('\nauction for merging merge bids\n'))
581 repo.ui.note(_('\nauction for merging merge bids\n'))
582 actions = {}
582 actions = {}
583 for f, bids in sorted(fbids.items()):
583 for f, bids in sorted(fbids.items()):
584 # bids is a mapping from action method to list of actions
584 # bids is a mapping from action method to list of actions
585 # Consensus?
585 # Consensus?
586 if len(bids) == 1: # all bids are the same kind of method
586 if len(bids) == 1: # all bids are the same kind of method
587 m, l = bids.items()[0]
587 m, l = bids.items()[0]
588 if util.all(a == l[0] for a in l[1:]): # len(bids) is > 1
588 if util.all(a == l[0] for a in l[1:]): # len(bids) is > 1
589 repo.ui.note(" %s: consensus for %s\n" % (f, m))
589 repo.ui.note(" %s: consensus for %s\n" % (f, m))
590 actions[f] = l[0]
590 actions[f] = l[0]
591 continue
591 continue
592 # If keep is an option, just do it.
592 # If keep is an option, just do it.
593 if 'k' in bids:
593 if 'k' in bids:
594 repo.ui.note(" %s: picking 'keep' action\n" % f)
594 repo.ui.note(" %s: picking 'keep' action\n" % f)
595 actions[f] = bids['k'][0]
595 actions[f] = bids['k'][0]
596 continue
596 continue
597 # If there are gets and they all agree [how could they not?], do it.
597 # If there are gets and they all agree [how could they not?], do it.
598 if 'g' in bids:
598 if 'g' in bids:
599 ga0 = bids['g'][0]
599 ga0 = bids['g'][0]
600 if util.all(a == ga0 for a in bids['g'][1:]):
600 if util.all(a == ga0 for a in bids['g'][1:]):
601 repo.ui.note(" %s: picking 'get' action\n" % f)
601 repo.ui.note(" %s: picking 'get' action\n" % f)
602 actions[f] = ga0
602 actions[f] = ga0
603 continue
603 continue
604 # TODO: Consider other simple actions such as mode changes
604 # TODO: Consider other simple actions such as mode changes
605 # Handle inefficient democrazy.
605 # Handle inefficient democrazy.
606 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
606 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
607 for m, l in sorted(bids.items()):
607 for m, l in sorted(bids.items()):
608 for _f, args, msg in l:
608 for _f, args, msg in l:
609 repo.ui.note(' %s -> %s\n' % (msg, m))
609 repo.ui.note(' %s -> %s\n' % (msg, m))
610 # Pick random action. TODO: Instead, prompt user when resolving
610 # Pick random action. TODO: Instead, prompt user when resolving
611 m, l = bids.items()[0]
611 m, l = bids.items()[0]
612 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
612 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
613 (f, m))
613 (f, m))
614 actions[f] = l[0]
614 actions[f] = l[0]
615 continue
615 continue
616 repo.ui.note(_('end of auction\n\n'))
616 repo.ui.note(_('end of auction\n\n'))
617
617
618 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
618 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
619
619
620 if wctx.rev() is None:
620 if wctx.rev() is None:
621 fractions = _forgetremoved(wctx, mctx, branchmerge)
621 fractions = _forgetremoved(wctx, mctx, branchmerge)
622 actions.update(fractions)
622 actions.update(fractions)
623
623
624 # Convert to dictionary-of-lists format
625 actionbyfile = actions
626 actions = dict((m, []) for m in 'a f g cd dc r dm dg m e k'.split())
627 for f, (m, args, msg) in actionbyfile.iteritems():
628 actions[m].append((f, args, msg))
629
630 return actions, diverge, renamedelete
624 return actions, diverge, renamedelete
631
625
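The hunk just above removes the dictionary-of-lists conversion from calculateupdates(), so it now returns the file-keyed dict exactly as built; the equivalent conversion reappears inside update() further down. A small stand-alone sketch of that conversion on made-up data (file names and actions are invented; not part of merge.py):

actionbyfile = {                            # file -> (action, args, msg), as returned above
    'a.txt': ('g', ('',), "remote created"),
    'c.txt': ('r', None, "other deleted"),
}
actions = dict((m, []) for m in 'a f g cd dc r dm dg m e k'.split())
for f, (m, args, msg) in actionbyfile.iteritems():
    actions[m].append((f, args, msg))
# actions['g'] == [('a.txt', ('',), 'remote created')]
# actions['r'] == [('c.txt', None, 'other deleted')]
# every other action key maps to an empty list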
632 def batchremove(repo, actions):
626 def batchremove(repo, actions):
633 """apply removes to the working directory
627 """apply removes to the working directory
634
628
635 yields tuples for progress updates
629 yields tuples for progress updates
636 """
630 """
637 verbose = repo.ui.verbose
631 verbose = repo.ui.verbose
638 unlink = util.unlinkpath
632 unlink = util.unlinkpath
639 wjoin = repo.wjoin
633 wjoin = repo.wjoin
640 audit = repo.wopener.audit
634 audit = repo.wopener.audit
641 i = 0
635 i = 0
642 for f, args, msg in actions:
636 for f, args, msg in actions:
643 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
637 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
644 if verbose:
638 if verbose:
645 repo.ui.note(_("removing %s\n") % f)
639 repo.ui.note(_("removing %s\n") % f)
646 audit(f)
640 audit(f)
647 try:
641 try:
648 unlink(wjoin(f), ignoremissing=True)
642 unlink(wjoin(f), ignoremissing=True)
649 except OSError, inst:
643 except OSError, inst:
650 repo.ui.warn(_("update failed to remove %s: %s!\n") %
644 repo.ui.warn(_("update failed to remove %s: %s!\n") %
651 (f, inst.strerror))
645 (f, inst.strerror))
652 if i == 100:
646 if i == 100:
653 yield i, f
647 yield i, f
654 i = 0
648 i = 0
655 i += 1
649 i += 1
656 if i > 0:
650 if i > 0:
657 yield i, f
651 yield i, f
658
652
659 def batchget(repo, mctx, actions):
653 def batchget(repo, mctx, actions):
660 """apply gets to the working directory
654 """apply gets to the working directory
661
655
662 mctx is the context to get from
656 mctx is the context to get from
663
657
664 yields tuples for progress updates
658 yields tuples for progress updates
665 """
659 """
666 verbose = repo.ui.verbose
660 verbose = repo.ui.verbose
667 fctx = mctx.filectx
661 fctx = mctx.filectx
668 wwrite = repo.wwrite
662 wwrite = repo.wwrite
669 i = 0
663 i = 0
670 for f, args, msg in actions:
664 for f, args, msg in actions:
671 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
665 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
672 if verbose:
666 if verbose:
673 repo.ui.note(_("getting %s\n") % f)
667 repo.ui.note(_("getting %s\n") % f)
674 wwrite(f, fctx(f).data(), args[0])
668 wwrite(f, fctx(f).data(), args[0])
675 if i == 100:
669 if i == 100:
676 yield i, f
670 yield i, f
677 i = 0
671 i = 0
678 i += 1
672 i += 1
679 if i > 0:
673 if i > 0:
680 yield i, f
674 yield i, f
681
675
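batchremove() and batchget() above share one batching idiom: rather than reporting progress per file, they yield a running count roughly every hundred files, and those increments are consumed via worker.worker() in applyupdates() below. A stand-alone sketch of the same idiom (batchwork and its inputs are hypothetical; not part of merge.py):

def batchwork(items):
    '''Yield (count, lastitem) tuples about every 100 processed items.'''
    i = 0
    for item in items:
        # ... the per-item work would happen here ...
        if i == 100:                        # flush an increment for the previous batch
            yield i, item
            i = 0
        i += 1
    if i > 0:                               # report the final partial batch
        yield i, item

done = 0
for count, last in batchwork(['file%d' % n for n in xrange(250)]):
    done += count
    print 'processed %d files, up to %s' % (done, last)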
682 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
676 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
683 """apply the merge action list to the working directory
677 """apply the merge action list to the working directory
684
678
685 wctx is the working copy context
679 wctx is the working copy context
686 mctx is the context to be merged into the working copy
680 mctx is the context to be merged into the working copy
687
681
688 Return a tuple of counts (updated, merged, removed, unresolved) that
682 Return a tuple of counts (updated, merged, removed, unresolved) that
689 describes how many files were affected by the update.
683 describes how many files were affected by the update.
690 """
684 """
691
685
692 updated, merged, removed, unresolved = 0, 0, 0, 0
686 updated, merged, removed, unresolved = 0, 0, 0, 0
693 ms = mergestate(repo)
687 ms = mergestate(repo)
694 ms.reset(wctx.p1().node(), mctx.node())
688 ms.reset(wctx.p1().node(), mctx.node())
695 moves = []
689 moves = []
696 for m, l in actions.items():
690 for m, l in actions.items():
697 l.sort()
691 l.sort()
698
692
699 # prescan for merges
693 # prescan for merges
700 for f, args, msg in actions['m']:
694 for f, args, msg in actions['m']:
701 f1, f2, fa, move, anc = args
695 f1, f2, fa, move, anc = args
702 if f == '.hgsubstate': # merged internally
696 if f == '.hgsubstate': # merged internally
703 continue
697 continue
704 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
698 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
705 fcl = wctx[f1]
699 fcl = wctx[f1]
706 fco = mctx[f2]
700 fco = mctx[f2]
707 actx = repo[anc]
701 actx = repo[anc]
708 if fa in actx:
702 if fa in actx:
709 fca = actx[fa]
703 fca = actx[fa]
710 else:
704 else:
711 fca = repo.filectx(f1, fileid=nullrev)
705 fca = repo.filectx(f1, fileid=nullrev)
712 ms.add(fcl, fco, fca, f)
706 ms.add(fcl, fco, fca, f)
713 if f1 != f and move:
707 if f1 != f and move:
714 moves.append(f1)
708 moves.append(f1)
715
709
716 audit = repo.wopener.audit
710 audit = repo.wopener.audit
717 _updating = _('updating')
711 _updating = _('updating')
718 _files = _('files')
712 _files = _('files')
719 progress = repo.ui.progress
713 progress = repo.ui.progress
720
714
721 # remove renamed files after safely stored
715 # remove renamed files after safely stored
722 for f in moves:
716 for f in moves:
723 if os.path.lexists(repo.wjoin(f)):
717 if os.path.lexists(repo.wjoin(f)):
724 repo.ui.debug("removing %s\n" % f)
718 repo.ui.debug("removing %s\n" % f)
725 audit(f)
719 audit(f)
726 util.unlinkpath(repo.wjoin(f))
720 util.unlinkpath(repo.wjoin(f))
727
721
728 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
722 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
729
723
730 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
724 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
731 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
725 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
732
726
733 # remove in parallel (must come first)
727 # remove in parallel (must come first)
734 z = 0
728 z = 0
735 prog = worker.worker(repo.ui, 0.001, batchremove, (repo,), actions['r'])
729 prog = worker.worker(repo.ui, 0.001, batchremove, (repo,), actions['r'])
736 for i, item in prog:
730 for i, item in prog:
737 z += i
731 z += i
738 progress(_updating, z, item=item, total=numupdates, unit=_files)
732 progress(_updating, z, item=item, total=numupdates, unit=_files)
739 removed = len(actions['r'])
733 removed = len(actions['r'])
740
734
741 # get in parallel
735 # get in parallel
742 prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx), actions['g'])
736 prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx), actions['g'])
743 for i, item in prog:
737 for i, item in prog:
744 z += i
738 z += i
745 progress(_updating, z, item=item, total=numupdates, unit=_files)
739 progress(_updating, z, item=item, total=numupdates, unit=_files)
746 updated = len(actions['g'])
740 updated = len(actions['g'])
747
741
748 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
742 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
749 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
743 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
750
744
751 # forget (manifest only, just log it) (must come first)
745 # forget (manifest only, just log it) (must come first)
752 for f, args, msg in actions['f']:
746 for f, args, msg in actions['f']:
753 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
747 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
754 z += 1
748 z += 1
755 progress(_updating, z, item=f, total=numupdates, unit=_files)
749 progress(_updating, z, item=f, total=numupdates, unit=_files)
756
750
757 # re-add (manifest only, just log it)
751 # re-add (manifest only, just log it)
758 for f, args, msg in actions['a']:
752 for f, args, msg in actions['a']:
759 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
753 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
760 z += 1
754 z += 1
761 progress(_updating, z, item=f, total=numupdates, unit=_files)
755 progress(_updating, z, item=f, total=numupdates, unit=_files)
762
756
763 # keep (noop, just log it)
757 # keep (noop, just log it)
764 for f, args, msg in actions['k']:
758 for f, args, msg in actions['k']:
765 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
759 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
766 # no progress
760 # no progress
767
761
768 # merge
762 # merge
769 for f, args, msg in actions['m']:
763 for f, args, msg in actions['m']:
770 repo.ui.debug(" %s: %s -> m\n" % (f, msg))
764 repo.ui.debug(" %s: %s -> m\n" % (f, msg))
771 z += 1
765 z += 1
772 progress(_updating, z, item=f, total=numupdates, unit=_files)
766 progress(_updating, z, item=f, total=numupdates, unit=_files)
773 if f == '.hgsubstate': # subrepo states need updating
767 if f == '.hgsubstate': # subrepo states need updating
774 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
768 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
775 overwrite)
769 overwrite)
776 continue
770 continue
777 audit(f)
771 audit(f)
778 r = ms.resolve(f, wctx, labels=labels)
772 r = ms.resolve(f, wctx, labels=labels)
779 if r is not None and r > 0:
773 if r is not None and r > 0:
780 unresolved += 1
774 unresolved += 1
781 else:
775 else:
782 if r is None:
776 if r is None:
783 updated += 1
777 updated += 1
784 else:
778 else:
785 merged += 1
779 merged += 1
786
780
787 # directory rename, move local
781 # directory rename, move local
788 for f, args, msg in actions['dm']:
782 for f, args, msg in actions['dm']:
789 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
783 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
790 z += 1
784 z += 1
791 progress(_updating, z, item=f, total=numupdates, unit=_files)
785 progress(_updating, z, item=f, total=numupdates, unit=_files)
792 f0, flags = args
786 f0, flags = args
793 repo.ui.note(_("moving %s to %s\n") % (f0, f))
787 repo.ui.note(_("moving %s to %s\n") % (f0, f))
794 audit(f)
788 audit(f)
795 repo.wwrite(f, wctx.filectx(f0).data(), flags)
789 repo.wwrite(f, wctx.filectx(f0).data(), flags)
796 util.unlinkpath(repo.wjoin(f0))
790 util.unlinkpath(repo.wjoin(f0))
797 updated += 1
791 updated += 1
798
792
799 # local directory rename, get
793 # local directory rename, get
800 for f, args, msg in actions['dg']:
794 for f, args, msg in actions['dg']:
801 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
795 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
802 z += 1
796 z += 1
803 progress(_updating, z, item=f, total=numupdates, unit=_files)
797 progress(_updating, z, item=f, total=numupdates, unit=_files)
804 f0, flags = args
798 f0, flags = args
805 repo.ui.note(_("getting %s to %s\n") % (f0, f))
799 repo.ui.note(_("getting %s to %s\n") % (f0, f))
806 repo.wwrite(f, mctx.filectx(f0).data(), flags)
800 repo.wwrite(f, mctx.filectx(f0).data(), flags)
807 updated += 1
801 updated += 1
808
802
809 # exec
803 # exec
810 for f, args, msg in actions['e']:
804 for f, args, msg in actions['e']:
811 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
805 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
812 z += 1
806 z += 1
813 progress(_updating, z, item=f, total=numupdates, unit=_files)
807 progress(_updating, z, item=f, total=numupdates, unit=_files)
814 flags, = args
808 flags, = args
815 audit(f)
809 audit(f)
816 util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
810 util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
817 updated += 1
811 updated += 1
818
812
819 ms.commit()
813 ms.commit()
820 progress(_updating, None, total=numupdates, unit=_files)
814 progress(_updating, None, total=numupdates, unit=_files)
821
815
822 return updated, merged, removed, unresolved
816 return updated, merged, removed, unresolved
823
817
824 def recordupdates(repo, actions, branchmerge):
818 def recordupdates(repo, actions, branchmerge):
825 "record merge actions to the dirstate"
819 "record merge actions to the dirstate"
826 # remove (must come first)
820 # remove (must come first)
827 for f, args, msg in actions['r']:
821 for f, args, msg in actions['r']:
828 if branchmerge:
822 if branchmerge:
829 repo.dirstate.remove(f)
823 repo.dirstate.remove(f)
830 else:
824 else:
831 repo.dirstate.drop(f)
825 repo.dirstate.drop(f)
832
826
833 # forget (must come first)
827 # forget (must come first)
834 for f, args, msg in actions['f']:
828 for f, args, msg in actions['f']:
835 repo.dirstate.drop(f)
829 repo.dirstate.drop(f)
836
830
837 # re-add
831 # re-add
838 for f, args, msg in actions['a']:
832 for f, args, msg in actions['a']:
839 if not branchmerge:
833 if not branchmerge:
840 repo.dirstate.add(f)
834 repo.dirstate.add(f)
841
835
842 # exec change
836 # exec change
843 for f, args, msg in actions['e']:
837 for f, args, msg in actions['e']:
844 repo.dirstate.normallookup(f)
838 repo.dirstate.normallookup(f)
845
839
846 # keep
840 # keep
847 for f, args, msg in actions['k']:
841 for f, args, msg in actions['k']:
848 pass
842 pass
849
843
850 # get
844 # get
851 for f, args, msg in actions['g']:
845 for f, args, msg in actions['g']:
852 if branchmerge:
846 if branchmerge:
853 repo.dirstate.otherparent(f)
847 repo.dirstate.otherparent(f)
854 else:
848 else:
855 repo.dirstate.normal(f)
849 repo.dirstate.normal(f)
856
850
857 # merge
851 # merge
858 for f, args, msg in actions['m']:
852 for f, args, msg in actions['m']:
859 f1, f2, fa, move, anc = args
853 f1, f2, fa, move, anc = args
860 if branchmerge:
854 if branchmerge:
861 # We've done a branch merge, mark this file as merged
855 # We've done a branch merge, mark this file as merged
862 # so that we properly record the merger later
856 # so that we properly record the merger later
863 repo.dirstate.merge(f)
857 repo.dirstate.merge(f)
864 if f1 != f2: # copy/rename
858 if f1 != f2: # copy/rename
865 if move:
859 if move:
866 repo.dirstate.remove(f1)
860 repo.dirstate.remove(f1)
867 if f1 != f:
861 if f1 != f:
868 repo.dirstate.copy(f1, f)
862 repo.dirstate.copy(f1, f)
869 else:
863 else:
870 repo.dirstate.copy(f2, f)
864 repo.dirstate.copy(f2, f)
871 else:
865 else:
872 # We've update-merged a locally modified file, so
866 # We've update-merged a locally modified file, so
873 # we set the dirstate to emulate a normal checkout
867 # we set the dirstate to emulate a normal checkout
874 # of that file some time in the past. Thus our
868 # of that file some time in the past. Thus our
875 # merge will appear as a normal local file
869 # merge will appear as a normal local file
876 # modification.
870 # modification.
877 if f2 == f: # file not locally copied/moved
871 if f2 == f: # file not locally copied/moved
878 repo.dirstate.normallookup(f)
872 repo.dirstate.normallookup(f)
879 if move:
873 if move:
880 repo.dirstate.drop(f1)
874 repo.dirstate.drop(f1)
881
875
882 # directory rename, move local
876 # directory rename, move local
883 for f, args, msg in actions['dm']:
877 for f, args, msg in actions['dm']:
884 f0, flag = args
878 f0, flag = args
885 if branchmerge:
879 if branchmerge:
886 repo.dirstate.add(f)
880 repo.dirstate.add(f)
887 repo.dirstate.remove(f0)
881 repo.dirstate.remove(f0)
888 repo.dirstate.copy(f0, f)
882 repo.dirstate.copy(f0, f)
889 else:
883 else:
890 repo.dirstate.normal(f)
884 repo.dirstate.normal(f)
891 repo.dirstate.drop(f0)
885 repo.dirstate.drop(f0)
892
886
893 # directory rename, get
887 # directory rename, get
894 for f, args, msg in actions['dg']:
888 for f, args, msg in actions['dg']:
895 f0, flag = args
889 f0, flag = args
896 if branchmerge:
890 if branchmerge:
897 repo.dirstate.add(f)
891 repo.dirstate.add(f)
898 repo.dirstate.copy(f0, f)
892 repo.dirstate.copy(f0, f)
899 else:
893 else:
900 repo.dirstate.normal(f)
894 repo.dirstate.normal(f)
901
895
902 def update(repo, node, branchmerge, force, partial, ancestor=None,
896 def update(repo, node, branchmerge, force, partial, ancestor=None,
903 mergeancestor=False, labels=None):
897 mergeancestor=False, labels=None):
904 """
898 """
905 Perform a merge between the working directory and the given node
899 Perform a merge between the working directory and the given node
906
900
907 node = the node to update to, or None if unspecified
901 node = the node to update to, or None if unspecified
908 branchmerge = whether to merge between branches
902 branchmerge = whether to merge between branches
909 force = whether to force branch merging or file overwriting
903 force = whether to force branch merging or file overwriting
910 partial = a function to filter file lists (dirstate not updated)
904 partial = a function to filter file lists (dirstate not updated)
911 mergeancestor = whether it is merging with an ancestor. If true,
905 mergeancestor = whether it is merging with an ancestor. If true,
912 we should accept the incoming changes for any prompts that occur.
906 we should accept the incoming changes for any prompts that occur.
913 If false, merging with an ancestor (fast-forward) is only allowed
907 If false, merging with an ancestor (fast-forward) is only allowed
914 between different named branches. This flag is used by rebase extension
908 between different named branches. This flag is used by rebase extension
915 as a temporary fix and should be avoided in general.
909 as a temporary fix and should be avoided in general.
916
910
917 The table below shows all the behaviors of the update command
911 The table below shows all the behaviors of the update command
918 given the -c and -C or no options, whether the working directory
912 given the -c and -C or no options, whether the working directory
919 is dirty, whether a revision is specified, and the relationship of
913 is dirty, whether a revision is specified, and the relationship of
920 the parent rev to the target rev (linear, on the same named
914 the parent rev to the target rev (linear, on the same named
921 branch, or on another named branch).
915 branch, or on another named branch).
922
916
923 This logic is tested by test-update-branches.t.
917 This logic is tested by test-update-branches.t.
924
918
925 -c -C dirty rev | linear same cross
919 -c -C dirty rev | linear same cross
926 n n n n | ok (1) x
920 n n n n | ok (1) x
927 n n n y | ok ok ok
921 n n n y | ok ok ok
928 n n y n | merge (2) (2)
922 n n y n | merge (2) (2)
929 n n y y | merge (3) (3)
923 n n y y | merge (3) (3)
930 n y * * | --- discard ---
924 n y * * | --- discard ---
931 y n y * | --- (4) ---
925 y n y * | --- (4) ---
932 y n n * | --- ok ---
926 y n n * | --- ok ---
933 y y * * | --- (5) ---
927 y y * * | --- (5) ---
934
928
935 x = can't happen
929 x = can't happen
936 * = don't-care
930 * = don't-care
937 1 = abort: not a linear update (merge or update --check to force update)
931 1 = abort: not a linear update (merge or update --check to force update)
938 2 = abort: uncommitted changes (commit and merge, or update --clean to
932 2 = abort: uncommitted changes (commit and merge, or update --clean to
939 discard changes)
933 discard changes)
940 3 = abort: uncommitted changes (commit or update --clean to discard changes)
934 3 = abort: uncommitted changes (commit or update --clean to discard changes)
941 4 = abort: uncommitted changes (checked in commands.py)
935 4 = abort: uncommitted changes (checked in commands.py)
942 5 = incompatible options (checked in commands.py)
936 5 = incompatible options (checked in commands.py)
943
937
944 Return the same tuple as applyupdates().
938 Return the same tuple as applyupdates().
945 """
939 """
946
940
947 onode = node
941 onode = node
948 wlock = repo.wlock()
942 wlock = repo.wlock()
949 try:
943 try:
950 wc = repo[None]
944 wc = repo[None]
951 pl = wc.parents()
945 pl = wc.parents()
952 p1 = pl[0]
946 p1 = pl[0]
953 pas = [None]
947 pas = [None]
954 if ancestor is not None:
948 if ancestor is not None:
955 pas = [repo[ancestor]]
949 pas = [repo[ancestor]]
956
950
957 if node is None:
951 if node is None:
958 # Here is where we should consider bookmarks, divergent bookmarks,
952 # Here is where we should consider bookmarks, divergent bookmarks,
959 # foreground changesets (successors), and tip of current branch;
953 # foreground changesets (successors), and tip of current branch;
960 # but currently we are only checking the branch tips.
954 # but currently we are only checking the branch tips.
961 try:
955 try:
962 node = repo.branchtip(wc.branch())
956 node = repo.branchtip(wc.branch())
963 except errormod.RepoLookupError:
957 except errormod.RepoLookupError:
964 if wc.branch() == 'default': # no default branch!
958 if wc.branch() == 'default': # no default branch!
965 node = repo.lookup('tip') # update to tip
959 node = repo.lookup('tip') # update to tip
966 else:
960 else:
967 raise util.Abort(_("branch %s not found") % wc.branch())
961 raise util.Abort(_("branch %s not found") % wc.branch())
968
962
969 if p1.obsolete() and not p1.children():
963 if p1.obsolete() and not p1.children():
970 # allow updating to successors
964 # allow updating to successors
971 successors = obsolete.successorssets(repo, p1.node())
965 successors = obsolete.successorssets(repo, p1.node())
972
966
973 # behavior of certain cases is as follows,
967 # behavior of certain cases is as follows,
974 #
968 #
975 # divergent changesets: update to highest rev, similar to what
969 # divergent changesets: update to highest rev, similar to what
976 # is currently done when there are more than one head
970 # is currently done when there are more than one head
977 # (i.e. 'tip')
971 # (i.e. 'tip')
978 #
972 #
979 # replaced changesets: same as divergent except we know there
973 # replaced changesets: same as divergent except we know there
980 # is no conflict
974 # is no conflict
981 #
975 #
982 # pruned changeset: no update is done; though, we could
976 # pruned changeset: no update is done; though, we could
983 # consider updating to the first non-obsolete parent,
977 # consider updating to the first non-obsolete parent,
984 # similar to what is currently done for 'hg prune'
978 # similar to what is currently done for 'hg prune'
985
979
986 if successors:
980 if successors:
987 # flatten the list here handles both divergent (len > 1)
981 # flatten the list here handles both divergent (len > 1)
988 # and the usual case (len = 1)
982 # and the usual case (len = 1)
989 successors = [n for sub in successors for n in sub]
983 successors = [n for sub in successors for n in sub]
990
984
991 # get the max revision for the given successors set,
985 # get the max revision for the given successors set,
992 # i.e. the 'tip' of a set
986 # i.e. the 'tip' of a set
993 node = repo.revs('max(%ln)', successors).first()
987 node = repo.revs('max(%ln)', successors).first()
994 pas = [p1]
988 pas = [p1]
995
989
996 overwrite = force and not branchmerge
990 overwrite = force and not branchmerge
997
991
998 p2 = repo[node]
992 p2 = repo[node]
999 if pas[0] is None:
993 if pas[0] is None:
1000 if repo.ui.config('merge', 'preferancestor', '*') == '*':
994 if repo.ui.config('merge', 'preferancestor', '*') == '*':
1001 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
995 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1002 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
996 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1003 else:
997 else:
1004 pas = [p1.ancestor(p2, warn=branchmerge)]
998 pas = [p1.ancestor(p2, warn=branchmerge)]
1005
999
1006 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1000 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1007
1001
1008 ### check phase
1002 ### check phase
1009 if not overwrite and len(pl) > 1:
1003 if not overwrite and len(pl) > 1:
1010 raise util.Abort(_("outstanding uncommitted merge"))
1004 raise util.Abort(_("outstanding uncommitted merge"))
1011 if branchmerge:
1005 if branchmerge:
1012 if pas == [p2]:
1006 if pas == [p2]:
1013 raise util.Abort(_("merging with a working directory ancestor"
1007 raise util.Abort(_("merging with a working directory ancestor"
1014 " has no effect"))
1008 " has no effect"))
1015 elif pas == [p1]:
1009 elif pas == [p1]:
1016 if not mergeancestor and p1.branch() == p2.branch():
1010 if not mergeancestor and p1.branch() == p2.branch():
1017 raise util.Abort(_("nothing to merge"),
1011 raise util.Abort(_("nothing to merge"),
1018 hint=_("use 'hg update' "
1012 hint=_("use 'hg update' "
1019 "or check 'hg heads'"))
1013 "or check 'hg heads'"))
1020 if not force and (wc.files() or wc.deleted()):
1014 if not force and (wc.files() or wc.deleted()):
1021 raise util.Abort(_("uncommitted changes"),
1015 raise util.Abort(_("uncommitted changes"),
1022 hint=_("use 'hg status' to list changes"))
1016 hint=_("use 'hg status' to list changes"))
1023 for s in sorted(wc.substate):
1017 for s in sorted(wc.substate):
1024 if wc.sub(s).dirty():
1018 if wc.sub(s).dirty():
1025 raise util.Abort(_("uncommitted changes in "
1019 raise util.Abort(_("uncommitted changes in "
1026 "subrepository '%s'") % s)
1020 "subrepository '%s'") % s)
1027
1021
1028 elif not overwrite:
1022 elif not overwrite:
1029 if p1 == p2: # no-op update
1023 if p1 == p2: # no-op update
1030 # call the hooks and exit early
1024 # call the hooks and exit early
1031 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1025 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1032 repo.hook('update', parent1=xp2, parent2='', error=0)
1026 repo.hook('update', parent1=xp2, parent2='', error=0)
1033 return 0, 0, 0, 0
1027 return 0, 0, 0, 0
1034
1028
1035 if pas not in ([p1], [p2]): # nonlinear
1029 if pas not in ([p1], [p2]): # nonlinear
1036 dirty = wc.dirty(missing=True)
1030 dirty = wc.dirty(missing=True)
1037 if dirty or onode is None:
1031 if dirty or onode is None:
1038 # Branching is a bit strange to ensure we do the minimal
1032 # Branching is a bit strange to ensure we do the minimal
1039 # amount of call to obsolete.background.
1033 # amount of call to obsolete.background.
1040 foreground = obsolete.foreground(repo, [p1.node()])
1034 foreground = obsolete.foreground(repo, [p1.node()])
1041 # note: the <node> variable contains a random identifier
1035 # note: the <node> variable contains a random identifier
1042 if repo[node].node() in foreground:
1036 if repo[node].node() in foreground:
1043 pas = [p1] # allow updating to successors
1037 pas = [p1] # allow updating to successors
1044 elif dirty:
1038 elif dirty:
1045 msg = _("uncommitted changes")
1039 msg = _("uncommitted changes")
1046 if onode is None:
1040 if onode is None:
1047 hint = _("commit and merge, or update --clean to"
1041 hint = _("commit and merge, or update --clean to"
1048 " discard changes")
1042 " discard changes")
1049 else:
1043 else:
1050 hint = _("commit or update --clean to discard"
1044 hint = _("commit or update --clean to discard"
1051 " changes")
1045 " changes")
1052 raise util.Abort(msg, hint=hint)
1046 raise util.Abort(msg, hint=hint)
1053 else: # node is none
1047 else: # node is none
1054 msg = _("not a linear update")
1048 msg = _("not a linear update")
1055 hint = _("merge or update --check to force update")
1049 hint = _("merge or update --check to force update")
1056 raise util.Abort(msg, hint=hint)
1050 raise util.Abort(msg, hint=hint)
1057 else:
1051 else:
1058 # Allow jumping branches if clean and specific rev given
1052 # Allow jumping branches if clean and specific rev given
1059 pas = [p1]
1053 pas = [p1]
1060
1054
1061 followcopies = False
1055 followcopies = False
1062 if overwrite:
1056 if overwrite:
1063 pas = [wc]
1057 pas = [wc]
1064 elif pas == [p2]: # backwards
1058 elif pas == [p2]: # backwards
1065 pas = [wc.p1()]
1059 pas = [wc.p1()]
1066 elif not branchmerge and not wc.dirty(missing=True):
1060 elif not branchmerge and not wc.dirty(missing=True):
1067 pass
1061 pass
1068 elif pas[0] and repo.ui.configbool('merge', 'followcopies', True):
1062 elif pas[0] and repo.ui.configbool('merge', 'followcopies', True):
1069 followcopies = True
1063 followcopies = True
1070
1064
1071 ### calculate phase
1065 ### calculate phase
1072 actions, diverge, renamedelete = calculateupdates(
1066 actionbyfile, diverge, renamedelete = calculateupdates(
1073 repo, wc, p2, pas, branchmerge, force, partial, mergeancestor,
1067 repo, wc, p2, pas, branchmerge, force, partial, mergeancestor,
1074 followcopies)
1068 followcopies)
1069 # Convert to dictionary-of-lists format
1070 actions = dict((m, []) for m in 'a f g cd dc r dm dg m e k'.split())
1071 for f, (m, args, msg) in actionbyfile.iteritems():
1072 if m not in actions:
1073 actions[m] = []
1074 actions[m].append((f, args, msg))
1075
1075
1076 if not util.checkcase(repo.path):
1076 if not util.checkcase(repo.path):
1077 # check collision between files only in p2 for clean update
1077 # check collision between files only in p2 for clean update
1078 if (not branchmerge and
1078 if (not branchmerge and
1079 (force or not wc.dirty(missing=True, branch=False))):
1079 (force or not wc.dirty(missing=True, branch=False))):
1080 _checkcollision(repo, p2.manifest(), None)
1080 _checkcollision(repo, p2.manifest(), None)
1081 else:
1081 else:
1082 _checkcollision(repo, wc.manifest(), actions)
1082 _checkcollision(repo, wc.manifest(), actions)
1083
1083
1084 # Prompt and create actions. TODO: Move this towards resolve phase.
1084 # Prompt and create actions. TODO: Move this towards resolve phase.
1085 for f, args, msg in sorted(actions['cd']):
1085 for f, args, msg in sorted(actions['cd']):
1086 if repo.ui.promptchoice(
1086 if repo.ui.promptchoice(
1087 _("local changed %s which remote deleted\n"
1087 _("local changed %s which remote deleted\n"
1088 "use (c)hanged version or (d)elete?"
1088 "use (c)hanged version or (d)elete?"
1089 "$$ &Changed $$ &Delete") % f, 0):
1089 "$$ &Changed $$ &Delete") % f, 0):
1090 actions['r'].append((f, None, "prompt delete"))
1090 actions['r'].append((f, None, "prompt delete"))
1091 else:
1091 else:
1092 actions['a'].append((f, None, "prompt keep"))
1092 actions['a'].append((f, None, "prompt keep"))
1093 del actions['cd'][:]
1093 del actions['cd'][:]
1094
1094
1095 for f, args, msg in sorted(actions['dc']):
1095 for f, args, msg in sorted(actions['dc']):
1096 flags, = args
1096 flags, = args
1097 if repo.ui.promptchoice(
1097 if repo.ui.promptchoice(
1098 _("remote changed %s which local deleted\n"
1098 _("remote changed %s which local deleted\n"
1099 "use (c)hanged version or leave (d)eleted?"
1099 "use (c)hanged version or leave (d)eleted?"
1100 "$$ &Changed $$ &Deleted") % f, 0) == 0:
1100 "$$ &Changed $$ &Deleted") % f, 0) == 0:
1101 actions['g'].append((f, (flags,), "prompt recreating"))
1101 actions['g'].append((f, (flags,), "prompt recreating"))
1102 del actions['dc'][:]
1102 del actions['dc'][:]
1103
1103
1104 ### apply phase
1104 ### apply phase
1105 if not branchmerge: # just jump to the new rev
1105 if not branchmerge: # just jump to the new rev
1106 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1106 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1107 if not partial:
1107 if not partial:
1108 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1108 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1109 # note that we're in the middle of an update
1109 # note that we're in the middle of an update
1110 repo.vfs.write('updatestate', p2.hex())
1110 repo.vfs.write('updatestate', p2.hex())
1111
1111
1112 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1112 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1113
1113
1114 # divergent renames
1114 # divergent renames
1115 for f, fl in sorted(diverge.iteritems()):
1115 for f, fl in sorted(diverge.iteritems()):
1116 repo.ui.warn(_("note: possible conflict - %s was renamed "
1116 repo.ui.warn(_("note: possible conflict - %s was renamed "
1117 "multiple times to:\n") % f)
1117 "multiple times to:\n") % f)
1118 for nf in fl:
1118 for nf in fl:
1119 repo.ui.warn(" %s\n" % nf)
1119 repo.ui.warn(" %s\n" % nf)
1120
1120
1121 # rename and delete
1121 # rename and delete
1122 for f, fl in sorted(renamedelete.iteritems()):
1122 for f, fl in sorted(renamedelete.iteritems()):
1123 repo.ui.warn(_("note: possible conflict - %s was deleted "
1123 repo.ui.warn(_("note: possible conflict - %s was deleted "
1124 "and renamed to:\n") % f)
1124 "and renamed to:\n") % f)
1125 for nf in fl:
1125 for nf in fl:
1126 repo.ui.warn(" %s\n" % nf)
1126 repo.ui.warn(" %s\n" % nf)
1127
1127
1128 if not partial:
1128 if not partial:
1129 repo.dirstate.beginparentchange()
1129 repo.dirstate.beginparentchange()
1130 repo.setparents(fp1, fp2)
1130 repo.setparents(fp1, fp2)
1131 recordupdates(repo, actions, branchmerge)
1131 recordupdates(repo, actions, branchmerge)
1132 # update completed, clear state
1132 # update completed, clear state
1133 util.unlink(repo.join('updatestate'))
1133 util.unlink(repo.join('updatestate'))
1134
1134
1135 if not branchmerge:
1135 if not branchmerge:
1136 repo.dirstate.setbranch(p2.branch())
1136 repo.dirstate.setbranch(p2.branch())
1137 repo.dirstate.endparentchange()
1137 repo.dirstate.endparentchange()
1138 finally:
1138 finally:
1139 wlock.release()
1139 wlock.release()
1140
1140
1141 if not partial:
1141 if not partial:
1142 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
1142 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
1143 return stats
1143 return stats
1144
1144
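One possible way to drive update() directly, as an illustration only (the repository path is hypothetical, and this assumes a Mercurial of this vintage running on Python 2; normal callers go through hg.update() or the update command rather than calling merge.update() themselves):

from mercurial import ui as uimod, hg, merge

repo = hg.repository(uimod.ui(), '/path/to/repo')    # hypothetical repository path
node = repo.lookup('tip')                            # target revision to update to
stats = merge.update(repo, node,
                     branchmerge=False,              # plain update, not a branch merge
                     force=False,                    # keep the uncommitted-changes checks
                     partial=False)                  # no file filter: update everything
updated, merged, removed, unresolved = stats         # same tuple as applyupdates()
print '%d updated, %d merged, %d removed, %d unresolved' % (
    updated, merged, removed, unresolved)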
1145 def graft(repo, ctx, pctx, labels):
1145 def graft(repo, ctx, pctx, labels):
1146 """Do a graft-like merge.
1146 """Do a graft-like merge.
1147
1147
1148 This is a merge where the merge ancestor is chosen such that one
1148 This is a merge where the merge ancestor is chosen such that one
1149 or more changesets are grafted onto the current changeset. In
1149 or more changesets are grafted onto the current changeset. In
1150 addition to the merge, this fixes up the dirstate to include only
1150 addition to the merge, this fixes up the dirstate to include only
1151 a single parent and tries to duplicate any renames/copies
1151 a single parent and tries to duplicate any renames/copies
1152 appropriately.
1152 appropriately.
1153
1153
1154 ctx - changeset to rebase
1154 ctx - changeset to rebase
1155 pctx - merge base, usually ctx.p1()
1155 pctx - merge base, usually ctx.p1()
1156 labels - merge labels eg ['local', 'graft']
1156 labels - merge labels eg ['local', 'graft']
1157
1157
1158 """
1158 """
1159
1159
1160 stats = update(repo, ctx.node(), True, True, False, pctx.node(),
1160 stats = update(repo, ctx.node(), True, True, False, pctx.node(),
1161 labels=labels)
1161 labels=labels)
1162 # drop the second merge parent
1162 # drop the second merge parent
1163 repo.dirstate.beginparentchange()
1163 repo.dirstate.beginparentchange()
1164 repo.setparents(repo['.'].node(), nullid)
1164 repo.setparents(repo['.'].node(), nullid)
1165 repo.dirstate.write()
1165 repo.dirstate.write()
1166 # fix up dirstate for copies and renames
1166 # fix up dirstate for copies and renames
1167 copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
1167 copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
1168 repo.dirstate.endparentchange()
1168 repo.dirstate.endparentchange()
1169 return stats
1169 return stats
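A matching illustrative call for graft(), reusing the repo object from the sketch above (the choice of 'tip' as the grafted changeset is arbitrary): as the update() call at the top of the function shows, it is a forced branch merge using the grafted changeset's parent as the ancestor, followed by the dirstate fix-ups.

ctx = repo['tip']                                    # changeset to graft
stats = merge.graft(repo, ctx, ctx.p1(), ['local', 'graft'])
print 'graft left %d unresolved files' % stats[3]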