##// END OF EJS Templates
merge: don't overwrite untracked file at directory rename target...
Martin von Zweigbergk -
r23653:0297d846 default
parent child Browse files
Show More
@@ -1,1307 +1,1307 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10
10
11 import os
11 import os
12 import copy
12 import copy
13
13
14 from mercurial import hg, util, cmdutil, scmutil, match as match_, \
14 from mercurial import hg, util, cmdutil, scmutil, match as match_, \
15 archival, pathutil, revset
15 archival, pathutil, revset
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17 from mercurial.node import hex
17 from mercurial.node import hex
18
18
19 import lfutil
19 import lfutil
20 import lfcommands
20 import lfcommands
21 import basestore
21 import basestore
22
22
23 # -- Utility functions: commonly/repeatedly needed functionality ---------------
23 # -- Utility functions: commonly/repeatedly needed functionality ---------------
24
24
25 def composelargefilematcher(match, manifest):
25 def composelargefilematcher(match, manifest):
26 '''create a matcher that matches only the largefiles in the original
26 '''create a matcher that matches only the largefiles in the original
27 matcher'''
27 matcher'''
28 m = copy.copy(match)
28 m = copy.copy(match)
29 lfile = lambda f: lfutil.standin(f) in manifest
29 lfile = lambda f: lfutil.standin(f) in manifest
30 m._files = filter(lfile, m._files)
30 m._files = filter(lfile, m._files)
31 m._fmap = set(m._files)
31 m._fmap = set(m._files)
32 m._always = False
32 m._always = False
33 origmatchfn = m.matchfn
33 origmatchfn = m.matchfn
34 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
34 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
35 return m
35 return m
36
36
37 def composenormalfilematcher(match, manifest):
37 def composenormalfilematcher(match, manifest):
38 m = copy.copy(match)
38 m = copy.copy(match)
39 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
39 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
40 manifest)
40 manifest)
41 m._files = filter(notlfile, m._files)
41 m._files = filter(notlfile, m._files)
42 m._fmap = set(m._files)
42 m._fmap = set(m._files)
43 m._always = False
43 m._always = False
44 origmatchfn = m.matchfn
44 origmatchfn = m.matchfn
45 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
45 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
46 return m
46 return m
47
47
48 def installnormalfilesmatchfn(manifest):
48 def installnormalfilesmatchfn(manifest):
49 '''installmatchfn with a matchfn that ignores all largefiles'''
49 '''installmatchfn with a matchfn that ignores all largefiles'''
50 def overridematch(ctx, pats=[], opts={}, globbed=False,
50 def overridematch(ctx, pats=[], opts={}, globbed=False,
51 default='relpath'):
51 default='relpath'):
52 match = oldmatch(ctx, pats, opts, globbed, default)
52 match = oldmatch(ctx, pats, opts, globbed, default)
53 return composenormalfilematcher(match, manifest)
53 return composenormalfilematcher(match, manifest)
54 oldmatch = installmatchfn(overridematch)
54 oldmatch = installmatchfn(overridematch)
55
55
56 def installmatchfn(f):
56 def installmatchfn(f):
57 '''monkey patch the scmutil module with a custom match function.
57 '''monkey patch the scmutil module with a custom match function.
58 Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
58 Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
59 oldmatch = scmutil.match
59 oldmatch = scmutil.match
60 setattr(f, 'oldmatch', oldmatch)
60 setattr(f, 'oldmatch', oldmatch)
61 scmutil.match = f
61 scmutil.match = f
62 return oldmatch
62 return oldmatch
63
63
64 def restorematchfn():
64 def restorematchfn():
65 '''restores scmutil.match to what it was before installmatchfn
65 '''restores scmutil.match to what it was before installmatchfn
66 was called. no-op if scmutil.match is its original function.
66 was called. no-op if scmutil.match is its original function.
67
67
68 Note that n calls to installmatchfn will require n calls to
68 Note that n calls to installmatchfn will require n calls to
69 restore the original matchfn.'''
69 restore the original matchfn.'''
70 scmutil.match = getattr(scmutil.match, 'oldmatch')
70 scmutil.match = getattr(scmutil.match, 'oldmatch')
71
71
72 def installmatchandpatsfn(f):
72 def installmatchandpatsfn(f):
73 oldmatchandpats = scmutil.matchandpats
73 oldmatchandpats = scmutil.matchandpats
74 setattr(f, 'oldmatchandpats', oldmatchandpats)
74 setattr(f, 'oldmatchandpats', oldmatchandpats)
75 scmutil.matchandpats = f
75 scmutil.matchandpats = f
76 return oldmatchandpats
76 return oldmatchandpats
77
77
78 def restorematchandpatsfn():
78 def restorematchandpatsfn():
79 '''restores scmutil.matchandpats to what it was before
79 '''restores scmutil.matchandpats to what it was before
80 installmatchandpatsfn was called. No-op if scmutil.matchandpats
80 installmatchandpatsfn was called. No-op if scmutil.matchandpats
81 is its original function.
81 is its original function.
82
82
83 Note that n calls to installmatchandpatsfn will require n calls
83 Note that n calls to installmatchandpatsfn will require n calls
84 to restore the original matchfn.'''
84 to restore the original matchfn.'''
85 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
85 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
86 scmutil.matchandpats)
86 scmutil.matchandpats)
87
87
88 def addlargefiles(ui, repo, matcher, **opts):
88 def addlargefiles(ui, repo, matcher, **opts):
89 large = opts.pop('large', None)
89 large = opts.pop('large', None)
90 lfsize = lfutil.getminsize(
90 lfsize = lfutil.getminsize(
91 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
91 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
92
92
93 lfmatcher = None
93 lfmatcher = None
94 if lfutil.islfilesrepo(repo):
94 if lfutil.islfilesrepo(repo):
95 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
95 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
96 if lfpats:
96 if lfpats:
97 lfmatcher = match_.match(repo.root, '', list(lfpats))
97 lfmatcher = match_.match(repo.root, '', list(lfpats))
98
98
99 lfnames = []
99 lfnames = []
100 m = copy.copy(matcher)
100 m = copy.copy(matcher)
101 m.bad = lambda x, y: None
101 m.bad = lambda x, y: None
102 wctx = repo[None]
102 wctx = repo[None]
103 for f in repo.walk(m):
103 for f in repo.walk(m):
104 exact = m.exact(f)
104 exact = m.exact(f)
105 lfile = lfutil.standin(f) in wctx
105 lfile = lfutil.standin(f) in wctx
106 nfile = f in wctx
106 nfile = f in wctx
107 exists = lfile or nfile
107 exists = lfile or nfile
108
108
109 # Don't warn the user when they attempt to add a normal tracked file.
109 # Don't warn the user when they attempt to add a normal tracked file.
110 # The normal add code will do that for us.
110 # The normal add code will do that for us.
111 if exact and exists:
111 if exact and exists:
112 if lfile:
112 if lfile:
113 ui.warn(_('%s already a largefile\n') % f)
113 ui.warn(_('%s already a largefile\n') % f)
114 continue
114 continue
115
115
116 if (exact or not exists) and not lfutil.isstandin(f):
116 if (exact or not exists) and not lfutil.isstandin(f):
117 wfile = repo.wjoin(f)
117 wfile = repo.wjoin(f)
118
118
119 # In case the file was removed previously, but not committed
119 # In case the file was removed previously, but not committed
120 # (issue3507)
120 # (issue3507)
121 if not os.path.exists(wfile):
121 if not os.path.exists(wfile):
122 continue
122 continue
123
123
124 abovemin = (lfsize and
124 abovemin = (lfsize and
125 os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
125 os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
126 if large or abovemin or (lfmatcher and lfmatcher(f)):
126 if large or abovemin or (lfmatcher and lfmatcher(f)):
127 lfnames.append(f)
127 lfnames.append(f)
128 if ui.verbose or not exact:
128 if ui.verbose or not exact:
129 ui.status(_('adding %s as a largefile\n') % m.rel(f))
129 ui.status(_('adding %s as a largefile\n') % m.rel(f))
130
130
131 bad = []
131 bad = []
132
132
133 # Need to lock, otherwise there could be a race condition between
133 # Need to lock, otherwise there could be a race condition between
134 # when standins are created and added to the repo.
134 # when standins are created and added to the repo.
135 wlock = repo.wlock()
135 wlock = repo.wlock()
136 try:
136 try:
137 if not opts.get('dry_run'):
137 if not opts.get('dry_run'):
138 standins = []
138 standins = []
139 lfdirstate = lfutil.openlfdirstate(ui, repo)
139 lfdirstate = lfutil.openlfdirstate(ui, repo)
140 for f in lfnames:
140 for f in lfnames:
141 standinname = lfutil.standin(f)
141 standinname = lfutil.standin(f)
142 lfutil.writestandin(repo, standinname, hash='',
142 lfutil.writestandin(repo, standinname, hash='',
143 executable=lfutil.getexecutable(repo.wjoin(f)))
143 executable=lfutil.getexecutable(repo.wjoin(f)))
144 standins.append(standinname)
144 standins.append(standinname)
145 if lfdirstate[f] == 'r':
145 if lfdirstate[f] == 'r':
146 lfdirstate.normallookup(f)
146 lfdirstate.normallookup(f)
147 else:
147 else:
148 lfdirstate.add(f)
148 lfdirstate.add(f)
149 lfdirstate.write()
149 lfdirstate.write()
150 bad += [lfutil.splitstandin(f)
150 bad += [lfutil.splitstandin(f)
151 for f in repo[None].add(standins)
151 for f in repo[None].add(standins)
152 if f in m.files()]
152 if f in m.files()]
153 finally:
153 finally:
154 wlock.release()
154 wlock.release()
155 return bad
155 return bad
156
156
157 def removelargefiles(ui, repo, isaddremove, *pats, **opts):
157 def removelargefiles(ui, repo, isaddremove, *pats, **opts):
158 after = opts.get('after')
158 after = opts.get('after')
159 if not pats and not after:
159 if not pats and not after:
160 raise util.Abort(_('no files specified'))
160 raise util.Abort(_('no files specified'))
161 m = composelargefilematcher(scmutil.match(repo[None], pats, opts),
161 m = composelargefilematcher(scmutil.match(repo[None], pats, opts),
162 repo[None].manifest())
162 repo[None].manifest())
163 try:
163 try:
164 repo.lfstatus = True
164 repo.lfstatus = True
165 s = repo.status(match=m, clean=True)
165 s = repo.status(match=m, clean=True)
166 finally:
166 finally:
167 repo.lfstatus = False
167 repo.lfstatus = False
168 manifest = repo[None].manifest()
168 manifest = repo[None].manifest()
169 modified, added, deleted, clean = [[f for f in list
169 modified, added, deleted, clean = [[f for f in list
170 if lfutil.standin(f) in manifest]
170 if lfutil.standin(f) in manifest]
171 for list in (s.modified, s.added,
171 for list in (s.modified, s.added,
172 s.deleted, s.clean)]
172 s.deleted, s.clean)]
173
173
174 def warn(files, msg):
174 def warn(files, msg):
175 for f in files:
175 for f in files:
176 ui.warn(msg % m.rel(f))
176 ui.warn(msg % m.rel(f))
177 return int(len(files) > 0)
177 return int(len(files) > 0)
178
178
179 result = 0
179 result = 0
180
180
181 if after:
181 if after:
182 remove = deleted
182 remove = deleted
183 result = warn(modified + added + clean,
183 result = warn(modified + added + clean,
184 _('not removing %s: file still exists\n'))
184 _('not removing %s: file still exists\n'))
185 else:
185 else:
186 remove = deleted + clean
186 remove = deleted + clean
187 result = warn(modified, _('not removing %s: file is modified (use -f'
187 result = warn(modified, _('not removing %s: file is modified (use -f'
188 ' to force removal)\n'))
188 ' to force removal)\n'))
189 result = warn(added, _('not removing %s: file has been marked for add'
189 result = warn(added, _('not removing %s: file has been marked for add'
190 ' (use forget to undo)\n')) or result
190 ' (use forget to undo)\n')) or result
191
191
192 for f in sorted(remove):
192 for f in sorted(remove):
193 if ui.verbose or not m.exact(f):
193 if ui.verbose or not m.exact(f):
194 ui.status(_('removing %s\n') % m.rel(f))
194 ui.status(_('removing %s\n') % m.rel(f))
195
195
196 # Need to lock because standin files are deleted then removed from the
196 # Need to lock because standin files are deleted then removed from the
197 # repository and we could race in-between.
197 # repository and we could race in-between.
198 wlock = repo.wlock()
198 wlock = repo.wlock()
199 try:
199 try:
200 lfdirstate = lfutil.openlfdirstate(ui, repo)
200 lfdirstate = lfutil.openlfdirstate(ui, repo)
201 for f in remove:
201 for f in remove:
202 if not after:
202 if not after:
203 # If this is being called by addremove, notify the user that we
203 # If this is being called by addremove, notify the user that we
204 # are removing the file.
204 # are removing the file.
205 if isaddremove:
205 if isaddremove:
206 ui.status(_('removing %s\n') % f)
206 ui.status(_('removing %s\n') % f)
207
207
208 if not opts.get('dry_run'):
208 if not opts.get('dry_run'):
209 if not after:
209 if not after:
210 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
210 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
211 lfdirstate.remove(f)
211 lfdirstate.remove(f)
212
212
213 if opts.get('dry_run'):
213 if opts.get('dry_run'):
214 return result
214 return result
215
215
216 lfdirstate.write()
216 lfdirstate.write()
217 remove = [lfutil.standin(f) for f in remove]
217 remove = [lfutil.standin(f) for f in remove]
218 # If this is being called by addremove, let the original addremove
218 # If this is being called by addremove, let the original addremove
219 # function handle this.
219 # function handle this.
220 if not isaddremove:
220 if not isaddremove:
221 for f in remove:
221 for f in remove:
222 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
222 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
223 repo[None].forget(remove)
223 repo[None].forget(remove)
224 finally:
224 finally:
225 wlock.release()
225 wlock.release()
226
226
227 return result
227 return result
228
228
229 # For overriding mercurial.hgweb.webcommands so that largefiles will
229 # For overriding mercurial.hgweb.webcommands so that largefiles will
230 # appear at their right place in the manifests.
230 # appear at their right place in the manifests.
231 def decodepath(orig, path):
231 def decodepath(orig, path):
232 return lfutil.splitstandin(path) or path
232 return lfutil.splitstandin(path) or path
233
233
234 # -- Wrappers: modify existing commands --------------------------------
234 # -- Wrappers: modify existing commands --------------------------------
235
235
236 # Add works by going through the files that the user wanted to add and
236 # Add works by going through the files that the user wanted to add and
237 # checking if they should be added as largefiles. Then it makes a new
237 # checking if they should be added as largefiles. Then it makes a new
238 # matcher which matches only the normal files and runs the original
238 # matcher which matches only the normal files and runs the original
239 # version of add.
239 # version of add.
240 def overrideadd(orig, ui, repo, *pats, **opts):
240 def overrideadd(orig, ui, repo, *pats, **opts):
241 normal = opts.pop('normal')
241 normal = opts.pop('normal')
242 if normal:
242 if normal:
243 if opts.get('large'):
243 if opts.get('large'):
244 raise util.Abort(_('--normal cannot be used with --large'))
244 raise util.Abort(_('--normal cannot be used with --large'))
245 return orig(ui, repo, *pats, **opts)
245 return orig(ui, repo, *pats, **opts)
246 matcher = scmutil.match(repo[None], pats, opts)
246 matcher = scmutil.match(repo[None], pats, opts)
247 bad = addlargefiles(ui, repo, matcher, **opts)
247 bad = addlargefiles(ui, repo, matcher, **opts)
248 installnormalfilesmatchfn(repo[None].manifest())
248 installnormalfilesmatchfn(repo[None].manifest())
249 result = orig(ui, repo, *pats, **opts)
249 result = orig(ui, repo, *pats, **opts)
250 restorematchfn()
250 restorematchfn()
251
251
252 return (result == 1 or bad) and 1 or 0
252 return (result == 1 or bad) and 1 or 0
253
253
254 def overrideremove(orig, ui, repo, *pats, **opts):
254 def overrideremove(orig, ui, repo, *pats, **opts):
255 installnormalfilesmatchfn(repo[None].manifest())
255 installnormalfilesmatchfn(repo[None].manifest())
256 result = orig(ui, repo, *pats, **opts)
256 result = orig(ui, repo, *pats, **opts)
257 restorematchfn()
257 restorematchfn()
258 return removelargefiles(ui, repo, False, *pats, **opts) or result
258 return removelargefiles(ui, repo, False, *pats, **opts) or result
259
259
260 def overridestatusfn(orig, repo, rev2, **opts):
260 def overridestatusfn(orig, repo, rev2, **opts):
261 try:
261 try:
262 repo._repo.lfstatus = True
262 repo._repo.lfstatus = True
263 return orig(repo, rev2, **opts)
263 return orig(repo, rev2, **opts)
264 finally:
264 finally:
265 repo._repo.lfstatus = False
265 repo._repo.lfstatus = False
266
266
267 def overridestatus(orig, ui, repo, *pats, **opts):
267 def overridestatus(orig, ui, repo, *pats, **opts):
268 try:
268 try:
269 repo.lfstatus = True
269 repo.lfstatus = True
270 return orig(ui, repo, *pats, **opts)
270 return orig(ui, repo, *pats, **opts)
271 finally:
271 finally:
272 repo.lfstatus = False
272 repo.lfstatus = False
273
273
274 def overridedirty(orig, repo, ignoreupdate=False):
274 def overridedirty(orig, repo, ignoreupdate=False):
275 try:
275 try:
276 repo._repo.lfstatus = True
276 repo._repo.lfstatus = True
277 return orig(repo, ignoreupdate)
277 return orig(repo, ignoreupdate)
278 finally:
278 finally:
279 repo._repo.lfstatus = False
279 repo._repo.lfstatus = False
280
280
281 def overridelog(orig, ui, repo, *pats, **opts):
281 def overridelog(orig, ui, repo, *pats, **opts):
282 def overridematchandpats(ctx, pats=[], opts={}, globbed=False,
282 def overridematchandpats(ctx, pats=[], opts={}, globbed=False,
283 default='relpath'):
283 default='relpath'):
284 """Matcher that merges root directory with .hglf, suitable for log.
284 """Matcher that merges root directory with .hglf, suitable for log.
285 It is still possible to match .hglf directly.
285 It is still possible to match .hglf directly.
286 For any listed files run log on the standin too.
286 For any listed files run log on the standin too.
287 matchfn tries both the given filename and with .hglf stripped.
287 matchfn tries both the given filename and with .hglf stripped.
288 """
288 """
289 matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default)
289 matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default)
290 m, p = copy.copy(matchandpats)
290 m, p = copy.copy(matchandpats)
291
291
292 if m.always():
292 if m.always():
293 # We want to match everything anyway, so there's no benefit trying
293 # We want to match everything anyway, so there's no benefit trying
294 # to add standins.
294 # to add standins.
295 return matchandpats
295 return matchandpats
296
296
297 pats = set(p)
297 pats = set(p)
298 # TODO: handling of patterns in both cases below
298 # TODO: handling of patterns in both cases below
299 if m._cwd:
299 if m._cwd:
300 if os.path.isabs(m._cwd):
300 if os.path.isabs(m._cwd):
301 # TODO: handle largefile magic when invoked from other cwd
301 # TODO: handle largefile magic when invoked from other cwd
302 return matchandpats
302 return matchandpats
303 back = (m._cwd.count('/') + 1) * '../'
303 back = (m._cwd.count('/') + 1) * '../'
304 pats.update(back + lfutil.standin(m._cwd + '/' + f) for f in p)
304 pats.update(back + lfutil.standin(m._cwd + '/' + f) for f in p)
305 else:
305 else:
306 pats.update(lfutil.standin(f) for f in p)
306 pats.update(lfutil.standin(f) for f in p)
307
307
308 for i in range(0, len(m._files)):
308 for i in range(0, len(m._files)):
309 standin = lfutil.standin(m._files[i])
309 standin = lfutil.standin(m._files[i])
310 if standin in repo[ctx.node()]:
310 if standin in repo[ctx.node()]:
311 m._files[i] = standin
311 m._files[i] = standin
312 elif m._files[i] not in repo[ctx.node()]:
312 elif m._files[i] not in repo[ctx.node()]:
313 m._files.append(standin)
313 m._files.append(standin)
314 pats.add(standin)
314 pats.add(standin)
315
315
316 m._fmap = set(m._files)
316 m._fmap = set(m._files)
317 m._always = False
317 m._always = False
318 origmatchfn = m.matchfn
318 origmatchfn = m.matchfn
319 def lfmatchfn(f):
319 def lfmatchfn(f):
320 lf = lfutil.splitstandin(f)
320 lf = lfutil.splitstandin(f)
321 if lf is not None and origmatchfn(lf):
321 if lf is not None and origmatchfn(lf):
322 return True
322 return True
323 r = origmatchfn(f)
323 r = origmatchfn(f)
324 return r
324 return r
325 m.matchfn = lfmatchfn
325 m.matchfn = lfmatchfn
326
326
327 return m, pats
327 return m, pats
328
328
329 # For hg log --patch, the match object is used in two different senses:
329 # For hg log --patch, the match object is used in two different senses:
330 # (1) to determine what revisions should be printed out, and
330 # (1) to determine what revisions should be printed out, and
331 # (2) to determine what files to print out diffs for.
331 # (2) to determine what files to print out diffs for.
332 # The magic matchandpats override should be used for case (1) but not for
332 # The magic matchandpats override should be used for case (1) but not for
333 # case (2).
333 # case (2).
334 def overridemakelogfilematcher(repo, pats, opts):
334 def overridemakelogfilematcher(repo, pats, opts):
335 pctx = repo[None]
335 pctx = repo[None]
336 match, pats = oldmatchandpats(pctx, pats, opts)
336 match, pats = oldmatchandpats(pctx, pats, opts)
337 return lambda rev: match
337 return lambda rev: match
338
338
339 oldmatchandpats = installmatchandpatsfn(overridematchandpats)
339 oldmatchandpats = installmatchandpatsfn(overridematchandpats)
340 oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
340 oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
341 setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)
341 setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)
342
342
343 try:
343 try:
344 return orig(ui, repo, *pats, **opts)
344 return orig(ui, repo, *pats, **opts)
345 finally:
345 finally:
346 restorematchandpatsfn()
346 restorematchandpatsfn()
347 setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
347 setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
348
348
349 def overrideverify(orig, ui, repo, *pats, **opts):
349 def overrideverify(orig, ui, repo, *pats, **opts):
350 large = opts.pop('large', False)
350 large = opts.pop('large', False)
351 all = opts.pop('lfa', False)
351 all = opts.pop('lfa', False)
352 contents = opts.pop('lfc', False)
352 contents = opts.pop('lfc', False)
353
353
354 result = orig(ui, repo, *pats, **opts)
354 result = orig(ui, repo, *pats, **opts)
355 if large or all or contents:
355 if large or all or contents:
356 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
356 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
357 return result
357 return result
358
358
359 def overridedebugstate(orig, ui, repo, *pats, **opts):
359 def overridedebugstate(orig, ui, repo, *pats, **opts):
360 large = opts.pop('large', False)
360 large = opts.pop('large', False)
361 if large:
361 if large:
362 class fakerepo(object):
362 class fakerepo(object):
363 dirstate = lfutil.openlfdirstate(ui, repo)
363 dirstate = lfutil.openlfdirstate(ui, repo)
364 orig(ui, fakerepo, *pats, **opts)
364 orig(ui, fakerepo, *pats, **opts)
365 else:
365 else:
366 orig(ui, repo, *pats, **opts)
366 orig(ui, repo, *pats, **opts)
367
367
368 # Override needs to refresh standins so that update's normal merge
368 # Override needs to refresh standins so that update's normal merge
369 # will go through properly. Then the other update hook (overriding repo.update)
369 # will go through properly. Then the other update hook (overriding repo.update)
370 # will get the new files. Filemerge is also overridden so that the merge
370 # will get the new files. Filemerge is also overridden so that the merge
371 # will merge standins correctly.
371 # will merge standins correctly.
372 def overrideupdate(orig, ui, repo, *pats, **opts):
372 def overrideupdate(orig, ui, repo, *pats, **opts):
373 # Need to lock between the standins getting updated and their
373 # Need to lock between the standins getting updated and their
374 # largefiles getting updated
374 # largefiles getting updated
375 wlock = repo.wlock()
375 wlock = repo.wlock()
376 try:
376 try:
377 if opts['check']:
377 if opts['check']:
378 lfdirstate = lfutil.openlfdirstate(ui, repo)
378 lfdirstate = lfutil.openlfdirstate(ui, repo)
379 unsure, s = lfdirstate.status(
379 unsure, s = lfdirstate.status(
380 match_.always(repo.root, repo.getcwd()),
380 match_.always(repo.root, repo.getcwd()),
381 [], False, False, False)
381 [], False, False, False)
382
382
383 mod = len(s.modified) > 0
383 mod = len(s.modified) > 0
384 for lfile in unsure:
384 for lfile in unsure:
385 standin = lfutil.standin(lfile)
385 standin = lfutil.standin(lfile)
386 if repo['.'][standin].data().strip() != \
386 if repo['.'][standin].data().strip() != \
387 lfutil.hashfile(repo.wjoin(lfile)):
387 lfutil.hashfile(repo.wjoin(lfile)):
388 mod = True
388 mod = True
389 else:
389 else:
390 lfdirstate.normal(lfile)
390 lfdirstate.normal(lfile)
391 lfdirstate.write()
391 lfdirstate.write()
392 if mod:
392 if mod:
393 raise util.Abort(_('uncommitted changes'))
393 raise util.Abort(_('uncommitted changes'))
394 return orig(ui, repo, *pats, **opts)
394 return orig(ui, repo, *pats, **opts)
395 finally:
395 finally:
396 wlock.release()
396 wlock.release()
397
397
398 # Before starting the manifest merge, merge.updates will call
398 # Before starting the manifest merge, merge.updates will call
399 # _checkunknownfile to check if there are any files in the merged-in
399 # _checkunknownfile to check if there are any files in the merged-in
400 # changeset that collide with unknown files in the working copy.
400 # changeset that collide with unknown files in the working copy.
401 #
401 #
402 # The largefiles are seen as unknown, so this prevents us from merging
402 # The largefiles are seen as unknown, so this prevents us from merging
403 # in a file 'foo' if we already have a largefile with the same name.
403 # in a file 'foo' if we already have a largefile with the same name.
404 #
404 #
405 # The overridden function filters the unknown files by removing any
405 # The overridden function filters the unknown files by removing any
406 # largefiles. This makes the merge proceed and we can then handle this
406 # largefiles. This makes the merge proceed and we can then handle this
407 # case further in the overridden calculateupdates function below.
407 # case further in the overridden calculateupdates function below.
408 def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
408 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
409 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
409 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
410 return False
410 return False
411 return origfn(repo, wctx, mctx, f)
411 return origfn(repo, wctx, mctx, f, f2)
412
412
413 # The manifest merge handles conflicts on the manifest level. We want
413 # The manifest merge handles conflicts on the manifest level. We want
414 # to handle changes in largefile-ness of files at this level too.
414 # to handle changes in largefile-ness of files at this level too.
415 #
415 #
416 # The strategy is to run the original calculateupdates and then process
416 # The strategy is to run the original calculateupdates and then process
417 # the action list it outputs. There are two cases we need to deal with:
417 # the action list it outputs. There are two cases we need to deal with:
418 #
418 #
419 # 1. Normal file in p1, largefile in p2. Here the largefile is
419 # 1. Normal file in p1, largefile in p2. Here the largefile is
420 # detected via its standin file, which will enter the working copy
420 # detected via its standin file, which will enter the working copy
421 # with a "get" action. It is not "merge" since the standin is all
421 # with a "get" action. It is not "merge" since the standin is all
422 # Mercurial is concerned with at this level -- the link to the
422 # Mercurial is concerned with at this level -- the link to the
423 # existing normal file is not relevant here.
423 # existing normal file is not relevant here.
424 #
424 #
425 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
425 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
426 # since the largefile will be present in the working copy and
426 # since the largefile will be present in the working copy and
427 # different from the normal file in p2. Mercurial therefore
427 # different from the normal file in p2. Mercurial therefore
428 # triggers a merge action.
428 # triggers a merge action.
429 #
429 #
430 # In both cases, we prompt the user and emit new actions to either
430 # In both cases, we prompt the user and emit new actions to either
431 # remove the standin (if the normal file was kept) or to remove the
431 # remove the standin (if the normal file was kept) or to remove the
432 # normal file and get the standin (if the largefile was kept). The
432 # normal file and get the standin (if the largefile was kept). The
433 # default prompt answer is to use the largefile version since it was
433 # default prompt answer is to use the largefile version since it was
434 # presumably changed on purpose.
434 # presumably changed on purpose.
435 #
435 #
436 # Finally, the merge.applyupdates function will then take care of
436 # Finally, the merge.applyupdates function will then take care of
437 # writing the files into the working copy and lfcommands.updatelfiles
437 # writing the files into the working copy and lfcommands.updatelfiles
438 # will update the largefiles.
438 # will update the largefiles.
439 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
439 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
440 partial, acceptremote, followcopies):
440 partial, acceptremote, followcopies):
441 overwrite = force and not branchmerge
441 overwrite = force and not branchmerge
442 actions, diverge, renamedelete = origfn(
442 actions, diverge, renamedelete = origfn(
443 repo, p1, p2, pas, branchmerge, force, partial, acceptremote,
443 repo, p1, p2, pas, branchmerge, force, partial, acceptremote,
444 followcopies)
444 followcopies)
445
445
446 if overwrite:
446 if overwrite:
447 return actions, diverge, renamedelete
447 return actions, diverge, renamedelete
448
448
449 # Convert to dictionary with filename as key and action as value.
449 # Convert to dictionary with filename as key and action as value.
450 lfiles = set()
450 lfiles = set()
451 for f in actions:
451 for f in actions:
452 splitstandin = f and lfutil.splitstandin(f)
452 splitstandin = f and lfutil.splitstandin(f)
453 if splitstandin in p1:
453 if splitstandin in p1:
454 lfiles.add(splitstandin)
454 lfiles.add(splitstandin)
455 elif lfutil.standin(f) in p1:
455 elif lfutil.standin(f) in p1:
456 lfiles.add(f)
456 lfiles.add(f)
457
457
458 for lfile in lfiles:
458 for lfile in lfiles:
459 standin = lfutil.standin(lfile)
459 standin = lfutil.standin(lfile)
460 (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
460 (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
461 (sm, sargs, smsg) = actions.get(standin, (None, None, None))
461 (sm, sargs, smsg) = actions.get(standin, (None, None, None))
462 if sm in ('g', 'dc') and lm != 'r':
462 if sm in ('g', 'dc') and lm != 'r':
463 # Case 1: normal file in the working copy, largefile in
463 # Case 1: normal file in the working copy, largefile in
464 # the second parent
464 # the second parent
465 usermsg = _('remote turned local normal file %s into a largefile\n'
465 usermsg = _('remote turned local normal file %s into a largefile\n'
466 'use (l)argefile or keep (n)ormal file?'
466 'use (l)argefile or keep (n)ormal file?'
467 '$$ &Largefile $$ &Normal file') % lfile
467 '$$ &Largefile $$ &Normal file') % lfile
468 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
468 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
469 actions[lfile] = ('r', None, 'replaced by standin')
469 actions[lfile] = ('r', None, 'replaced by standin')
470 actions[standin] = ('g', sargs, 'replaces standin')
470 actions[standin] = ('g', sargs, 'replaces standin')
471 else: # keep local normal file
471 else: # keep local normal file
472 actions[lfile] = ('k', None, 'replaces standin')
472 actions[lfile] = ('k', None, 'replaces standin')
473 if branchmerge:
473 if branchmerge:
474 actions[standin] = ('k', None, 'replaced by non-standin')
474 actions[standin] = ('k', None, 'replaced by non-standin')
475 else:
475 else:
476 actions[standin] = ('r', None, 'replaced by non-standin')
476 actions[standin] = ('r', None, 'replaced by non-standin')
477 elif lm in ('g', 'dc') and sm != 'r':
477 elif lm in ('g', 'dc') and sm != 'r':
478 # Case 2: largefile in the working copy, normal file in
478 # Case 2: largefile in the working copy, normal file in
479 # the second parent
479 # the second parent
480 usermsg = _('remote turned local largefile %s into a normal file\n'
480 usermsg = _('remote turned local largefile %s into a normal file\n'
481 'keep (l)argefile or use (n)ormal file?'
481 'keep (l)argefile or use (n)ormal file?'
482 '$$ &Largefile $$ &Normal file') % lfile
482 '$$ &Largefile $$ &Normal file') % lfile
483 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
483 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
484 if branchmerge:
484 if branchmerge:
485 # largefile can be restored from standin safely
485 # largefile can be restored from standin safely
486 actions[lfile] = ('k', None, 'replaced by standin')
486 actions[lfile] = ('k', None, 'replaced by standin')
487 actions[standin] = ('k', None, 'replaces standin')
487 actions[standin] = ('k', None, 'replaces standin')
488 else:
488 else:
489 # "lfile" should be marked as "removed" without
489 # "lfile" should be marked as "removed" without
490 # removal of itself
490 # removal of itself
491 actions[lfile] = ('lfmr', None,
491 actions[lfile] = ('lfmr', None,
492 'forget non-standin largefile')
492 'forget non-standin largefile')
493
493
494 # linear-merge should treat this largefile as 're-added'
494 # linear-merge should treat this largefile as 're-added'
495 actions[standin] = ('a', None, 'keep standin')
495 actions[standin] = ('a', None, 'keep standin')
496 else: # pick remote normal file
496 else: # pick remote normal file
497 actions[lfile] = ('g', largs, 'replaces standin')
497 actions[lfile] = ('g', largs, 'replaces standin')
498 actions[standin] = ('r', None, 'replaced by non-standin')
498 actions[standin] = ('r', None, 'replaced by non-standin')
499
499
500 return actions, diverge, renamedelete
500 return actions, diverge, renamedelete
501
501
502 def mergerecordupdates(orig, repo, actions, branchmerge):
502 def mergerecordupdates(orig, repo, actions, branchmerge):
503 if 'lfmr' in actions:
503 if 'lfmr' in actions:
504 # this should be executed before 'orig', to execute 'remove'
504 # this should be executed before 'orig', to execute 'remove'
505 # before all other actions
505 # before all other actions
506 for lfile, args, msg in actions['lfmr']:
506 for lfile, args, msg in actions['lfmr']:
507 repo.dirstate.remove(lfile)
507 repo.dirstate.remove(lfile)
508
508
509 return orig(repo, actions, branchmerge)
509 return orig(repo, actions, branchmerge)
510
510
511
511
512 # Override filemerge to prompt the user about how they wish to merge
512 # Override filemerge to prompt the user about how they wish to merge
513 # largefiles. This will handle identical edits without prompting the user.
513 # largefiles. This will handle identical edits without prompting the user.
514 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca, labels=None):
514 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca, labels=None):
515 if not lfutil.isstandin(orig):
515 if not lfutil.isstandin(orig):
516 return origfn(repo, mynode, orig, fcd, fco, fca, labels=labels)
516 return origfn(repo, mynode, orig, fcd, fco, fca, labels=labels)
517
517
518 ahash = fca.data().strip().lower()
518 ahash = fca.data().strip().lower()
519 dhash = fcd.data().strip().lower()
519 dhash = fcd.data().strip().lower()
520 ohash = fco.data().strip().lower()
520 ohash = fco.data().strip().lower()
521 if (ohash != ahash and
521 if (ohash != ahash and
522 ohash != dhash and
522 ohash != dhash and
523 (dhash == ahash or
523 (dhash == ahash or
524 repo.ui.promptchoice(
524 repo.ui.promptchoice(
525 _('largefile %s has a merge conflict\nancestor was %s\n'
525 _('largefile %s has a merge conflict\nancestor was %s\n'
526 'keep (l)ocal %s or\ntake (o)ther %s?'
526 'keep (l)ocal %s or\ntake (o)ther %s?'
527 '$$ &Local $$ &Other') %
527 '$$ &Local $$ &Other') %
528 (lfutil.splitstandin(orig), ahash, dhash, ohash),
528 (lfutil.splitstandin(orig), ahash, dhash, ohash),
529 0) == 1)):
529 0) == 1)):
530 repo.wwrite(fcd.path(), fco.data(), fco.flags())
530 repo.wwrite(fcd.path(), fco.data(), fco.flags())
531 return 0
531 return 0
532
532
533 # Copy first changes the matchers to match standins instead of
533 # Copy first changes the matchers to match standins instead of
534 # largefiles. Then it overrides util.copyfile in that function it
534 # largefiles. Then it overrides util.copyfile in that function it
535 # checks if the destination largefile already exists. It also keeps a
535 # checks if the destination largefile already exists. It also keeps a
536 # list of copied files so that the largefiles can be copied and the
536 # list of copied files so that the largefiles can be copied and the
537 # dirstate updated.
537 # dirstate updated.
538 def overridecopy(orig, ui, repo, pats, opts, rename=False):
538 def overridecopy(orig, ui, repo, pats, opts, rename=False):
539 # doesn't remove largefile on rename
539 # doesn't remove largefile on rename
540 if len(pats) < 2:
540 if len(pats) < 2:
541 # this isn't legal, let the original function deal with it
541 # this isn't legal, let the original function deal with it
542 return orig(ui, repo, pats, opts, rename)
542 return orig(ui, repo, pats, opts, rename)
543
543
544 def makestandin(relpath):
544 def makestandin(relpath):
545 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
545 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
546 return os.path.join(repo.wjoin(lfutil.standin(path)))
546 return os.path.join(repo.wjoin(lfutil.standin(path)))
547
547
548 fullpats = scmutil.expandpats(pats)
548 fullpats = scmutil.expandpats(pats)
549 dest = fullpats[-1]
549 dest = fullpats[-1]
550
550
551 if os.path.isdir(dest):
551 if os.path.isdir(dest):
552 if not os.path.isdir(makestandin(dest)):
552 if not os.path.isdir(makestandin(dest)):
553 os.makedirs(makestandin(dest))
553 os.makedirs(makestandin(dest))
554 # This could copy both lfiles and normal files in one command,
554 # This could copy both lfiles and normal files in one command,
555 # but we don't want to do that. First replace their matcher to
555 # but we don't want to do that. First replace their matcher to
556 # only match normal files and run it, then replace it to just
556 # only match normal files and run it, then replace it to just
557 # match largefiles and run it again.
557 # match largefiles and run it again.
558 nonormalfiles = False
558 nonormalfiles = False
559 nolfiles = False
559 nolfiles = False
560 installnormalfilesmatchfn(repo[None].manifest())
560 installnormalfilesmatchfn(repo[None].manifest())
561 try:
561 try:
562 try:
562 try:
563 result = orig(ui, repo, pats, opts, rename)
563 result = orig(ui, repo, pats, opts, rename)
564 except util.Abort, e:
564 except util.Abort, e:
565 if str(e) != _('no files to copy'):
565 if str(e) != _('no files to copy'):
566 raise e
566 raise e
567 else:
567 else:
568 nonormalfiles = True
568 nonormalfiles = True
569 result = 0
569 result = 0
570 finally:
570 finally:
571 restorematchfn()
571 restorematchfn()
572
572
573 # The first rename can cause our current working directory to be removed.
573 # The first rename can cause our current working directory to be removed.
574 # In that case there is nothing left to copy/rename so just quit.
574 # In that case there is nothing left to copy/rename so just quit.
575 try:
575 try:
576 repo.getcwd()
576 repo.getcwd()
577 except OSError:
577 except OSError:
578 return result
578 return result
579
579
580 try:
580 try:
581 try:
581 try:
582 # When we call orig below it creates the standins but we don't add
582 # When we call orig below it creates the standins but we don't add
583 # them to the dir state until later so lock during that time.
583 # them to the dir state until later so lock during that time.
584 wlock = repo.wlock()
584 wlock = repo.wlock()
585
585
586 manifest = repo[None].manifest()
586 manifest = repo[None].manifest()
587 def overridematch(ctx, pats=[], opts={}, globbed=False,
587 def overridematch(ctx, pats=[], opts={}, globbed=False,
588 default='relpath'):
588 default='relpath'):
589 newpats = []
589 newpats = []
590 # The patterns were previously mangled to add the standin
590 # The patterns were previously mangled to add the standin
591 # directory; we need to remove that now
591 # directory; we need to remove that now
592 for pat in pats:
592 for pat in pats:
593 if match_.patkind(pat) is None and lfutil.shortname in pat:
593 if match_.patkind(pat) is None and lfutil.shortname in pat:
594 newpats.append(pat.replace(lfutil.shortname, ''))
594 newpats.append(pat.replace(lfutil.shortname, ''))
595 else:
595 else:
596 newpats.append(pat)
596 newpats.append(pat)
597 match = oldmatch(ctx, newpats, opts, globbed, default)
597 match = oldmatch(ctx, newpats, opts, globbed, default)
598 m = copy.copy(match)
598 m = copy.copy(match)
599 lfile = lambda f: lfutil.standin(f) in manifest
599 lfile = lambda f: lfutil.standin(f) in manifest
600 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
600 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
601 m._fmap = set(m._files)
601 m._fmap = set(m._files)
602 origmatchfn = m.matchfn
602 origmatchfn = m.matchfn
603 m.matchfn = lambda f: (lfutil.isstandin(f) and
603 m.matchfn = lambda f: (lfutil.isstandin(f) and
604 (f in manifest) and
604 (f in manifest) and
605 origmatchfn(lfutil.splitstandin(f)) or
605 origmatchfn(lfutil.splitstandin(f)) or
606 None)
606 None)
607 return m
607 return m
608 oldmatch = installmatchfn(overridematch)
608 oldmatch = installmatchfn(overridematch)
609 listpats = []
609 listpats = []
610 for pat in pats:
610 for pat in pats:
611 if match_.patkind(pat) is not None:
611 if match_.patkind(pat) is not None:
612 listpats.append(pat)
612 listpats.append(pat)
613 else:
613 else:
614 listpats.append(makestandin(pat))
614 listpats.append(makestandin(pat))
615
615
616 try:
616 try:
617 origcopyfile = util.copyfile
617 origcopyfile = util.copyfile
618 copiedfiles = []
618 copiedfiles = []
619 def overridecopyfile(src, dest):
619 def overridecopyfile(src, dest):
620 if (lfutil.shortname in src and
620 if (lfutil.shortname in src and
621 dest.startswith(repo.wjoin(lfutil.shortname))):
621 dest.startswith(repo.wjoin(lfutil.shortname))):
622 destlfile = dest.replace(lfutil.shortname, '')
622 destlfile = dest.replace(lfutil.shortname, '')
623 if not opts['force'] and os.path.exists(destlfile):
623 if not opts['force'] and os.path.exists(destlfile):
624 raise IOError('',
624 raise IOError('',
625 _('destination largefile already exists'))
625 _('destination largefile already exists'))
626 copiedfiles.append((src, dest))
626 copiedfiles.append((src, dest))
627 origcopyfile(src, dest)
627 origcopyfile(src, dest)
628
628
629 util.copyfile = overridecopyfile
629 util.copyfile = overridecopyfile
630 result += orig(ui, repo, listpats, opts, rename)
630 result += orig(ui, repo, listpats, opts, rename)
631 finally:
631 finally:
632 util.copyfile = origcopyfile
632 util.copyfile = origcopyfile
633
633
634 lfdirstate = lfutil.openlfdirstate(ui, repo)
634 lfdirstate = lfutil.openlfdirstate(ui, repo)
635 for (src, dest) in copiedfiles:
635 for (src, dest) in copiedfiles:
636 if (lfutil.shortname in src and
636 if (lfutil.shortname in src and
637 dest.startswith(repo.wjoin(lfutil.shortname))):
637 dest.startswith(repo.wjoin(lfutil.shortname))):
638 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
638 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
639 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
639 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
640 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
640 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
641 if not os.path.isdir(destlfiledir):
641 if not os.path.isdir(destlfiledir):
642 os.makedirs(destlfiledir)
642 os.makedirs(destlfiledir)
643 if rename:
643 if rename:
644 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
644 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
645
645
646 # The file is gone, but this deletes any empty parent
646 # The file is gone, but this deletes any empty parent
647 # directories as a side-effect.
647 # directories as a side-effect.
648 util.unlinkpath(repo.wjoin(srclfile), True)
648 util.unlinkpath(repo.wjoin(srclfile), True)
649 lfdirstate.remove(srclfile)
649 lfdirstate.remove(srclfile)
650 else:
650 else:
651 util.copyfile(repo.wjoin(srclfile),
651 util.copyfile(repo.wjoin(srclfile),
652 repo.wjoin(destlfile))
652 repo.wjoin(destlfile))
653
653
654 lfdirstate.add(destlfile)
654 lfdirstate.add(destlfile)
655 lfdirstate.write()
655 lfdirstate.write()
656 except util.Abort, e:
656 except util.Abort, e:
657 if str(e) != _('no files to copy'):
657 if str(e) != _('no files to copy'):
658 raise e
658 raise e
659 else:
659 else:
660 nolfiles = True
660 nolfiles = True
661 finally:
661 finally:
662 restorematchfn()
662 restorematchfn()
663 wlock.release()
663 wlock.release()
664
664
665 if nolfiles and nonormalfiles:
665 if nolfiles and nonormalfiles:
666 raise util.Abort(_('no files to copy'))
666 raise util.Abort(_('no files to copy'))
667
667
668 return result
668 return result
669
669
670 # When the user calls revert, we have to be careful to not revert any
670 # When the user calls revert, we have to be careful to not revert any
671 # changes to other largefiles accidentally. This means we have to keep
671 # changes to other largefiles accidentally. This means we have to keep
672 # track of the largefiles that are being reverted so we only pull down
672 # track of the largefiles that are being reverted so we only pull down
673 # the necessary largefiles.
673 # the necessary largefiles.
674 #
674 #
675 # Standins are only updated (to match the hash of largefiles) before
675 # Standins are only updated (to match the hash of largefiles) before
676 # commits. Update the standins then run the original revert, changing
676 # commits. Update the standins then run the original revert, changing
677 # the matcher to hit standins instead of largefiles. Based on the
677 # the matcher to hit standins instead of largefiles. Based on the
678 # resulting standins update the largefiles.
678 # resulting standins update the largefiles.
679 def overriderevert(orig, ui, repo, *pats, **opts):
679 def overriderevert(orig, ui, repo, *pats, **opts):
680 # Because we put the standins in a bad state (by updating them)
680 # Because we put the standins in a bad state (by updating them)
681 # and then return them to a correct state we need to lock to
681 # and then return them to a correct state we need to lock to
682 # prevent others from changing them in their incorrect state.
682 # prevent others from changing them in their incorrect state.
683 wlock = repo.wlock()
683 wlock = repo.wlock()
684 try:
684 try:
685 lfdirstate = lfutil.openlfdirstate(ui, repo)
685 lfdirstate = lfutil.openlfdirstate(ui, repo)
686 s = lfutil.lfdirstatestatus(lfdirstate, repo)
686 s = lfutil.lfdirstatestatus(lfdirstate, repo)
687 lfdirstate.write()
687 lfdirstate.write()
688 for lfile in s.modified:
688 for lfile in s.modified:
689 lfutil.updatestandin(repo, lfutil.standin(lfile))
689 lfutil.updatestandin(repo, lfutil.standin(lfile))
690 for lfile in s.deleted:
690 for lfile in s.deleted:
691 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
691 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
692 os.unlink(repo.wjoin(lfutil.standin(lfile)))
692 os.unlink(repo.wjoin(lfutil.standin(lfile)))
693
693
694 oldstandins = lfutil.getstandinsstate(repo)
694 oldstandins = lfutil.getstandinsstate(repo)
695
695
696 def overridematch(ctx, pats=[], opts={}, globbed=False,
696 def overridematch(ctx, pats=[], opts={}, globbed=False,
697 default='relpath'):
697 default='relpath'):
698 match = oldmatch(ctx, pats, opts, globbed, default)
698 match = oldmatch(ctx, pats, opts, globbed, default)
699 m = copy.copy(match)
699 m = copy.copy(match)
700 def tostandin(f):
700 def tostandin(f):
701 if lfutil.standin(f) in ctx:
701 if lfutil.standin(f) in ctx:
702 return lfutil.standin(f)
702 return lfutil.standin(f)
703 elif lfutil.standin(f) in repo[None]:
703 elif lfutil.standin(f) in repo[None]:
704 return None
704 return None
705 return f
705 return f
706 m._files = [tostandin(f) for f in m._files]
706 m._files = [tostandin(f) for f in m._files]
707 m._files = [f for f in m._files if f is not None]
707 m._files = [f for f in m._files if f is not None]
708 m._fmap = set(m._files)
708 m._fmap = set(m._files)
709 origmatchfn = m.matchfn
709 origmatchfn = m.matchfn
710 def matchfn(f):
710 def matchfn(f):
711 if lfutil.isstandin(f):
711 if lfutil.isstandin(f):
712 return (origmatchfn(lfutil.splitstandin(f)) and
712 return (origmatchfn(lfutil.splitstandin(f)) and
713 (f in repo[None] or f in ctx))
713 (f in repo[None] or f in ctx))
714 return origmatchfn(f)
714 return origmatchfn(f)
715 m.matchfn = matchfn
715 m.matchfn = matchfn
716 return m
716 return m
717 oldmatch = installmatchfn(overridematch)
717 oldmatch = installmatchfn(overridematch)
718 try:
718 try:
719 orig(ui, repo, *pats, **opts)
719 orig(ui, repo, *pats, **opts)
720 finally:
720 finally:
721 restorematchfn()
721 restorematchfn()
722
722
723 newstandins = lfutil.getstandinsstate(repo)
723 newstandins = lfutil.getstandinsstate(repo)
724 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
724 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
725 # lfdirstate should be 'normallookup'-ed for updated files,
725 # lfdirstate should be 'normallookup'-ed for updated files,
726 # because reverting doesn't touch dirstate for 'normal' files
726 # because reverting doesn't touch dirstate for 'normal' files
727 # when target revision is explicitly specified: in such case,
727 # when target revision is explicitly specified: in such case,
728 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
728 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
729 # of target (standin) file.
729 # of target (standin) file.
730 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
730 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
731 normallookup=True)
731 normallookup=True)
732
732
733 finally:
733 finally:
734 wlock.release()
734 wlock.release()
735
735
736 # after pulling changesets, we need to take some extra care to get
736 # after pulling changesets, we need to take some extra care to get
737 # largefiles updated remotely
737 # largefiles updated remotely
738 def overridepull(orig, ui, repo, source=None, **opts):
738 def overridepull(orig, ui, repo, source=None, **opts):
739 revsprepull = len(repo)
739 revsprepull = len(repo)
740 if not source:
740 if not source:
741 source = 'default'
741 source = 'default'
742 repo.lfpullsource = source
742 repo.lfpullsource = source
743 result = orig(ui, repo, source, **opts)
743 result = orig(ui, repo, source, **opts)
744 revspostpull = len(repo)
744 revspostpull = len(repo)
745 lfrevs = opts.get('lfrev', [])
745 lfrevs = opts.get('lfrev', [])
746 if opts.get('all_largefiles'):
746 if opts.get('all_largefiles'):
747 lfrevs.append('pulled()')
747 lfrevs.append('pulled()')
748 if lfrevs and revspostpull > revsprepull:
748 if lfrevs and revspostpull > revsprepull:
749 numcached = 0
749 numcached = 0
750 repo.firstpulled = revsprepull # for pulled() revset expression
750 repo.firstpulled = revsprepull # for pulled() revset expression
751 try:
751 try:
752 for rev in scmutil.revrange(repo, lfrevs):
752 for rev in scmutil.revrange(repo, lfrevs):
753 ui.note(_('pulling largefiles for revision %s\n') % rev)
753 ui.note(_('pulling largefiles for revision %s\n') % rev)
754 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
754 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
755 numcached += len(cached)
755 numcached += len(cached)
756 finally:
756 finally:
757 del repo.firstpulled
757 del repo.firstpulled
758 ui.status(_("%d largefiles cached\n") % numcached)
758 ui.status(_("%d largefiles cached\n") % numcached)
759 return result
759 return result
760
760
761 def pulledrevsetsymbol(repo, subset, x):
761 def pulledrevsetsymbol(repo, subset, x):
762 """``pulled()``
762 """``pulled()``
763 Changesets that just has been pulled.
763 Changesets that just has been pulled.
764
764
765 Only available with largefiles from pull --lfrev expressions.
765 Only available with largefiles from pull --lfrev expressions.
766
766
767 .. container:: verbose
767 .. container:: verbose
768
768
769 Some examples:
769 Some examples:
770
770
771 - pull largefiles for all new changesets::
771 - pull largefiles for all new changesets::
772
772
773 hg pull -lfrev "pulled()"
773 hg pull -lfrev "pulled()"
774
774
775 - pull largefiles for all new branch heads::
775 - pull largefiles for all new branch heads::
776
776
777 hg pull -lfrev "head(pulled()) and not closed()"
777 hg pull -lfrev "head(pulled()) and not closed()"
778
778
779 """
779 """
780
780
781 try:
781 try:
782 firstpulled = repo.firstpulled
782 firstpulled = repo.firstpulled
783 except AttributeError:
783 except AttributeError:
784 raise util.Abort(_("pulled() only available in --lfrev"))
784 raise util.Abort(_("pulled() only available in --lfrev"))
785 return revset.baseset([r for r in subset if r >= firstpulled])
785 return revset.baseset([r for r in subset if r >= firstpulled])
786
786
787 def overrideclone(orig, ui, source, dest=None, **opts):
787 def overrideclone(orig, ui, source, dest=None, **opts):
788 d = dest
788 d = dest
789 if d is None:
789 if d is None:
790 d = hg.defaultdest(source)
790 d = hg.defaultdest(source)
791 if opts.get('all_largefiles') and not hg.islocal(d):
791 if opts.get('all_largefiles') and not hg.islocal(d):
792 raise util.Abort(_(
792 raise util.Abort(_(
793 '--all-largefiles is incompatible with non-local destination %s') %
793 '--all-largefiles is incompatible with non-local destination %s') %
794 d)
794 d)
795
795
796 return orig(ui, source, dest, **opts)
796 return orig(ui, source, dest, **opts)
797
797
798 def hgclone(orig, ui, opts, *args, **kwargs):
798 def hgclone(orig, ui, opts, *args, **kwargs):
799 result = orig(ui, opts, *args, **kwargs)
799 result = orig(ui, opts, *args, **kwargs)
800
800
801 if result is not None:
801 if result is not None:
802 sourcerepo, destrepo = result
802 sourcerepo, destrepo = result
803 repo = destrepo.local()
803 repo = destrepo.local()
804
804
805 # Caching is implicitly limited to 'rev' option, since the dest repo was
805 # Caching is implicitly limited to 'rev' option, since the dest repo was
806 # truncated at that point. The user may expect a download count with
806 # truncated at that point. The user may expect a download count with
807 # this option, so attempt whether or not this is a largefile repo.
807 # this option, so attempt whether or not this is a largefile repo.
808 if opts.get('all_largefiles'):
808 if opts.get('all_largefiles'):
809 success, missing = lfcommands.downloadlfiles(ui, repo, None)
809 success, missing = lfcommands.downloadlfiles(ui, repo, None)
810
810
811 if missing != 0:
811 if missing != 0:
812 return None
812 return None
813
813
814 return result
814 return result
815
815
816 def overriderebase(orig, ui, repo, **opts):
816 def overriderebase(orig, ui, repo, **opts):
817 resuming = opts.get('continue')
817 resuming = opts.get('continue')
818 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
818 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
819 repo._lfstatuswriters.append(lambda *msg, **opts: None)
819 repo._lfstatuswriters.append(lambda *msg, **opts: None)
820 try:
820 try:
821 return orig(ui, repo, **opts)
821 return orig(ui, repo, **opts)
822 finally:
822 finally:
823 repo._lfstatuswriters.pop()
823 repo._lfstatuswriters.pop()
824 repo._lfcommithooks.pop()
824 repo._lfcommithooks.pop()
825
825
826 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
826 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
827 prefix=None, mtime=None, subrepos=None):
827 prefix=None, mtime=None, subrepos=None):
828 # No need to lock because we are only reading history and
828 # No need to lock because we are only reading history and
829 # largefile caches, neither of which are modified.
829 # largefile caches, neither of which are modified.
830 lfcommands.cachelfiles(repo.ui, repo, node)
830 lfcommands.cachelfiles(repo.ui, repo, node)
831
831
832 if kind not in archival.archivers:
832 if kind not in archival.archivers:
833 raise util.Abort(_("unknown archive type '%s'") % kind)
833 raise util.Abort(_("unknown archive type '%s'") % kind)
834
834
835 ctx = repo[node]
835 ctx = repo[node]
836
836
837 if kind == 'files':
837 if kind == 'files':
838 if prefix:
838 if prefix:
839 raise util.Abort(
839 raise util.Abort(
840 _('cannot give prefix when archiving to files'))
840 _('cannot give prefix when archiving to files'))
841 else:
841 else:
842 prefix = archival.tidyprefix(dest, kind, prefix)
842 prefix = archival.tidyprefix(dest, kind, prefix)
843
843
844 def write(name, mode, islink, getdata):
844 def write(name, mode, islink, getdata):
845 if matchfn and not matchfn(name):
845 if matchfn and not matchfn(name):
846 return
846 return
847 data = getdata()
847 data = getdata()
848 if decode:
848 if decode:
849 data = repo.wwritedata(name, data)
849 data = repo.wwritedata(name, data)
850 archiver.addfile(prefix + name, mode, islink, data)
850 archiver.addfile(prefix + name, mode, islink, data)
851
851
852 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
852 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
853
853
854 if repo.ui.configbool("ui", "archivemeta", True):
854 if repo.ui.configbool("ui", "archivemeta", True):
855 def metadata():
855 def metadata():
856 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
856 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
857 hex(repo.changelog.node(0)), hex(node), ctx.branch())
857 hex(repo.changelog.node(0)), hex(node), ctx.branch())
858
858
859 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
859 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
860 if repo.tagtype(t) == 'global')
860 if repo.tagtype(t) == 'global')
861 if not tags:
861 if not tags:
862 repo.ui.pushbuffer()
862 repo.ui.pushbuffer()
863 opts = {'template': '{latesttag}\n{latesttagdistance}',
863 opts = {'template': '{latesttag}\n{latesttagdistance}',
864 'style': '', 'patch': None, 'git': None}
864 'style': '', 'patch': None, 'git': None}
865 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
865 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
866 ltags, dist = repo.ui.popbuffer().split('\n')
866 ltags, dist = repo.ui.popbuffer().split('\n')
867 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
867 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
868 tags += 'latesttagdistance: %s\n' % dist
868 tags += 'latesttagdistance: %s\n' % dist
869
869
870 return base + tags
870 return base + tags
871
871
872 write('.hg_archival.txt', 0644, False, metadata)
872 write('.hg_archival.txt', 0644, False, metadata)
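# Illustrative sketch (editorial addition, not in the original source): for a
# hypothetical archive of a tagged revision, the metadata() callable above
# yields '.hg_archival.txt' content roughly like:
#
#   repo: <hex node of revision 0>
#   node: <hex node of the archived revision>
#   branch: default
#   tag: 1.0
#
# For an untagged revision, 'latesttag:' and 'latesttagdistance:' lines are
# emitted instead, built from the {latesttag} template rendered above.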
873
873
874 for f in ctx:
874 for f in ctx:
875 ff = ctx.flags(f)
875 ff = ctx.flags(f)
876 getdata = ctx[f].data
876 getdata = ctx[f].data
877 if lfutil.isstandin(f):
877 if lfutil.isstandin(f):
878 path = lfutil.findfile(repo, getdata().strip())
878 path = lfutil.findfile(repo, getdata().strip())
879 if path is None:
879 if path is None:
880 raise util.Abort(
880 raise util.Abort(
881 _('largefile %s not found in repo store or system cache')
881 _('largefile %s not found in repo store or system cache')
882 % lfutil.splitstandin(f))
882 % lfutil.splitstandin(f))
883 f = lfutil.splitstandin(f)
883 f = lfutil.splitstandin(f)
884
884
885 def getdatafn():
885 def getdatafn():
886 fd = None
886 fd = None
887 try:
887 try:
888 fd = open(path, 'rb')
888 fd = open(path, 'rb')
889 return fd.read()
889 return fd.read()
890 finally:
890 finally:
891 if fd:
891 if fd:
892 fd.close()
892 fd.close()
893
893
894 getdata = getdatafn
894 getdata = getdatafn
895 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
895 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
896
896
897 if subrepos:
897 if subrepos:
898 for subpath in sorted(ctx.substate):
898 for subpath in sorted(ctx.substate):
899 sub = ctx.sub(subpath)
899 sub = ctx.sub(subpath)
900 submatch = match_.narrowmatcher(subpath, matchfn)
900 submatch = match_.narrowmatcher(subpath, matchfn)
901 sub.archive(archiver, prefix, submatch)
901 sub.archive(archiver, prefix, submatch)
902
902
903 archiver.done()
903 archiver.done()
904
904
905 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
905 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
906 repo._get(repo._state + ('hg',))
906 repo._get(repo._state + ('hg',))
907 rev = repo._state[1]
907 rev = repo._state[1]
908 ctx = repo._repo[rev]
908 ctx = repo._repo[rev]
909
909
910 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
910 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
911
911
912 def write(name, mode, islink, getdata):
912 def write(name, mode, islink, getdata):
913 # At this point, the standin has been replaced with the largefile name,
913 # At this point, the standin has been replaced with the largefile name,
914 # so the normal matcher works here without the lfutil variants.
914 # so the normal matcher works here without the lfutil variants.
915 if match and not match(f):
915 if match and not match(f):
916 return
916 return
917 data = getdata()
917 data = getdata()
918
918
919 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
919 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
920
920
921 for f in ctx:
921 for f in ctx:
922 ff = ctx.flags(f)
922 ff = ctx.flags(f)
923 getdata = ctx[f].data
923 getdata = ctx[f].data
924 if lfutil.isstandin(f):
924 if lfutil.isstandin(f):
925 path = lfutil.findfile(repo._repo, getdata().strip())
925 path = lfutil.findfile(repo._repo, getdata().strip())
926 if path is None:
926 if path is None:
927 raise util.Abort(
927 raise util.Abort(
928 _('largefile %s not found in repo store or system cache')
928 _('largefile %s not found in repo store or system cache')
929 % lfutil.splitstandin(f))
929 % lfutil.splitstandin(f))
930 f = lfutil.splitstandin(f)
930 f = lfutil.splitstandin(f)
931
931
932 def getdatafn():
932 def getdatafn():
933 fd = None
933 fd = None
934 try:
934 try:
935 fd = open(os.path.join(prefix, path), 'rb')
935 fd = open(os.path.join(prefix, path), 'rb')
936 return fd.read()
936 return fd.read()
937 finally:
937 finally:
938 if fd:
938 if fd:
939 fd.close()
939 fd.close()
940
940
941 getdata = getdatafn
941 getdata = getdatafn
942
942
943 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
943 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
944
944
945 for subpath in sorted(ctx.substate):
945 for subpath in sorted(ctx.substate):
946 sub = ctx.sub(subpath)
946 sub = ctx.sub(subpath)
947 submatch = match_.narrowmatcher(subpath, match)
947 submatch = match_.narrowmatcher(subpath, match)
948 sub.archive(archiver, os.path.join(prefix, repo._path) + '/', submatch)
948 sub.archive(archiver, os.path.join(prefix, repo._path) + '/', submatch)
949
949
950 # If a largefile is modified, the change is not reflected in its
950 # If a largefile is modified, the change is not reflected in its
951 # standin until a commit. cmdutil.bailifchanged() raises an exception
951 # standin until a commit. cmdutil.bailifchanged() raises an exception
952 # if the repo has uncommitted changes. Wrap it to also check if
952 # if the repo has uncommitted changes. Wrap it to also check if
953 # largefiles were changed. This is used by bisect, backout and fetch.
953 # largefiles were changed. This is used by bisect, backout and fetch.
954 def overridebailifchanged(orig, repo):
954 def overridebailifchanged(orig, repo):
955 orig(repo)
955 orig(repo)
956 repo.lfstatus = True
956 repo.lfstatus = True
957 s = repo.status()
957 s = repo.status()
958 repo.lfstatus = False
958 repo.lfstatus = False
959 if s.modified or s.added or s.removed or s.deleted:
959 if s.modified or s.added or s.removed or s.deleted:
960 raise util.Abort(_('uncommitted changes'))
960 raise util.Abort(_('uncommitted changes'))
961
961
962 def overrideforget(orig, ui, repo, *pats, **opts):
962 def overrideforget(orig, ui, repo, *pats, **opts):
963 installnormalfilesmatchfn(repo[None].manifest())
963 installnormalfilesmatchfn(repo[None].manifest())
964 result = orig(ui, repo, *pats, **opts)
964 result = orig(ui, repo, *pats, **opts)
965 restorematchfn()
965 restorematchfn()
966 m = composelargefilematcher(scmutil.match(repo[None], pats, opts),
966 m = composelargefilematcher(scmutil.match(repo[None], pats, opts),
967 repo[None].manifest())
967 repo[None].manifest())
968
968
969 try:
969 try:
970 repo.lfstatus = True
970 repo.lfstatus = True
971 s = repo.status(match=m, clean=True)
971 s = repo.status(match=m, clean=True)
972 finally:
972 finally:
973 repo.lfstatus = False
973 repo.lfstatus = False
974 forget = sorted(s.modified + s.added + s.deleted + s.clean)
974 forget = sorted(s.modified + s.added + s.deleted + s.clean)
975 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
975 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
976
976
977 for f in forget:
977 for f in forget:
978 if lfutil.standin(f) not in repo.dirstate and not \
978 if lfutil.standin(f) not in repo.dirstate and not \
979 os.path.isdir(m.rel(lfutil.standin(f))):
979 os.path.isdir(m.rel(lfutil.standin(f))):
980 ui.warn(_('not removing %s: file is already untracked\n')
980 ui.warn(_('not removing %s: file is already untracked\n')
981 % m.rel(f))
981 % m.rel(f))
982 result = 1
982 result = 1
983
983
984 for f in forget:
984 for f in forget:
985 if ui.verbose or not m.exact(f):
985 if ui.verbose or not m.exact(f):
986 ui.status(_('removing %s\n') % m.rel(f))
986 ui.status(_('removing %s\n') % m.rel(f))
987
987
988 # Need to lock because standin files are deleted then removed from the
988 # Need to lock because standin files are deleted then removed from the
989 # repository and we could race in-between.
989 # repository and we could race in-between.
990 wlock = repo.wlock()
990 wlock = repo.wlock()
991 try:
991 try:
992 lfdirstate = lfutil.openlfdirstate(ui, repo)
992 lfdirstate = lfutil.openlfdirstate(ui, repo)
993 for f in forget:
993 for f in forget:
994 if lfdirstate[f] == 'a':
994 if lfdirstate[f] == 'a':
995 lfdirstate.drop(f)
995 lfdirstate.drop(f)
996 else:
996 else:
997 lfdirstate.remove(f)
997 lfdirstate.remove(f)
998 lfdirstate.write()
998 lfdirstate.write()
999 standins = [lfutil.standin(f) for f in forget]
999 standins = [lfutil.standin(f) for f in forget]
1000 for f in standins:
1000 for f in standins:
1001 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
1001 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
1002 repo[None].forget(standins)
1002 repo[None].forget(standins)
1003 finally:
1003 finally:
1004 wlock.release()
1004 wlock.release()
1005
1005
1006 return result
1006 return result
1007
1007
1008 def _getoutgoings(repo, other, missing, addfunc):
1008 def _getoutgoings(repo, other, missing, addfunc):
1009 """get pairs of filename and largefile hash in outgoing revisions
1009 """get pairs of filename and largefile hash in outgoing revisions
1010 in 'missing'.
1010 in 'missing'.
1011
1011
1012 largefiles already existing on 'other' repository are ignored.
1012 largefiles already existing on 'other' repository are ignored.
1013
1013
1014 'addfunc' is invoked with each unique pair of filename and
1014 'addfunc' is invoked with each unique pair of filename and
1015 largefile hash value.
1015 largefile hash value.
1016 """
1016 """
1017 knowns = set()
1017 knowns = set()
1018 lfhashes = set()
1018 lfhashes = set()
1019 def dedup(fn, lfhash):
1019 def dedup(fn, lfhash):
1020 k = (fn, lfhash)
1020 k = (fn, lfhash)
1021 if k not in knowns:
1021 if k not in knowns:
1022 knowns.add(k)
1022 knowns.add(k)
1023 lfhashes.add(lfhash)
1023 lfhashes.add(lfhash)
1024 lfutil.getlfilestoupload(repo, missing, dedup)
1024 lfutil.getlfilestoupload(repo, missing, dedup)
1025 if lfhashes:
1025 if lfhashes:
1026 lfexists = basestore._openstore(repo, other).exists(lfhashes)
1026 lfexists = basestore._openstore(repo, other).exists(lfhashes)
1027 for fn, lfhash in knowns:
1027 for fn, lfhash in knowns:
1028 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1028 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1029 addfunc(fn, lfhash)
1029 addfunc(fn, lfhash)
1030
1030
1031 def outgoinghook(ui, repo, other, opts, missing):
1031 def outgoinghook(ui, repo, other, opts, missing):
1032 if opts.pop('large', None):
1032 if opts.pop('large', None):
1033 lfhashes = set()
1033 lfhashes = set()
1034 if ui.debugflag:
1034 if ui.debugflag:
1035 toupload = {}
1035 toupload = {}
1036 def addfunc(fn, lfhash):
1036 def addfunc(fn, lfhash):
1037 if fn not in toupload:
1037 if fn not in toupload:
1038 toupload[fn] = []
1038 toupload[fn] = []
1039 toupload[fn].append(lfhash)
1039 toupload[fn].append(lfhash)
1040 lfhashes.add(lfhash)
1040 lfhashes.add(lfhash)
1041 def showhashes(fn):
1041 def showhashes(fn):
1042 for lfhash in sorted(toupload[fn]):
1042 for lfhash in sorted(toupload[fn]):
1043 ui.debug(' %s\n' % (lfhash))
1043 ui.debug(' %s\n' % (lfhash))
1044 else:
1044 else:
1045 toupload = set()
1045 toupload = set()
1046 def addfunc(fn, lfhash):
1046 def addfunc(fn, lfhash):
1047 toupload.add(fn)
1047 toupload.add(fn)
1048 lfhashes.add(lfhash)
1048 lfhashes.add(lfhash)
1049 def showhashes(fn):
1049 def showhashes(fn):
1050 pass
1050 pass
1051 _getoutgoings(repo, other, missing, addfunc)
1051 _getoutgoings(repo, other, missing, addfunc)
1052
1052
1053 if not toupload:
1053 if not toupload:
1054 ui.status(_('largefiles: no files to upload\n'))
1054 ui.status(_('largefiles: no files to upload\n'))
1055 else:
1055 else:
1056 ui.status(_('largefiles to upload (%d entities):\n')
1056 ui.status(_('largefiles to upload (%d entities):\n')
1057 % (len(lfhashes)))
1057 % (len(lfhashes)))
1058 for file in sorted(toupload):
1058 for file in sorted(toupload):
1059 ui.status(lfutil.splitstandin(file) + '\n')
1059 ui.status(lfutil.splitstandin(file) + '\n')
1060 showhashes(file)
1060 showhashes(file)
1061 ui.status('\n')
1061 ui.status('\n')
1062
1062
1063 def summaryremotehook(ui, repo, opts, changes):
1063 def summaryremotehook(ui, repo, opts, changes):
1064 largeopt = opts.get('large', False)
1064 largeopt = opts.get('large', False)
1065 if changes is None:
1065 if changes is None:
1066 if largeopt:
1066 if largeopt:
1067 return (False, True) # only outgoing check is needed
1067 return (False, True) # only outgoing check is needed
1068 else:
1068 else:
1069 return (False, False)
1069 return (False, False)
1070 elif largeopt:
1070 elif largeopt:
1071 url, branch, peer, outgoing = changes[1]
1071 url, branch, peer, outgoing = changes[1]
1072 if peer is None:
1072 if peer is None:
1073 # i18n: column positioning for "hg summary"
1073 # i18n: column positioning for "hg summary"
1074 ui.status(_('largefiles: (no remote repo)\n'))
1074 ui.status(_('largefiles: (no remote repo)\n'))
1075 return
1075 return
1076
1076
1077 toupload = set()
1077 toupload = set()
1078 lfhashes = set()
1078 lfhashes = set()
1079 def addfunc(fn, lfhash):
1079 def addfunc(fn, lfhash):
1080 toupload.add(fn)
1080 toupload.add(fn)
1081 lfhashes.add(lfhash)
1081 lfhashes.add(lfhash)
1082 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1082 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1083
1083
1084 if not toupload:
1084 if not toupload:
1085 # i18n: column positioning for "hg summary"
1085 # i18n: column positioning for "hg summary"
1086 ui.status(_('largefiles: (no files to upload)\n'))
1086 ui.status(_('largefiles: (no files to upload)\n'))
1087 else:
1087 else:
1088 # i18n: column positioning for "hg summary"
1088 # i18n: column positioning for "hg summary"
1089 ui.status(_('largefiles: %d entities for %d files to upload\n')
1089 ui.status(_('largefiles: %d entities for %d files to upload\n')
1090 % (len(lfhashes), len(toupload)))
1090 % (len(lfhashes), len(toupload)))
1091
1091
1092 def overridesummary(orig, ui, repo, *pats, **opts):
1092 def overridesummary(orig, ui, repo, *pats, **opts):
1093 try:
1093 try:
1094 repo.lfstatus = True
1094 repo.lfstatus = True
1095 orig(ui, repo, *pats, **opts)
1095 orig(ui, repo, *pats, **opts)
1096 finally:
1096 finally:
1097 repo.lfstatus = False
1097 repo.lfstatus = False
1098
1098
1099 def scmutiladdremove(orig, repo, matcher, prefix, opts={}, dry_run=None,
1099 def scmutiladdremove(orig, repo, matcher, prefix, opts={}, dry_run=None,
1100 similarity=None):
1100 similarity=None):
1101 if not lfutil.islfilesrepo(repo):
1101 if not lfutil.islfilesrepo(repo):
1102 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1102 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1103 # Get the list of missing largefiles so we can remove them
1103 # Get the list of missing largefiles so we can remove them
1104 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1104 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1105 unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
1105 unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
1106 False, False, False)
1106 False, False, False)
1107
1107
1108 # Call into the normal remove code, but leave the removal of the standin to
1108 # Call into the normal remove code, but leave the removal of the standin to
1109 # the original addremove. Monkey patching here makes sure we don't remove
1109 # the original addremove. Monkey patching here makes sure we don't remove
1110 # the standin in the largefiles code, preventing a very confused state
1110 # the standin in the largefiles code, preventing a very confused state
1111 # later.
1111 # later.
1112 if s.deleted:
1112 if s.deleted:
1113 m = [repo.wjoin(f) for f in s.deleted]
1113 m = [repo.wjoin(f) for f in s.deleted]
1114 removelargefiles(repo.ui, repo, True, *m, **opts)
1114 removelargefiles(repo.ui, repo, True, *m, **opts)
1115 # Call into the normal add code, and any files that *should* be added as
1115 # Call into the normal add code, and any files that *should* be added as
1116 # largefiles will be
1116 # largefiles will be
1117 addlargefiles(repo.ui, repo, matcher, **opts)
1117 addlargefiles(repo.ui, repo, matcher, **opts)
1118 # Now that we've handled largefiles, hand off to the original addremove
1118 # Now that we've handled largefiles, hand off to the original addremove
1119 # function to take care of the rest. Make sure it doesn't do anything with
1119 # function to take care of the rest. Make sure it doesn't do anything with
1120 # largefiles by passing a matcher that will ignore them.
1120 # largefiles by passing a matcher that will ignore them.
1121 matcher = composenormalfilematcher(matcher, repo[None].manifest())
1121 matcher = composenormalfilematcher(matcher, repo[None].manifest())
1122 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1122 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1123
1123
1124 # Calling purge with --all will cause the largefiles to be deleted.
1124 # Calling purge with --all will cause the largefiles to be deleted.
1125 # Override repo.status to prevent this from happening.
1125 # Override repo.status to prevent this from happening.
1126 def overridepurge(orig, ui, repo, *dirs, **opts):
1126 def overridepurge(orig, ui, repo, *dirs, **opts):
1127 # XXX Monkey patching a repoview will not work. The assigned attribute will
1127 # XXX Monkey patching a repoview will not work. The assigned attribute will
1128 # be set on the unfiltered repo, but we will only lookup attributes in the
1128 # be set on the unfiltered repo, but we will only lookup attributes in the
1129 # unfiltered repo if the lookup in the repoview object itself fails. As the
1129 # unfiltered repo if the lookup in the repoview object itself fails. As the
1130 # monkey patched method exists on the repoview class the lookup will not
1130 # monkey patched method exists on the repoview class the lookup will not
1131 # fail. As a result, the original version will shadow the monkey patched
1131 # fail. As a result, the original version will shadow the monkey patched
1132 # one, defeating the monkey patch.
1132 # one, defeating the monkey patch.
1133 #
1133 #
1134 # As a work around we use an unfiltered repo here. We should do something
1134 # As a work around we use an unfiltered repo here. We should do something
1135 # cleaner instead.
1135 # cleaner instead.
1136 repo = repo.unfiltered()
1136 repo = repo.unfiltered()
1137 oldstatus = repo.status
1137 oldstatus = repo.status
1138 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1138 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1139 clean=False, unknown=False, listsubrepos=False):
1139 clean=False, unknown=False, listsubrepos=False):
1140 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1140 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1141 listsubrepos)
1141 listsubrepos)
1142 lfdirstate = lfutil.openlfdirstate(ui, repo)
1142 lfdirstate = lfutil.openlfdirstate(ui, repo)
1143 unknown = [f for f in r.unknown if lfdirstate[f] == '?']
1143 unknown = [f for f in r.unknown if lfdirstate[f] == '?']
1144 ignored = [f for f in r.ignored if lfdirstate[f] == '?']
1144 ignored = [f for f in r.ignored if lfdirstate[f] == '?']
1145 return scmutil.status(r.modified, r.added, r.removed, r.deleted,
1145 return scmutil.status(r.modified, r.added, r.removed, r.deleted,
1146 unknown, ignored, r.clean)
1146 unknown, ignored, r.clean)
1147 repo.status = overridestatus
1147 repo.status = overridestatus
1148 orig(ui, repo, *dirs, **opts)
1148 orig(ui, repo, *dirs, **opts)
1149 repo.status = oldstatus
1149 repo.status = oldstatus
1150 def overriderollback(orig, ui, repo, **opts):
1150 def overriderollback(orig, ui, repo, **opts):
1151 wlock = repo.wlock()
1151 wlock = repo.wlock()
1152 try:
1152 try:
1153 before = repo.dirstate.parents()
1153 before = repo.dirstate.parents()
1154 orphans = set(f for f in repo.dirstate
1154 orphans = set(f for f in repo.dirstate
1155 if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
1155 if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
1156 result = orig(ui, repo, **opts)
1156 result = orig(ui, repo, **opts)
1157 after = repo.dirstate.parents()
1157 after = repo.dirstate.parents()
1158 if before == after:
1158 if before == after:
1159 return result # no need to restore standins
1159 return result # no need to restore standins
1160
1160
1161 pctx = repo['.']
1161 pctx = repo['.']
1162 for f in repo.dirstate:
1162 for f in repo.dirstate:
1163 if lfutil.isstandin(f):
1163 if lfutil.isstandin(f):
1164 orphans.discard(f)
1164 orphans.discard(f)
1165 if repo.dirstate[f] == 'r':
1165 if repo.dirstate[f] == 'r':
1166 repo.wvfs.unlinkpath(f, ignoremissing=True)
1166 repo.wvfs.unlinkpath(f, ignoremissing=True)
1167 elif f in pctx:
1167 elif f in pctx:
1168 fctx = pctx[f]
1168 fctx = pctx[f]
1169 repo.wwrite(f, fctx.data(), fctx.flags())
1169 repo.wwrite(f, fctx.data(), fctx.flags())
1170 else:
1170 else:
1171 # content of standin is not so important in 'a',
1171 # content of standin is not so important in 'a',
1172 # 'm' or 'n' (coming from the 2nd parent) cases
1172 # 'm' or 'n' (coming from the 2nd parent) cases
1173 lfutil.writestandin(repo, f, '', False)
1173 lfutil.writestandin(repo, f, '', False)
1174 for standin in orphans:
1174 for standin in orphans:
1175 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1175 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1176
1176
1177 lfdirstate = lfutil.openlfdirstate(ui, repo)
1177 lfdirstate = lfutil.openlfdirstate(ui, repo)
1178 orphans = set(lfdirstate)
1178 orphans = set(lfdirstate)
1179 lfiles = lfutil.listlfiles(repo)
1179 lfiles = lfutil.listlfiles(repo)
1180 for file in lfiles:
1180 for file in lfiles:
1181 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1181 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1182 orphans.discard(file)
1182 orphans.discard(file)
1183 for lfile in orphans:
1183 for lfile in orphans:
1184 lfdirstate.drop(lfile)
1184 lfdirstate.drop(lfile)
1185 lfdirstate.write()
1185 lfdirstate.write()
1186 finally:
1186 finally:
1187 wlock.release()
1187 wlock.release()
1188 return result
1188 return result
1189
1189
1190 def overridetransplant(orig, ui, repo, *revs, **opts):
1190 def overridetransplant(orig, ui, repo, *revs, **opts):
1191 resuming = opts.get('continue')
1191 resuming = opts.get('continue')
1192 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1192 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1193 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1193 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1194 try:
1194 try:
1195 result = orig(ui, repo, *revs, **opts)
1195 result = orig(ui, repo, *revs, **opts)
1196 finally:
1196 finally:
1197 repo._lfstatuswriters.pop()
1197 repo._lfstatuswriters.pop()
1198 repo._lfcommithooks.pop()
1198 repo._lfcommithooks.pop()
1199 return result
1199 return result
1200
1200
1201 def overridecat(orig, ui, repo, file1, *pats, **opts):
1201 def overridecat(orig, ui, repo, file1, *pats, **opts):
1202 ctx = scmutil.revsingle(repo, opts.get('rev'))
1202 ctx = scmutil.revsingle(repo, opts.get('rev'))
1203 err = 1
1203 err = 1
1204 notbad = set()
1204 notbad = set()
1205 m = scmutil.match(ctx, (file1,) + pats, opts)
1205 m = scmutil.match(ctx, (file1,) + pats, opts)
1206 origmatchfn = m.matchfn
1206 origmatchfn = m.matchfn
1207 def lfmatchfn(f):
1207 def lfmatchfn(f):
1208 if origmatchfn(f):
1208 if origmatchfn(f):
1209 return True
1209 return True
1210 lf = lfutil.splitstandin(f)
1210 lf = lfutil.splitstandin(f)
1211 if lf is None:
1211 if lf is None:
1212 return False
1212 return False
1213 notbad.add(lf)
1213 notbad.add(lf)
1214 return origmatchfn(lf)
1214 return origmatchfn(lf)
1215 m.matchfn = lfmatchfn
1215 m.matchfn = lfmatchfn
1216 origbadfn = m.bad
1216 origbadfn = m.bad
1217 def lfbadfn(f, msg):
1217 def lfbadfn(f, msg):
1218 if not f in notbad:
1218 if not f in notbad:
1219 origbadfn(f, msg)
1219 origbadfn(f, msg)
1220 m.bad = lfbadfn
1220 m.bad = lfbadfn
1221 for f in ctx.walk(m):
1221 for f in ctx.walk(m):
1222 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1222 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1223 pathname=f)
1223 pathname=f)
1224 lf = lfutil.splitstandin(f)
1224 lf = lfutil.splitstandin(f)
1225 if lf is None or origmatchfn(f):
1225 if lf is None or origmatchfn(f):
1226 # duplicating unreachable code from commands.cat
1226 # duplicating unreachable code from commands.cat
1227 data = ctx[f].data()
1227 data = ctx[f].data()
1228 if opts.get('decode'):
1228 if opts.get('decode'):
1229 data = repo.wwritedata(f, data)
1229 data = repo.wwritedata(f, data)
1230 fp.write(data)
1230 fp.write(data)
1231 else:
1231 else:
1232 hash = lfutil.readstandin(repo, lf, ctx.rev())
1232 hash = lfutil.readstandin(repo, lf, ctx.rev())
1233 if not lfutil.inusercache(repo.ui, hash):
1233 if not lfutil.inusercache(repo.ui, hash):
1234 store = basestore._openstore(repo)
1234 store = basestore._openstore(repo)
1235 success, missing = store.get([(lf, hash)])
1235 success, missing = store.get([(lf, hash)])
1236 if len(success) != 1:
1236 if len(success) != 1:
1237 raise util.Abort(
1237 raise util.Abort(
1238 _('largefile %s is not in cache and could not be '
1238 _('largefile %s is not in cache and could not be '
1239 'downloaded') % lf)
1239 'downloaded') % lf)
1240 path = lfutil.usercachepath(repo.ui, hash)
1240 path = lfutil.usercachepath(repo.ui, hash)
1241 fpin = open(path, "rb")
1241 fpin = open(path, "rb")
1242 for chunk in util.filechunkiter(fpin, 128 * 1024):
1242 for chunk in util.filechunkiter(fpin, 128 * 1024):
1243 fp.write(chunk)
1243 fp.write(chunk)
1244 fpin.close()
1244 fpin.close()
1245 fp.close()
1245 fp.close()
1246 err = 0
1246 err = 0
1247 return err
1247 return err
1248
1248
1249 def mergeupdate(orig, repo, node, branchmerge, force, partial,
1249 def mergeupdate(orig, repo, node, branchmerge, force, partial,
1250 *args, **kwargs):
1250 *args, **kwargs):
1251 wlock = repo.wlock()
1251 wlock = repo.wlock()
1252 try:
1252 try:
1253 # branch | | |
1253 # branch | | |
1254 # merge | force | partial | action
1254 # merge | force | partial | action
1255 # -------+-------+---------+--------------
1255 # -------+-------+---------+--------------
1256 # x | x | x | linear-merge
1256 # x | x | x | linear-merge
1257 # o | x | x | branch-merge
1257 # o | x | x | branch-merge
1258 # x | o | x | overwrite (as clean update)
1258 # x | o | x | overwrite (as clean update)
1259 # o | o | x | force-branch-merge (*1)
1259 # o | o | x | force-branch-merge (*1)
1260 # x | x | o | (*)
1260 # x | x | o | (*)
1261 # o | x | o | (*)
1261 # o | x | o | (*)
1262 # x | o | o | overwrite (as revert)
1262 # x | o | o | overwrite (as revert)
1263 # o | o | o | (*)
1263 # o | o | o | (*)
1264 #
1264 #
1265 # (*) don't care
1265 # (*) don't care
1266 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1266 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
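# Editorial note (not in the original source), reading the table above with
# concrete commands: a plain 'hg update REV' falls in the first row
# (linear-merge), 'hg merge REV' in the second (branch-merge), and
# 'hg update --clean REV' in the third (overwrite, as a clean update). Only
# the linear-merge and force-branch-merge rows trigger the standin refresh
# performed just below.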
1267
1267
1268 linearmerge = not branchmerge and not force and not partial
1268 linearmerge = not branchmerge and not force and not partial
1269
1269
1270 if linearmerge or (branchmerge and force and not partial):
1270 if linearmerge or (branchmerge and force and not partial):
1271 # update standins for linear-merge or force-branch-merge,
1271 # update standins for linear-merge or force-branch-merge,
1272 # because largefiles in the working directory may be modified
1272 # because largefiles in the working directory may be modified
1273 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1273 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1274 unsure, s = lfdirstate.status(match_.always(repo.root,
1274 unsure, s = lfdirstate.status(match_.always(repo.root,
1275 repo.getcwd()),
1275 repo.getcwd()),
1276 [], False, False, False)
1276 [], False, False, False)
1277 for lfile in unsure + s.modified + s.added:
1277 for lfile in unsure + s.modified + s.added:
1278 lfutil.updatestandin(repo, lfutil.standin(lfile))
1278 lfutil.updatestandin(repo, lfutil.standin(lfile))
1279
1279
1280 if linearmerge:
1280 if linearmerge:
1281 # Only call updatelfiles on the standins that have changed
1281 # Only call updatelfiles on the standins that have changed
1282 # to save time
1282 # to save time
1283 oldstandins = lfutil.getstandinsstate(repo)
1283 oldstandins = lfutil.getstandinsstate(repo)
1284
1284
1285 result = orig(repo, node, branchmerge, force, partial, *args, **kwargs)
1285 result = orig(repo, node, branchmerge, force, partial, *args, **kwargs)
1286
1286
1287 filelist = None
1287 filelist = None
1288 if linearmerge:
1288 if linearmerge:
1289 newstandins = lfutil.getstandinsstate(repo)
1289 newstandins = lfutil.getstandinsstate(repo)
1290 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1290 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1291
1291
1292 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1292 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1293 normallookup=partial)
1293 normallookup=partial)
1294
1294
1295 return result
1295 return result
1296 finally:
1296 finally:
1297 wlock.release()
1297 wlock.release()
1298
1298
1299 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1299 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1300 result = orig(repo, files, *args, **kwargs)
1300 result = orig(repo, files, *args, **kwargs)
1301
1301
1302 filelist = [lfutil.splitstandin(f) for f in files if lfutil.isstandin(f)]
1302 filelist = [lfutil.splitstandin(f) for f in files if lfutil.isstandin(f)]
1303 if filelist:
1303 if filelist:
1304 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1304 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1305 printmessage=False, normallookup=True)
1305 printmessage=False, normallookup=True)
1306
1306
1307 return result
1307 return result
@@ -1,1172 +1,1177 @@
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import struct
8 import struct
9
9
10 from node import nullid, nullrev, hex, bin
10 from node import nullid, nullrev, hex, bin
11 from i18n import _
11 from i18n import _
12 from mercurial import obsolete
12 from mercurial import obsolete
13 import error as errormod, util, filemerge, copies, subrepo, worker
13 import error as errormod, util, filemerge, copies, subrepo, worker
14 import errno, os, shutil
14 import errno, os, shutil
15
15
16 _pack = struct.pack
16 _pack = struct.pack
17 _unpack = struct.unpack
17 _unpack = struct.unpack
18
18
19 def _droponode(data):
19 def _droponode(data):
20 # used for compatibility for v1
20 # used for compatibility for v1
21 bits = data.split('\0')
21 bits = data.split('\0')
22 bits = bits[:-2] + bits[-1:]
22 bits = bits[:-2] + bits[-1:]
23 return '\0'.join(bits)
23 return '\0'.join(bits)
24
24
25 class mergestate(object):
25 class mergestate(object):
26 '''track 3-way merge state of individual files
26 '''track 3-way merge state of individual files
27
27
28 it is stored on disk when needed. Two files are used, one with an old
28 it is stored on disk when needed. Two files are used, one with an old
29 format, one with a new format. Both contain similar data, but the new
29 format, one with a new format. Both contain similar data, but the new
30 format can store new kinds of fields.
30 format can store new kinds of fields.
31
31
32 The current new format is a list of arbitrary records of the form:
32 The current new format is a list of arbitrary records of the form:
33
33
34 [type][length][content]
34 [type][length][content]
35
35
36 Type is a single character, length is a 4 byte integer, content is an
36 Type is a single character, length is a 4 byte integer, content is an
37 arbitrary suite of bytes of length `length`.
37 arbitrary suite of bytes of length `length`.
38
38
39 Type should be a letter. Capital letters are mandatory records; Mercurial
39 Type should be a letter. Capital letters are mandatory records; Mercurial
40 should abort if they are unknown. Lower case records can be safely ignored.
40 should abort if they are unknown. Lower case records can be safely ignored.
41
41
42 Currently known record:
42 Currently known record:
43
43
44 L: the node of the "local" part of the merge (hexified version)
44 L: the node of the "local" part of the merge (hexified version)
45 O: the node of the "other" part of the merge (hexified version)
45 O: the node of the "other" part of the merge (hexified version)
46 F: a file to be merged entry
46 F: a file to be merged entry
47 '''
47 '''
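# Illustrative sketch (editorial addition, not in the original source) of one
# v2 record as described above, assuming the '>sI' big-endian framing used by
# _writerecordsv2/_readrecordsv2 below:
#
#   import struct
#   rtype, data = 'L', '0' * 40          # hexified node of the local side
#   rec = struct.pack('>sI%ds' % len(data), rtype, len(data), data)
#   # parsing: 1-byte type, 4-byte big-endian length, then `length` bytes
#   assert rec[0] == 'L'
#   assert struct.unpack('>I', rec[1:5])[0] == 40
#   assert rec[5:5 + 40] == '0' * 40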
48 statepathv1 = 'merge/state'
48 statepathv1 = 'merge/state'
49 statepathv2 = 'merge/state2'
49 statepathv2 = 'merge/state2'
50
50
51 def __init__(self, repo):
51 def __init__(self, repo):
52 self._repo = repo
52 self._repo = repo
53 self._dirty = False
53 self._dirty = False
54 self._read()
54 self._read()
55
55
56 def reset(self, node=None, other=None):
56 def reset(self, node=None, other=None):
57 self._state = {}
57 self._state = {}
58 self._local = None
58 self._local = None
59 self._other = None
59 self._other = None
60 if node:
60 if node:
61 self._local = node
61 self._local = node
62 self._other = other
62 self._other = other
63 shutil.rmtree(self._repo.join('merge'), True)
63 shutil.rmtree(self._repo.join('merge'), True)
64 self._dirty = False
64 self._dirty = False
65
65
66 def _read(self):
66 def _read(self):
67 """Analyse each record content to restore a serialized state from disk
67 """Analyse each record content to restore a serialized state from disk
68
68
69 This function processes "record" entries produced by the de-serialization
69 This function processes "record" entries produced by the de-serialization
70 of the on-disk file.
70 of the on-disk file.
71 """
71 """
72 self._state = {}
72 self._state = {}
73 self._local = None
73 self._local = None
74 self._other = None
74 self._other = None
75 records = self._readrecords()
75 records = self._readrecords()
76 for rtype, record in records:
76 for rtype, record in records:
77 if rtype == 'L':
77 if rtype == 'L':
78 self._local = bin(record)
78 self._local = bin(record)
79 elif rtype == 'O':
79 elif rtype == 'O':
80 self._other = bin(record)
80 self._other = bin(record)
81 elif rtype == 'F':
81 elif rtype == 'F':
82 bits = record.split('\0')
82 bits = record.split('\0')
83 self._state[bits[0]] = bits[1:]
83 self._state[bits[0]] = bits[1:]
84 elif not rtype.islower():
84 elif not rtype.islower():
85 raise util.Abort(_('unsupported merge state record: %s')
85 raise util.Abort(_('unsupported merge state record: %s')
86 % rtype)
86 % rtype)
87 self._dirty = False
87 self._dirty = False
88
88
89 def _readrecords(self):
89 def _readrecords(self):
90 """Read merge state from disk and return a list of record (TYPE, data)
90 """Read merge state from disk and return a list of record (TYPE, data)
91
91
92 We read data from both v1 and v2 files and decide which one to use.
92 We read data from both v1 and v2 files and decide which one to use.
93
93
94 V1 has been used by versions prior to 2.9.1 and contains less data than
94 V1 has been used by versions prior to 2.9.1 and contains less data than
95 v2. We read both versions and check whether any data in v2 contradicts
95 v2. We read both versions and check whether any data in v2 contradicts
96 v1. If there is no contradiction we can safely assume that both v1
96 v1. If there is no contradiction we can safely assume that both v1
97 and v2 were written at the same time and use the extra data in v2. If
97 and v2 were written at the same time and use the extra data in v2. If
98 there is a contradiction we ignore the v2 content as we assume an old version
98 there is a contradiction we ignore the v2 content as we assume an old version
99 of Mercurial has overwritten the mergestate file and left an old v2
99 of Mercurial has overwritten the mergestate file and left an old v2
100 file around.
100 file around.
101
101
102 returns list of record [(TYPE, data), ...]"""
102 returns list of record [(TYPE, data), ...]"""
103 v1records = self._readrecordsv1()
103 v1records = self._readrecordsv1()
104 v2records = self._readrecordsv2()
104 v2records = self._readrecordsv2()
105 oldv2 = set() # old format version of v2 record
105 oldv2 = set() # old format version of v2 record
106 for rec in v2records:
106 for rec in v2records:
107 if rec[0] == 'L':
107 if rec[0] == 'L':
108 oldv2.add(rec)
108 oldv2.add(rec)
109 elif rec[0] == 'F':
109 elif rec[0] == 'F':
110 # drop the onode data (not contained in v1)
110 # drop the onode data (not contained in v1)
111 oldv2.add(('F', _droponode(rec[1])))
111 oldv2.add(('F', _droponode(rec[1])))
112 for rec in v1records:
112 for rec in v1records:
113 if rec not in oldv2:
113 if rec not in oldv2:
114 # v1 file is newer than v2 file, use it
114 # v1 file is newer than v2 file, use it
115 # we have to infer the "other" changeset of the merge
115 # we have to infer the "other" changeset of the merge
116 # we cannot do better than that with v1 of the format
116 # we cannot do better than that with v1 of the format
117 mctx = self._repo[None].parents()[-1]
117 mctx = self._repo[None].parents()[-1]
118 v1records.append(('O', mctx.hex()))
118 v1records.append(('O', mctx.hex()))
119 # add placeholder "other" file node information
119 # add placeholder "other" file node information
120 # nobody is using it yet so we do not need to fetch the data
120 # nobody is using it yet so we do not need to fetch the data
121 # if mctx was wrong `mctx[bits[-2]]` may fail.
121 # if mctx was wrong `mctx[bits[-2]]` may fail.
122 for idx, r in enumerate(v1records):
122 for idx, r in enumerate(v1records):
123 if r[0] == 'F':
123 if r[0] == 'F':
124 bits = r[1].split('\0')
124 bits = r[1].split('\0')
125 bits.insert(-2, '')
125 bits.insert(-2, '')
126 v1records[idx] = (r[0], '\0'.join(bits))
126 v1records[idx] = (r[0], '\0'.join(bits))
127 return v1records
127 return v1records
128 else:
128 else:
129 return v2records
129 return v2records
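# Worked example (editorial, not in the original source): if every v1 record
# also appears in oldv2 (the v2 records with their extra "other node" field
# dropped), the two files agree and the richer v2 records are returned. If a
# v1 record is missing from oldv2, v1 is treated as newer: an 'O' record is
# inferred from the working directory's last parent and an empty placeholder
# field is spliced into each v1 'F' record before v1records is returned.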
130
130
131 def _readrecordsv1(self):
131 def _readrecordsv1(self):
132 """read on disk merge state for version 1 file
132 """read on disk merge state for version 1 file
133
133
134 returns list of record [(TYPE, data), ...]
134 returns list of record [(TYPE, data), ...]
135
135
136 Note: the "F" data from this file are one entry short
136 Note: the "F" data from this file are one entry short
137 (no "other file node" entry)
137 (no "other file node" entry)
138 """
138 """
139 records = []
139 records = []
140 try:
140 try:
141 f = self._repo.opener(self.statepathv1)
141 f = self._repo.opener(self.statepathv1)
142 for i, l in enumerate(f):
142 for i, l in enumerate(f):
143 if i == 0:
143 if i == 0:
144 records.append(('L', l[:-1]))
144 records.append(('L', l[:-1]))
145 else:
145 else:
146 records.append(('F', l[:-1]))
146 records.append(('F', l[:-1]))
147 f.close()
147 f.close()
148 except IOError, err:
148 except IOError, err:
149 if err.errno != errno.ENOENT:
149 if err.errno != errno.ENOENT:
150 raise
150 raise
151 return records
151 return records
152
152
153 def _readrecordsv2(self):
153 def _readrecordsv2(self):
154 """read on disk merge state for version 2 file
154 """read on disk merge state for version 2 file
155
155
156 returns list of record [(TYPE, data), ...]
156 returns list of record [(TYPE, data), ...]
157 """
157 """
158 records = []
158 records = []
159 try:
159 try:
160 f = self._repo.opener(self.statepathv2)
160 f = self._repo.opener(self.statepathv2)
161 data = f.read()
161 data = f.read()
162 off = 0
162 off = 0
163 end = len(data)
163 end = len(data)
164 while off < end:
164 while off < end:
165 rtype = data[off]
165 rtype = data[off]
166 off += 1
166 off += 1
167 length = _unpack('>I', data[off:(off + 4)])[0]
167 length = _unpack('>I', data[off:(off + 4)])[0]
168 off += 4
168 off += 4
169 record = data[off:(off + length)]
169 record = data[off:(off + length)]
170 off += length
170 off += length
171 records.append((rtype, record))
171 records.append((rtype, record))
172 f.close()
172 f.close()
173 except IOError, err:
173 except IOError, err:
174 if err.errno != errno.ENOENT:
174 if err.errno != errno.ENOENT:
175 raise
175 raise
176 return records
176 return records
177
177
178 def active(self):
178 def active(self):
179 """Whether mergestate is active.
179 """Whether mergestate is active.
180
180
181 Returns True if there appears to be mergestate. This is a rough proxy
181 Returns True if there appears to be mergestate. This is a rough proxy
182 for "is a merge in progress."
182 for "is a merge in progress."
183 """
183 """
184 # Check local variables before looking at filesystem for performance
184 # Check local variables before looking at filesystem for performance
185 # reasons.
185 # reasons.
186 return bool(self._local) or bool(self._state) or \
186 return bool(self._local) or bool(self._state) or \
187 self._repo.opener.exists(self.statepathv1) or \
187 self._repo.opener.exists(self.statepathv1) or \
188 self._repo.opener.exists(self.statepathv2)
188 self._repo.opener.exists(self.statepathv2)
189
189
190 def commit(self):
190 def commit(self):
191 """Write current state on disk (if necessary)"""
191 """Write current state on disk (if necessary)"""
192 if self._dirty:
192 if self._dirty:
193 records = []
193 records = []
194 records.append(('L', hex(self._local)))
194 records.append(('L', hex(self._local)))
195 records.append(('O', hex(self._other)))
195 records.append(('O', hex(self._other)))
196 for d, v in self._state.iteritems():
196 for d, v in self._state.iteritems():
197 records.append(('F', '\0'.join([d] + v)))
197 records.append(('F', '\0'.join([d] + v)))
198 self._writerecords(records)
198 self._writerecords(records)
199 self._dirty = False
199 self._dirty = False
200
200
201 def _writerecords(self, records):
201 def _writerecords(self, records):
202 """Write current state on disk (both v1 and v2)"""
202 """Write current state on disk (both v1 and v2)"""
203 self._writerecordsv1(records)
203 self._writerecordsv1(records)
204 self._writerecordsv2(records)
204 self._writerecordsv2(records)
205
205
206 def _writerecordsv1(self, records):
206 def _writerecordsv1(self, records):
207 """Write current state on disk in a version 1 file"""
207 """Write current state on disk in a version 1 file"""
208 f = self._repo.opener(self.statepathv1, 'w')
208 f = self._repo.opener(self.statepathv1, 'w')
209 irecords = iter(records)
209 irecords = iter(records)
210 lrecords = irecords.next()
210 lrecords = irecords.next()
211 assert lrecords[0] == 'L'
211 assert lrecords[0] == 'L'
212 f.write(hex(self._local) + '\n')
212 f.write(hex(self._local) + '\n')
213 for rtype, data in irecords:
213 for rtype, data in irecords:
214 if rtype == 'F':
214 if rtype == 'F':
215 f.write('%s\n' % _droponode(data))
215 f.write('%s\n' % _droponode(data))
216 f.close()
216 f.close()
217
217
218 def _writerecordsv2(self, records):
218 def _writerecordsv2(self, records):
219 """Write current state on disk in a version 2 file"""
219 """Write current state on disk in a version 2 file"""
220 f = self._repo.opener(self.statepathv2, 'w')
220 f = self._repo.opener(self.statepathv2, 'w')
221 for key, data in records:
221 for key, data in records:
222 assert len(key) == 1
222 assert len(key) == 1
223 format = '>sI%is' % len(data)
223 format = '>sI%is' % len(data)
224 f.write(_pack(format, key, len(data), data))
224 f.write(_pack(format, key, len(data), data))
225 f.close()
225 f.close()
226
226
227 def add(self, fcl, fco, fca, fd):
227 def add(self, fcl, fco, fca, fd):
228 """add a new (potentially?) conflicting file to the merge state
228 """add a new (potentially?) conflicting file to the merge state
229 fcl: file context for local,
229 fcl: file context for local,
230 fco: file context for remote,
230 fco: file context for remote,
231 fca: file context for ancestors,
231 fca: file context for ancestors,
232 fd: file path of the resulting merge.
232 fd: file path of the resulting merge.
233
233
234 note: also write the local version to the `.hg/merge` directory.
234 note: also write the local version to the `.hg/merge` directory.
235 """
235 """
236 hash = util.sha1(fcl.path()).hexdigest()
236 hash = util.sha1(fcl.path()).hexdigest()
237 self._repo.opener.write('merge/' + hash, fcl.data())
237 self._repo.opener.write('merge/' + hash, fcl.data())
238 self._state[fd] = ['u', hash, fcl.path(),
238 self._state[fd] = ['u', hash, fcl.path(),
239 fca.path(), hex(fca.filenode()),
239 fca.path(), hex(fca.filenode()),
240 fco.path(), hex(fco.filenode()),
240 fco.path(), hex(fco.filenode()),
241 fcl.flags()]
241 fcl.flags()]
242 self._dirty = True
242 self._dirty = True
243
243
244 def __contains__(self, dfile):
244 def __contains__(self, dfile):
245 return dfile in self._state
245 return dfile in self._state
246
246
247 def __getitem__(self, dfile):
247 def __getitem__(self, dfile):
248 return self._state[dfile][0]
248 return self._state[dfile][0]
249
249
250 def __iter__(self):
250 def __iter__(self):
251 return iter(sorted(self._state))
251 return iter(sorted(self._state))
252
252
253 def files(self):
253 def files(self):
254 return self._state.keys()
254 return self._state.keys()
255
255
256 def mark(self, dfile, state):
256 def mark(self, dfile, state):
257 self._state[dfile][0] = state
257 self._state[dfile][0] = state
258 self._dirty = True
258 self._dirty = True
259
259
260 def unresolved(self):
260 def unresolved(self):
261 """Obtain the paths of unresolved files."""
261 """Obtain the paths of unresolved files."""
262
262
263 for f, entry in self._state.items():
263 for f, entry in self._state.items():
264 if entry[0] == 'u':
264 if entry[0] == 'u':
265 yield f
265 yield f
266
266
267 def resolve(self, dfile, wctx, labels=None):
267 def resolve(self, dfile, wctx, labels=None):
268 """rerun merge process for file path `dfile`"""
268 """rerun merge process for file path `dfile`"""
269 if self[dfile] == 'r':
269 if self[dfile] == 'r':
270 return 0
270 return 0
271 stateentry = self._state[dfile]
271 stateentry = self._state[dfile]
272 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
272 state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
273 octx = self._repo[self._other]
273 octx = self._repo[self._other]
274 fcd = wctx[dfile]
274 fcd = wctx[dfile]
275 fco = octx[ofile]
275 fco = octx[ofile]
276 fca = self._repo.filectx(afile, fileid=anode)
276 fca = self._repo.filectx(afile, fileid=anode)
277 # "premerge" x flags
277 # "premerge" x flags
278 flo = fco.flags()
278 flo = fco.flags()
279 fla = fca.flags()
279 fla = fca.flags()
280 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
280 if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
281 if fca.node() == nullid:
281 if fca.node() == nullid:
282 self._repo.ui.warn(_('warning: cannot merge flags for %s\n') %
282 self._repo.ui.warn(_('warning: cannot merge flags for %s\n') %
283 afile)
283 afile)
284 elif flags == fla:
284 elif flags == fla:
285 flags = flo
285 flags = flo
286 # restore local
286 # restore local
287 f = self._repo.opener('merge/' + hash)
287 f = self._repo.opener('merge/' + hash)
288 self._repo.wwrite(dfile, f.read(), flags)
288 self._repo.wwrite(dfile, f.read(), flags)
289 f.close()
289 f.close()
290 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca,
290 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca,
291 labels=labels)
291 labels=labels)
292 if r is None:
292 if r is None:
293 # no real conflict
293 # no real conflict
294 del self._state[dfile]
294 del self._state[dfile]
295 self._dirty = True
295 self._dirty = True
296 elif not r:
296 elif not r:
297 self.mark(dfile, 'r')
297 self.mark(dfile, 'r')
298 return r
298 return r
299
299
300 def _checkunknownfile(repo, wctx, mctx, f):
300 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
301 if f2 is None:
302 f2 = f
301 return (os.path.isfile(repo.wjoin(f))
303 return (os.path.isfile(repo.wjoin(f))
302 and repo.wopener.audit.check(f)
304 and repo.wopener.audit.check(f)
303 and repo.dirstate.normalize(f) not in repo.dirstate
305 and repo.dirstate.normalize(f) not in repo.dirstate
304 and mctx[f].cmp(wctx[f]))
306 and mctx[f2].cmp(wctx[f]))
305
307
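# Editorial note (not part of the original source): the optional f2 parameter
# added above lets callers compare an untracked working-directory file f
# against the remote file's content under a different name f2; per this
# changeset's description, that is what keeps a merge from silently
# overwriting an untracked file sitting at a directory-rename target.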
306 def _forgetremoved(wctx, mctx, branchmerge):
308 def _forgetremoved(wctx, mctx, branchmerge):
307 """
309 """
308 Forget removed files
310 Forget removed files
309
311
310 If we're jumping between revisions (as opposed to merging), and if
312 If we're jumping between revisions (as opposed to merging), and if
311 neither the working directory nor the target rev has the file,
313 neither the working directory nor the target rev has the file,
312 then we need to remove it from the dirstate, to prevent the
314 then we need to remove it from the dirstate, to prevent the
313 dirstate from listing the file when it is no longer in the
315 dirstate from listing the file when it is no longer in the
314 manifest.
316 manifest.
315
317
316 If we're merging, and the other revision has removed a file
318 If we're merging, and the other revision has removed a file
317 that is not present in the working directory, we need to mark it
319 that is not present in the working directory, we need to mark it
318 as removed.
320 as removed.
319 """
321 """
320
322
321 actions = {}
323 actions = {}
322 m = 'f'
324 m = 'f'
323 if branchmerge:
325 if branchmerge:
324 m = 'r'
326 m = 'r'
325 for f in wctx.deleted():
327 for f in wctx.deleted():
326 if f not in mctx:
328 if f not in mctx:
327 actions[f] = m, None, "forget deleted"
329 actions[f] = m, None, "forget deleted"
328
330
329 if not branchmerge:
331 if not branchmerge:
330 for f in wctx.removed():
332 for f in wctx.removed():
331 if f not in mctx:
333 if f not in mctx:
332 actions[f] = 'f', None, "forget removed"
334 actions[f] = 'f', None, "forget removed"
333
335
334 return actions
336 return actions
335
337
336 def _checkcollision(repo, wmf, actions):
338 def _checkcollision(repo, wmf, actions):
337 # build provisional merged manifest up
339 # build provisional merged manifest up
338 pmmf = set(wmf)
340 pmmf = set(wmf)
339
341
340 if actions:
342 if actions:
341 # k, dr, e and rd are no-op
343 # k, dr, e and rd are no-op
342 for m in 'a', 'f', 'g', 'cd', 'dc':
344 for m in 'a', 'f', 'g', 'cd', 'dc':
343 for f, args, msg in actions[m]:
345 for f, args, msg in actions[m]:
344 pmmf.add(f)
346 pmmf.add(f)
345 for f, args, msg in actions['r']:
347 for f, args, msg in actions['r']:
346 pmmf.discard(f)
348 pmmf.discard(f)
347 for f, args, msg in actions['dm']:
349 for f, args, msg in actions['dm']:
348 f2, flags = args
350 f2, flags = args
349 pmmf.discard(f2)
351 pmmf.discard(f2)
350 pmmf.add(f)
352 pmmf.add(f)
351 for f, args, msg in actions['dg']:
353 for f, args, msg in actions['dg']:
352 pmmf.add(f)
354 pmmf.add(f)
353 for f, args, msg in actions['m']:
355 for f, args, msg in actions['m']:
354 f1, f2, fa, move, anc = args
356 f1, f2, fa, move, anc = args
355 if move:
357 if move:
356 pmmf.discard(f1)
358 pmmf.discard(f1)
357 pmmf.add(f)
359 pmmf.add(f)
358
360
359 # check case-folding collision in provisional merged manifest
361 # check case-folding collision in provisional merged manifest
360 foldmap = {}
362 foldmap = {}
361 for f in sorted(pmmf):
363 for f in sorted(pmmf):
362 fold = util.normcase(f)
364 fold = util.normcase(f)
363 if fold in foldmap:
365 if fold in foldmap:
364 raise util.Abort(_("case-folding collision between %s and %s")
366 raise util.Abort(_("case-folding collision between %s and %s")
365 % (f, foldmap[fold]))
367 % (f, foldmap[fold]))
366 foldmap[fold] = f
368 foldmap[fold] = f
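# Example (editorial sketch, not in the original source): util.normcase folds
# names such as 'README.txt' and 'readme.TXT' to the same key, so if the
# provisional merged manifest would contain both, the loop above aborts with
# the "case-folding collision" message rather than letting the two paths
# collide on a case-insensitive filesystem.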
367
369
368 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
370 def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
369 acceptremote, followcopies):
371 acceptremote, followcopies):
370 """
372 """
371 Merge p1 and p2 with ancestor pa and generate merge action list
373 Merge p1 and p2 with ancestor pa and generate merge action list
372
374
373 branchmerge and force are as passed in to update
375 branchmerge and force are as passed in to update
374 partial = function to filter file lists
376 partial = function to filter file lists
375 acceptremote = accept the incoming changes without prompting
377 acceptremote = accept the incoming changes without prompting
376 """
378 """
377
379
378 copy, movewithdir, diverge, renamedelete = {}, {}, {}, {}
380 copy, movewithdir, diverge, renamedelete = {}, {}, {}, {}
379
381
380 # manifests fetched in order are going to be faster, so prime the caches
382 # manifests fetched in order are going to be faster, so prime the caches
381 [x.manifest() for x in
383 [x.manifest() for x in
382 sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]
384 sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]
383
385
384 if followcopies:
386 if followcopies:
385 ret = copies.mergecopies(repo, wctx, p2, pa)
387 ret = copies.mergecopies(repo, wctx, p2, pa)
386 copy, movewithdir, diverge, renamedelete = ret
388 copy, movewithdir, diverge, renamedelete = ret
387
389
388 repo.ui.note(_("resolving manifests\n"))
390 repo.ui.note(_("resolving manifests\n"))
389 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
391 repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
390 % (bool(branchmerge), bool(force), bool(partial)))
392 % (bool(branchmerge), bool(force), bool(partial)))
391 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
393 repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
392
394
393 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
395 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
394 copied = set(copy.values())
396 copied = set(copy.values())
395 copied.update(movewithdir.values())
397 copied.update(movewithdir.values())
396
398
397 if '.hgsubstate' in m1:
399 if '.hgsubstate' in m1:
398 # check whether sub state is modified
400 # check whether sub state is modified
399 for s in sorted(wctx.substate):
401 for s in sorted(wctx.substate):
400 if wctx.sub(s).dirty():
402 if wctx.sub(s).dirty():
401 m1['.hgsubstate'] += '+'
403 m1['.hgsubstate'] += '+'
402 break
404 break
403
405
404 # Compare manifests
406 # Compare manifests
405 diff = m1.diff(m2)
407 diff = m1.diff(m2)
406
408
407 actions = {}
409 actions = {}
408 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
410 for f, ((n1, fl1), (n2, fl2)) in diff.iteritems():
409 if partial and not partial(f):
411 if partial and not partial(f):
410 continue
412 continue
411 if n1 and n2: # file exists on both local and remote side
413 if n1 and n2: # file exists on both local and remote side
412 if f not in ma:
414 if f not in ma:
413 fa = copy.get(f, None)
415 fa = copy.get(f, None)
414 if fa is not None:
416 if fa is not None:
415 actions[f] = ('m', (f, f, fa, False, pa.node()),
417 actions[f] = ('m', (f, f, fa, False, pa.node()),
416 "both renamed from " + fa)
418 "both renamed from " + fa)
417 else:
419 else:
418 actions[f] = ('m', (f, f, None, False, pa.node()),
420 actions[f] = ('m', (f, f, None, False, pa.node()),
419 "both created")
421 "both created")
420 else:
422 else:
421 a = ma[f]
423 a = ma[f]
422 fla = ma.flags(f)
424 fla = ma.flags(f)
423 nol = 'l' not in fl1 + fl2 + fla
425 nol = 'l' not in fl1 + fl2 + fla
424 if n2 == a and fl2 == fla:
426 if n2 == a and fl2 == fla:
425 actions[f] = ('k' , (), "remote unchanged")
427 actions[f] = ('k' , (), "remote unchanged")
426 elif n1 == a and fl1 == fla: # local unchanged - use remote
428 elif n1 == a and fl1 == fla: # local unchanged - use remote
427 if n1 == n2: # optimization: keep local content
429 if n1 == n2: # optimization: keep local content
428 actions[f] = ('e', (fl2,), "update permissions")
430 actions[f] = ('e', (fl2,), "update permissions")
429 else:
431 else:
430 actions[f] = ('g', (fl2,), "remote is newer")
432 actions[f] = ('g', (fl2,), "remote is newer")
431 elif nol and n2 == a: # remote only changed 'x'
433 elif nol and n2 == a: # remote only changed 'x'
432 actions[f] = ('e', (fl2,), "update permissions")
434 actions[f] = ('e', (fl2,), "update permissions")
433 elif nol and n1 == a: # local only changed 'x'
435 elif nol and n1 == a: # local only changed 'x'
434 actions[f] = ('g', (fl1,), "remote is newer")
436 actions[f] = ('g', (fl1,), "remote is newer")
435 else: # both changed something
437 else: # both changed something
436 actions[f] = ('m', (f, f, f, False, pa.node()),
438 actions[f] = ('m', (f, f, f, False, pa.node()),
437 "versions differ")
439 "versions differ")
438 elif n1: # file exists only on local side
440 elif n1: # file exists only on local side
439 if f in copied:
441 if f in copied:
440 pass # we'll deal with it on m2 side
442 pass # we'll deal with it on m2 side
441 elif f in movewithdir: # directory rename, move local
443 elif f in movewithdir: # directory rename, move local
442 f2 = movewithdir[f]
444 f2 = movewithdir[f]
443 if f2 in m2:
445 if f2 in m2:
444 actions[f2] = ('m', (f, f2, None, True, pa.node()),
446 actions[f2] = ('m', (f, f2, None, True, pa.node()),
445 "remote directory rename, both created")
447 "remote directory rename, both created")
446 else:
448 else:
447 actions[f2] = ('dm', (f, fl1),
449 actions[f2] = ('dm', (f, fl1),
448 "remote directory rename - move from " + f)
450 "remote directory rename - move from " + f)
449 elif f in copy:
451 elif f in copy:
450 f2 = copy[f]
452 f2 = copy[f]
451 actions[f] = ('m', (f, f2, f2, False, pa.node()),
453 actions[f] = ('m', (f, f2, f2, False, pa.node()),
452 "local copied/moved from " + f2)
454 "local copied/moved from " + f2)
453 elif f in ma: # clean, a different, no remote
455 elif f in ma: # clean, a different, no remote
454 if n1 != ma[f]:
456 if n1 != ma[f]:
455 if acceptremote:
457 if acceptremote:
456 actions[f] = ('r', None, "remote delete")
458 actions[f] = ('r', None, "remote delete")
457 else:
459 else:
458 actions[f] = ('cd', None, "prompt changed/deleted")
460 actions[f] = ('cd', None, "prompt changed/deleted")
459 elif n1[20:] == 'a':
461 elif n1[20:] == 'a':
460 # This extra 'a' is added by working copy manifest to mark
462 # This extra 'a' is added by working copy manifest to mark
461 # the file as locally added. We should forget it instead of
463 # the file as locally added. We should forget it instead of
462 # deleting it.
464 # deleting it.
463 actions[f] = ('f', None, "remote deleted")
465 actions[f] = ('f', None, "remote deleted")
464 else:
466 else:
465 actions[f] = ('r', None, "other deleted")
467 actions[f] = ('r', None, "other deleted")
466 elif n2: # file exists only on remote side
468 elif n2: # file exists only on remote side
467 if f in copied:
469 if f in copied:
468 pass # we'll deal with it on m1 side
470 pass # we'll deal with it on m1 side
469 elif f in movewithdir:
471 elif f in movewithdir:
470 f2 = movewithdir[f]
472 f2 = movewithdir[f]
471 if f2 in m1:
473 if f2 in m1:
472 actions[f2] = ('m', (f2, f, None, False, pa.node()),
474 actions[f2] = ('m', (f2, f, None, False, pa.node()),
473 "local directory rename, both created")
475 "local directory rename, both created")
474 else:
476 else:
475 actions[f2] = ('dg', (f, fl2),
477 actions[f2] = ('dg', (f, fl2),
476 "local directory rename - get from " + f)
478 "local directory rename - get from " + f)
477 elif f in copy:
479 elif f in copy:
478 f2 = copy[f]
480 f2 = copy[f]
479 if f2 in m2:
481 if f2 in m2:
480 actions[f] = ('m', (f2, f, f2, False, pa.node()),
482 actions[f] = ('m', (f2, f, f2, False, pa.node()),
481 "remote copied from " + f2)
483 "remote copied from " + f2)
482 else:
484 else:
483 actions[f] = ('m', (f2, f, f2, True, pa.node()),
485 actions[f] = ('m', (f2, f, f2, True, pa.node()),
484 "remote moved from " + f2)
486 "remote moved from " + f2)
485 elif f not in ma:
487 elif f not in ma:
486 # local unknown, remote created: the logic is described by the
488 # local unknown, remote created: the logic is described by the
487 # following table:
489 # following table:
488 #
490 #
489 # force branchmerge different | action
491 # force branchmerge different | action
490 # n * * | create
492 # n * * | create
491 # y n * | create
493 # y n * | create
492 # y y n | create
494 # y y n | create
493 # y y y | merge
495 # y y y | merge
494 #
496 #
495 # Checking whether the files are different is expensive, so we
497 # Checking whether the files are different is expensive, so we
496 # don't do that when we can avoid it.
498 # don't do that when we can avoid it.
497 if not force:
499 if not force:
498 actions[f] = ('c', (fl2,), "remote created")
500 actions[f] = ('c', (fl2,), "remote created")
499 elif not branchmerge:
501 elif not branchmerge:
500 actions[f] = ('c', (fl2,), "remote created")
502 actions[f] = ('c', (fl2,), "remote created")
501 else:
503 else:
502 different = _checkunknownfile(repo, wctx, p2, f)
504 different = _checkunknownfile(repo, wctx, p2, f)
503 if different:
505 if different:
504 actions[f] = ('m', (f, f, None, False, pa.node()),
506 actions[f] = ('m', (f, f, None, False, pa.node()),
505 "remote differs from untracked local")
507 "remote differs from untracked local")
506 else:
508 else:
507 actions[f] = ('g', (fl2,), "remote created")
509 actions[f] = ('g', (fl2,), "remote created")
508 elif n2 != ma[f]:
510 elif n2 != ma[f]:
509 if acceptremote:
511 if acceptremote:
510 actions[f] = ('c', (fl2,), "remote recreating")
512 actions[f] = ('c', (fl2,), "remote recreating")
511 else:
513 else:
512 actions[f] = ('dc', (fl2,), "prompt deleted/changed")
514 actions[f] = ('dc', (fl2,), "prompt deleted/changed")
513
515
514 aborts = []
516 aborts = []
515 if not force:
517 if not force:
516 for f, (m, args, msg) in actions.iteritems():
518 for f, (m, args, msg) in actions.iteritems():
517 if m in ('c', 'dc'):
519 if m in ('c', 'dc'):
518 if _checkunknownfile(repo, wctx, p2, f):
520 if _checkunknownfile(repo, wctx, p2, f):
519 aborts.append(f)
521 aborts.append(f)
522 elif m == 'dg':
523 if _checkunknownfile(repo, wctx, p2, f, args[0]):
524 aborts.append(f)
520
525
521 for f in sorted(aborts):
526 for f in sorted(aborts):
522 repo.ui.warn(_("%s: untracked file differs\n") % f)
527 repo.ui.warn(_("%s: untracked file differs\n") % f)
523 if aborts:
528 if aborts:
524 raise util.Abort(_("untracked files in working directory differ "
529 raise util.Abort(_("untracked files in working directory differ "
525 "from files in requested revision"))
530 "from files in requested revision"))
526
531
527 for f, (m, args, msg) in actions.iteritems():
532 for f, (m, args, msg) in actions.iteritems():
528 if m == 'c':
533 if m == 'c':
529 actions[f] = ('g', args, msg)
534 actions[f] = ('g', args, msg)
530
535
531 return actions, diverge, renamedelete
536 return actions, diverge, renamedelete
532
537
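manifestmerge returns a mapping from file name to an (action code, args, message) triple. For the directory-rename merge exercised in the test further down this page, the result would look roughly like the following sketch (the flag values are assumed empty here; the codes and messages mirror the `hg merge --debug` output below):

    # Illustrative only -- the exact args tuples come from manifestmerge above.
    actions = {
        'a/a': ('r', None, "other deleted"),
        'a/b': ('r', None, "other deleted"),
        'b/a': ('g', ('',), "remote created"),
        'b/b': ('g', ('',), "remote created"),
        'b/c': ('dm', ('a/c', ''), "remote directory rename - move from a/c"),
    }
    for f, (m, args, msg) in sorted(actions.items()):
        print(" %s: %s -> %s" % (f, msg, m))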
533 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
538 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
534 """Resolves false conflicts where the nodeid changed but the content
539 """Resolves false conflicts where the nodeid changed but the content
535 remained the same."""
540 remained the same."""
536
541
537 for f, (m, args, msg) in actions.items():
542 for f, (m, args, msg) in actions.items():
538 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
543 if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
539 # local did change but ended up with same content
544 # local did change but ended up with same content
540 actions[f] = 'r', None, "prompt same"
545 actions[f] = 'r', None, "prompt same"
541 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
546 elif m == 'dc' and f in ancestor and not mctx[f].cmp(ancestor[f]):
542 # remote did change but ended up with same content
547 # remote did change but ended up with same content
543 del actions[f] # don't get = keep local deleted
548 del actions[f] # don't get = keep local deleted
544
549
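_resolvetrivial drops or downgrades 'cd'/'dc' prompts when the surviving side is byte-identical to the ancestor. A toy sketch of that filtering over plain dictionaries, with content equality standing in for filectx.cmp (an assumption for illustration):

    def resolve_trivial(actions, local, remote, ancestor):
        for f, (m, args, msg) in list(actions.items()):
            if m == 'cd' and f in ancestor and local.get(f) == ancestor[f]:
                actions[f] = ('r', None, "prompt same")  # local change was a no-op
            elif m == 'dc' and f in ancestor and remote.get(f) == ancestor[f]:
                del actions[f]                           # keep the local deletion

    acts = {'x': ('dc', ('',), "prompt deleted/changed")}
    resolve_trivial(acts, {}, {'x': 'data'}, {'x': 'data'})
    print(acts)   # {} -- the false conflict disappears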
545 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force, partial,
550 def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force, partial,
546 acceptremote, followcopies):
551 acceptremote, followcopies):
547 "Calculate the actions needed to merge mctx into wctx using ancestors"
552 "Calculate the actions needed to merge mctx into wctx using ancestors"
548
553
549 if len(ancestors) == 1: # default
554 if len(ancestors) == 1: # default
550 actions, diverge, renamedelete = manifestmerge(
555 actions, diverge, renamedelete = manifestmerge(
551 repo, wctx, mctx, ancestors[0], branchmerge, force, partial,
556 repo, wctx, mctx, ancestors[0], branchmerge, force, partial,
552 acceptremote, followcopies)
557 acceptremote, followcopies)
553
558
554 else: # only when merge.preferancestor=* - the default
559 else: # only when merge.preferancestor=* - the default
555 repo.ui.note(
560 repo.ui.note(
556 _("note: merging %s and %s using bids from ancestors %s\n") %
561 _("note: merging %s and %s using bids from ancestors %s\n") %
557 (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))
562 (wctx, mctx, _(' and ').join(str(anc) for anc in ancestors)))
558
563
559 # Call for bids
564 # Call for bids
560 fbids = {} # mapping filename to bids (action method to list of actions)
565 fbids = {} # mapping filename to bids (action method to list of actions)
561 diverge, renamedelete = None, None
566 diverge, renamedelete = None, None
562 for ancestor in ancestors:
567 for ancestor in ancestors:
563 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
568 repo.ui.note(_('\ncalculating bids for ancestor %s\n') % ancestor)
564 actions, diverge1, renamedelete1 = manifestmerge(
569 actions, diverge1, renamedelete1 = manifestmerge(
565 repo, wctx, mctx, ancestor, branchmerge, force, partial,
570 repo, wctx, mctx, ancestor, branchmerge, force, partial,
566 acceptremote, followcopies)
571 acceptremote, followcopies)
567 if diverge is None: # and renamedelete is None.
572 if diverge is None: # and renamedelete is None.
568 # Arbitrarily pick warnings from first iteration
573 # Arbitrarily pick warnings from first iteration
569 diverge = diverge1
574 diverge = diverge1
570 renamedelete = renamedelete1
575 renamedelete = renamedelete1
571 for f, a in sorted(actions.iteritems()):
576 for f, a in sorted(actions.iteritems()):
572 m, args, msg = a
577 m, args, msg = a
573 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
578 repo.ui.debug(' %s: %s -> %s\n' % (f, msg, m))
574 if f in fbids:
579 if f in fbids:
575 d = fbids[f]
580 d = fbids[f]
576 if m in d:
581 if m in d:
577 d[m].append(a)
582 d[m].append(a)
578 else:
583 else:
579 d[m] = [a]
584 d[m] = [a]
580 else:
585 else:
581 fbids[f] = {m: [a]}
586 fbids[f] = {m: [a]}
582
587
583 # Pick the best bid for each file
588 # Pick the best bid for each file
584 repo.ui.note(_('\nauction for merging merge bids\n'))
589 repo.ui.note(_('\nauction for merging merge bids\n'))
585 actions = {}
590 actions = {}
586 for f, bids in sorted(fbids.items()):
591 for f, bids in sorted(fbids.items()):
587 # bids is a mapping from action method to list of actions
592 # bids is a mapping from action method to list of actions
588 # Consensus?
593 # Consensus?
589 if len(bids) == 1: # all bids are the same kind of method
594 if len(bids) == 1: # all bids are the same kind of method
590 m, l = bids.items()[0]
595 m, l = bids.items()[0]
591 if util.all(a == l[0] for a in l[1:]): # len(bids) is > 1
596 if util.all(a == l[0] for a in l[1:]): # len(bids) is > 1
592 repo.ui.note(" %s: consensus for %s\n" % (f, m))
597 repo.ui.note(" %s: consensus for %s\n" % (f, m))
593 actions[f] = l[0]
598 actions[f] = l[0]
594 continue
599 continue
595 # If keep is an option, just do it.
600 # If keep is an option, just do it.
596 if 'k' in bids:
601 if 'k' in bids:
597 repo.ui.note(" %s: picking 'keep' action\n" % f)
602 repo.ui.note(" %s: picking 'keep' action\n" % f)
598 actions[f] = bids['k'][0]
603 actions[f] = bids['k'][0]
599 continue
604 continue
600 # If there are gets and they all agree [how could they not?], do it.
605 # If there are gets and they all agree [how could they not?], do it.
601 if 'g' in bids:
606 if 'g' in bids:
602 ga0 = bids['g'][0]
607 ga0 = bids['g'][0]
603 if util.all(a == ga0 for a in bids['g'][1:]):
608 if util.all(a == ga0 for a in bids['g'][1:]):
604 repo.ui.note(" %s: picking 'get' action\n" % f)
609 repo.ui.note(" %s: picking 'get' action\n" % f)
605 actions[f] = ga0
610 actions[f] = ga0
606 continue
611 continue
607 # TODO: Consider other simple actions such as mode changes
612 # TODO: Consider other simple actions such as mode changes
608 # Handle inefficient democrazy.
613 # Handle inefficient democrazy.
609 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
614 repo.ui.note(_(' %s: multiple bids for merge action:\n') % f)
610 for m, l in sorted(bids.items()):
615 for m, l in sorted(bids.items()):
611 for _f, args, msg in l:
616 for _f, args, msg in l:
612 repo.ui.note(' %s -> %s\n' % (msg, m))
617 repo.ui.note(' %s -> %s\n' % (msg, m))
613 # Pick random action. TODO: Instead, prompt user when resolving
618 # Pick random action. TODO: Instead, prompt user when resolving
614 m, l = bids.items()[0]
619 m, l = bids.items()[0]
615 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
620 repo.ui.warn(_(' %s: ambiguous merge - picked %s action\n') %
616 (f, m))
621 (f, m))
617 actions[f] = l[0]
622 actions[f] = l[0]
618 continue
623 continue
619 repo.ui.note(_('end of auction\n\n'))
624 repo.ui.note(_('end of auction\n\n'))
620
625
621 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
626 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
622
627
623 if wctx.rev() is None:
628 if wctx.rev() is None:
624 fractions = _forgetremoved(wctx, mctx, branchmerge)
629 fractions = _forgetremoved(wctx, mctx, branchmerge)
625 actions.update(fractions)
630 actions.update(fractions)
626
631
627 return actions, diverge, renamedelete
632 return actions, diverge, renamedelete
628
633
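With several common ancestors, calculateupdates gathers one bid per ancestor and then auctions per file: consensus first, then a 'keep' bid, then agreeing 'get' bids, otherwise an arbitrary pick with a warning. A compact sketch of that auction over hypothetical bid data (the consensus branch is simplified; the real code also verifies that all proposals are identical):

    fbids = {
        'a/c': {'k': [('k', (), "remote unchanged")]},
        'b/c': {'g': [('g', ('',), "remote created"),
                      ('g', ('',), "remote created")]},
    }
    actions = {}
    for f, bids in sorted(fbids.items()):
        if len(bids) == 1:                       # all ancestors propose the same method
            method, proposals = list(bids.items())[0]
            actions[f] = proposals[0]
        elif 'k' in bids:                        # keeping the local file is always safe
            actions[f] = bids['k'][0]
        else:                                    # ambiguous: pick one and warn
            method, proposals = sorted(bids.items())[0]
            actions[f] = proposals[0]
    print(actions)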
629 def batchremove(repo, actions):
634 def batchremove(repo, actions):
630 """apply removes to the working directory
635 """apply removes to the working directory
631
636
632 yields tuples for progress updates
637 yields tuples for progress updates
633 """
638 """
634 verbose = repo.ui.verbose
639 verbose = repo.ui.verbose
635 unlink = util.unlinkpath
640 unlink = util.unlinkpath
636 wjoin = repo.wjoin
641 wjoin = repo.wjoin
637 audit = repo.wopener.audit
642 audit = repo.wopener.audit
638 i = 0
643 i = 0
639 for f, args, msg in actions:
644 for f, args, msg in actions:
640 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
645 repo.ui.debug(" %s: %s -> r\n" % (f, msg))
641 if verbose:
646 if verbose:
642 repo.ui.note(_("removing %s\n") % f)
647 repo.ui.note(_("removing %s\n") % f)
643 audit(f)
648 audit(f)
644 try:
649 try:
645 unlink(wjoin(f), ignoremissing=True)
650 unlink(wjoin(f), ignoremissing=True)
646 except OSError, inst:
651 except OSError, inst:
647 repo.ui.warn(_("update failed to remove %s: %s!\n") %
652 repo.ui.warn(_("update failed to remove %s: %s!\n") %
648 (f, inst.strerror))
653 (f, inst.strerror))
649 if i == 100:
654 if i == 100:
650 yield i, f
655 yield i, f
651 i = 0
656 i = 0
652 i += 1
657 i += 1
653 if i > 0:
658 if i > 0:
654 yield i, f
659 yield i, f
655
660
656 def batchget(repo, mctx, actions):
661 def batchget(repo, mctx, actions):
657 """apply gets to the working directory
662 """apply gets to the working directory
658
663
659 mctx is the context to get from
664 mctx is the context to get from
660
665
661 yields tuples for progress updates
666 yields tuples for progress updates
662 """
667 """
663 verbose = repo.ui.verbose
668 verbose = repo.ui.verbose
664 fctx = mctx.filectx
669 fctx = mctx.filectx
665 wwrite = repo.wwrite
670 wwrite = repo.wwrite
666 i = 0
671 i = 0
667 for f, args, msg in actions:
672 for f, args, msg in actions:
668 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
673 repo.ui.debug(" %s: %s -> g\n" % (f, msg))
669 if verbose:
674 if verbose:
670 repo.ui.note(_("getting %s\n") % f)
675 repo.ui.note(_("getting %s\n") % f)
671 wwrite(f, fctx(f).data(), args[0])
676 wwrite(f, fctx(f).data(), args[0])
672 if i == 100:
677 if i == 100:
673 yield i, f
678 yield i, f
674 i = 0
679 i = 0
675 i += 1
680 i += 1
676 if i > 0:
681 if i > 0:
677 yield i, f
682 yield i, f
678
683
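batchremove and batchget both yield progress tuples roughly every 100 files so the progress bar is not refreshed for every single file. The batching pattern in isolation, over an arbitrary iterable (a sketch, not the hg implementation):

    def batched_progress(items, batchsize=100):
        """Yield (count, last_item) roughly every `batchsize` processed items."""
        i = 0
        last = None
        for last in items:
            # ... per-item work would happen here ...
            if i == batchsize:
                yield i, last
                i = 0
            i += 1
        if i > 0:
            yield i, last

    total = 0
    for count, item in batched_progress(range(250)):
        total += count
    print(total)   # 250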
679 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
684 def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
680 """apply the merge action list to the working directory
685 """apply the merge action list to the working directory
681
686
682 wctx is the working copy context
687 wctx is the working copy context
683 mctx is the context to be merged into the working copy
688 mctx is the context to be merged into the working copy
684
689
685 Return a tuple of counts (updated, merged, removed, unresolved) that
690 Return a tuple of counts (updated, merged, removed, unresolved) that
686 describes how many files were affected by the update.
691 describes how many files were affected by the update.
687 """
692 """
688
693
689 updated, merged, removed, unresolved = 0, 0, 0, 0
694 updated, merged, removed, unresolved = 0, 0, 0, 0
690 ms = mergestate(repo)
695 ms = mergestate(repo)
691 ms.reset(wctx.p1().node(), mctx.node())
696 ms.reset(wctx.p1().node(), mctx.node())
692 moves = []
697 moves = []
693 for m, l in actions.items():
698 for m, l in actions.items():
694 l.sort()
699 l.sort()
695
700
696 # prescan for merges
701 # prescan for merges
697 for f, args, msg in actions['m']:
702 for f, args, msg in actions['m']:
698 f1, f2, fa, move, anc = args
703 f1, f2, fa, move, anc = args
699 if f == '.hgsubstate': # merged internally
704 if f == '.hgsubstate': # merged internally
700 continue
705 continue
701 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
706 repo.ui.debug(" preserving %s for resolve of %s\n" % (f1, f))
702 fcl = wctx[f1]
707 fcl = wctx[f1]
703 fco = mctx[f2]
708 fco = mctx[f2]
704 actx = repo[anc]
709 actx = repo[anc]
705 if fa in actx:
710 if fa in actx:
706 fca = actx[fa]
711 fca = actx[fa]
707 else:
712 else:
708 fca = repo.filectx(f1, fileid=nullrev)
713 fca = repo.filectx(f1, fileid=nullrev)
709 ms.add(fcl, fco, fca, f)
714 ms.add(fcl, fco, fca, f)
710 if f1 != f and move:
715 if f1 != f and move:
711 moves.append(f1)
716 moves.append(f1)
712
717
713 audit = repo.wopener.audit
718 audit = repo.wopener.audit
714 _updating = _('updating')
719 _updating = _('updating')
715 _files = _('files')
720 _files = _('files')
716 progress = repo.ui.progress
721 progress = repo.ui.progress
717
722
718 # remove renamed files after they have been safely stored
723 # remove renamed files after they have been safely stored
719 for f in moves:
724 for f in moves:
720 if os.path.lexists(repo.wjoin(f)):
725 if os.path.lexists(repo.wjoin(f)):
721 repo.ui.debug("removing %s\n" % f)
726 repo.ui.debug("removing %s\n" % f)
722 audit(f)
727 audit(f)
723 util.unlinkpath(repo.wjoin(f))
728 util.unlinkpath(repo.wjoin(f))
724
729
725 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
730 numupdates = sum(len(l) for m, l in actions.items() if m != 'k')
726
731
727 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
732 if [a for a in actions['r'] if a[0] == '.hgsubstate']:
728 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
733 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
729
734
730 # remove in parallel (must come first)
735 # remove in parallel (must come first)
731 z = 0
736 z = 0
732 prog = worker.worker(repo.ui, 0.001, batchremove, (repo,), actions['r'])
737 prog = worker.worker(repo.ui, 0.001, batchremove, (repo,), actions['r'])
733 for i, item in prog:
738 for i, item in prog:
734 z += i
739 z += i
735 progress(_updating, z, item=item, total=numupdates, unit=_files)
740 progress(_updating, z, item=item, total=numupdates, unit=_files)
736 removed = len(actions['r'])
741 removed = len(actions['r'])
737
742
738 # get in parallel
743 # get in parallel
739 prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx), actions['g'])
744 prog = worker.worker(repo.ui, 0.001, batchget, (repo, mctx), actions['g'])
740 for i, item in prog:
745 for i, item in prog:
741 z += i
746 z += i
742 progress(_updating, z, item=item, total=numupdates, unit=_files)
747 progress(_updating, z, item=item, total=numupdates, unit=_files)
743 updated = len(actions['g'])
748 updated = len(actions['g'])
744
749
745 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
750 if [a for a in actions['g'] if a[0] == '.hgsubstate']:
746 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
751 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
747
752
748 # forget (manifest only, just log it) (must come first)
753 # forget (manifest only, just log it) (must come first)
749 for f, args, msg in actions['f']:
754 for f, args, msg in actions['f']:
750 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
755 repo.ui.debug(" %s: %s -> f\n" % (f, msg))
751 z += 1
756 z += 1
752 progress(_updating, z, item=f, total=numupdates, unit=_files)
757 progress(_updating, z, item=f, total=numupdates, unit=_files)
753
758
754 # re-add (manifest only, just log it)
759 # re-add (manifest only, just log it)
755 for f, args, msg in actions['a']:
760 for f, args, msg in actions['a']:
756 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
761 repo.ui.debug(" %s: %s -> a\n" % (f, msg))
757 z += 1
762 z += 1
758 progress(_updating, z, item=f, total=numupdates, unit=_files)
763 progress(_updating, z, item=f, total=numupdates, unit=_files)
759
764
760 # keep (noop, just log it)
765 # keep (noop, just log it)
761 for f, args, msg in actions['k']:
766 for f, args, msg in actions['k']:
762 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
767 repo.ui.debug(" %s: %s -> k\n" % (f, msg))
763 # no progress
768 # no progress
764
769
765 # merge
770 # merge
766 for f, args, msg in actions['m']:
771 for f, args, msg in actions['m']:
767 repo.ui.debug(" %s: %s -> m\n" % (f, msg))
772 repo.ui.debug(" %s: %s -> m\n" % (f, msg))
768 z += 1
773 z += 1
769 progress(_updating, z, item=f, total=numupdates, unit=_files)
774 progress(_updating, z, item=f, total=numupdates, unit=_files)
770 if f == '.hgsubstate': # subrepo states need updating
775 if f == '.hgsubstate': # subrepo states need updating
771 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
776 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
772 overwrite)
777 overwrite)
773 continue
778 continue
774 audit(f)
779 audit(f)
775 r = ms.resolve(f, wctx, labels=labels)
780 r = ms.resolve(f, wctx, labels=labels)
776 if r is not None and r > 0:
781 if r is not None and r > 0:
777 unresolved += 1
782 unresolved += 1
778 else:
783 else:
779 if r is None:
784 if r is None:
780 updated += 1
785 updated += 1
781 else:
786 else:
782 merged += 1
787 merged += 1
783
788
784 # directory rename, move local
789 # directory rename, move local
785 for f, args, msg in actions['dm']:
790 for f, args, msg in actions['dm']:
786 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
791 repo.ui.debug(" %s: %s -> dm\n" % (f, msg))
787 z += 1
792 z += 1
788 progress(_updating, z, item=f, total=numupdates, unit=_files)
793 progress(_updating, z, item=f, total=numupdates, unit=_files)
789 f0, flags = args
794 f0, flags = args
790 repo.ui.note(_("moving %s to %s\n") % (f0, f))
795 repo.ui.note(_("moving %s to %s\n") % (f0, f))
791 audit(f)
796 audit(f)
792 repo.wwrite(f, wctx.filectx(f0).data(), flags)
797 repo.wwrite(f, wctx.filectx(f0).data(), flags)
793 util.unlinkpath(repo.wjoin(f0))
798 util.unlinkpath(repo.wjoin(f0))
794 updated += 1
799 updated += 1
795
800
796 # local directory rename, get
801 # local directory rename, get
797 for f, args, msg in actions['dg']:
802 for f, args, msg in actions['dg']:
798 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
803 repo.ui.debug(" %s: %s -> dg\n" % (f, msg))
799 z += 1
804 z += 1
800 progress(_updating, z, item=f, total=numupdates, unit=_files)
805 progress(_updating, z, item=f, total=numupdates, unit=_files)
801 f0, flags = args
806 f0, flags = args
802 repo.ui.note(_("getting %s to %s\n") % (f0, f))
807 repo.ui.note(_("getting %s to %s\n") % (f0, f))
803 repo.wwrite(f, mctx.filectx(f0).data(), flags)
808 repo.wwrite(f, mctx.filectx(f0).data(), flags)
804 updated += 1
809 updated += 1
805
810
806 # exec
811 # exec
807 for f, args, msg in actions['e']:
812 for f, args, msg in actions['e']:
808 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
813 repo.ui.debug(" %s: %s -> e\n" % (f, msg))
809 z += 1
814 z += 1
810 progress(_updating, z, item=f, total=numupdates, unit=_files)
815 progress(_updating, z, item=f, total=numupdates, unit=_files)
811 flags, = args
816 flags, = args
812 audit(f)
817 audit(f)
813 util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
818 util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
814 updated += 1
819 updated += 1
815
820
816 ms.commit()
821 ms.commit()
817 progress(_updating, None, total=numupdates, unit=_files)
822 progress(_updating, None, total=numupdates, unit=_files)
818
823
819 return updated, merged, removed, unresolved
824 return updated, merged, removed, unresolved
820
825
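applyupdates returns the (updated, merged, removed, unresolved) counters, which callers format into the familiar summary line, e.g. "3 files updated, 0 files merged, 2 files removed, 0 files unresolved" in the test output below. A minimal formatter over such a tuple (the wording is taken from the test transcript; the function name is hypothetical):

    def summarize(stats):
        updated, merged, removed, unresolved = stats
        return ("%d files updated, %d files merged, "
                "%d files removed, %d files unresolved"
                % (updated, merged, removed, unresolved))

    print(summarize((3, 0, 2, 0)))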
821 def recordupdates(repo, actions, branchmerge):
826 def recordupdates(repo, actions, branchmerge):
822 "record merge actions to the dirstate"
827 "record merge actions to the dirstate"
823 # remove (must come first)
828 # remove (must come first)
824 for f, args, msg in actions['r']:
829 for f, args, msg in actions['r']:
825 if branchmerge:
830 if branchmerge:
826 repo.dirstate.remove(f)
831 repo.dirstate.remove(f)
827 else:
832 else:
828 repo.dirstate.drop(f)
833 repo.dirstate.drop(f)
829
834
830 # forget (must come first)
835 # forget (must come first)
831 for f, args, msg in actions['f']:
836 for f, args, msg in actions['f']:
832 repo.dirstate.drop(f)
837 repo.dirstate.drop(f)
833
838
834 # re-add
839 # re-add
835 for f, args, msg in actions['a']:
840 for f, args, msg in actions['a']:
836 if not branchmerge:
841 if not branchmerge:
837 repo.dirstate.add(f)
842 repo.dirstate.add(f)
838
843
839 # exec change
844 # exec change
840 for f, args, msg in actions['e']:
845 for f, args, msg in actions['e']:
841 repo.dirstate.normallookup(f)
846 repo.dirstate.normallookup(f)
842
847
843 # keep
848 # keep
844 for f, args, msg in actions['k']:
849 for f, args, msg in actions['k']:
845 pass
850 pass
846
851
847 # get
852 # get
848 for f, args, msg in actions['g']:
853 for f, args, msg in actions['g']:
849 if branchmerge:
854 if branchmerge:
850 repo.dirstate.otherparent(f)
855 repo.dirstate.otherparent(f)
851 else:
856 else:
852 repo.dirstate.normal(f)
857 repo.dirstate.normal(f)
853
858
854 # merge
859 # merge
855 for f, args, msg in actions['m']:
860 for f, args, msg in actions['m']:
856 f1, f2, fa, move, anc = args
861 f1, f2, fa, move, anc = args
857 if branchmerge:
862 if branchmerge:
858 # We've done a branch merge, mark this file as merged
863 # We've done a branch merge, mark this file as merged
859 # so that we properly record the merger later
864 # so that we properly record the merger later
860 repo.dirstate.merge(f)
865 repo.dirstate.merge(f)
861 if f1 != f2: # copy/rename
866 if f1 != f2: # copy/rename
862 if move:
867 if move:
863 repo.dirstate.remove(f1)
868 repo.dirstate.remove(f1)
864 if f1 != f:
869 if f1 != f:
865 repo.dirstate.copy(f1, f)
870 repo.dirstate.copy(f1, f)
866 else:
871 else:
867 repo.dirstate.copy(f2, f)
872 repo.dirstate.copy(f2, f)
868 else:
873 else:
869 # We've update-merged a locally modified file, so
874 # We've update-merged a locally modified file, so
870 # we set the dirstate to emulate a normal checkout
875 # we set the dirstate to emulate a normal checkout
871 # of that file some time in the past. Thus our
876 # of that file some time in the past. Thus our
872 # merge will appear as a normal local file
877 # merge will appear as a normal local file
873 # modification.
878 # modification.
874 if f2 == f: # file not locally copied/moved
879 if f2 == f: # file not locally copied/moved
875 repo.dirstate.normallookup(f)
880 repo.dirstate.normallookup(f)
876 if move:
881 if move:
877 repo.dirstate.drop(f1)
882 repo.dirstate.drop(f1)
878
883
879 # directory rename, move local
884 # directory rename, move local
880 for f, args, msg in actions['dm']:
885 for f, args, msg in actions['dm']:
881 f0, flag = args
886 f0, flag = args
882 if branchmerge:
887 if branchmerge:
883 repo.dirstate.add(f)
888 repo.dirstate.add(f)
884 repo.dirstate.remove(f0)
889 repo.dirstate.remove(f0)
885 repo.dirstate.copy(f0, f)
890 repo.dirstate.copy(f0, f)
886 else:
891 else:
887 repo.dirstate.normal(f)
892 repo.dirstate.normal(f)
888 repo.dirstate.drop(f0)
893 repo.dirstate.drop(f0)
889
894
890 # directory rename, get
895 # directory rename, get
891 for f, args, msg in actions['dg']:
896 for f, args, msg in actions['dg']:
892 f0, flag = args
897 f0, flag = args
893 if branchmerge:
898 if branchmerge:
894 repo.dirstate.add(f)
899 repo.dirstate.add(f)
895 repo.dirstate.copy(f0, f)
900 repo.dirstate.copy(f0, f)
896 else:
901 else:
897 repo.dirstate.normal(f)
902 repo.dirstate.normal(f)
898
903
899 def update(repo, node, branchmerge, force, partial, ancestor=None,
904 def update(repo, node, branchmerge, force, partial, ancestor=None,
900 mergeancestor=False, labels=None):
905 mergeancestor=False, labels=None):
901 """
906 """
902 Perform a merge between the working directory and the given node
907 Perform a merge between the working directory and the given node
903
908
904 node = the node to update to, or None if unspecified
909 node = the node to update to, or None if unspecified
905 branchmerge = whether to merge between branches
910 branchmerge = whether to merge between branches
906 force = whether to force branch merging or file overwriting
911 force = whether to force branch merging or file overwriting
907 partial = a function to filter file lists (dirstate not updated)
912 partial = a function to filter file lists (dirstate not updated)
908 mergeancestor = whether it is merging with an ancestor. If true,
913 mergeancestor = whether it is merging with an ancestor. If true,
909 we should accept the incoming changes for any prompts that occur.
914 we should accept the incoming changes for any prompts that occur.
910 If false, merging with an ancestor (fast-forward) is only allowed
915 If false, merging with an ancestor (fast-forward) is only allowed
911 between different named branches. This flag is used by rebase extension
916 between different named branches. This flag is used by rebase extension
912 as a temporary fix and should be avoided in general.
917 as a temporary fix and should be avoided in general.
913
918
914 The table below shows all the behaviors of the update command
919 The table below shows all the behaviors of the update command
915 given the -c and -C or no options, whether the working directory
920 given the -c and -C or no options, whether the working directory
916 is dirty, whether a revision is specified, and the relationship of
921 is dirty, whether a revision is specified, and the relationship of
917 the parent rev to the target rev (linear, on the same named
922 the parent rev to the target rev (linear, on the same named
918 branch, or on another named branch).
923 branch, or on another named branch).
919
924
920 This logic is tested by test-update-branches.t.
925 This logic is tested by test-update-branches.t.
921
926
922 -c -C dirty rev | linear same cross
927 -c -C dirty rev | linear same cross
923 n n n n | ok (1) x
928 n n n n | ok (1) x
924 n n n y | ok ok ok
929 n n n y | ok ok ok
925 n n y n | merge (2) (2)
930 n n y n | merge (2) (2)
926 n n y y | merge (3) (3)
931 n n y y | merge (3) (3)
927 n y * * | --- discard ---
932 n y * * | --- discard ---
928 y n y * | --- (4) ---
933 y n y * | --- (4) ---
929 y n n * | --- ok ---
934 y n n * | --- ok ---
930 y y * * | --- (5) ---
935 y y * * | --- (5) ---
931
936
932 x = can't happen
937 x = can't happen
933 * = don't-care
938 * = don't-care
934 1 = abort: not a linear update (merge or update --check to force update)
939 1 = abort: not a linear update (merge or update --check to force update)
935 2 = abort: uncommitted changes (commit and merge, or update --clean to
940 2 = abort: uncommitted changes (commit and merge, or update --clean to
936 discard changes)
941 discard changes)
937 3 = abort: uncommitted changes (commit or update --clean to discard changes)
942 3 = abort: uncommitted changes (commit or update --clean to discard changes)
938 4 = abort: uncommitted changes (checked in commands.py)
943 4 = abort: uncommitted changes (checked in commands.py)
939 5 = incompatible options (checked in commands.py)
944 5 = incompatible options (checked in commands.py)
940
945
941 Return the same tuple as applyupdates().
946 Return the same tuple as applyupdates().
942 """
947 """
943
948
944 onode = node
949 onode = node
945 wlock = repo.wlock()
950 wlock = repo.wlock()
946 try:
951 try:
947 wc = repo[None]
952 wc = repo[None]
948 pl = wc.parents()
953 pl = wc.parents()
949 p1 = pl[0]
954 p1 = pl[0]
950 pas = [None]
955 pas = [None]
951 if ancestor is not None:
956 if ancestor is not None:
952 pas = [repo[ancestor]]
957 pas = [repo[ancestor]]
953
958
954 if node is None:
959 if node is None:
955 # Here is where we should consider bookmarks, divergent bookmarks,
960 # Here is where we should consider bookmarks, divergent bookmarks,
956 # foreground changesets (successors), and tip of current branch;
961 # foreground changesets (successors), and tip of current branch;
957 # but currently we are only checking the branch tips.
962 # but currently we are only checking the branch tips.
958 try:
963 try:
959 node = repo.branchtip(wc.branch())
964 node = repo.branchtip(wc.branch())
960 except errormod.RepoLookupError:
965 except errormod.RepoLookupError:
961 if wc.branch() == 'default': # no default branch!
966 if wc.branch() == 'default': # no default branch!
962 node = repo.lookup('tip') # update to tip
967 node = repo.lookup('tip') # update to tip
963 else:
968 else:
964 raise util.Abort(_("branch %s not found") % wc.branch())
969 raise util.Abort(_("branch %s not found") % wc.branch())
965
970
966 if p1.obsolete() and not p1.children():
971 if p1.obsolete() and not p1.children():
967 # allow updating to successors
972 # allow updating to successors
968 successors = obsolete.successorssets(repo, p1.node())
973 successors = obsolete.successorssets(repo, p1.node())
969
974
970 # behavior of certain cases is as follows,
975 # behavior of certain cases is as follows,
971 #
976 #
972 # divergent changesets: update to highest rev, similar to what
977 # divergent changesets: update to highest rev, similar to what
973 # is currently done when there is more than one head
978 # is currently done when there is more than one head
974 # (i.e. 'tip')
979 # (i.e. 'tip')
975 #
980 #
976 # replaced changesets: same as divergent except we know there
981 # replaced changesets: same as divergent except we know there
977 # is no conflict
982 # is no conflict
978 #
983 #
979 # pruned changeset: no update is done; though, we could
984 # pruned changeset: no update is done; though, we could
980 # consider updating to the first non-obsolete parent,
985 # consider updating to the first non-obsolete parent,
981 # similar to what is currently done for 'hg prune'
986 # similar to what is currently done for 'hg prune'
982
987
983 if successors:
988 if successors:
984 # flattening the list here handles both divergent (len > 1)
989 # flattening the list here handles both divergent (len > 1)
985 # and the usual case (len = 1)
990 # and the usual case (len = 1)
986 successors = [n for sub in successors for n in sub]
991 successors = [n for sub in successors for n in sub]
987
992
988 # get the max revision for the given successors set,
993 # get the max revision for the given successors set,
989 # i.e. the 'tip' of a set
994 # i.e. the 'tip' of a set
990 node = repo.revs('max(%ln)', successors).first()
995 node = repo.revs('max(%ln)', successors).first()
991 pas = [p1]
996 pas = [p1]
992
997
993 overwrite = force and not branchmerge
998 overwrite = force and not branchmerge
994
999
995 p2 = repo[node]
1000 p2 = repo[node]
996 if pas[0] is None:
1001 if pas[0] is None:
997 if repo.ui.config('merge', 'preferancestor', '*') == '*':
1002 if repo.ui.config('merge', 'preferancestor', '*') == '*':
998 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1003 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
999 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1004 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1000 else:
1005 else:
1001 pas = [p1.ancestor(p2, warn=branchmerge)]
1006 pas = [p1.ancestor(p2, warn=branchmerge)]
1002
1007
1003 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1008 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
1004
1009
1005 ### check phase
1010 ### check phase
1006 if not overwrite and len(pl) > 1:
1011 if not overwrite and len(pl) > 1:
1007 raise util.Abort(_("outstanding uncommitted merge"))
1012 raise util.Abort(_("outstanding uncommitted merge"))
1008 if branchmerge:
1013 if branchmerge:
1009 if pas == [p2]:
1014 if pas == [p2]:
1010 raise util.Abort(_("merging with a working directory ancestor"
1015 raise util.Abort(_("merging with a working directory ancestor"
1011 " has no effect"))
1016 " has no effect"))
1012 elif pas == [p1]:
1017 elif pas == [p1]:
1013 if not mergeancestor and p1.branch() == p2.branch():
1018 if not mergeancestor and p1.branch() == p2.branch():
1014 raise util.Abort(_("nothing to merge"),
1019 raise util.Abort(_("nothing to merge"),
1015 hint=_("use 'hg update' "
1020 hint=_("use 'hg update' "
1016 "or check 'hg heads'"))
1021 "or check 'hg heads'"))
1017 if not force and (wc.files() or wc.deleted()):
1022 if not force and (wc.files() or wc.deleted()):
1018 raise util.Abort(_("uncommitted changes"),
1023 raise util.Abort(_("uncommitted changes"),
1019 hint=_("use 'hg status' to list changes"))
1024 hint=_("use 'hg status' to list changes"))
1020 for s in sorted(wc.substate):
1025 for s in sorted(wc.substate):
1021 if wc.sub(s).dirty():
1026 if wc.sub(s).dirty():
1022 raise util.Abort(_("uncommitted changes in "
1027 raise util.Abort(_("uncommitted changes in "
1023 "subrepository '%s'") % s)
1028 "subrepository '%s'") % s)
1024
1029
1025 elif not overwrite:
1030 elif not overwrite:
1026 if p1 == p2: # no-op update
1031 if p1 == p2: # no-op update
1027 # call the hooks and exit early
1032 # call the hooks and exit early
1028 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1033 repo.hook('preupdate', throw=True, parent1=xp2, parent2='')
1029 repo.hook('update', parent1=xp2, parent2='', error=0)
1034 repo.hook('update', parent1=xp2, parent2='', error=0)
1030 return 0, 0, 0, 0
1035 return 0, 0, 0, 0
1031
1036
1032 if pas not in ([p1], [p2]): # nonlinear
1037 if pas not in ([p1], [p2]): # nonlinear
1033 dirty = wc.dirty(missing=True)
1038 dirty = wc.dirty(missing=True)
1034 if dirty or onode is None:
1039 if dirty or onode is None:
1035 # The branching here is a bit strange, to ensure we make the minimal
1040 # The branching here is a bit strange, to ensure we make the minimal
1036 # number of calls to obsolete.foreground.
1041 # number of calls to obsolete.foreground.
1037 foreground = obsolete.foreground(repo, [p1.node()])
1042 foreground = obsolete.foreground(repo, [p1.node()])
1038 # note: the <node> variable contains a random identifier
1043 # note: the <node> variable contains a random identifier
1039 if repo[node].node() in foreground:
1044 if repo[node].node() in foreground:
1040 pas = [p1] # allow updating to successors
1045 pas = [p1] # allow updating to successors
1041 elif dirty:
1046 elif dirty:
1042 msg = _("uncommitted changes")
1047 msg = _("uncommitted changes")
1043 if onode is None:
1048 if onode is None:
1044 hint = _("commit and merge, or update --clean to"
1049 hint = _("commit and merge, or update --clean to"
1045 " discard changes")
1050 " discard changes")
1046 else:
1051 else:
1047 hint = _("commit or update --clean to discard"
1052 hint = _("commit or update --clean to discard"
1048 " changes")
1053 " changes")
1049 raise util.Abort(msg, hint=hint)
1054 raise util.Abort(msg, hint=hint)
1050 else: # node is none
1055 else: # node is none
1051 msg = _("not a linear update")
1056 msg = _("not a linear update")
1052 hint = _("merge or update --check to force update")
1057 hint = _("merge or update --check to force update")
1053 raise util.Abort(msg, hint=hint)
1058 raise util.Abort(msg, hint=hint)
1054 else:
1059 else:
1055 # Allow jumping branches if clean and specific rev given
1060 # Allow jumping branches if clean and specific rev given
1056 pas = [p1]
1061 pas = [p1]
1057
1062
1058 followcopies = False
1063 followcopies = False
1059 if overwrite:
1064 if overwrite:
1060 pas = [wc]
1065 pas = [wc]
1061 elif pas == [p2]: # backwards
1066 elif pas == [p2]: # backwards
1062 pas = [wc.p1()]
1067 pas = [wc.p1()]
1063 elif not branchmerge and not wc.dirty(missing=True):
1068 elif not branchmerge and not wc.dirty(missing=True):
1064 pass
1069 pass
1065 elif pas[0] and repo.ui.configbool('merge', 'followcopies', True):
1070 elif pas[0] and repo.ui.configbool('merge', 'followcopies', True):
1066 followcopies = True
1071 followcopies = True
1067
1072
1068 ### calculate phase
1073 ### calculate phase
1069 actionbyfile, diverge, renamedelete = calculateupdates(
1074 actionbyfile, diverge, renamedelete = calculateupdates(
1070 repo, wc, p2, pas, branchmerge, force, partial, mergeancestor,
1075 repo, wc, p2, pas, branchmerge, force, partial, mergeancestor,
1071 followcopies)
1076 followcopies)
1072 # Convert to dictionary-of-lists format
1077 # Convert to dictionary-of-lists format
1073 actions = dict((m, []) for m in 'a f g cd dc r dm dg m e k'.split())
1078 actions = dict((m, []) for m in 'a f g cd dc r dm dg m e k'.split())
1074 for f, (m, args, msg) in actionbyfile.iteritems():
1079 for f, (m, args, msg) in actionbyfile.iteritems():
1075 if m not in actions:
1080 if m not in actions:
1076 actions[m] = []
1081 actions[m] = []
1077 actions[m].append((f, args, msg))
1082 actions[m].append((f, args, msg))
1078
1083
1079 if not util.checkcase(repo.path):
1084 if not util.checkcase(repo.path):
1080 # check collision between files only in p2 for clean update
1085 # check collision between files only in p2 for clean update
1081 if (not branchmerge and
1086 if (not branchmerge and
1082 (force or not wc.dirty(missing=True, branch=False))):
1087 (force or not wc.dirty(missing=True, branch=False))):
1083 _checkcollision(repo, p2.manifest(), None)
1088 _checkcollision(repo, p2.manifest(), None)
1084 else:
1089 else:
1085 _checkcollision(repo, wc.manifest(), actions)
1090 _checkcollision(repo, wc.manifest(), actions)
1086
1091
1087 # Prompt and create actions. TODO: Move this towards resolve phase.
1092 # Prompt and create actions. TODO: Move this towards resolve phase.
1088 for f, args, msg in sorted(actions['cd']):
1093 for f, args, msg in sorted(actions['cd']):
1089 if repo.ui.promptchoice(
1094 if repo.ui.promptchoice(
1090 _("local changed %s which remote deleted\n"
1095 _("local changed %s which remote deleted\n"
1091 "use (c)hanged version or (d)elete?"
1096 "use (c)hanged version or (d)elete?"
1092 "$$ &Changed $$ &Delete") % f, 0):
1097 "$$ &Changed $$ &Delete") % f, 0):
1093 actions['r'].append((f, None, "prompt delete"))
1098 actions['r'].append((f, None, "prompt delete"))
1094 else:
1099 else:
1095 actions['a'].append((f, None, "prompt keep"))
1100 actions['a'].append((f, None, "prompt keep"))
1096 del actions['cd'][:]
1101 del actions['cd'][:]
1097
1102
1098 for f, args, msg in sorted(actions['dc']):
1103 for f, args, msg in sorted(actions['dc']):
1099 flags, = args
1104 flags, = args
1100 if repo.ui.promptchoice(
1105 if repo.ui.promptchoice(
1101 _("remote changed %s which local deleted\n"
1106 _("remote changed %s which local deleted\n"
1102 "use (c)hanged version or leave (d)eleted?"
1107 "use (c)hanged version or leave (d)eleted?"
1103 "$$ &Changed $$ &Deleted") % f, 0) == 0:
1108 "$$ &Changed $$ &Deleted") % f, 0) == 0:
1104 actions['g'].append((f, (flags,), "prompt recreating"))
1109 actions['g'].append((f, (flags,), "prompt recreating"))
1105 del actions['dc'][:]
1110 del actions['dc'][:]
1106
1111
1107 ### apply phase
1112 ### apply phase
1108 if not branchmerge: # just jump to the new rev
1113 if not branchmerge: # just jump to the new rev
1109 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1114 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
1110 if not partial:
1115 if not partial:
1111 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1116 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
1112 # note that we're in the middle of an update
1117 # note that we're in the middle of an update
1113 repo.vfs.write('updatestate', p2.hex())
1118 repo.vfs.write('updatestate', p2.hex())
1114
1119
1115 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1120 stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
1116
1121
1117 # divergent renames
1122 # divergent renames
1118 for f, fl in sorted(diverge.iteritems()):
1123 for f, fl in sorted(diverge.iteritems()):
1119 repo.ui.warn(_("note: possible conflict - %s was renamed "
1124 repo.ui.warn(_("note: possible conflict - %s was renamed "
1120 "multiple times to:\n") % f)
1125 "multiple times to:\n") % f)
1121 for nf in fl:
1126 for nf in fl:
1122 repo.ui.warn(" %s\n" % nf)
1127 repo.ui.warn(" %s\n" % nf)
1123
1128
1124 # rename and delete
1129 # rename and delete
1125 for f, fl in sorted(renamedelete.iteritems()):
1130 for f, fl in sorted(renamedelete.iteritems()):
1126 repo.ui.warn(_("note: possible conflict - %s was deleted "
1131 repo.ui.warn(_("note: possible conflict - %s was deleted "
1127 "and renamed to:\n") % f)
1132 "and renamed to:\n") % f)
1128 for nf in fl:
1133 for nf in fl:
1129 repo.ui.warn(" %s\n" % nf)
1134 repo.ui.warn(" %s\n" % nf)
1130
1135
1131 if not partial:
1136 if not partial:
1132 repo.dirstate.beginparentchange()
1137 repo.dirstate.beginparentchange()
1133 repo.setparents(fp1, fp2)
1138 repo.setparents(fp1, fp2)
1134 recordupdates(repo, actions, branchmerge)
1139 recordupdates(repo, actions, branchmerge)
1135 # update completed, clear state
1140 # update completed, clear state
1136 util.unlink(repo.join('updatestate'))
1141 util.unlink(repo.join('updatestate'))
1137
1142
1138 if not branchmerge:
1143 if not branchmerge:
1139 repo.dirstate.setbranch(p2.branch())
1144 repo.dirstate.setbranch(p2.branch())
1140 repo.dirstate.endparentchange()
1145 repo.dirstate.endparentchange()
1141 finally:
1146 finally:
1142 wlock.release()
1147 wlock.release()
1143
1148
1144 if not partial:
1149 if not partial:
1145 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
1150 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
1146 return stats
1151 return stats
1147
1152
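Inside update() above, the per-file result of calculateupdates is regrouped into a dictionary of lists keyed by action code before being applied and recorded. The regrouping step on its own, with made-up input:

    actionbyfile = {
        'a/a': ('r', None, "other deleted"),
        'b/c': ('dm', ('a/c', ''), "remote directory rename - move from a/c"),
    }
    actions = dict((m, []) for m in 'a f g cd dc r dm dg m e k'.split())
    for f, (m, args, msg) in actionbyfile.items():
        actions.setdefault(m, []).append((f, args, msg))
    print(actions['r'])    # [('a/a', None, 'other deleted')]
    print(actions['dm'])   # [('b/c', ('a/c', ''), 'remote directory rename - move from a/c')]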
1148 def graft(repo, ctx, pctx, labels):
1153 def graft(repo, ctx, pctx, labels):
1149 """Do a graft-like merge.
1154 """Do a graft-like merge.
1150
1155
1151 This is a merge where the merge ancestor is chosen such that one
1156 This is a merge where the merge ancestor is chosen such that one
1152 or more changesets are grafted onto the current changeset. In
1157 or more changesets are grafted onto the current changeset. In
1153 addition to the merge, this fixes up the dirstate to include only
1158 addition to the merge, this fixes up the dirstate to include only
1154 a single parent and tries to duplicate any renames/copies
1159 a single parent and tries to duplicate any renames/copies
1155 appropriately.
1160 appropriately.
1156
1161
1157 ctx - changeset to rebase
1162 ctx - changeset to rebase
1158 pctx - merge base, usually ctx.p1()
1163 pctx - merge base, usually ctx.p1()
1159 labels - merge labels eg ['local', 'graft']
1164 labels - merge labels eg ['local', 'graft']
1160
1165
1161 """
1166 """
1162
1167
1163 stats = update(repo, ctx.node(), True, True, False, pctx.node(),
1168 stats = update(repo, ctx.node(), True, True, False, pctx.node(),
1164 labels=labels)
1169 labels=labels)
1165 # drop the second merge parent
1170 # drop the second merge parent
1166 repo.dirstate.beginparentchange()
1171 repo.dirstate.beginparentchange()
1167 repo.setparents(repo['.'].node(), nullid)
1172 repo.setparents(repo['.'].node(), nullid)
1168 repo.dirstate.write()
1173 repo.dirstate.write()
1169 # fix up dirstate for copies and renames
1174 # fix up dirstate for copies and renames
1170 copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
1175 copies.duplicatecopies(repo, ctx.rev(), pctx.rev())
1171 repo.dirstate.endparentchange()
1176 repo.dirstate.endparentchange()
1172 return stats
1177 return stats
@@ -1,237 +1,239 b''
1 $ hg init t
1 $ hg init t
2 $ cd t
2 $ cd t
3
3
4 $ mkdir a
4 $ mkdir a
5 $ echo foo > a/a
5 $ echo foo > a/a
6 $ echo bar > a/b
6 $ echo bar > a/b
7 $ hg ci -Am "0"
7 $ hg ci -Am "0"
8 adding a/a
8 adding a/a
9 adding a/b
9 adding a/b
10
10
11 $ hg co -C 0
11 $ hg co -C 0
12 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
12 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
13 $ hg mv a b
13 $ hg mv a b
14 moving a/a to b/a (glob)
14 moving a/a to b/a (glob)
15 moving a/b to b/b (glob)
15 moving a/b to b/b (glob)
16 $ hg ci -m "1 mv a/ b/"
16 $ hg ci -m "1 mv a/ b/"
17
17
18 $ hg co -C 0
18 $ hg co -C 0
19 2 files updated, 0 files merged, 2 files removed, 0 files unresolved
19 2 files updated, 0 files merged, 2 files removed, 0 files unresolved
20 $ echo baz > a/c
20 $ echo baz > a/c
21 $ echo quux > a/d
21 $ echo quux > a/d
22 $ hg add a/c
22 $ hg add a/c
23 $ hg ci -m "2 add a/c"
23 $ hg ci -m "2 add a/c"
24 created new head
24 created new head
25
25
26 $ hg merge --debug 1
26 $ hg merge --debug 1
27 searching for copies back to rev 1
27 searching for copies back to rev 1
28 unmatched files in local:
28 unmatched files in local:
29 a/c
29 a/c
30 unmatched files in other:
30 unmatched files in other:
31 b/a
31 b/a
32 b/b
32 b/b
33 all copies found (* = to merge, ! = divergent, % = renamed and deleted):
33 all copies found (* = to merge, ! = divergent, % = renamed and deleted):
34 src: 'a/a' -> dst: 'b/a'
34 src: 'a/a' -> dst: 'b/a'
35 src: 'a/b' -> dst: 'b/b'
35 src: 'a/b' -> dst: 'b/b'
36 checking for directory renames
36 checking for directory renames
37 discovered dir src: 'a/' -> dst: 'b/'
37 discovered dir src: 'a/' -> dst: 'b/'
38 pending file src: 'a/c' -> dst: 'b/c'
38 pending file src: 'a/c' -> dst: 'b/c'
39 resolving manifests
39 resolving manifests
40 branchmerge: True, force: False, partial: False
40 branchmerge: True, force: False, partial: False
41 ancestor: f9b20c0d4c51, local: ce36d17b18fb+, remote: 397f8b00a740
41 ancestor: f9b20c0d4c51, local: ce36d17b18fb+, remote: 397f8b00a740
42 a/a: other deleted -> r
42 a/a: other deleted -> r
43 removing a/a
43 removing a/a
44 a/b: other deleted -> r
44 a/b: other deleted -> r
45 removing a/b
45 removing a/b
46 updating: a/b 2/5 files (40.00%)
46 updating: a/b 2/5 files (40.00%)
47 b/a: remote created -> g
47 b/a: remote created -> g
48 getting b/a
48 getting b/a
49 b/b: remote created -> g
49 b/b: remote created -> g
50 getting b/b
50 getting b/b
51 updating: b/b 4/5 files (80.00%)
51 updating: b/b 4/5 files (80.00%)
52 b/c: remote directory rename - move from a/c -> dm
52 b/c: remote directory rename - move from a/c -> dm
53 updating: b/c 5/5 files (100.00%)
53 updating: b/c 5/5 files (100.00%)
54 moving a/c to b/c (glob)
54 moving a/c to b/c (glob)
55 3 files updated, 0 files merged, 2 files removed, 0 files unresolved
55 3 files updated, 0 files merged, 2 files removed, 0 files unresolved
56 (branch merge, don't forget to commit)
56 (branch merge, don't forget to commit)
57
57
58 $ echo a/* b/*
58 $ echo a/* b/*
59 a/d b/a b/b b/c
59 a/d b/a b/b b/c
60 $ hg st -C
60 $ hg st -C
61 M b/a
61 M b/a
62 M b/b
62 M b/b
63 A b/c
63 A b/c
64 a/c
64 a/c
65 R a/a
65 R a/a
66 R a/b
66 R a/b
67 R a/c
67 R a/c
68 ? a/d
68 ? a/d
69 $ hg ci -m "3 merge 2+1"
69 $ hg ci -m "3 merge 2+1"
70 $ hg debugrename b/c
70 $ hg debugrename b/c
71 b/c renamed from a/c:354ae8da6e890359ef49ade27b68bbc361f3ca88 (glob)
71 b/c renamed from a/c:354ae8da6e890359ef49ade27b68bbc361f3ca88 (glob)
72
72
73 $ hg co -C 1
73 $ hg co -C 1
74 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
74 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
75 $ hg merge --debug 2
75 $ hg merge --debug 2
76 searching for copies back to rev 1
76 searching for copies back to rev 1
77 unmatched files in local:
77 unmatched files in local:
78 b/a
78 b/a
79 b/b
79 b/b
80 unmatched files in other:
80 unmatched files in other:
81 a/c
81 a/c
82 all copies found (* = to merge, ! = divergent, % = renamed and deleted):
82 all copies found (* = to merge, ! = divergent, % = renamed and deleted):
83 src: 'a/a' -> dst: 'b/a'
83 src: 'a/a' -> dst: 'b/a'
84 src: 'a/b' -> dst: 'b/b'
84 src: 'a/b' -> dst: 'b/b'
85 checking for directory renames
85 checking for directory renames
86 discovered dir src: 'a/' -> dst: 'b/'
86 discovered dir src: 'a/' -> dst: 'b/'
87 pending file src: 'a/c' -> dst: 'b/c'
87 pending file src: 'a/c' -> dst: 'b/c'
88 resolving manifests
88 resolving manifests
89 branchmerge: True, force: False, partial: False
89 branchmerge: True, force: False, partial: False
90 ancestor: f9b20c0d4c51, local: 397f8b00a740+, remote: ce36d17b18fb
90 ancestor: f9b20c0d4c51, local: 397f8b00a740+, remote: ce36d17b18fb
91 b/c: local directory rename - get from a/c -> dg
91 b/c: local directory rename - get from a/c -> dg
92 updating: b/c 1/1 files (100.00%)
92 updating: b/c 1/1 files (100.00%)
93 getting a/c to b/c
93 getting a/c to b/c
94 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
94 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
95 (branch merge, don't forget to commit)
95 (branch merge, don't forget to commit)
96
96
97 $ echo a/* b/*
97 $ echo a/* b/*
98 a/d b/a b/b b/c
98 a/d b/a b/b b/c
99 $ hg st -C
99 $ hg st -C
100 A b/c
100 A b/c
101 a/c
101 a/c
102 ? a/d
102 ? a/d
103 $ hg ci -m "4 merge 1+2"
103 $ hg ci -m "4 merge 1+2"
104 created new head
104 created new head
105 $ hg debugrename b/c
105 $ hg debugrename b/c
106 b/c renamed from a/c:354ae8da6e890359ef49ade27b68bbc361f3ca88 (glob)
106 b/c renamed from a/c:354ae8da6e890359ef49ade27b68bbc361f3ca88 (glob)
107
107
108 Local directory rename with conflicting file added in remote source directory
108 Local directory rename with conflicting file added in remote source directory
109 and untracked in local target directory.
109 and untracked in local target directory.
110
110
111 BROKEN: the uncommitted file is overwritten; we should abort
112
113 $ hg co -qC 1
111 $ hg co -qC 1
114 $ echo target > b/c
112 $ echo target > b/c
115 $ hg merge 2
113 $ hg merge 2
114 b/c: untracked file differs
115 abort: untracked files in working directory differ from files in requested revision
116 [255]
117 $ cat b/c
118 target
119 but it should succeed if the content matches
120 $ hg cat -r 2 a/c > b/c
121 $ hg merge 2
116 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
122 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
117 (branch merge, don't forget to commit)
123 (branch merge, don't forget to commit)
118 $ hg st -A
124 $ hg st -C
119 A b/c
125 A b/c
120 a/c
126 a/c
121 ? a/d
127 ? a/d
122 C b/a
123 C b/b
124 $ cat b/c
125 baz
126
128
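The new test above exercises the fix in this change: when the directory rename wants to move a/c to b/c but an untracked b/c already exists with different content, the merge now aborts instead of silently overwriting it, while an untracked file whose content already matches the incoming version is accepted. A rough standalone illustration of that check follows; it is not the actual merge.py logic, and the function name and arguments are made up for the example:

import os

class Abort(Exception):
    pass

def checkuntrackedtarget(target, incomingdata, trackedfiles):
    """Abort if an untracked file at the rename target would be
    overwritten with different content; matching content is fine."""
    if target in trackedfiles or not os.path.lexists(target):
        return
    with open(target, 'rb') as fp:
        if fp.read() != incomingdata:
            print('%s: untracked file differs' % target)
            raise Abort('untracked files in working directory differ '
                        'from files in requested revision')

# e.g. before moving a/c to b/c during the merge, pass the content of
# a/c from the revision being merged in and the set of tracked files:
# checkuntrackedtarget('b/c', data_of_a_c_in_rev_2, tracked_files)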
127 Local directory rename with conflicting file added in remote source directory
129 Local directory rename with conflicting file added in remote source directory
128 and committed in local target directory.
130 and committed in local target directory.
129
131
130 $ hg co -qC 1
132 $ hg co -qC 1
131 $ echo target > b/c
133 $ echo target > b/c
132 $ hg add b/c
134 $ hg add b/c
133 $ hg commit -qm 'new file in target directory'
135 $ hg commit -qm 'new file in target directory'
134 $ hg merge 2
136 $ hg merge 2
135 merging b/c and a/c to b/c
137 merging b/c and a/c to b/c
136 warning: conflicts during merge.
138 warning: conflicts during merge.
137 merging b/c incomplete! (edit conflicts, then use 'hg resolve --mark')
139 merging b/c incomplete! (edit conflicts, then use 'hg resolve --mark')
138 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
140 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
139 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
141 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
140 [1]
142 [1]
141 $ hg st -A
143 $ hg st -A
142 M b/c
144 M b/c
143 a/c
145 a/c
144 ? a/d
146 ? a/d
145 ? b/c.orig
147 ? b/c.orig
146 C b/a
148 C b/a
147 C b/b
149 C b/b
148 $ cat b/c
150 $ cat b/c
149 <<<<<<< local: f1c50ca4f127 - test: new file in target directory
151 <<<<<<< local: f1c50ca4f127 - test: new file in target directory
150 target
152 target
151 =======
153 =======
152 baz
154 baz
153 >>>>>>> other: ce36d17b18fb - test: 2 add a/c
155 >>>>>>> other: ce36d17b18fb - test: 2 add a/c
154 $ rm b/c.orig
156 $ rm b/c.orig
155
157
156 Remote directory rename with conflicting file added in remote target directory
158 Remote directory rename with conflicting file added in remote target directory
157 and committed in local source directory.
159 and committed in local source directory.
158
160
159 $ hg co -qC 2
161 $ hg co -qC 2
160 $ hg st -A
162 $ hg st -A
161 ? a/d
163 ? a/d
162 C a/a
164 C a/a
163 C a/b
165 C a/b
164 C a/c
166 C a/c
165 $ hg merge 5
167 $ hg merge 5
166 merging a/c and b/c to b/c
168 merging a/c and b/c to b/c
167 warning: conflicts during merge.
169 warning: conflicts during merge.
168 merging b/c incomplete! (edit conflicts, then use 'hg resolve --mark')
170 merging b/c incomplete! (edit conflicts, then use 'hg resolve --mark')
169 2 files updated, 0 files merged, 2 files removed, 1 files unresolved
171 2 files updated, 0 files merged, 2 files removed, 1 files unresolved
170 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
172 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
171 [1]
173 [1]
172 $ hg st -A
174 $ hg st -A
173 M b/a
175 M b/a
174 M b/b
176 M b/b
175 M b/c
177 M b/c
176 a/c
178 a/c
177 R a/a
179 R a/a
178 R a/b
180 R a/b
179 R a/c
181 R a/c
180 ? a/d
182 ? a/d
181 ? b/c.orig
183 ? b/c.orig
182 $ cat b/c
184 $ cat b/c
183 <<<<<<< local: ce36d17b18fb - test: 2 add a/c
185 <<<<<<< local: ce36d17b18fb - test: 2 add a/c
184 baz
186 baz
185 =======
187 =======
186 target
188 target
187 >>>>>>> other: f1c50ca4f127 - test: new file in target directory
189 >>>>>>> other: f1c50ca4f127 - test: new file in target directory
188
190
189 Second scenario with two repos:
191 Second scenario with two repos:
190
192
191 $ cd ..
193 $ cd ..
192 $ hg init r1
194 $ hg init r1
193 $ cd r1
195 $ cd r1
194 $ mkdir a
196 $ mkdir a
195 $ echo foo > a/f
197 $ echo foo > a/f
196 $ hg add a
198 $ hg add a
197 adding a/f (glob)
199 adding a/f (glob)
198 $ hg ci -m "a/f == foo"
200 $ hg ci -m "a/f == foo"
199 $ cd ..
201 $ cd ..
200
202
201 $ hg clone r1 r2
203 $ hg clone r1 r2
202 updating to branch default
204 updating to branch default
203 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
205 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
204 $ cd r2
206 $ cd r2
205 $ hg mv a b
207 $ hg mv a b
206 moving a/f to b/f (glob)
208 moving a/f to b/f (glob)
207 $ echo foo1 > b/f
209 $ echo foo1 > b/f
208 $ hg ci -m" a -> b, b/f == foo1"
210 $ hg ci -m" a -> b, b/f == foo1"
209 $ cd ..
211 $ cd ..
210
212
211 $ cd r1
213 $ cd r1
212 $ mkdir a/aa
214 $ mkdir a/aa
213 $ echo bar > a/aa/g
215 $ echo bar > a/aa/g
214 $ hg add a/aa
216 $ hg add a/aa
215 adding a/aa/g (glob)
217 adding a/aa/g (glob)
216 $ hg ci -m "a/aa/g"
218 $ hg ci -m "a/aa/g"
217 $ hg pull ../r2
219 $ hg pull ../r2
218 pulling from ../r2
220 pulling from ../r2
219 searching for changes
221 searching for changes
220 adding changesets
222 adding changesets
221 adding manifests
223 adding manifests
222 adding file changes
224 adding file changes
223 added 1 changesets with 1 changes to 1 files (+1 heads)
225 added 1 changesets with 1 changes to 1 files (+1 heads)
224 (run 'hg heads' to see heads, 'hg merge' to merge)
226 (run 'hg heads' to see heads, 'hg merge' to merge)
225
227
226 $ hg merge
228 $ hg merge
227 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
229 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
228 (branch merge, don't forget to commit)
230 (branch merge, don't forget to commit)
229
231
230 $ hg st -C
232 $ hg st -C
231 M b/f
233 M b/f
232 A b/aa/g
234 A b/aa/g
233 a/aa/g
235 a/aa/g
234 R a/aa/g
236 R a/aa/g
235 R a/f
237 R a/f
236
238
237 $ cd ..
239 $ cd ..
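This second scenario checks that a file added only on one side inside the renamed source directory (a/aa/g from r1) follows the rename recorded on the other side (a -> b in r2) and ends up as b/aa/g, still recorded as a copy of a/aa/g. A tiny sketch of applying such a directory mapping by longest matching prefix, again with hypothetical helper names rather than Mercurial's real implementation:

def applydirrename(dirmap, path):
    """Move a path into the renamed directory by longest matching
    source-directory prefix."""
    best = None
    for src in dirmap:
        if path.startswith(src) and (best is None or len(src) > len(best)):
            best = src
    return dirmap[best] + path[len(best):] if best else path

print(applydirrename({'a/': 'b/'}, 'a/aa/g'))  # b/aa/g, as in the merge above
print(applydirrename({'a/': 'b/'}, 'c/x'))     # c/x, untouched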