##// END OF EJS Templates
copies: add matcher parameter to copy logic...
Durham Goode -
r24782:4906dc0e default
parent child Browse files
Show More
@@ -1,1373 +1,1373
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10
10
11 import os
11 import os
12 import copy
12 import copy
13
13
14 from mercurial import hg, util, cmdutil, scmutil, match as match_, \
14 from mercurial import hg, util, cmdutil, scmutil, match as match_, \
15 archival, pathutil, revset
15 archival, pathutil, revset
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17
17
18 import lfutil
18 import lfutil
19 import lfcommands
19 import lfcommands
20 import basestore
20 import basestore
21
21
22 # -- Utility functions: commonly/repeatedly needed functionality ---------------
22 # -- Utility functions: commonly/repeatedly needed functionality ---------------
23
23
def composelargefilematcher(match, manifest):
    '''Return a copy of ``match`` restricted to the largefiles it covered.

    A file counts as a largefile when its standin is present in
    ``manifest``.'''
    def islfile(f):
        return lfutil.standin(f) in manifest

    m = copy.copy(match)
    m._files = [f for f in m._files if islfile(f)]
    m._fmap = set(m._files)
    m._always = False
    origmatchfn = m.matchfn
    m.matchfn = lambda f: islfile(f) and origmatchfn(f)
    return m
35
35
def composenormalfilematcher(match, manifest, exclude=None):
    '''Return a copy of ``match`` that skips standins, files tracked as
    largefiles in ``manifest``, and anything listed in ``exclude``.'''
    excluded = set(exclude) if exclude is not None else set()

    def isnormal(f):
        if lfutil.isstandin(f):
            return False
        if lfutil.standin(f) in manifest:
            return False
        return f not in excluded

    m = copy.copy(match)
    m._files = [f for f in m._files if isnormal(f)]
    m._fmap = set(m._files)
    m._always = False
    origmatchfn = m.matchfn
    m.matchfn = lambda f: isnormal(f) and origmatchfn(f)
    return m
50
50
def installnormalfilesmatchfn(manifest):
    '''installmatchfn with a matchfn that ignores all largefiles'''
    def overridematch(ctx, pats=[], opts={}, globbed=False,
            default='relpath'):
        # Build the matcher via the original (pre-patch) scmutil.match,
        # then strip standins and largefiles out of it.
        match = oldmatch(ctx, pats, opts, globbed, default)
        return composenormalfilematcher(match, manifest)
    # installmatchfn returns the function it replaced; the closure above
    # captures it as ``oldmatch``.
    oldmatch = installmatchfn(overridematch)
58
58
def installmatchfn(f):
    '''Monkey patch the scmutil module with a custom match function.
    Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
    previous = scmutil.match
    # Stash the replaced function on the override itself so that
    # restorematchfn() can later undo the patch.
    f.oldmatch = previous
    scmutil.match = f
    return previous
66
66
def restorematchfn():
    '''restores scmutil.match to what it was before installmatchfn
    was called. no-op if scmutil.match is its original function.

    Note that n calls to installmatchfn will require n calls to
    restore the original matchfn.'''
    # Supply the current function as the getattr default so this really is
    # a no-op when no override is installed.  Without the default, calling
    # this on an unpatched scmutil.match raised AttributeError, which
    # contradicted the docstring and the sibling restorematchandpatsfn().
    scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
74
74
def installmatchandpatsfn(f):
    '''Monkey patch scmutil.matchandpats with ``f`` and return the
    replaced function (see installmatchfn for the scmutil.match
    counterpart).  Not thread safe.'''
    oldmatchandpats = scmutil.matchandpats
    # Stash the replaced function on the override so that
    # restorematchandpatsfn() can undo the patch.
    setattr(f, 'oldmatchandpats', oldmatchandpats)
    scmutil.matchandpats = f
    return oldmatchandpats
80
80
def restorematchandpatsfn():
    '''restores scmutil.matchandpats to what it was before
    installmatchandpatsfn was called. No-op if scmutil.matchandpats
    is its original function.

    Note that n calls to installmatchandpatsfn will require n calls
    to restore the original matchfn.'''
    # The getattr default makes this a true no-op when no override is
    # currently installed.
    scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
            scmutil.matchandpats)
90
90
def addlargefiles(ui, repo, isaddremove, matcher, **opts):
    '''Find the files matched by ``matcher`` that should become largefiles
    and create/track their standins.

    A file becomes a largefile when --large was given, when its size
    reaches the configured minimum size, or when it matches the configured
    largefiles patterns.  Returns a pair ``(added, bad)`` of filename
    lists.'''
    large = opts.get('large')
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.get('lfsize'))

    # Optional pattern-based matcher from the [largefiles] patterns config.
    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
        if lfpats:
            lfmatcher = match_.match(repo.root, '', list(lfpats))

    lfnames = []
    m = copy.copy(matcher)
    # Silence "file not found" complaints while walking; the normal add
    # path reports them.
    m.bad = lambda x, y: None
    wctx = repo[None]
    for f in repo.walk(m):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # addremove in core gets fancy with the name, add doesn't
        if isaddremove:
            name = m.uipath(f)
        else:
            name = m.rel(f)

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_('%s already a largefile\n') % name)
            continue

        if (exact or not exists) and not lfutil.isstandin(f):
            # In case the file was removed previously, but not committed
            # (issue3507)
            if not repo.wvfs.exists(f):
                continue

            abovemin = (lfsize and
                        repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_('adding %s as a largefile\n') % name)

    bad = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    wlock = repo.wlock()
    try:
        if not opts.get('dry_run'):
            standins = []
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                # Write an empty-hash standin; the real hash is filled in
                # at commit time.
                lfutil.writestandin(repo, standinname, hash='',
                    executable=lfutil.getexecutable(repo.wjoin(f)))
                standins.append(standinname)
                if lfdirstate[f] == 'r':
                    lfdirstate.normallookup(f)
                else:
                    lfdirstate.add(f)
            lfdirstate.write()
            # Files whose standin could not be added are reported as bad
            # under their largefile name.
            bad += [lfutil.splitstandin(f)
                    for f in repo[None].add(standins)
                    if f in m.files()]

        added = [f for f in lfnames if f not in bad]
    finally:
        wlock.release()
    return added, bad
165
165
def removelargefiles(ui, repo, isaddremove, matcher, **opts):
    '''Remove the largefiles matched by ``matcher`` (and their standins).

    With --after only already-deleted files are dropped; otherwise clean
    files are removed too.  Returns a non-zero int when any matched file
    could not be removed.'''
    after = opts.get('after')
    m = composelargefilematcher(matcher, repo[None].manifest())
    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=not isaddremove)
    finally:
        repo.lfstatus = False
    manifest = repo[None].manifest()
    # Keep only files whose standin is actually tracked.
    modified, added, deleted, clean = [[f for f in list
                                        if lfutil.standin(f) in manifest]
                                       for list in (s.modified, s.added,
                                                    s.deleted, s.clean)]

    def warn(files, msg):
        # Warn about every file and report whether anything was warned about.
        for f in files:
            ui.warn(msg % m.rel(f))
        return int(len(files) > 0)

    result = 0

    if after:
        remove = deleted
        result = warn(modified + added + clean,
                      _('not removing %s: file still exists\n'))
    else:
        remove = deleted + clean
        result = warn(modified, _('not removing %s: file is modified (use -f'
                                  ' to force removal)\n'))
        result = warn(added, _('not removing %s: file has been marked for add'
                               ' (use forget to undo)\n')) or result

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in sorted(remove):
            if ui.verbose or not m.exact(f):
                # addremove in core gets fancy with the name, remove doesn't
                if isaddremove:
                    name = m.uipath(f)
                else:
                    name = m.rel(f)
                ui.status(_('removing %s\n') % name)

            if not opts.get('dry_run'):
                if not after:
                    util.unlinkpath(repo.wjoin(f), ignoremissing=True)

        if opts.get('dry_run'):
            return result

        # From here on operate on the standins rather than the largefiles.
        remove = [lfutil.standin(f) for f in remove]
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not isaddremove:
            for f in remove:
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
            repo[None].forget(remove)

        for f in remove:
            lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
                                  False)

        lfdirstate.write()
    finally:
        wlock.release()

    return result
236
236
# For overriding mercurial.hgweb.webcommands so that largefiles will
# appear at their right place in the manifests.
def decodepath(orig, path):
    '''Map a standin path back to the largefile name it stands for;
    paths that are not standins are returned unchanged.'''
    lfpath = lfutil.splitstandin(path)
    if lfpath:
        return lfpath
    return path
241
241
242 # -- Wrappers: modify existing commands --------------------------------
242 # -- Wrappers: modify existing commands --------------------------------
243
243
def overrideadd(orig, ui, repo, *pats, **opts):
    # Wrapper for 'hg add': --normal and --large are mutually exclusive.
    if opts.get('normal') and opts.get('large'):
        raise util.Abort(_('--normal cannot be used with --large'))
    return orig(ui, repo, *pats, **opts)
248
248
def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
    '''Wrapper for cmdutil.add: add matched largefiles first, then let the
    original implementation add the remaining normal files.

    Returns the combined list of files that could not be added.'''
    # The --normal flag short circuits this override
    if opts.get('normal'):
        return orig(ui, repo, matcher, prefix, explicitonly, **opts)

    ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
    # Exclude the files just added as largefiles so the normal add does
    # not pick them up a second time.
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
                                             ladded)
    bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)

    # (was: bad.extend(f for f in lbad) -- a pointless generator wrapper)
    bad.extend(lbad)
    return bad
261
261
def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
    # Wrapper for cmdutil.remove: remove normal files via the original
    # implementation, then remove the matched largefiles.  A non-zero
    # result from either step is propagated.
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
    result = orig(ui, repo, normalmatcher, prefix, after, force, subrepos)
    return removelargefiles(ui, repo, False, matcher, after=after,
                            force=force) or result
267
267
def overridestatusfn(orig, repo, rev2, **opts):
    # Run the wrapped subrepo status with largefiles status reporting
    # enabled; the flag is always restored afterwards.
    try:
        repo._repo.lfstatus = True
        return orig(repo, rev2, **opts)
    finally:
        repo._repo.lfstatus = False
274
274
def overridestatus(orig, ui, repo, *pats, **opts):
    # Run 'hg status' with largefiles status reporting enabled; the flag
    # is always restored afterwards.
    try:
        repo.lfstatus = True
        return orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
281
281
def overridedirty(orig, repo, ignoreupdate=False):
    # Compute subrepo dirtiness with largefiles status reporting enabled
    # so that modified largefiles count as dirty too.
    try:
        repo._repo.lfstatus = True
        return orig(repo, ignoreupdate)
    finally:
        repo._repo.lfstatus = False
288
288
def overridelog(orig, ui, repo, *pats, **opts):
    '''Wrapper for 'hg log': temporarily install a matchandpats override
    (and a file matcher override for --patch) so that largefile standins
    are matched alongside the files they stand for.'''
    def overridematchandpats(ctx, pats=[], opts={}, globbed=False,
            default='relpath'):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default)
        m, p = copy.copy(matchandpats)

        if m.always():
            # We want to match everything anyway, so there's no benefit trying
            # to add standins.
            return matchandpats

        pats = set(p)

        def fixpats(pat, tostandin=lfutil.standin):
            # Preserve any 'kind:' prefix while mapping the pattern body to
            # its standin form.
            kindpat = match_._patsplit(pat, None)

            if kindpat[0] is not None:
                return kindpat[0] + ':' + tostandin(kindpat[1])
            return tostandin(kindpat[1])

        if m._cwd:
            hglf = lfutil.shortname
            back = util.pconvert(m.rel(hglf)[:-len(hglf)])

            def tostandin(f):
                # The file may already be a standin, so truncate the back
                # prefix and test before mangling it. This avoids turning
                # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
                if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
                    return f

                # An absolute path is from outside the repo, so truncate the
                # path to the root before building the standin. Otherwise cwd
                # is somewhere in the repo, relative to root, and needs to be
                # prepended before building the standin.
                if os.path.isabs(m._cwd):
                    f = f[len(back):]
                else:
                    f = m._cwd + '/' + f
                return back + lfutil.standin(f)

            pats.update(fixpats(f, tostandin) for f in p)
        else:
            def tostandin(f):
                if lfutil.splitstandin(f):
                    return f
                return lfutil.standin(f)
            pats.update(fixpats(f, tostandin) for f in p)

        for i in range(0, len(m._files)):
            # Don't add '.hglf' to m.files, since that is already covered by '.'
            if m._files[i] == '.':
                continue
            standin = lfutil.standin(m._files[i])
            # If the "standin" is a directory, append instead of replace to
            # support naming a directory on the command line with only
            # largefiles. The original directory is kept to support normal
            # files.
            if standin in repo[ctx.node()]:
                m._files[i] = standin
            elif m._files[i] not in repo[ctx.node()] \
                    and repo.wvfs.isdir(standin):
                m._files.append(standin)

        m._fmap = set(m._files)
        m._always = False
        origmatchfn = m.matchfn
        def lfmatchfn(f):
            # Accept a standin when its largefile would have matched, and
            # fall back to the original matcher otherwise.
            lf = lfutil.splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            r = origmatchfn(f)
            return r
        m.matchfn = lfmatchfn

        ui.debug('updated patterns: %s\n' % sorted(pats))
        return m, pats

    # For hg log --patch, the match object is used in two different senses:
    # (1) to determine what revisions should be printed out, and
    # (2) to determine what files to print out diffs for.
    # The magic matchandpats override should be used for case (1) but not for
    # case (2).
    def overridemakelogfilematcher(repo, pats, opts):
        wctx = repo[None]
        match, pats = oldmatchandpats(wctx, pats, opts)
        return lambda rev: match

    oldmatchandpats = installmatchandpatsfn(overridematchandpats)
    oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
    setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)

    try:
        return orig(ui, repo, *pats, **opts)
    finally:
        # Always undo both monkey patches, even if the log command raised.
        restorematchandpatsfn()
        setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
391
391
def overrideverify(orig, ui, repo, *pats, **opts):
    '''Wrapper for 'hg verify': run the normal verification, then verify
    the largefiles as well when --large, --lfa or --lfc was given.'''
    large = opts.pop('large', False)
    # Renamed from 'all': don't shadow the builtin.  The option key 'lfa'
    # is unchanged, so callers are unaffected.
    lfall = opts.pop('lfa', False)
    contents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if large or lfall or contents:
        result = result or lfcommands.verifylfiles(ui, repo, lfall, contents)
    return result
401
401
def overridedebugstate(orig, ui, repo, *pats, **opts):
    # 'hg debugstate --large' dumps the largefiles dirstate instead of the
    # regular one, by handing the original command a stand-in repo object
    # whose dirstate attribute is the lfdirstate.
    large = opts.pop('large', False)
    if large:
        class fakerepo(object):
            dirstate = lfutil.openlfdirstate(ui, repo)
        orig(ui, fakerepo, *pats, **opts)
    else:
        orig(ui, repo, *pats, **opts)
410
410
# Before starting the manifest merge, merge.updates will call
# _checkunknownfile to check if there are any files in the merged-in
# changeset that collide with unknown files in the working copy.
#
# The largefiles are seen as unknown, so this prevents us from merging
# in a file 'foo' if we already have a largefile with the same name.
#
# The overridden function filters the unknown files by removing any
# largefiles. This makes the merge proceed and we can then handle this
# case further in the overridden calculateupdates function below.
def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
    # A file whose standin is tracked in the working context is a
    # largefile, not an unknown file: report no collision for it.
    if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
        return False
    return origfn(repo, wctx, mctx, f, f2)
425
425
426 # The manifest merge handles conflicts on the manifest level. We want
426 # The manifest merge handles conflicts on the manifest level. We want
427 # to handle changes in largefile-ness of files at this level too.
427 # to handle changes in largefile-ness of files at this level too.
428 #
428 #
429 # The strategy is to run the original calculateupdates and then process
429 # The strategy is to run the original calculateupdates and then process
430 # the action list it outputs. There are two cases we need to deal with:
430 # the action list it outputs. There are two cases we need to deal with:
431 #
431 #
432 # 1. Normal file in p1, largefile in p2. Here the largefile is
432 # 1. Normal file in p1, largefile in p2. Here the largefile is
433 # detected via its standin file, which will enter the working copy
433 # detected via its standin file, which will enter the working copy
434 # with a "get" action. It is not "merge" since the standin is all
434 # with a "get" action. It is not "merge" since the standin is all
435 # Mercurial is concerned with at this level -- the link to the
435 # Mercurial is concerned with at this level -- the link to the
436 # existing normal file is not relevant here.
436 # existing normal file is not relevant here.
437 #
437 #
438 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
438 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
439 # since the largefile will be present in the working copy and
439 # since the largefile will be present in the working copy and
440 # different from the normal file in p2. Mercurial therefore
440 # different from the normal file in p2. Mercurial therefore
441 # triggers a merge action.
441 # triggers a merge action.
442 #
442 #
443 # In both cases, we prompt the user and emit new actions to either
443 # In both cases, we prompt the user and emit new actions to either
444 # remove the standin (if the normal file was kept) or to remove the
444 # remove the standin (if the normal file was kept) or to remove the
445 # normal file and get the standin (if the largefile was kept). The
445 # normal file and get the standin (if the largefile was kept). The
446 # default prompt answer is to use the largefile version since it was
446 # default prompt answer is to use the largefile version since it was
447 # presumably changed on purpose.
447 # presumably changed on purpose.
448 #
448 #
449 # Finally, the merge.applyupdates function will then take care of
449 # Finally, the merge.applyupdates function will then take care of
450 # writing the files into the working copy and lfcommands.updatelfiles
450 # writing the files into the working copy and lfcommands.updatelfiles
451 # will update the largefiles.
451 # will update the largefiles.
452 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
452 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
453 partial, acceptremote, followcopies):
453 partial, acceptremote, followcopies):
454 overwrite = force and not branchmerge
454 overwrite = force and not branchmerge
455 actions, diverge, renamedelete = origfn(
455 actions, diverge, renamedelete = origfn(
456 repo, p1, p2, pas, branchmerge, force, partial, acceptremote,
456 repo, p1, p2, pas, branchmerge, force, partial, acceptremote,
457 followcopies)
457 followcopies)
458
458
459 if overwrite:
459 if overwrite:
460 return actions, diverge, renamedelete
460 return actions, diverge, renamedelete
461
461
462 # Convert to dictionary with filename as key and action as value.
462 # Convert to dictionary with filename as key and action as value.
463 lfiles = set()
463 lfiles = set()
464 for f in actions:
464 for f in actions:
465 splitstandin = f and lfutil.splitstandin(f)
465 splitstandin = f and lfutil.splitstandin(f)
466 if splitstandin in p1:
466 if splitstandin in p1:
467 lfiles.add(splitstandin)
467 lfiles.add(splitstandin)
468 elif lfutil.standin(f) in p1:
468 elif lfutil.standin(f) in p1:
469 lfiles.add(f)
469 lfiles.add(f)
470
470
471 for lfile in lfiles:
471 for lfile in lfiles:
472 standin = lfutil.standin(lfile)
472 standin = lfutil.standin(lfile)
473 (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
473 (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
474 (sm, sargs, smsg) = actions.get(standin, (None, None, None))
474 (sm, sargs, smsg) = actions.get(standin, (None, None, None))
475 if sm in ('g', 'dc') and lm != 'r':
475 if sm in ('g', 'dc') and lm != 'r':
476 # Case 1: normal file in the working copy, largefile in
476 # Case 1: normal file in the working copy, largefile in
477 # the second parent
477 # the second parent
478 usermsg = _('remote turned local normal file %s into a largefile\n'
478 usermsg = _('remote turned local normal file %s into a largefile\n'
479 'use (l)argefile or keep (n)ormal file?'
479 'use (l)argefile or keep (n)ormal file?'
480 '$$ &Largefile $$ &Normal file') % lfile
480 '$$ &Largefile $$ &Normal file') % lfile
481 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
481 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
482 actions[lfile] = ('r', None, 'replaced by standin')
482 actions[lfile] = ('r', None, 'replaced by standin')
483 actions[standin] = ('g', sargs, 'replaces standin')
483 actions[standin] = ('g', sargs, 'replaces standin')
484 else: # keep local normal file
484 else: # keep local normal file
485 actions[lfile] = ('k', None, 'replaces standin')
485 actions[lfile] = ('k', None, 'replaces standin')
486 if branchmerge:
486 if branchmerge:
487 actions[standin] = ('k', None, 'replaced by non-standin')
487 actions[standin] = ('k', None, 'replaced by non-standin')
488 else:
488 else:
489 actions[standin] = ('r', None, 'replaced by non-standin')
489 actions[standin] = ('r', None, 'replaced by non-standin')
490 elif lm in ('g', 'dc') and sm != 'r':
490 elif lm in ('g', 'dc') and sm != 'r':
491 # Case 2: largefile in the working copy, normal file in
491 # Case 2: largefile in the working copy, normal file in
492 # the second parent
492 # the second parent
493 usermsg = _('remote turned local largefile %s into a normal file\n'
493 usermsg = _('remote turned local largefile %s into a normal file\n'
494 'keep (l)argefile or use (n)ormal file?'
494 'keep (l)argefile or use (n)ormal file?'
495 '$$ &Largefile $$ &Normal file') % lfile
495 '$$ &Largefile $$ &Normal file') % lfile
496 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
496 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
497 if branchmerge:
497 if branchmerge:
498 # largefile can be restored from standin safely
498 # largefile can be restored from standin safely
499 actions[lfile] = ('k', None, 'replaced by standin')
499 actions[lfile] = ('k', None, 'replaced by standin')
500 actions[standin] = ('k', None, 'replaces standin')
500 actions[standin] = ('k', None, 'replaces standin')
501 else:
501 else:
502 # "lfile" should be marked as "removed" without
502 # "lfile" should be marked as "removed" without
503 # removal of itself
503 # removal of itself
504 actions[lfile] = ('lfmr', None,
504 actions[lfile] = ('lfmr', None,
505 'forget non-standin largefile')
505 'forget non-standin largefile')
506
506
507 # linear-merge should treat this largefile as 're-added'
507 # linear-merge should treat this largefile as 're-added'
508 actions[standin] = ('a', None, 'keep standin')
508 actions[standin] = ('a', None, 'keep standin')
509 else: # pick remote normal file
509 else: # pick remote normal file
510 actions[lfile] = ('g', largs, 'replaces standin')
510 actions[lfile] = ('g', largs, 'replaces standin')
511 actions[standin] = ('r', None, 'replaced by non-standin')
511 actions[standin] = ('r', None, 'replaced by non-standin')
512
512
513 return actions, diverge, renamedelete
513 return actions, diverge, renamedelete
514
514
515 def mergerecordupdates(orig, repo, actions, branchmerge):
515 def mergerecordupdates(orig, repo, actions, branchmerge):
516 if 'lfmr' in actions:
516 if 'lfmr' in actions:
517 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
517 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
518 for lfile, args, msg in actions['lfmr']:
518 for lfile, args, msg in actions['lfmr']:
519 # this should be executed before 'orig', to execute 'remove'
519 # this should be executed before 'orig', to execute 'remove'
520 # before all other actions
520 # before all other actions
521 repo.dirstate.remove(lfile)
521 repo.dirstate.remove(lfile)
522 # make sure lfile doesn't get synclfdirstate'd as normal
522 # make sure lfile doesn't get synclfdirstate'd as normal
523 lfdirstate.add(lfile)
523 lfdirstate.add(lfile)
524 lfdirstate.write()
524 lfdirstate.write()
525
525
526 return orig(repo, actions, branchmerge)
526 return orig(repo, actions, branchmerge)
527
527
528
528
529 # Override filemerge to prompt the user about how they wish to merge
529 # Override filemerge to prompt the user about how they wish to merge
530 # largefiles. This will handle identical edits without prompting the user.
530 # largefiles. This will handle identical edits without prompting the user.
531 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca, labels=None):
531 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca, labels=None):
532 if not lfutil.isstandin(orig):
532 if not lfutil.isstandin(orig):
533 return origfn(repo, mynode, orig, fcd, fco, fca, labels=labels)
533 return origfn(repo, mynode, orig, fcd, fco, fca, labels=labels)
534
534
535 ahash = fca.data().strip().lower()
535 ahash = fca.data().strip().lower()
536 dhash = fcd.data().strip().lower()
536 dhash = fcd.data().strip().lower()
537 ohash = fco.data().strip().lower()
537 ohash = fco.data().strip().lower()
538 if (ohash != ahash and
538 if (ohash != ahash and
539 ohash != dhash and
539 ohash != dhash and
540 (dhash == ahash or
540 (dhash == ahash or
541 repo.ui.promptchoice(
541 repo.ui.promptchoice(
542 _('largefile %s has a merge conflict\nancestor was %s\n'
542 _('largefile %s has a merge conflict\nancestor was %s\n'
543 'keep (l)ocal %s or\ntake (o)ther %s?'
543 'keep (l)ocal %s or\ntake (o)ther %s?'
544 '$$ &Local $$ &Other') %
544 '$$ &Local $$ &Other') %
545 (lfutil.splitstandin(orig), ahash, dhash, ohash),
545 (lfutil.splitstandin(orig), ahash, dhash, ohash),
546 0) == 1)):
546 0) == 1)):
547 repo.wwrite(fcd.path(), fco.data(), fco.flags())
547 repo.wwrite(fcd.path(), fco.data(), fco.flags())
548 return 0
548 return 0
549
549
550 def copiespathcopies(orig, ctx1, ctx2):
550 def copiespathcopies(orig, ctx1, ctx2, match=None):
551 copies = orig(ctx1, ctx2)
551 copies = orig(ctx1, ctx2, match=match)
552 updated = {}
552 updated = {}
553
553
554 for k, v in copies.iteritems():
554 for k, v in copies.iteritems():
555 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
555 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
556
556
557 return updated
557 return updated
558
558
559 # Copy first changes the matchers to match standins instead of
559 # Copy first changes the matchers to match standins instead of
560 # largefiles. Then it overrides util.copyfile in that function it
560 # largefiles. Then it overrides util.copyfile in that function it
561 # checks if the destination largefile already exists. It also keeps a
561 # checks if the destination largefile already exists. It also keeps a
562 # list of copied files so that the largefiles can be copied and the
562 # list of copied files so that the largefiles can be copied and the
563 # dirstate updated.
563 # dirstate updated.
564 def overridecopy(orig, ui, repo, pats, opts, rename=False):
564 def overridecopy(orig, ui, repo, pats, opts, rename=False):
565 # doesn't remove largefile on rename
565 # doesn't remove largefile on rename
566 if len(pats) < 2:
566 if len(pats) < 2:
567 # this isn't legal, let the original function deal with it
567 # this isn't legal, let the original function deal with it
568 return orig(ui, repo, pats, opts, rename)
568 return orig(ui, repo, pats, opts, rename)
569
569
570 # This could copy both lfiles and normal files in one command,
570 # This could copy both lfiles and normal files in one command,
571 # but we don't want to do that. First replace their matcher to
571 # but we don't want to do that. First replace their matcher to
572 # only match normal files and run it, then replace it to just
572 # only match normal files and run it, then replace it to just
573 # match largefiles and run it again.
573 # match largefiles and run it again.
574 nonormalfiles = False
574 nonormalfiles = False
575 nolfiles = False
575 nolfiles = False
576 installnormalfilesmatchfn(repo[None].manifest())
576 installnormalfilesmatchfn(repo[None].manifest())
577 try:
577 try:
578 try:
578 try:
579 result = orig(ui, repo, pats, opts, rename)
579 result = orig(ui, repo, pats, opts, rename)
580 except util.Abort, e:
580 except util.Abort, e:
581 if str(e) != _('no files to copy'):
581 if str(e) != _('no files to copy'):
582 raise e
582 raise e
583 else:
583 else:
584 nonormalfiles = True
584 nonormalfiles = True
585 result = 0
585 result = 0
586 finally:
586 finally:
587 restorematchfn()
587 restorematchfn()
588
588
589 # The first rename can cause our current working directory to be removed.
589 # The first rename can cause our current working directory to be removed.
590 # In that case there is nothing left to copy/rename so just quit.
590 # In that case there is nothing left to copy/rename so just quit.
591 try:
591 try:
592 repo.getcwd()
592 repo.getcwd()
593 except OSError:
593 except OSError:
594 return result
594 return result
595
595
596 def makestandin(relpath):
596 def makestandin(relpath):
597 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
597 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
598 return os.path.join(repo.wjoin(lfutil.standin(path)))
598 return os.path.join(repo.wjoin(lfutil.standin(path)))
599
599
600 fullpats = scmutil.expandpats(pats)
600 fullpats = scmutil.expandpats(pats)
601 dest = fullpats[-1]
601 dest = fullpats[-1]
602
602
603 if os.path.isdir(dest):
603 if os.path.isdir(dest):
604 if not os.path.isdir(makestandin(dest)):
604 if not os.path.isdir(makestandin(dest)):
605 os.makedirs(makestandin(dest))
605 os.makedirs(makestandin(dest))
606
606
607 try:
607 try:
608 try:
608 try:
609 # When we call orig below it creates the standins but we don't add
609 # When we call orig below it creates the standins but we don't add
610 # them to the dir state until later so lock during that time.
610 # them to the dir state until later so lock during that time.
611 wlock = repo.wlock()
611 wlock = repo.wlock()
612
612
613 manifest = repo[None].manifest()
613 manifest = repo[None].manifest()
614 def overridematch(ctx, pats=[], opts={}, globbed=False,
614 def overridematch(ctx, pats=[], opts={}, globbed=False,
615 default='relpath'):
615 default='relpath'):
616 newpats = []
616 newpats = []
617 # The patterns were previously mangled to add the standin
617 # The patterns were previously mangled to add the standin
618 # directory; we need to remove that now
618 # directory; we need to remove that now
619 for pat in pats:
619 for pat in pats:
620 if match_.patkind(pat) is None and lfutil.shortname in pat:
620 if match_.patkind(pat) is None and lfutil.shortname in pat:
621 newpats.append(pat.replace(lfutil.shortname, ''))
621 newpats.append(pat.replace(lfutil.shortname, ''))
622 else:
622 else:
623 newpats.append(pat)
623 newpats.append(pat)
624 match = oldmatch(ctx, newpats, opts, globbed, default)
624 match = oldmatch(ctx, newpats, opts, globbed, default)
625 m = copy.copy(match)
625 m = copy.copy(match)
626 lfile = lambda f: lfutil.standin(f) in manifest
626 lfile = lambda f: lfutil.standin(f) in manifest
627 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
627 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
628 m._fmap = set(m._files)
628 m._fmap = set(m._files)
629 origmatchfn = m.matchfn
629 origmatchfn = m.matchfn
630 m.matchfn = lambda f: (lfutil.isstandin(f) and
630 m.matchfn = lambda f: (lfutil.isstandin(f) and
631 (f in manifest) and
631 (f in manifest) and
632 origmatchfn(lfutil.splitstandin(f)) or
632 origmatchfn(lfutil.splitstandin(f)) or
633 None)
633 None)
634 return m
634 return m
635 oldmatch = installmatchfn(overridematch)
635 oldmatch = installmatchfn(overridematch)
636 listpats = []
636 listpats = []
637 for pat in pats:
637 for pat in pats:
638 if match_.patkind(pat) is not None:
638 if match_.patkind(pat) is not None:
639 listpats.append(pat)
639 listpats.append(pat)
640 else:
640 else:
641 listpats.append(makestandin(pat))
641 listpats.append(makestandin(pat))
642
642
643 try:
643 try:
644 origcopyfile = util.copyfile
644 origcopyfile = util.copyfile
645 copiedfiles = []
645 copiedfiles = []
646 def overridecopyfile(src, dest):
646 def overridecopyfile(src, dest):
647 if (lfutil.shortname in src and
647 if (lfutil.shortname in src and
648 dest.startswith(repo.wjoin(lfutil.shortname))):
648 dest.startswith(repo.wjoin(lfutil.shortname))):
649 destlfile = dest.replace(lfutil.shortname, '')
649 destlfile = dest.replace(lfutil.shortname, '')
650 if not opts['force'] and os.path.exists(destlfile):
650 if not opts['force'] and os.path.exists(destlfile):
651 raise IOError('',
651 raise IOError('',
652 _('destination largefile already exists'))
652 _('destination largefile already exists'))
653 copiedfiles.append((src, dest))
653 copiedfiles.append((src, dest))
654 origcopyfile(src, dest)
654 origcopyfile(src, dest)
655
655
656 util.copyfile = overridecopyfile
656 util.copyfile = overridecopyfile
657 result += orig(ui, repo, listpats, opts, rename)
657 result += orig(ui, repo, listpats, opts, rename)
658 finally:
658 finally:
659 util.copyfile = origcopyfile
659 util.copyfile = origcopyfile
660
660
661 lfdirstate = lfutil.openlfdirstate(ui, repo)
661 lfdirstate = lfutil.openlfdirstate(ui, repo)
662 for (src, dest) in copiedfiles:
662 for (src, dest) in copiedfiles:
663 if (lfutil.shortname in src and
663 if (lfutil.shortname in src and
664 dest.startswith(repo.wjoin(lfutil.shortname))):
664 dest.startswith(repo.wjoin(lfutil.shortname))):
665 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
665 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
666 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
666 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
667 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
667 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
668 if not os.path.isdir(destlfiledir):
668 if not os.path.isdir(destlfiledir):
669 os.makedirs(destlfiledir)
669 os.makedirs(destlfiledir)
670 if rename:
670 if rename:
671 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
671 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
672
672
673 # The file is gone, but this deletes any empty parent
673 # The file is gone, but this deletes any empty parent
674 # directories as a side-effect.
674 # directories as a side-effect.
675 util.unlinkpath(repo.wjoin(srclfile), True)
675 util.unlinkpath(repo.wjoin(srclfile), True)
676 lfdirstate.remove(srclfile)
676 lfdirstate.remove(srclfile)
677 else:
677 else:
678 util.copyfile(repo.wjoin(srclfile),
678 util.copyfile(repo.wjoin(srclfile),
679 repo.wjoin(destlfile))
679 repo.wjoin(destlfile))
680
680
681 lfdirstate.add(destlfile)
681 lfdirstate.add(destlfile)
682 lfdirstate.write()
682 lfdirstate.write()
683 except util.Abort, e:
683 except util.Abort, e:
684 if str(e) != _('no files to copy'):
684 if str(e) != _('no files to copy'):
685 raise e
685 raise e
686 else:
686 else:
687 nolfiles = True
687 nolfiles = True
688 finally:
688 finally:
689 restorematchfn()
689 restorematchfn()
690 wlock.release()
690 wlock.release()
691
691
692 if nolfiles and nonormalfiles:
692 if nolfiles and nonormalfiles:
693 raise util.Abort(_('no files to copy'))
693 raise util.Abort(_('no files to copy'))
694
694
695 return result
695 return result
696
696
697 # When the user calls revert, we have to be careful to not revert any
697 # When the user calls revert, we have to be careful to not revert any
698 # changes to other largefiles accidentally. This means we have to keep
698 # changes to other largefiles accidentally. This means we have to keep
699 # track of the largefiles that are being reverted so we only pull down
699 # track of the largefiles that are being reverted so we only pull down
700 # the necessary largefiles.
700 # the necessary largefiles.
701 #
701 #
702 # Standins are only updated (to match the hash of largefiles) before
702 # Standins are only updated (to match the hash of largefiles) before
703 # commits. Update the standins then run the original revert, changing
703 # commits. Update the standins then run the original revert, changing
704 # the matcher to hit standins instead of largefiles. Based on the
704 # the matcher to hit standins instead of largefiles. Based on the
705 # resulting standins update the largefiles.
705 # resulting standins update the largefiles.
706 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
706 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
707 # Because we put the standins in a bad state (by updating them)
707 # Because we put the standins in a bad state (by updating them)
708 # and then return them to a correct state we need to lock to
708 # and then return them to a correct state we need to lock to
709 # prevent others from changing them in their incorrect state.
709 # prevent others from changing them in their incorrect state.
710 wlock = repo.wlock()
710 wlock = repo.wlock()
711 try:
711 try:
712 lfdirstate = lfutil.openlfdirstate(ui, repo)
712 lfdirstate = lfutil.openlfdirstate(ui, repo)
713 s = lfutil.lfdirstatestatus(lfdirstate, repo)
713 s = lfutil.lfdirstatestatus(lfdirstate, repo)
714 lfdirstate.write()
714 lfdirstate.write()
715 for lfile in s.modified:
715 for lfile in s.modified:
716 lfutil.updatestandin(repo, lfutil.standin(lfile))
716 lfutil.updatestandin(repo, lfutil.standin(lfile))
717 for lfile in s.deleted:
717 for lfile in s.deleted:
718 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
718 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
719 os.unlink(repo.wjoin(lfutil.standin(lfile)))
719 os.unlink(repo.wjoin(lfutil.standin(lfile)))
720
720
721 oldstandins = lfutil.getstandinsstate(repo)
721 oldstandins = lfutil.getstandinsstate(repo)
722
722
723 def overridematch(mctx, pats=[], opts={}, globbed=False,
723 def overridematch(mctx, pats=[], opts={}, globbed=False,
724 default='relpath'):
724 default='relpath'):
725 match = oldmatch(mctx, pats, opts, globbed, default)
725 match = oldmatch(mctx, pats, opts, globbed, default)
726 m = copy.copy(match)
726 m = copy.copy(match)
727
727
728 # revert supports recursing into subrepos, and though largefiles
728 # revert supports recursing into subrepos, and though largefiles
729 # currently doesn't work correctly in that case, this match is
729 # currently doesn't work correctly in that case, this match is
730 # called, so the lfdirstate above may not be the correct one for
730 # called, so the lfdirstate above may not be the correct one for
731 # this invocation of match.
731 # this invocation of match.
732 lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
732 lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
733 False)
733 False)
734
734
735 def tostandin(f):
735 def tostandin(f):
736 standin = lfutil.standin(f)
736 standin = lfutil.standin(f)
737 if standin in ctx or standin in mctx:
737 if standin in ctx or standin in mctx:
738 return standin
738 return standin
739 elif standin in repo[None] or lfdirstate[f] == 'r':
739 elif standin in repo[None] or lfdirstate[f] == 'r':
740 return None
740 return None
741 return f
741 return f
742 m._files = [tostandin(f) for f in m._files]
742 m._files = [tostandin(f) for f in m._files]
743 m._files = [f for f in m._files if f is not None]
743 m._files = [f for f in m._files if f is not None]
744 m._fmap = set(m._files)
744 m._fmap = set(m._files)
745 origmatchfn = m.matchfn
745 origmatchfn = m.matchfn
746 def matchfn(f):
746 def matchfn(f):
747 if lfutil.isstandin(f):
747 if lfutil.isstandin(f):
748 return (origmatchfn(lfutil.splitstandin(f)) and
748 return (origmatchfn(lfutil.splitstandin(f)) and
749 (f in ctx or f in mctx))
749 (f in ctx or f in mctx))
750 return origmatchfn(f)
750 return origmatchfn(f)
751 m.matchfn = matchfn
751 m.matchfn = matchfn
752 return m
752 return m
753 oldmatch = installmatchfn(overridematch)
753 oldmatch = installmatchfn(overridematch)
754 try:
754 try:
755 orig(ui, repo, ctx, parents, *pats, **opts)
755 orig(ui, repo, ctx, parents, *pats, **opts)
756 finally:
756 finally:
757 restorematchfn()
757 restorematchfn()
758
758
759 newstandins = lfutil.getstandinsstate(repo)
759 newstandins = lfutil.getstandinsstate(repo)
760 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
760 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
761 # lfdirstate should be 'normallookup'-ed for updated files,
761 # lfdirstate should be 'normallookup'-ed for updated files,
762 # because reverting doesn't touch dirstate for 'normal' files
762 # because reverting doesn't touch dirstate for 'normal' files
763 # when target revision is explicitly specified: in such case,
763 # when target revision is explicitly specified: in such case,
764 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
764 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
765 # of target (standin) file.
765 # of target (standin) file.
766 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
766 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
767 normallookup=True)
767 normallookup=True)
768
768
769 finally:
769 finally:
770 wlock.release()
770 wlock.release()
771
771
772 # after pulling changesets, we need to take some extra care to get
772 # after pulling changesets, we need to take some extra care to get
773 # largefiles updated remotely
773 # largefiles updated remotely
774 def overridepull(orig, ui, repo, source=None, **opts):
774 def overridepull(orig, ui, repo, source=None, **opts):
775 revsprepull = len(repo)
775 revsprepull = len(repo)
776 if not source:
776 if not source:
777 source = 'default'
777 source = 'default'
778 repo.lfpullsource = source
778 repo.lfpullsource = source
779 result = orig(ui, repo, source, **opts)
779 result = orig(ui, repo, source, **opts)
780 revspostpull = len(repo)
780 revspostpull = len(repo)
781 lfrevs = opts.get('lfrev', [])
781 lfrevs = opts.get('lfrev', [])
782 if opts.get('all_largefiles'):
782 if opts.get('all_largefiles'):
783 lfrevs.append('pulled()')
783 lfrevs.append('pulled()')
784 if lfrevs and revspostpull > revsprepull:
784 if lfrevs and revspostpull > revsprepull:
785 numcached = 0
785 numcached = 0
786 repo.firstpulled = revsprepull # for pulled() revset expression
786 repo.firstpulled = revsprepull # for pulled() revset expression
787 try:
787 try:
788 for rev in scmutil.revrange(repo, lfrevs):
788 for rev in scmutil.revrange(repo, lfrevs):
789 ui.note(_('pulling largefiles for revision %s\n') % rev)
789 ui.note(_('pulling largefiles for revision %s\n') % rev)
790 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
790 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
791 numcached += len(cached)
791 numcached += len(cached)
792 finally:
792 finally:
793 del repo.firstpulled
793 del repo.firstpulled
794 ui.status(_("%d largefiles cached\n") % numcached)
794 ui.status(_("%d largefiles cached\n") % numcached)
795 return result
795 return result
796
796
797 def pulledrevsetsymbol(repo, subset, x):
797 def pulledrevsetsymbol(repo, subset, x):
798 """``pulled()``
798 """``pulled()``
799 Changesets that just has been pulled.
799 Changesets that just has been pulled.
800
800
801 Only available with largefiles from pull --lfrev expressions.
801 Only available with largefiles from pull --lfrev expressions.
802
802
803 .. container:: verbose
803 .. container:: verbose
804
804
805 Some examples:
805 Some examples:
806
806
807 - pull largefiles for all new changesets::
807 - pull largefiles for all new changesets::
808
808
809 hg pull -lfrev "pulled()"
809 hg pull -lfrev "pulled()"
810
810
811 - pull largefiles for all new branch heads::
811 - pull largefiles for all new branch heads::
812
812
813 hg pull -lfrev "head(pulled()) and not closed()"
813 hg pull -lfrev "head(pulled()) and not closed()"
814
814
815 """
815 """
816
816
817 try:
817 try:
818 firstpulled = repo.firstpulled
818 firstpulled = repo.firstpulled
819 except AttributeError:
819 except AttributeError:
820 raise util.Abort(_("pulled() only available in --lfrev"))
820 raise util.Abort(_("pulled() only available in --lfrev"))
821 return revset.baseset([r for r in subset if r >= firstpulled])
821 return revset.baseset([r for r in subset if r >= firstpulled])
822
822
823 def overrideclone(orig, ui, source, dest=None, **opts):
823 def overrideclone(orig, ui, source, dest=None, **opts):
824 d = dest
824 d = dest
825 if d is None:
825 if d is None:
826 d = hg.defaultdest(source)
826 d = hg.defaultdest(source)
827 if opts.get('all_largefiles') and not hg.islocal(d):
827 if opts.get('all_largefiles') and not hg.islocal(d):
828 raise util.Abort(_(
828 raise util.Abort(_(
829 '--all-largefiles is incompatible with non-local destination %s') %
829 '--all-largefiles is incompatible with non-local destination %s') %
830 d)
830 d)
831
831
832 return orig(ui, source, dest, **opts)
832 return orig(ui, source, dest, **opts)
833
833
834 def hgclone(orig, ui, opts, *args, **kwargs):
834 def hgclone(orig, ui, opts, *args, **kwargs):
835 result = orig(ui, opts, *args, **kwargs)
835 result = orig(ui, opts, *args, **kwargs)
836
836
837 if result is not None:
837 if result is not None:
838 sourcerepo, destrepo = result
838 sourcerepo, destrepo = result
839 repo = destrepo.local()
839 repo = destrepo.local()
840
840
841 # If largefiles is required for this repo, permanently enable it locally
841 # If largefiles is required for this repo, permanently enable it locally
842 if 'largefiles' in repo.requirements:
842 if 'largefiles' in repo.requirements:
843 fp = repo.vfs('hgrc', 'a', text=True)
843 fp = repo.vfs('hgrc', 'a', text=True)
844 try:
844 try:
845 fp.write('\n[extensions]\nlargefiles=\n')
845 fp.write('\n[extensions]\nlargefiles=\n')
846 finally:
846 finally:
847 fp.close()
847 fp.close()
848
848
849 # Caching is implicitly limited to 'rev' option, since the dest repo was
849 # Caching is implicitly limited to 'rev' option, since the dest repo was
850 # truncated at that point. The user may expect a download count with
850 # truncated at that point. The user may expect a download count with
851 # this option, so attempt whether or not this is a largefile repo.
851 # this option, so attempt whether or not this is a largefile repo.
852 if opts.get('all_largefiles'):
852 if opts.get('all_largefiles'):
853 success, missing = lfcommands.downloadlfiles(ui, repo, None)
853 success, missing = lfcommands.downloadlfiles(ui, repo, None)
854
854
855 if missing != 0:
855 if missing != 0:
856 return None
856 return None
857
857
858 return result
858 return result
859
859
860 def overriderebase(orig, ui, repo, **opts):
860 def overriderebase(orig, ui, repo, **opts):
861 if not util.safehasattr(repo, '_largefilesenabled'):
861 if not util.safehasattr(repo, '_largefilesenabled'):
862 return orig(ui, repo, **opts)
862 return orig(ui, repo, **opts)
863
863
864 resuming = opts.get('continue')
864 resuming = opts.get('continue')
865 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
865 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
866 repo._lfstatuswriters.append(lambda *msg, **opts: None)
866 repo._lfstatuswriters.append(lambda *msg, **opts: None)
867 try:
867 try:
868 return orig(ui, repo, **opts)
868 return orig(ui, repo, **opts)
869 finally:
869 finally:
870 repo._lfstatuswriters.pop()
870 repo._lfstatuswriters.pop()
871 repo._lfcommithooks.pop()
871 repo._lfcommithooks.pop()
872
872
873 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
873 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
874 prefix='', mtime=None, subrepos=None):
874 prefix='', mtime=None, subrepos=None):
875 # No need to lock because we are only reading history and
875 # No need to lock because we are only reading history and
876 # largefile caches, neither of which are modified.
876 # largefile caches, neither of which are modified.
877 lfcommands.cachelfiles(repo.ui, repo, node)
877 lfcommands.cachelfiles(repo.ui, repo, node)
878
878
879 if kind not in archival.archivers:
879 if kind not in archival.archivers:
880 raise util.Abort(_("unknown archive type '%s'") % kind)
880 raise util.Abort(_("unknown archive type '%s'") % kind)
881
881
882 ctx = repo[node]
882 ctx = repo[node]
883
883
884 if kind == 'files':
884 if kind == 'files':
885 if prefix:
885 if prefix:
886 raise util.Abort(
886 raise util.Abort(
887 _('cannot give prefix when archiving to files'))
887 _('cannot give prefix when archiving to files'))
888 else:
888 else:
889 prefix = archival.tidyprefix(dest, kind, prefix)
889 prefix = archival.tidyprefix(dest, kind, prefix)
890
890
891 def write(name, mode, islink, getdata):
891 def write(name, mode, islink, getdata):
892 if matchfn and not matchfn(name):
892 if matchfn and not matchfn(name):
893 return
893 return
894 data = getdata()
894 data = getdata()
895 if decode:
895 if decode:
896 data = repo.wwritedata(name, data)
896 data = repo.wwritedata(name, data)
897 archiver.addfile(prefix + name, mode, islink, data)
897 archiver.addfile(prefix + name, mode, islink, data)
898
898
899 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
899 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
900
900
901 if repo.ui.configbool("ui", "archivemeta", True):
901 if repo.ui.configbool("ui", "archivemeta", True):
902 write('.hg_archival.txt', 0644, False,
902 write('.hg_archival.txt', 0644, False,
903 lambda: archival.buildmetadata(ctx))
903 lambda: archival.buildmetadata(ctx))
904
904
905 for f in ctx:
905 for f in ctx:
906 ff = ctx.flags(f)
906 ff = ctx.flags(f)
907 getdata = ctx[f].data
907 getdata = ctx[f].data
908 if lfutil.isstandin(f):
908 if lfutil.isstandin(f):
909 path = lfutil.findfile(repo, getdata().strip())
909 path = lfutil.findfile(repo, getdata().strip())
910 if path is None:
910 if path is None:
911 raise util.Abort(
911 raise util.Abort(
912 _('largefile %s not found in repo store or system cache')
912 _('largefile %s not found in repo store or system cache')
913 % lfutil.splitstandin(f))
913 % lfutil.splitstandin(f))
914 f = lfutil.splitstandin(f)
914 f = lfutil.splitstandin(f)
915
915
916 def getdatafn():
916 def getdatafn():
917 fd = None
917 fd = None
918 try:
918 try:
919 fd = open(path, 'rb')
919 fd = open(path, 'rb')
920 return fd.read()
920 return fd.read()
921 finally:
921 finally:
922 if fd:
922 if fd:
923 fd.close()
923 fd.close()
924
924
925 getdata = getdatafn
925 getdata = getdatafn
926 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
926 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
927
927
928 if subrepos:
928 if subrepos:
929 for subpath in sorted(ctx.substate):
929 for subpath in sorted(ctx.substate):
930 sub = ctx.sub(subpath)
930 sub = ctx.sub(subpath)
931 submatch = match_.narrowmatcher(subpath, matchfn)
931 submatch = match_.narrowmatcher(subpath, matchfn)
932 sub.archive(archiver, prefix, submatch)
932 sub.archive(archiver, prefix, submatch)
933
933
934 archiver.done()
934 archiver.done()
935
935
936 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
936 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
937 repo._get(repo._state + ('hg',))
937 repo._get(repo._state + ('hg',))
938 rev = repo._state[1]
938 rev = repo._state[1]
939 ctx = repo._repo[rev]
939 ctx = repo._repo[rev]
940
940
941 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
941 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
942
942
943 def write(name, mode, islink, getdata):
943 def write(name, mode, islink, getdata):
944 # At this point, the standin has been replaced with the largefile name,
944 # At this point, the standin has been replaced with the largefile name,
945 # so the normal matcher works here without the lfutil variants.
945 # so the normal matcher works here without the lfutil variants.
946 if match and not match(f):
946 if match and not match(f):
947 return
947 return
948 data = getdata()
948 data = getdata()
949
949
950 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
950 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
951
951
952 for f in ctx:
952 for f in ctx:
953 ff = ctx.flags(f)
953 ff = ctx.flags(f)
954 getdata = ctx[f].data
954 getdata = ctx[f].data
955 if lfutil.isstandin(f):
955 if lfutil.isstandin(f):
956 path = lfutil.findfile(repo._repo, getdata().strip())
956 path = lfutil.findfile(repo._repo, getdata().strip())
957 if path is None:
957 if path is None:
958 raise util.Abort(
958 raise util.Abort(
959 _('largefile %s not found in repo store or system cache')
959 _('largefile %s not found in repo store or system cache')
960 % lfutil.splitstandin(f))
960 % lfutil.splitstandin(f))
961 f = lfutil.splitstandin(f)
961 f = lfutil.splitstandin(f)
962
962
963 def getdatafn():
963 def getdatafn():
964 fd = None
964 fd = None
965 try:
965 try:
966 fd = open(os.path.join(prefix, path), 'rb')
966 fd = open(os.path.join(prefix, path), 'rb')
967 return fd.read()
967 return fd.read()
968 finally:
968 finally:
969 if fd:
969 if fd:
970 fd.close()
970 fd.close()
971
971
972 getdata = getdatafn
972 getdata = getdatafn
973
973
974 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
974 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
975
975
976 for subpath in sorted(ctx.substate):
976 for subpath in sorted(ctx.substate):
977 sub = ctx.sub(subpath)
977 sub = ctx.sub(subpath)
978 submatch = match_.narrowmatcher(subpath, match)
978 submatch = match_.narrowmatcher(subpath, match)
979 sub.archive(archiver, os.path.join(prefix, repo._path) + '/', submatch)
979 sub.archive(archiver, os.path.join(prefix, repo._path) + '/', submatch)
980
980
981 # If a largefile is modified, the change is not reflected in its
981 # If a largefile is modified, the change is not reflected in its
982 # standin until a commit. cmdutil.bailifchanged() raises an exception
982 # standin until a commit. cmdutil.bailifchanged() raises an exception
983 # if the repo has uncommitted changes. Wrap it to also check if
983 # if the repo has uncommitted changes. Wrap it to also check if
984 # largefiles were changed. This is used by bisect, backout and fetch.
984 # largefiles were changed. This is used by bisect, backout and fetch.
def overridebailifchanged(orig, repo, *args, **kwargs):
    """Abort on uncommitted changes, including modified largefiles.

    A modified largefile is not reflected in its standin until commit,
    so cmdutil.bailifchanged alone would miss it.  Used by bisect,
    backout and fetch.

    Raises util.Abort when any largefile is modified, added, removed
    or deleted.
    """
    orig(repo, *args, **kwargs)
    repo.lfstatus = True
    try:
        # with lfstatus set, repo.status() reports largefile changes too
        s = repo.status()
    finally:
        # restore the flag even if status() raises, matching the
        # try/finally pattern used by cmdutilforget and overridesummary
        repo.lfstatus = False
    if s.modified or s.added or s.removed or s.deleted:
        raise util.Abort(_('uncommitted changes'))
992
992
def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
    """Forget both normal files and largefiles matched by 'match'.

    Normal files are delegated to the original cmdutil.forget via a
    matcher restricted to non-largefiles; the largefiles are then
    forgotten here (lfdirstate update + standin removal).

    Returns the (bad, forgot) pair expected by cmdutil.forget callers.
    """
    # let the original implementation handle the plain files only
    normalmatcher = composenormalfilematcher(match, repo[None].manifest())
    bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
    m = composelargefilematcher(match, repo[None].manifest())

    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    # only keep files that are actually tracked as largefiles
    forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]

    for f in forget:
        # a standin that is neither in the dirstate nor a directory in
        # the working copy means the file was never tracked
        if lfutil.standin(f) not in repo.dirstate and not \
                repo.wvfs.isdir(lfutil.standin(f)):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % m.rel(f))
            bad.append(f)

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            # 'a' (added) entries are simply dropped; tracked entries
            # are marked removed so the next commit records the removal
            if lfdirstate[f] == 'a':
                lfdirstate.drop(f)
            else:
                lfdirstate.remove(f)
        lfdirstate.write()
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        rejected = repo[None].forget(standins)
    finally:
        wlock.release()

    # mirror the original's bookkeeping: explicitly named rejects are
    # reported as bad, everything successfully forgotten is recorded
    bad.extend(f for f in rejected if f in m.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
1038
1038
def _getoutgoings(repo, other, missing, addfunc):
    """Invoke 'addfunc' with each unique (filename, largefile hash)
    pair referenced by the outgoing revisions in 'missing'.

    Pairs whose largefile already exists on the 'other' repository are
    skipped.
    """
    seen = set()
    hashes = set()

    def collect(fn, lfhash):
        # record each pair only once, and remember its hash for the
        # single batched existence query below
        pair = (fn, lfhash)
        if pair not in seen:
            seen.add(pair)
            hashes.add(lfhash)

    lfutil.getlfilestoupload(repo, missing, collect)
    if hashes:
        remote = basestore._openstore(repo, other).exists(hashes)
        for fn, lfhash in seen:
            if not remote[lfhash]:  # lfhash doesn't exist on "other"
                addfunc(fn, lfhash)
1061
1061
def outgoinghook(ui, repo, other, opts, missing):
    """'hg outgoing' hook: with --large, list the largefiles that would
    be uploaded to 'other' for the outgoing changesets in 'missing'.

    In debug mode each file is followed by the hashes of its outgoing
    largefile revisions; otherwise only file names are shown.
    """
    if opts.pop('large', None):
        lfhashes = set()
        if ui.debugflag:
            # map filename -> list of outgoing largefile hashes so the
            # hashes can be printed under each file
            toupload = {}
            def addfunc(fn, lfhash):
                if fn not in toupload:
                    toupload[fn] = []
                toupload[fn].append(lfhash)
                lfhashes.add(lfhash)
            def showhashes(fn):
                for lfhash in sorted(toupload[fn]):
                    ui.debug(' %s\n' % (lfhash))
        else:
            # non-debug: only the set of file names is needed
            toupload = set()
            def addfunc(fn, lfhash):
                toupload.add(fn)
                lfhashes.add(lfhash)
            def showhashes(fn):
                pass
        _getoutgoings(repo, other, missing, addfunc)

        if not toupload:
            ui.status(_('largefiles: no files to upload\n'))
        else:
            ui.status(_('largefiles to upload (%d entities):\n')
                      % (len(lfhashes)))
            for file in sorted(toupload):
                ui.status(lfutil.splitstandin(file) + '\n')
                showhashes(file)
            ui.status('\n')
1093
1093
def summaryremotehook(ui, repo, opts, changes):
    """'hg summary --remote' hook for largefiles.

    Called twice by the summary machinery: first with changes=None to
    declare which checks are needed, returning a (need-incoming,
    need-outgoing) pair; then with the computed 'changes' to print the
    largefiles upload summary line.
    """
    largeopt = opts.get('large', False)
    if changes is None:
        # declaration phase
        if largeopt:
            return (False, True) # only outgoing check is needed
        else:
            return (False, False)
    elif largeopt:
        # reporting phase: changes[1] holds the outgoing information
        url, branch, peer, outgoing = changes[1]
        if peer is None:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: (no remote repo)\n'))
            return

        toupload = set()
        lfhashes = set()
        def addfunc(fn, lfhash):
            toupload.add(fn)
            lfhashes.add(lfhash)
        _getoutgoings(repo, peer, outgoing.missing, addfunc)

        if not toupload:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: (no files to upload)\n'))
        else:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: %d entities for %d files to upload\n')
                      % (len(lfhashes), len(toupload)))
1122
1122
def overridesummary(orig, ui, repo, *pats, **opts):
    """Run 'hg summary' with largefile status reporting enabled."""
    repo.lfstatus = True
    try:
        orig(ui, repo, *pats, **opts)
    finally:
        # always clear the flag so later status calls are unaffected
        repo.lfstatus = False
1129
1129
def scmutiladdremove(orig, repo, matcher, prefix, opts={}, dry_run=None,
                     similarity=None):
    """addremove wrapper that handles largefiles first, then delegates
    the remaining normal files to the original scmutil.addremove.
    """
    if not lfutil.islfilesrepo(repo):
        return orig(repo, matcher, prefix, opts, dry_run, similarity)
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
                                  False, False, False)

    # Call into the normal remove code, but the removing of the standin, we
    # want to have handled by original addremove. Monkey patching here makes
    # sure we don't remove the standin in the largefiles code, preventing a
    # very confused state later.
    if s.deleted:
        m = copy.copy(matcher)

        # The m._files and m._map attributes are not changed to the deleted
        # list because that affects the m.exact() test, which in turn governs
        # whether or not the file name is printed, and how. Simply limit the
        # original matches to those in the deleted status list.
        matchfn = m.matchfn
        m.matchfn = lambda f: f in s.deleted and matchfn(f)

        removelargefiles(repo.ui, repo, True, m, **opts)
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest. Make sure it doesn't do anything with
    # largefiles by passing a matcher that will ignore them.
    matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
    return orig(repo, matcher, prefix, opts, dry_run, similarity)
1162
1162
1163 # Calling purge with --all will cause the largefiles to be deleted.
1163 # Calling purge with --all will cause the largefiles to be deleted.
1164 # Override repo.status to prevent this from happening.
1164 # Override repo.status to prevent this from happening.
def overridepurge(orig, ui, repo, *dirs, **opts):
    """Run purge with repo.status patched so largefiles are not
    reported as unknown/ignored and therefore not deleted by --all.
    """
    # XXX Monkey patching a repoview will not work. The assigned attribute
    # will be set on the unfiltered repo, but we will only lookup attributes
    # in the unfiltered repo if the lookup in the repoview object itself
    # fails. As the monkey patched method exists on the repoview class the
    # lookup will not fail. As a result, the original version will shadow the
    # monkey patched one, defeating the monkey patch.
    #
    # As a work around we use an unfiltered repo here. We should do something
    # cleaner instead.
    repo = repo.unfiltered()
    oldstatus = repo.status
    def overridestatus(node1='.', node2=None, match=None, ignored=False,
                       clean=False, unknown=False, listsubrepos=False):
        r = oldstatus(node1, node2, match, ignored, clean, unknown,
                      listsubrepos)
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        # drop any file the largefiles dirstate knows about from the
        # unknown/ignored lists, so purge leaves largefiles alone
        unknown = [f for f in r.unknown if lfdirstate[f] == '?']
        ignored = [f for f in r.ignored if lfdirstate[f] == '?']
        return scmutil.status(r.modified, r.added, r.removed, r.deleted,
                              unknown, ignored, r.clean)
    repo.status = overridestatus
    orig(ui, repo, *dirs, **opts)
    repo.status = oldstatus
def overriderollback(orig, ui, repo, **opts):
    """Roll back the last transaction and restore standins/largefile
    dirstate to match the new working-directory parents.
    """
    wlock = repo.wlock()
    try:
        before = repo.dirstate.parents()
        # standins tracked before the rollback; anything not re-tracked
        # afterwards is an orphan that must be removed from disk
        orphans = set(f for f in repo.dirstate
                      if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
        result = orig(ui, repo, **opts)
        after = repo.dirstate.parents()
        if before == after:
            return result # no need to restore standins

        pctx = repo['.']
        for f in repo.dirstate:
            if lfutil.isstandin(f):
                orphans.discard(f)
                if repo.dirstate[f] == 'r':
                    # removed in the restored state: delete from disk
                    repo.wvfs.unlinkpath(f, ignoremissing=True)
                elif f in pctx:
                    # restore the standin's content from the parent
                    fctx = pctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                else:
                    # content of standin is not so important in 'a',
                    # 'm' or 'n' (coming from the 2nd parent) cases
                    lfutil.writestandin(repo, f, '', False)
        for standin in orphans:
            repo.wvfs.unlinkpath(standin, ignoremissing=True)

        # resync the largefiles dirstate with the restored standins and
        # drop entries for largefiles that no longer exist
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        orphans = set(lfdirstate)
        lfiles = lfutil.listlfiles(repo)
        for file in lfiles:
            lfutil.synclfdirstate(repo, lfdirstate, file, True)
            orphans.discard(file)
        for lfile in orphans:
            lfdirstate.drop(lfile)
        lfdirstate.write()
    finally:
        wlock.release()
    return result
1228
1228
def overridetransplant(orig, ui, repo, *revs, **opts):
    """Run transplant with the automated largefiles commit hook and
    with largefile status messages suppressed."""
    isresume = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(isresume))
    # silence largefile status output for the duration of the command
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        return orig(ui, repo, *revs, **opts)
    finally:
        # pop in reverse order of the appends above
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
1239
1239
def overridecat(orig, ui, repo, file1, *pats, **opts):
    """'hg cat' override: print largefile contents when a largefile (or
    its standin) is named, fetching from a store if not cached.

    Returns 0 on success, 1 if nothing matched.
    """
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn
    def lfmatchfn(f):
        # accept standins whose largefile name matches the pattern
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)
    m.matchfn = lfmatchfn
    origbadfn = m.bad
    def lfbadfn(f, msg):
        # suppress "no such file" for names we resolved via a standin
        if not f in notbad:
            origbadfn(f, msg)
    m.bad = lfbadfn

    origvisitdirfn = m.visitdir
    def lfvisitdirfn(dir):
        # always descend into the standin directory so standins are seen
        if dir == lfutil.shortname:
            return True
        ret = origvisitdirfn(dir)
        if ret:
            return ret
        lf = lfutil.splitstandin(dir)
        if lf is None:
            return False
        return origvisitdirfn(lf)
    m.visitdir = lfvisitdirfn

    for f in ctx.walk(m):
        fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
                                 pathname=f)
        lf = lfutil.splitstandin(f)
        if lf is None or origmatchfn(f):
            # duplicating unreachable code from commands.cat
            data = ctx[f].data()
            if opts.get('decode'):
                data = repo.wwritedata(f, data)
            fp.write(data)
        else:
            # a standin matched via its largefile name: emit the
            # largefile content, downloading it into the cache if needed
            hash = lfutil.readstandin(repo, lf, ctx.rev())
            if not lfutil.inusercache(repo.ui, hash):
                store = basestore._openstore(repo)
                success, missing = store.get([(lf, hash)])
                if len(success) != 1:
                    raise util.Abort(
                        _('largefile %s is not in cache and could not be '
                          'downloaded') % lf)
            path = lfutil.usercachepath(repo.ui, hash)
            fpin = open(path, "rb")
            # stream in chunks to avoid loading the whole file in memory
            for chunk in util.filechunkiter(fpin, 128 * 1024):
                fp.write(chunk)
            fpin.close()
        fp.close()
        err = 0
    return err
1301
1301
def mergeupdate(orig, repo, node, branchmerge, force, partial,
                *args, **kwargs):
    """merge.update wrapper: refresh standins before the update/merge
    and sync largefiles in the working directory afterwards.
    """
    wlock = repo.wlock()
    try:
        # branch |       |         |
        #  merge | force | partial | action
        # -------+-------+---------+--------------
        #    x   |   x   |    x    | linear-merge
        #    o   |   x   |    x    | branch-merge
        #    x   |   o   |    x    | overwrite (as clean update)
        #    o   |   o   |    x    | force-branch-merge (*1)
        #    x   |   x   |    o    |   (*)
        #    o   |   x   |    o    |   (*)
        #    x   |   o   |    o    | overwrite (as revert)
        #    o   |   o   |    o    |   (*)
        #
        # (*) don't care
        # (*1) deprecated, but used internally (e.g: "rebase --collapse")

        linearmerge = not branchmerge and not force and not partial

        if linearmerge or (branchmerge and force and not partial):
            # update standins for linear-merge or force-branch-merge,
            # because largefiles in the working directory may be modified
            lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
            unsure, s = lfdirstate.status(match_.always(repo.root,
                                                        repo.getcwd()),
                                          [], False, False, False)
            pctx = repo['.']
            for lfile in unsure + s.modified:
                lfileabs = repo.wvfs.join(lfile)
                if not os.path.exists(lfileabs):
                    continue
                # rewrite the standin to reflect the current largefile
                lfhash = lfutil.hashrepofile(repo, lfile)
                standin = lfutil.standin(lfile)
                lfutil.writestandin(repo, standin, lfhash,
                                    lfutil.getexecutable(lfileabs))
                if (standin in pctx and
                    lfhash == lfutil.readstandin(repo, lfile, '.')):
                    # unchanged relative to the parent: mark clean
                    lfdirstate.normal(lfile)
            for lfile in s.added:
                lfutil.updatestandin(repo, lfutil.standin(lfile))
            lfdirstate.write()

        if linearmerge:
            # Only call updatelfiles on the standins that have changed
            # to save time
            oldstandins = lfutil.getstandinsstate(repo)

        result = orig(repo, node, branchmerge, force, partial, *args, **kwargs)

        filelist = None
        if linearmerge:
            newstandins = lfutil.getstandinsstate(repo)
            filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)

        lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
                                normallookup=partial, checked=linearmerge)

        return result
    finally:
        wlock.release()
1364
1364
def scmutilmarktouched(orig, repo, files, *args, **kwargs):
    """After marking 'files' as touched, refresh any largefiles whose
    standins were among them."""
    res = orig(repo, files, *args, **kwargs)

    touchedlfiles = [lfutil.splitstandin(f)
                     for f in files if lfutil.isstandin(f)]
    if touchedlfiles:
        # quietly bring the working-copy largefiles back in sync
        lfcommands.updatelfiles(repo.ui, repo, filelist=touchedlfiles,
                                printmessage=False, normallookup=True)

    return res
@@ -1,517 +1,523
1 # copies.py - copy detection for Mercurial
1 # copies.py - copy detection for Mercurial
2 #
2 #
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import util
8 import util
9 import heapq
9 import heapq
10
10
11 def _dirname(f):
11 def _dirname(f):
12 s = f.rfind("/")
12 s = f.rfind("/")
13 if s == -1:
13 if s == -1:
14 return ""
14 return ""
15 return f[:s]
15 return f[:s]
16
16
17 def _findlimit(repo, a, b):
17 def _findlimit(repo, a, b):
18 """
18 """
19 Find the last revision that needs to be checked to ensure that a full
19 Find the last revision that needs to be checked to ensure that a full
20 transitive closure for file copies can be properly calculated.
20 transitive closure for file copies can be properly calculated.
21 Generally, this means finding the earliest revision number that's an
21 Generally, this means finding the earliest revision number that's an
22 ancestor of a or b but not both, except when a or b is a direct descendent
22 ancestor of a or b but not both, except when a or b is a direct descendent
23 of the other, in which case we can return the minimum revnum of a and b.
23 of the other, in which case we can return the minimum revnum of a and b.
24 None if no such revision exists.
24 None if no such revision exists.
25 """
25 """
26
26
27 # basic idea:
27 # basic idea:
28 # - mark a and b with different sides
28 # - mark a and b with different sides
29 # - if a parent's children are all on the same side, the parent is
29 # - if a parent's children are all on the same side, the parent is
30 # on that side, otherwise it is on no side
30 # on that side, otherwise it is on no side
31 # - walk the graph in topological order with the help of a heap;
31 # - walk the graph in topological order with the help of a heap;
32 # - add unseen parents to side map
32 # - add unseen parents to side map
33 # - clear side of any parent that has children on different sides
33 # - clear side of any parent that has children on different sides
34 # - track number of interesting revs that might still be on a side
34 # - track number of interesting revs that might still be on a side
35 # - track the lowest interesting rev seen
35 # - track the lowest interesting rev seen
36 # - quit when interesting revs is zero
36 # - quit when interesting revs is zero
37
37
38 cl = repo.changelog
38 cl = repo.changelog
39 working = len(cl) # pseudo rev for the working directory
39 working = len(cl) # pseudo rev for the working directory
40 if a is None:
40 if a is None:
41 a = working
41 a = working
42 if b is None:
42 if b is None:
43 b = working
43 b = working
44
44
45 side = {a: -1, b: 1}
45 side = {a: -1, b: 1}
46 visit = [-a, -b]
46 visit = [-a, -b]
47 heapq.heapify(visit)
47 heapq.heapify(visit)
48 interesting = len(visit)
48 interesting = len(visit)
49 hascommonancestor = False
49 hascommonancestor = False
50 limit = working
50 limit = working
51
51
52 while interesting:
52 while interesting:
53 r = -heapq.heappop(visit)
53 r = -heapq.heappop(visit)
54 if r == working:
54 if r == working:
55 parents = [cl.rev(p) for p in repo.dirstate.parents()]
55 parents = [cl.rev(p) for p in repo.dirstate.parents()]
56 else:
56 else:
57 parents = cl.parentrevs(r)
57 parents = cl.parentrevs(r)
58 for p in parents:
58 for p in parents:
59 if p < 0:
59 if p < 0:
60 continue
60 continue
61 if p not in side:
61 if p not in side:
62 # first time we see p; add it to visit
62 # first time we see p; add it to visit
63 side[p] = side[r]
63 side[p] = side[r]
64 if side[p]:
64 if side[p]:
65 interesting += 1
65 interesting += 1
66 heapq.heappush(visit, -p)
66 heapq.heappush(visit, -p)
67 elif side[p] and side[p] != side[r]:
67 elif side[p] and side[p] != side[r]:
68 # p was interesting but now we know better
68 # p was interesting but now we know better
69 side[p] = 0
69 side[p] = 0
70 interesting -= 1
70 interesting -= 1
71 hascommonancestor = True
71 hascommonancestor = True
72 if side[r]:
72 if side[r]:
73 limit = r # lowest rev visited
73 limit = r # lowest rev visited
74 interesting -= 1
74 interesting -= 1
75
75
76 if not hascommonancestor:
76 if not hascommonancestor:
77 return None
77 return None
78
78
79 # Consider the following flow (see test-commit-amend.t under issue4405):
79 # Consider the following flow (see test-commit-amend.t under issue4405):
80 # 1/ File 'a0' committed
80 # 1/ File 'a0' committed
81 # 2/ File renamed from 'a0' to 'a1' in a new commit (call it 'a1')
81 # 2/ File renamed from 'a0' to 'a1' in a new commit (call it 'a1')
82 # 3/ Move back to first commit
82 # 3/ Move back to first commit
83 # 4/ Create a new commit via revert to contents of 'a1' (call it 'a1-amend')
83 # 4/ Create a new commit via revert to contents of 'a1' (call it 'a1-amend')
84 # 5/ Rename file from 'a1' to 'a2' and commit --amend 'a1-msg'
84 # 5/ Rename file from 'a1' to 'a2' and commit --amend 'a1-msg'
85 #
85 #
86 # During the amend in step five, we will be in this state:
86 # During the amend in step five, we will be in this state:
87 #
87 #
88 # @ 3 temporary amend commit for a1-amend
88 # @ 3 temporary amend commit for a1-amend
89 # |
89 # |
90 # o 2 a1-amend
90 # o 2 a1-amend
91 # |
91 # |
92 # | o 1 a1
92 # | o 1 a1
93 # |/
93 # |/
94 # o 0 a0
94 # o 0 a0
95 #
95 #
96 # When _findlimit is called, a and b are revs 3 and 0, so limit will be 2,
96 # When _findlimit is called, a and b are revs 3 and 0, so limit will be 2,
97 # yet the filelog has the copy information in rev 1 and we will not look
97 # yet the filelog has the copy information in rev 1 and we will not look
98 # back far enough unless we also look at the a and b as candidates.
98 # back far enough unless we also look at the a and b as candidates.
99 # This only occurs when a is a descendent of b or visa-versa.
99 # This only occurs when a is a descendent of b or visa-versa.
100 return min(limit, a, b)
100 return min(limit, a, b)
101
101
102 def _chain(src, dst, a, b):
102 def _chain(src, dst, a, b):
103 '''chain two sets of copies a->b'''
103 '''chain two sets of copies a->b'''
104 t = a.copy()
104 t = a.copy()
105 for k, v in b.iteritems():
105 for k, v in b.iteritems():
106 if v in t:
106 if v in t:
107 # found a chain
107 # found a chain
108 if t[v] != k:
108 if t[v] != k:
109 # file wasn't renamed back to itself
109 # file wasn't renamed back to itself
110 t[k] = t[v]
110 t[k] = t[v]
111 if v not in dst:
111 if v not in dst:
112 # chain was a rename, not a copy
112 # chain was a rename, not a copy
113 del t[v]
113 del t[v]
114 if v in src:
114 if v in src:
115 # file is a copy of an existing file
115 # file is a copy of an existing file
116 t[k] = v
116 t[k] = v
117
117
118 # remove criss-crossed copies
118 # remove criss-crossed copies
119 for k, v in t.items():
119 for k, v in t.items():
120 if k in src and v in dst:
120 if k in src and v in dst:
121 del t[k]
121 del t[k]
122
122
123 return t
123 return t
124
124
125 def _tracefile(fctx, am, limit=-1):
125 def _tracefile(fctx, am, limit=-1):
126 '''return file context that is the ancestor of fctx present in ancestor
126 '''return file context that is the ancestor of fctx present in ancestor
127 manifest am, stopping after the first ancestor lower than limit'''
127 manifest am, stopping after the first ancestor lower than limit'''
128
128
129 for f in fctx.ancestors():
129 for f in fctx.ancestors():
130 if am.get(f.path(), None) == f.filenode():
130 if am.get(f.path(), None) == f.filenode():
131 return f
131 return f
132 if limit >= 0 and f.linkrev() < limit and f.rev() < limit:
132 if limit >= 0 and f.linkrev() < limit and f.rev() < limit:
133 return None
133 return None
134
134
135 def _dirstatecopies(d):
135 def _dirstatecopies(d):
136 ds = d._repo.dirstate
136 ds = d._repo.dirstate
137 c = ds.copies().copy()
137 c = ds.copies().copy()
138 for k in c.keys():
138 for k in c.keys():
139 if ds[k] not in 'anm':
139 if ds[k] not in 'anm':
140 del c[k]
140 del c[k]
141 return c
141 return c
142
142
143 def _computeforwardmissing(a, b):
143 def _computeforwardmissing(a, b, match=None):
144 """Computes which files are in b but not a.
144 """Computes which files are in b but not a.
145 This is its own function so extensions can easily wrap this call to see what
145 This is its own function so extensions can easily wrap this call to see what
146 files _forwardcopies is about to process.
146 files _forwardcopies is about to process.
147 """
147 """
148 return b.manifest().filesnotin(a.manifest())
148 ma = a.manifest()
149 mb = b.manifest()
150 if match:
151 ma = ma.matches(match)
152 mb = mb.matches(match)
153 return mb.filesnotin(ma)
149
154
150 def _forwardcopies(a, b):
155 def _forwardcopies(a, b, match=None):
151 '''find {dst@b: src@a} copy mapping where a is an ancestor of b'''
156 '''find {dst@b: src@a} copy mapping where a is an ancestor of b'''
152
157
153 # check for working copy
158 # check for working copy
154 w = None
159 w = None
155 if b.rev() is None:
160 if b.rev() is None:
156 w = b
161 w = b
157 b = w.p1()
162 b = w.p1()
158 if a == b:
163 if a == b:
159 # short-circuit to avoid issues with merge states
164 # short-circuit to avoid issues with merge states
160 return _dirstatecopies(w)
165 return _dirstatecopies(w)
161
166
162 # files might have to be traced back to the fctx parent of the last
167 # files might have to be traced back to the fctx parent of the last
163 # one-side-only changeset, but not further back than that
168 # one-side-only changeset, but not further back than that
164 limit = _findlimit(a._repo, a.rev(), b.rev())
169 limit = _findlimit(a._repo, a.rev(), b.rev())
165 if limit is None:
170 if limit is None:
166 limit = -1
171 limit = -1
167 am = a.manifest()
172 am = a.manifest()
168
173
169 # find where new files came from
174 # find where new files came from
170 # we currently don't try to find where old files went, too expensive
175 # we currently don't try to find where old files went, too expensive
171 # this means we can miss a case like 'hg rm b; hg cp a b'
176 # this means we can miss a case like 'hg rm b; hg cp a b'
172 cm = {}
177 cm = {}
173 missing = _computeforwardmissing(a, b)
178 missing = _computeforwardmissing(a, b, match=match)
174 ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)
179 ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)
175 for f in missing:
180 for f in missing:
176 fctx = b[f]
181 fctx = b[f]
177 fctx._ancestrycontext = ancestrycontext
182 fctx._ancestrycontext = ancestrycontext
178 ofctx = _tracefile(fctx, am, limit)
183 ofctx = _tracefile(fctx, am, limit)
179 if ofctx:
184 if ofctx:
180 cm[f] = ofctx.path()
185 cm[f] = ofctx.path()
181
186
182 # combine copies from dirstate if necessary
187 # combine copies from dirstate if necessary
183 if w is not None:
188 if w is not None:
184 cm = _chain(a, w, cm, _dirstatecopies(w))
189 cm = _chain(a, w, cm, _dirstatecopies(w))
185
190
186 return cm
191 return cm
187
192
188 def _backwardrenames(a, b):
193 def _backwardrenames(a, b):
189 # Even though we're not taking copies into account, 1:n rename situations
194 # Even though we're not taking copies into account, 1:n rename situations
190 # can still exist (e.g. hg cp a b; hg mv a c). In those cases we
195 # can still exist (e.g. hg cp a b; hg mv a c). In those cases we
191 # arbitrarily pick one of the renames.
196 # arbitrarily pick one of the renames.
192 f = _forwardcopies(b, a)
197 f = _forwardcopies(b, a)
193 r = {}
198 r = {}
194 for k, v in sorted(f.iteritems()):
199 for k, v in sorted(f.iteritems()):
195 # remove copies
200 # remove copies
196 if v in a:
201 if v in a:
197 continue
202 continue
198 r[v] = k
203 r[v] = k
199 return r
204 return r
200
205
201 def pathcopies(x, y):
206 def pathcopies(x, y, match=None):
202 '''find {dst@y: src@x} copy mapping for directed compare'''
207 '''find {dst@y: src@x} copy mapping for directed compare'''
203 if x == y or not x or not y:
208 if x == y or not x or not y:
204 return {}
209 return {}
205 a = y.ancestor(x)
210 a = y.ancestor(x)
206 if a == x:
211 if a == x:
207 return _forwardcopies(x, y)
212 return _forwardcopies(x, y, match=match)
208 if a == y:
213 if a == y:
209 return _backwardrenames(x, y)
214 return _backwardrenames(x, y)
210 return _chain(x, y, _backwardrenames(x, a), _forwardcopies(a, y))
215 return _chain(x, y, _backwardrenames(x, a),
216 _forwardcopies(a, y, match=match))
211
217
212 def _computenonoverlap(repo, c1, c2, addedinm1, addedinm2):
218 def _computenonoverlap(repo, c1, c2, addedinm1, addedinm2):
213 """Computes, based on addedinm1 and addedinm2, the files exclusive to c1
219 """Computes, based on addedinm1 and addedinm2, the files exclusive to c1
214 and c2. This is its own function so extensions can easily wrap this call
220 and c2. This is its own function so extensions can easily wrap this call
215 to see what files mergecopies is about to process.
221 to see what files mergecopies is about to process.
216
222
217 Even though c1 and c2 are not used in this function, they are useful in
223 Even though c1 and c2 are not used in this function, they are useful in
218 other extensions for being able to read the file nodes of the changed files.
224 other extensions for being able to read the file nodes of the changed files.
219 """
225 """
220 u1 = sorted(addedinm1 - addedinm2)
226 u1 = sorted(addedinm1 - addedinm2)
221 u2 = sorted(addedinm2 - addedinm1)
227 u2 = sorted(addedinm2 - addedinm1)
222
228
223 if u1:
229 if u1:
224 repo.ui.debug(" unmatched files in local:\n %s\n"
230 repo.ui.debug(" unmatched files in local:\n %s\n"
225 % "\n ".join(u1))
231 % "\n ".join(u1))
226 if u2:
232 if u2:
227 repo.ui.debug(" unmatched files in other:\n %s\n"
233 repo.ui.debug(" unmatched files in other:\n %s\n"
228 % "\n ".join(u2))
234 % "\n ".join(u2))
229 return u1, u2
235 return u1, u2
230
236
231 def mergecopies(repo, c1, c2, ca):
237 def mergecopies(repo, c1, c2, ca):
232 """
238 """
233 Find moves and copies between context c1 and c2 that are relevant
239 Find moves and copies between context c1 and c2 that are relevant
234 for merging.
240 for merging.
235
241
236 Returns four dicts: "copy", "movewithdir", "diverge", and
242 Returns four dicts: "copy", "movewithdir", "diverge", and
237 "renamedelete".
243 "renamedelete".
238
244
239 "copy" is a mapping from destination name -> source name,
245 "copy" is a mapping from destination name -> source name,
240 where source is in c1 and destination is in c2 or vice-versa.
246 where source is in c1 and destination is in c2 or vice-versa.
241
247
242 "movewithdir" is a mapping from source name -> destination name,
248 "movewithdir" is a mapping from source name -> destination name,
243 where the file at source present in one context but not the other
249 where the file at source present in one context but not the other
244 needs to be moved to destination by the merge process, because the
250 needs to be moved to destination by the merge process, because the
245 other context moved the directory it is in.
251 other context moved the directory it is in.
246
252
247 "diverge" is a mapping of source name -> list of destination names
253 "diverge" is a mapping of source name -> list of destination names
248 for divergent renames.
254 for divergent renames.
249
255
250 "renamedelete" is a mapping of source name -> list of destination
256 "renamedelete" is a mapping of source name -> list of destination
251 names for files deleted in c1 that were renamed in c2 or vice-versa.
257 names for files deleted in c1 that were renamed in c2 or vice-versa.
252 """
258 """
253 # avoid silly behavior for update from empty dir
259 # avoid silly behavior for update from empty dir
254 if not c1 or not c2 or c1 == c2:
260 if not c1 or not c2 or c1 == c2:
255 return {}, {}, {}, {}
261 return {}, {}, {}, {}
256
262
257 # avoid silly behavior for parent -> working dir
263 # avoid silly behavior for parent -> working dir
258 if c2.node() is None and c1.node() == repo.dirstate.p1():
264 if c2.node() is None and c1.node() == repo.dirstate.p1():
259 return repo.dirstate.copies(), {}, {}, {}
265 return repo.dirstate.copies(), {}, {}, {}
260
266
261 limit = _findlimit(repo, c1.rev(), c2.rev())
267 limit = _findlimit(repo, c1.rev(), c2.rev())
262 if limit is None:
268 if limit is None:
263 # no common ancestor, no copies
269 # no common ancestor, no copies
264 return {}, {}, {}, {}
270 return {}, {}, {}, {}
265 m1 = c1.manifest()
271 m1 = c1.manifest()
266 m2 = c2.manifest()
272 m2 = c2.manifest()
267 ma = ca.manifest()
273 ma = ca.manifest()
268
274
269
275
270 def setupctx(ctx):
276 def setupctx(ctx):
271 """return a 'makectx' function suitable for checkcopies usage from ctx
277 """return a 'makectx' function suitable for checkcopies usage from ctx
272
278
273 We have to re-setup the function building 'filectx' for each
279 We have to re-setup the function building 'filectx' for each
274 'checkcopies' to ensure the linkrev adjustement is properly setup for
280 'checkcopies' to ensure the linkrev adjustement is properly setup for
275 each. Linkrev adjustment is important to avoid bug in rename
281 each. Linkrev adjustment is important to avoid bug in rename
276 detection. Moreover, having a proper '_ancestrycontext' setup ensures
282 detection. Moreover, having a proper '_ancestrycontext' setup ensures
277 the performance impact of this adjustment is kept limited. Without it,
283 the performance impact of this adjustment is kept limited. Without it,
278 each file could do a full dag traversal making the time complexity of
284 each file could do a full dag traversal making the time complexity of
279 the operation explode (see issue4537).
285 the operation explode (see issue4537).
280
286
281 This function exists here mostly to limit the impact on stable. Feel
287 This function exists here mostly to limit the impact on stable. Feel
282 free to refactor on default.
288 free to refactor on default.
283 """
289 """
284 rev = ctx.rev()
290 rev = ctx.rev()
285 ac = getattr(ctx, '_ancestrycontext', None)
291 ac = getattr(ctx, '_ancestrycontext', None)
286 if ac is None:
292 if ac is None:
287 revs = [rev]
293 revs = [rev]
288 if rev is None:
294 if rev is None:
289 revs = [p.rev() for p in ctx.parents()]
295 revs = [p.rev() for p in ctx.parents()]
290 ac = ctx._repo.changelog.ancestors(revs, inclusive=True)
296 ac = ctx._repo.changelog.ancestors(revs, inclusive=True)
291 ctx._ancestrycontext = ac
297 ctx._ancestrycontext = ac
292 def makectx(f, n):
298 def makectx(f, n):
293 if len(n) != 20: # in a working context?
299 if len(n) != 20: # in a working context?
294 if c1.rev() is None:
300 if c1.rev() is None:
295 return c1.filectx(f)
301 return c1.filectx(f)
296 return c2.filectx(f)
302 return c2.filectx(f)
297 fctx = repo.filectx(f, fileid=n)
303 fctx = repo.filectx(f, fileid=n)
298 # setup only needed for filectx not create from a changectx
304 # setup only needed for filectx not create from a changectx
299 fctx._ancestrycontext = ac
305 fctx._ancestrycontext = ac
300 fctx._descendantrev = rev
306 fctx._descendantrev = rev
301 return fctx
307 return fctx
302 return util.lrucachefunc(makectx)
308 return util.lrucachefunc(makectx)
303
309
304 copy = {}
310 copy = {}
305 movewithdir = {}
311 movewithdir = {}
306 fullcopy = {}
312 fullcopy = {}
307 diverge = {}
313 diverge = {}
308
314
309 repo.ui.debug(" searching for copies back to rev %d\n" % limit)
315 repo.ui.debug(" searching for copies back to rev %d\n" % limit)
310
316
311 addedinm1 = m1.filesnotin(ma)
317 addedinm1 = m1.filesnotin(ma)
312 addedinm2 = m2.filesnotin(ma)
318 addedinm2 = m2.filesnotin(ma)
313 u1, u2 = _computenonoverlap(repo, c1, c2, addedinm1, addedinm2)
319 u1, u2 = _computenonoverlap(repo, c1, c2, addedinm1, addedinm2)
314
320
315 for f in u1:
321 for f in u1:
316 ctx = setupctx(c1)
322 ctx = setupctx(c1)
317 checkcopies(ctx, f, m1, m2, ca, limit, diverge, copy, fullcopy)
323 checkcopies(ctx, f, m1, m2, ca, limit, diverge, copy, fullcopy)
318
324
319 for f in u2:
325 for f in u2:
320 ctx = setupctx(c2)
326 ctx = setupctx(c2)
321 checkcopies(ctx, f, m2, m1, ca, limit, diverge, copy, fullcopy)
327 checkcopies(ctx, f, m2, m1, ca, limit, diverge, copy, fullcopy)
322
328
323 renamedelete = {}
329 renamedelete = {}
324 renamedelete2 = set()
330 renamedelete2 = set()
325 diverge2 = set()
331 diverge2 = set()
326 for of, fl in diverge.items():
332 for of, fl in diverge.items():
327 if len(fl) == 1 or of in c1 or of in c2:
333 if len(fl) == 1 or of in c1 or of in c2:
328 del diverge[of] # not actually divergent, or not a rename
334 del diverge[of] # not actually divergent, or not a rename
329 if of not in c1 and of not in c2:
335 if of not in c1 and of not in c2:
330 # renamed on one side, deleted on the other side, but filter
336 # renamed on one side, deleted on the other side, but filter
331 # out files that have been renamed and then deleted
337 # out files that have been renamed and then deleted
332 renamedelete[of] = [f for f in fl if f in c1 or f in c2]
338 renamedelete[of] = [f for f in fl if f in c1 or f in c2]
333 renamedelete2.update(fl) # reverse map for below
339 renamedelete2.update(fl) # reverse map for below
334 else:
340 else:
335 diverge2.update(fl) # reverse map for below
341 diverge2.update(fl) # reverse map for below
336
342
337 bothnew = sorted(addedinm1 & addedinm2)
343 bothnew = sorted(addedinm1 & addedinm2)
338 if bothnew:
344 if bothnew:
339 repo.ui.debug(" unmatched files new in both:\n %s\n"
345 repo.ui.debug(" unmatched files new in both:\n %s\n"
340 % "\n ".join(bothnew))
346 % "\n ".join(bothnew))
341 bothdiverge, _copy, _fullcopy = {}, {}, {}
347 bothdiverge, _copy, _fullcopy = {}, {}, {}
342 for f in bothnew:
348 for f in bothnew:
343 ctx = setupctx(c1)
349 ctx = setupctx(c1)
344 checkcopies(ctx, f, m1, m2, ca, limit, bothdiverge, _copy, _fullcopy)
350 checkcopies(ctx, f, m1, m2, ca, limit, bothdiverge, _copy, _fullcopy)
345 ctx = setupctx(c2)
351 ctx = setupctx(c2)
346 checkcopies(ctx, f, m2, m1, ca, limit, bothdiverge, _copy, _fullcopy)
352 checkcopies(ctx, f, m2, m1, ca, limit, bothdiverge, _copy, _fullcopy)
347 for of, fl in bothdiverge.items():
353 for of, fl in bothdiverge.items():
348 if len(fl) == 2 and fl[0] == fl[1]:
354 if len(fl) == 2 and fl[0] == fl[1]:
349 copy[fl[0]] = of # not actually divergent, just matching renames
355 copy[fl[0]] = of # not actually divergent, just matching renames
350
356
351 if fullcopy and repo.ui.debugflag:
357 if fullcopy and repo.ui.debugflag:
352 repo.ui.debug(" all copies found (* = to merge, ! = divergent, "
358 repo.ui.debug(" all copies found (* = to merge, ! = divergent, "
353 "% = renamed and deleted):\n")
359 "% = renamed and deleted):\n")
354 for f in sorted(fullcopy):
360 for f in sorted(fullcopy):
355 note = ""
361 note = ""
356 if f in copy:
362 if f in copy:
357 note += "*"
363 note += "*"
358 if f in diverge2:
364 if f in diverge2:
359 note += "!"
365 note += "!"
360 if f in renamedelete2:
366 if f in renamedelete2:
361 note += "%"
367 note += "%"
362 repo.ui.debug(" src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f,
368 repo.ui.debug(" src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f,
363 note))
369 note))
364 del diverge2
370 del diverge2
365
371
366 if not fullcopy:
372 if not fullcopy:
367 return copy, movewithdir, diverge, renamedelete
373 return copy, movewithdir, diverge, renamedelete
368
374
369 repo.ui.debug(" checking for directory renames\n")
375 repo.ui.debug(" checking for directory renames\n")
370
376
371 # generate a directory move map
377 # generate a directory move map
372 d1, d2 = c1.dirs(), c2.dirs()
378 d1, d2 = c1.dirs(), c2.dirs()
373 d1.addpath('/')
379 d1.addpath('/')
374 d2.addpath('/')
380 d2.addpath('/')
375 invalid = set()
381 invalid = set()
376 dirmove = {}
382 dirmove = {}
377
383
378 # examine each file copy for a potential directory move, which is
384 # examine each file copy for a potential directory move, which is
379 # when all the files in a directory are moved to a new directory
385 # when all the files in a directory are moved to a new directory
380 for dst, src in fullcopy.iteritems():
386 for dst, src in fullcopy.iteritems():
381 dsrc, ddst = _dirname(src), _dirname(dst)
387 dsrc, ddst = _dirname(src), _dirname(dst)
382 if dsrc in invalid:
388 if dsrc in invalid:
383 # already seen to be uninteresting
389 # already seen to be uninteresting
384 continue
390 continue
385 elif dsrc in d1 and ddst in d1:
391 elif dsrc in d1 and ddst in d1:
386 # directory wasn't entirely moved locally
392 # directory wasn't entirely moved locally
387 invalid.add(dsrc)
393 invalid.add(dsrc)
388 elif dsrc in d2 and ddst in d2:
394 elif dsrc in d2 and ddst in d2:
389 # directory wasn't entirely moved remotely
395 # directory wasn't entirely moved remotely
390 invalid.add(dsrc)
396 invalid.add(dsrc)
391 elif dsrc in dirmove and dirmove[dsrc] != ddst:
397 elif dsrc in dirmove and dirmove[dsrc] != ddst:
392 # files from the same directory moved to two different places
398 # files from the same directory moved to two different places
393 invalid.add(dsrc)
399 invalid.add(dsrc)
394 else:
400 else:
395 # looks good so far
401 # looks good so far
396 dirmove[dsrc + "/"] = ddst + "/"
402 dirmove[dsrc + "/"] = ddst + "/"
397
403
398 for i in invalid:
404 for i in invalid:
399 if i in dirmove:
405 if i in dirmove:
400 del dirmove[i]
406 del dirmove[i]
401 del d1, d2, invalid
407 del d1, d2, invalid
402
408
403 if not dirmove:
409 if not dirmove:
404 return copy, movewithdir, diverge, renamedelete
410 return copy, movewithdir, diverge, renamedelete
405
411
406 for d in dirmove:
412 for d in dirmove:
407 repo.ui.debug(" discovered dir src: '%s' -> dst: '%s'\n" %
413 repo.ui.debug(" discovered dir src: '%s' -> dst: '%s'\n" %
408 (d, dirmove[d]))
414 (d, dirmove[d]))
409
415
410 # check unaccounted nonoverlapping files against directory moves
416 # check unaccounted nonoverlapping files against directory moves
411 for f in u1 + u2:
417 for f in u1 + u2:
412 if f not in fullcopy:
418 if f not in fullcopy:
413 for d in dirmove:
419 for d in dirmove:
414 if f.startswith(d):
420 if f.startswith(d):
415 # new file added in a directory that was moved, move it
421 # new file added in a directory that was moved, move it
416 df = dirmove[d] + f[len(d):]
422 df = dirmove[d] + f[len(d):]
417 if df not in copy:
423 if df not in copy:
418 movewithdir[f] = df
424 movewithdir[f] = df
419 repo.ui.debug((" pending file src: '%s' -> "
425 repo.ui.debug((" pending file src: '%s' -> "
420 "dst: '%s'\n") % (f, df))
426 "dst: '%s'\n") % (f, df))
421 break
427 break
422
428
423 return copy, movewithdir, diverge, renamedelete
429 return copy, movewithdir, diverge, renamedelete
424
430
425 def checkcopies(ctx, f, m1, m2, ca, limit, diverge, copy, fullcopy):
431 def checkcopies(ctx, f, m1, m2, ca, limit, diverge, copy, fullcopy):
426 """
432 """
427 check possible copies of f from m1 to m2
433 check possible copies of f from m1 to m2
428
434
429 ctx = function accepting (filename, node) that returns a filectx.
435 ctx = function accepting (filename, node) that returns a filectx.
430 f = the filename to check
436 f = the filename to check
431 m1 = the source manifest
437 m1 = the source manifest
432 m2 = the destination manifest
438 m2 = the destination manifest
433 ca = the changectx of the common ancestor
439 ca = the changectx of the common ancestor
434 limit = the rev number to not search beyond
440 limit = the rev number to not search beyond
435 diverge = record all diverges in this dict
441 diverge = record all diverges in this dict
436 copy = record all non-divergent copies in this dict
442 copy = record all non-divergent copies in this dict
437 fullcopy = record all copies in this dict
443 fullcopy = record all copies in this dict
438 """
444 """
439
445
440 ma = ca.manifest()
446 ma = ca.manifest()
441
447
442 def _related(f1, f2, limit):
448 def _related(f1, f2, limit):
443 # Walk back to common ancestor to see if the two files originate
449 # Walk back to common ancestor to see if the two files originate
444 # from the same file. Since workingfilectx's rev() is None it messes
450 # from the same file. Since workingfilectx's rev() is None it messes
445 # up the integer comparison logic, hence the pre-step check for
451 # up the integer comparison logic, hence the pre-step check for
446 # None (f1 and f2 can only be workingfilectx's initially).
452 # None (f1 and f2 can only be workingfilectx's initially).
447
453
448 if f1 == f2:
454 if f1 == f2:
449 return f1 # a match
455 return f1 # a match
450
456
451 g1, g2 = f1.ancestors(), f2.ancestors()
457 g1, g2 = f1.ancestors(), f2.ancestors()
452 try:
458 try:
453 f1r, f2r = f1.rev(), f2.rev()
459 f1r, f2r = f1.rev(), f2.rev()
454
460
455 if f1r is None:
461 if f1r is None:
456 f1 = g1.next()
462 f1 = g1.next()
457 if f2r is None:
463 if f2r is None:
458 f2 = g2.next()
464 f2 = g2.next()
459
465
460 while True:
466 while True:
461 f1r, f2r = f1.rev(), f2.rev()
467 f1r, f2r = f1.rev(), f2.rev()
462 if f1r > f2r:
468 if f1r > f2r:
463 f1 = g1.next()
469 f1 = g1.next()
464 elif f2r > f1r:
470 elif f2r > f1r:
465 f2 = g2.next()
471 f2 = g2.next()
466 elif f1 == f2:
472 elif f1 == f2:
467 return f1 # a match
473 return f1 # a match
468 elif f1r == f2r or f1r < limit or f2r < limit:
474 elif f1r == f2r or f1r < limit or f2r < limit:
469 return False # copy no longer relevant
475 return False # copy no longer relevant
470 except StopIteration:
476 except StopIteration:
471 return False
477 return False
472
478
473 of = None
479 of = None
474 seen = set([f])
480 seen = set([f])
475 for oc in ctx(f, m1[f]).ancestors():
481 for oc in ctx(f, m1[f]).ancestors():
476 ocr = oc.rev()
482 ocr = oc.rev()
477 of = oc.path()
483 of = oc.path()
478 if of in seen:
484 if of in seen:
479 # check limit late - grab last rename before
485 # check limit late - grab last rename before
480 if ocr < limit:
486 if ocr < limit:
481 break
487 break
482 continue
488 continue
483 seen.add(of)
489 seen.add(of)
484
490
485 fullcopy[f] = of # remember for dir rename detection
491 fullcopy[f] = of # remember for dir rename detection
486 if of not in m2:
492 if of not in m2:
487 continue # no match, keep looking
493 continue # no match, keep looking
488 if m2[of] == ma.get(of):
494 if m2[of] == ma.get(of):
489 break # no merge needed, quit early
495 break # no merge needed, quit early
490 c2 = ctx(of, m2[of])
496 c2 = ctx(of, m2[of])
491 cr = _related(oc, c2, ca.rev())
497 cr = _related(oc, c2, ca.rev())
492 if cr and (of == f or of == c2.path()): # non-divergent
498 if cr and (of == f or of == c2.path()): # non-divergent
493 copy[f] = of
499 copy[f] = of
494 of = None
500 of = None
495 break
501 break
496
502
497 if of in ma:
503 if of in ma:
498 diverge.setdefault(of, []).append(f)
504 diverge.setdefault(of, []).append(f)
499
505
500 def duplicatecopies(repo, rev, fromrev, skiprev=None):
506 def duplicatecopies(repo, rev, fromrev, skiprev=None):
501 '''reproduce copies from fromrev to rev in the dirstate
507 '''reproduce copies from fromrev to rev in the dirstate
502
508
503 If skiprev is specified, it's a revision that should be used to
509 If skiprev is specified, it's a revision that should be used to
504 filter copy records. Any copies that occur between fromrev and
510 filter copy records. Any copies that occur between fromrev and
505 skiprev will not be duplicated, even if they appear in the set of
511 skiprev will not be duplicated, even if they appear in the set of
506 copies between fromrev and rev.
512 copies between fromrev and rev.
507 '''
513 '''
508 exclude = {}
514 exclude = {}
509 if skiprev is not None:
515 if skiprev is not None:
510 exclude = pathcopies(repo[fromrev], repo[skiprev])
516 exclude = pathcopies(repo[fromrev], repo[skiprev])
511 for dst, src in pathcopies(repo[fromrev], repo[rev]).iteritems():
517 for dst, src in pathcopies(repo[fromrev], repo[rev]).iteritems():
512 # copies.pathcopies returns backward renames, so dst might not
518 # copies.pathcopies returns backward renames, so dst might not
513 # actually be in the dirstate
519 # actually be in the dirstate
514 if dst in exclude:
520 if dst in exclude:
515 continue
521 continue
516 if repo.dirstate[dst] in "nma":
522 if repo.dirstate[dst] in "nma":
517 repo.dirstate.copy(src, dst)
523 repo.dirstate.copy(src, dst)
General Comments 0
You need to be logged in to leave comments. Login now