##// END OF EJS Templates
match: rename _fmap to _fileroots for clarity...
Drew Gottlieb -
r25189:1c8c33ea default
parent child Browse files
Show More
@@ -1,1372 +1,1372
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10
10
11 import os
11 import os
12 import copy
12 import copy
13
13
14 from mercurial import hg, util, cmdutil, scmutil, match as match_, \
14 from mercurial import hg, util, cmdutil, scmutil, match as match_, \
15 archival, pathutil, revset
15 archival, pathutil, revset
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17
17
18 import lfutil
18 import lfutil
19 import lfcommands
19 import lfcommands
20 import basestore
20 import basestore
21
21
22 # -- Utility functions: commonly/repeatedly needed functionality ---------------
22 # -- Utility functions: commonly/repeatedly needed functionality ---------------
23
23
24 def composelargefilematcher(match, manifest):
24 def composelargefilematcher(match, manifest):
25 '''create a matcher that matches only the largefiles in the original
25 '''create a matcher that matches only the largefiles in the original
26 matcher'''
26 matcher'''
27 m = copy.copy(match)
27 m = copy.copy(match)
28 lfile = lambda f: lfutil.standin(f) in manifest
28 lfile = lambda f: lfutil.standin(f) in manifest
29 m._files = filter(lfile, m._files)
29 m._files = filter(lfile, m._files)
30 m._fmap = set(m._files)
30 m._fileroots = set(m._files)
31 m._always = False
31 m._always = False
32 origmatchfn = m.matchfn
32 origmatchfn = m.matchfn
33 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
33 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
34 return m
34 return m
35
35
36 def composenormalfilematcher(match, manifest, exclude=None):
36 def composenormalfilematcher(match, manifest, exclude=None):
37 excluded = set()
37 excluded = set()
38 if exclude is not None:
38 if exclude is not None:
39 excluded.update(exclude)
39 excluded.update(exclude)
40
40
41 m = copy.copy(match)
41 m = copy.copy(match)
42 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
42 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
43 manifest or f in excluded)
43 manifest or f in excluded)
44 m._files = filter(notlfile, m._files)
44 m._files = filter(notlfile, m._files)
45 m._fmap = set(m._files)
45 m._fileroots = set(m._files)
46 m._always = False
46 m._always = False
47 origmatchfn = m.matchfn
47 origmatchfn = m.matchfn
48 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
48 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
49 return m
49 return m
50
50
51 def installnormalfilesmatchfn(manifest):
51 def installnormalfilesmatchfn(manifest):
52 '''installmatchfn with a matchfn that ignores all largefiles'''
52 '''installmatchfn with a matchfn that ignores all largefiles'''
53 def overridematch(ctx, pats=[], opts={}, globbed=False,
53 def overridematch(ctx, pats=[], opts={}, globbed=False,
54 default='relpath'):
54 default='relpath'):
55 match = oldmatch(ctx, pats, opts, globbed, default)
55 match = oldmatch(ctx, pats, opts, globbed, default)
56 return composenormalfilematcher(match, manifest)
56 return composenormalfilematcher(match, manifest)
57 oldmatch = installmatchfn(overridematch)
57 oldmatch = installmatchfn(overridematch)
58
58
59 def installmatchfn(f):
59 def installmatchfn(f):
60 '''monkey patch the scmutil module with a custom match function.
60 '''monkey patch the scmutil module with a custom match function.
61 Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
61 Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
62 oldmatch = scmutil.match
62 oldmatch = scmutil.match
63 setattr(f, 'oldmatch', oldmatch)
63 setattr(f, 'oldmatch', oldmatch)
64 scmutil.match = f
64 scmutil.match = f
65 return oldmatch
65 return oldmatch
66
66
67 def restorematchfn():
67 def restorematchfn():
68 '''restores scmutil.match to what it was before installmatchfn
68 '''restores scmutil.match to what it was before installmatchfn
69 was called. no-op if scmutil.match is its original function.
69 was called. no-op if scmutil.match is its original function.
70
70
71 Note that n calls to installmatchfn will require n calls to
71 Note that n calls to installmatchfn will require n calls to
72 restore the original matchfn.'''
72 restore the original matchfn.'''
73 scmutil.match = getattr(scmutil.match, 'oldmatch')
73 scmutil.match = getattr(scmutil.match, 'oldmatch')
74
74
75 def installmatchandpatsfn(f):
75 def installmatchandpatsfn(f):
76 oldmatchandpats = scmutil.matchandpats
76 oldmatchandpats = scmutil.matchandpats
77 setattr(f, 'oldmatchandpats', oldmatchandpats)
77 setattr(f, 'oldmatchandpats', oldmatchandpats)
78 scmutil.matchandpats = f
78 scmutil.matchandpats = f
79 return oldmatchandpats
79 return oldmatchandpats
80
80
81 def restorematchandpatsfn():
81 def restorematchandpatsfn():
82 '''restores scmutil.matchandpats to what it was before
82 '''restores scmutil.matchandpats to what it was before
83 installmatchandpatsfn was called. No-op if scmutil.matchandpats
83 installmatchandpatsfn was called. No-op if scmutil.matchandpats
84 is its original function.
84 is its original function.
85
85
86 Note that n calls to installmatchandpatsfn will require n calls
86 Note that n calls to installmatchandpatsfn will require n calls
87 to restore the original matchfn.'''
87 to restore the original matchfn.'''
88 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
88 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
89 scmutil.matchandpats)
89 scmutil.matchandpats)
90
90
91 def addlargefiles(ui, repo, isaddremove, matcher, **opts):
91 def addlargefiles(ui, repo, isaddremove, matcher, **opts):
92 large = opts.get('large')
92 large = opts.get('large')
93 lfsize = lfutil.getminsize(
93 lfsize = lfutil.getminsize(
94 ui, lfutil.islfilesrepo(repo), opts.get('lfsize'))
94 ui, lfutil.islfilesrepo(repo), opts.get('lfsize'))
95
95
96 lfmatcher = None
96 lfmatcher = None
97 if lfutil.islfilesrepo(repo):
97 if lfutil.islfilesrepo(repo):
98 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
98 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
99 if lfpats:
99 if lfpats:
100 lfmatcher = match_.match(repo.root, '', list(lfpats))
100 lfmatcher = match_.match(repo.root, '', list(lfpats))
101
101
102 lfnames = []
102 lfnames = []
103 m = copy.copy(matcher)
103 m = copy.copy(matcher)
104 m.bad = lambda x, y: None
104 m.bad = lambda x, y: None
105 wctx = repo[None]
105 wctx = repo[None]
106 for f in repo.walk(m):
106 for f in repo.walk(m):
107 exact = m.exact(f)
107 exact = m.exact(f)
108 lfile = lfutil.standin(f) in wctx
108 lfile = lfutil.standin(f) in wctx
109 nfile = f in wctx
109 nfile = f in wctx
110 exists = lfile or nfile
110 exists = lfile or nfile
111
111
112 # addremove in core gets fancy with the name, add doesn't
112 # addremove in core gets fancy with the name, add doesn't
113 if isaddremove:
113 if isaddremove:
114 name = m.uipath(f)
114 name = m.uipath(f)
115 else:
115 else:
116 name = m.rel(f)
116 name = m.rel(f)
117
117
118 # Don't warn the user when they attempt to add a normal tracked file.
118 # Don't warn the user when they attempt to add a normal tracked file.
119 # The normal add code will do that for us.
119 # The normal add code will do that for us.
120 if exact and exists:
120 if exact and exists:
121 if lfile:
121 if lfile:
122 ui.warn(_('%s already a largefile\n') % name)
122 ui.warn(_('%s already a largefile\n') % name)
123 continue
123 continue
124
124
125 if (exact or not exists) and not lfutil.isstandin(f):
125 if (exact or not exists) and not lfutil.isstandin(f):
126 # In case the file was removed previously, but not committed
126 # In case the file was removed previously, but not committed
127 # (issue3507)
127 # (issue3507)
128 if not repo.wvfs.exists(f):
128 if not repo.wvfs.exists(f):
129 continue
129 continue
130
130
131 abovemin = (lfsize and
131 abovemin = (lfsize and
132 repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
132 repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
133 if large or abovemin or (lfmatcher and lfmatcher(f)):
133 if large or abovemin or (lfmatcher and lfmatcher(f)):
134 lfnames.append(f)
134 lfnames.append(f)
135 if ui.verbose or not exact:
135 if ui.verbose or not exact:
136 ui.status(_('adding %s as a largefile\n') % name)
136 ui.status(_('adding %s as a largefile\n') % name)
137
137
138 bad = []
138 bad = []
139
139
140 # Need to lock, otherwise there could be a race condition between
140 # Need to lock, otherwise there could be a race condition between
141 # when standins are created and added to the repo.
141 # when standins are created and added to the repo.
142 wlock = repo.wlock()
142 wlock = repo.wlock()
143 try:
143 try:
144 if not opts.get('dry_run'):
144 if not opts.get('dry_run'):
145 standins = []
145 standins = []
146 lfdirstate = lfutil.openlfdirstate(ui, repo)
146 lfdirstate = lfutil.openlfdirstate(ui, repo)
147 for f in lfnames:
147 for f in lfnames:
148 standinname = lfutil.standin(f)
148 standinname = lfutil.standin(f)
149 lfutil.writestandin(repo, standinname, hash='',
149 lfutil.writestandin(repo, standinname, hash='',
150 executable=lfutil.getexecutable(repo.wjoin(f)))
150 executable=lfutil.getexecutable(repo.wjoin(f)))
151 standins.append(standinname)
151 standins.append(standinname)
152 if lfdirstate[f] == 'r':
152 if lfdirstate[f] == 'r':
153 lfdirstate.normallookup(f)
153 lfdirstate.normallookup(f)
154 else:
154 else:
155 lfdirstate.add(f)
155 lfdirstate.add(f)
156 lfdirstate.write()
156 lfdirstate.write()
157 bad += [lfutil.splitstandin(f)
157 bad += [lfutil.splitstandin(f)
158 for f in repo[None].add(standins)
158 for f in repo[None].add(standins)
159 if f in m.files()]
159 if f in m.files()]
160
160
161 added = [f for f in lfnames if f not in bad]
161 added = [f for f in lfnames if f not in bad]
162 finally:
162 finally:
163 wlock.release()
163 wlock.release()
164 return added, bad
164 return added, bad
165
165
166 def removelargefiles(ui, repo, isaddremove, matcher, **opts):
166 def removelargefiles(ui, repo, isaddremove, matcher, **opts):
167 after = opts.get('after')
167 after = opts.get('after')
168 m = composelargefilematcher(matcher, repo[None].manifest())
168 m = composelargefilematcher(matcher, repo[None].manifest())
169 try:
169 try:
170 repo.lfstatus = True
170 repo.lfstatus = True
171 s = repo.status(match=m, clean=not isaddremove)
171 s = repo.status(match=m, clean=not isaddremove)
172 finally:
172 finally:
173 repo.lfstatus = False
173 repo.lfstatus = False
174 manifest = repo[None].manifest()
174 manifest = repo[None].manifest()
175 modified, added, deleted, clean = [[f for f in list
175 modified, added, deleted, clean = [[f for f in list
176 if lfutil.standin(f) in manifest]
176 if lfutil.standin(f) in manifest]
177 for list in (s.modified, s.added,
177 for list in (s.modified, s.added,
178 s.deleted, s.clean)]
178 s.deleted, s.clean)]
179
179
180 def warn(files, msg):
180 def warn(files, msg):
181 for f in files:
181 for f in files:
182 ui.warn(msg % m.rel(f))
182 ui.warn(msg % m.rel(f))
183 return int(len(files) > 0)
183 return int(len(files) > 0)
184
184
185 result = 0
185 result = 0
186
186
187 if after:
187 if after:
188 remove = deleted
188 remove = deleted
189 result = warn(modified + added + clean,
189 result = warn(modified + added + clean,
190 _('not removing %s: file still exists\n'))
190 _('not removing %s: file still exists\n'))
191 else:
191 else:
192 remove = deleted + clean
192 remove = deleted + clean
193 result = warn(modified, _('not removing %s: file is modified (use -f'
193 result = warn(modified, _('not removing %s: file is modified (use -f'
194 ' to force removal)\n'))
194 ' to force removal)\n'))
195 result = warn(added, _('not removing %s: file has been marked for add'
195 result = warn(added, _('not removing %s: file has been marked for add'
196 ' (use forget to undo)\n')) or result
196 ' (use forget to undo)\n')) or result
197
197
198 # Need to lock because standin files are deleted then removed from the
198 # Need to lock because standin files are deleted then removed from the
199 # repository and we could race in-between.
199 # repository and we could race in-between.
200 wlock = repo.wlock()
200 wlock = repo.wlock()
201 try:
201 try:
202 lfdirstate = lfutil.openlfdirstate(ui, repo)
202 lfdirstate = lfutil.openlfdirstate(ui, repo)
203 for f in sorted(remove):
203 for f in sorted(remove):
204 if ui.verbose or not m.exact(f):
204 if ui.verbose or not m.exact(f):
205 # addremove in core gets fancy with the name, remove doesn't
205 # addremove in core gets fancy with the name, remove doesn't
206 if isaddremove:
206 if isaddremove:
207 name = m.uipath(f)
207 name = m.uipath(f)
208 else:
208 else:
209 name = m.rel(f)
209 name = m.rel(f)
210 ui.status(_('removing %s\n') % name)
210 ui.status(_('removing %s\n') % name)
211
211
212 if not opts.get('dry_run'):
212 if not opts.get('dry_run'):
213 if not after:
213 if not after:
214 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
214 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
215
215
216 if opts.get('dry_run'):
216 if opts.get('dry_run'):
217 return result
217 return result
218
218
219 remove = [lfutil.standin(f) for f in remove]
219 remove = [lfutil.standin(f) for f in remove]
220 # If this is being called by addremove, let the original addremove
220 # If this is being called by addremove, let the original addremove
221 # function handle this.
221 # function handle this.
222 if not isaddremove:
222 if not isaddremove:
223 for f in remove:
223 for f in remove:
224 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
224 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
225 repo[None].forget(remove)
225 repo[None].forget(remove)
226
226
227 for f in remove:
227 for f in remove:
228 lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
228 lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
229 False)
229 False)
230
230
231 lfdirstate.write()
231 lfdirstate.write()
232 finally:
232 finally:
233 wlock.release()
233 wlock.release()
234
234
235 return result
235 return result
236
236
237 # For overriding mercurial.hgweb.webcommands so that largefiles will
237 # For overriding mercurial.hgweb.webcommands so that largefiles will
238 # appear at their right place in the manifests.
238 # appear at their right place in the manifests.
239 def decodepath(orig, path):
239 def decodepath(orig, path):
240 return lfutil.splitstandin(path) or path
240 return lfutil.splitstandin(path) or path
241
241
242 # -- Wrappers: modify existing commands --------------------------------
242 # -- Wrappers: modify existing commands --------------------------------
243
243
244 def overrideadd(orig, ui, repo, *pats, **opts):
244 def overrideadd(orig, ui, repo, *pats, **opts):
245 if opts.get('normal') and opts.get('large'):
245 if opts.get('normal') and opts.get('large'):
246 raise util.Abort(_('--normal cannot be used with --large'))
246 raise util.Abort(_('--normal cannot be used with --large'))
247 return orig(ui, repo, *pats, **opts)
247 return orig(ui, repo, *pats, **opts)
248
248
249 def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
249 def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
250 # The --normal flag short circuits this override
250 # The --normal flag short circuits this override
251 if opts.get('normal'):
251 if opts.get('normal'):
252 return orig(ui, repo, matcher, prefix, explicitonly, **opts)
252 return orig(ui, repo, matcher, prefix, explicitonly, **opts)
253
253
254 ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
254 ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
255 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
255 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
256 ladded)
256 ladded)
257 bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)
257 bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)
258
258
259 bad.extend(f for f in lbad)
259 bad.extend(f for f in lbad)
260 return bad
260 return bad
261
261
262 def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
262 def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
263 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
263 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
264 result = orig(ui, repo, normalmatcher, prefix, after, force, subrepos)
264 result = orig(ui, repo, normalmatcher, prefix, after, force, subrepos)
265 return removelargefiles(ui, repo, False, matcher, after=after,
265 return removelargefiles(ui, repo, False, matcher, after=after,
266 force=force) or result
266 force=force) or result
267
267
268 def overridestatusfn(orig, repo, rev2, **opts):
268 def overridestatusfn(orig, repo, rev2, **opts):
269 try:
269 try:
270 repo._repo.lfstatus = True
270 repo._repo.lfstatus = True
271 return orig(repo, rev2, **opts)
271 return orig(repo, rev2, **opts)
272 finally:
272 finally:
273 repo._repo.lfstatus = False
273 repo._repo.lfstatus = False
274
274
275 def overridestatus(orig, ui, repo, *pats, **opts):
275 def overridestatus(orig, ui, repo, *pats, **opts):
276 try:
276 try:
277 repo.lfstatus = True
277 repo.lfstatus = True
278 return orig(ui, repo, *pats, **opts)
278 return orig(ui, repo, *pats, **opts)
279 finally:
279 finally:
280 repo.lfstatus = False
280 repo.lfstatus = False
281
281
282 def overridedirty(orig, repo, ignoreupdate=False):
282 def overridedirty(orig, repo, ignoreupdate=False):
283 try:
283 try:
284 repo._repo.lfstatus = True
284 repo._repo.lfstatus = True
285 return orig(repo, ignoreupdate)
285 return orig(repo, ignoreupdate)
286 finally:
286 finally:
287 repo._repo.lfstatus = False
287 repo._repo.lfstatus = False
288
288
289 def overridelog(orig, ui, repo, *pats, **opts):
289 def overridelog(orig, ui, repo, *pats, **opts):
290 def overridematchandpats(ctx, pats=[], opts={}, globbed=False,
290 def overridematchandpats(ctx, pats=[], opts={}, globbed=False,
291 default='relpath'):
291 default='relpath'):
292 """Matcher that merges root directory with .hglf, suitable for log.
292 """Matcher that merges root directory with .hglf, suitable for log.
293 It is still possible to match .hglf directly.
293 It is still possible to match .hglf directly.
294 For any listed files run log on the standin too.
294 For any listed files run log on the standin too.
295 matchfn tries both the given filename and with .hglf stripped.
295 matchfn tries both the given filename and with .hglf stripped.
296 """
296 """
297 matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default)
297 matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default)
298 m, p = copy.copy(matchandpats)
298 m, p = copy.copy(matchandpats)
299
299
300 if m.always():
300 if m.always():
301 # We want to match everything anyway, so there's no benefit trying
301 # We want to match everything anyway, so there's no benefit trying
302 # to add standins.
302 # to add standins.
303 return matchandpats
303 return matchandpats
304
304
305 pats = set(p)
305 pats = set(p)
306
306
307 def fixpats(pat, tostandin=lfutil.standin):
307 def fixpats(pat, tostandin=lfutil.standin):
308 if pat.startswith('set:'):
308 if pat.startswith('set:'):
309 return pat
309 return pat
310
310
311 kindpat = match_._patsplit(pat, None)
311 kindpat = match_._patsplit(pat, None)
312
312
313 if kindpat[0] is not None:
313 if kindpat[0] is not None:
314 return kindpat[0] + ':' + tostandin(kindpat[1])
314 return kindpat[0] + ':' + tostandin(kindpat[1])
315 return tostandin(kindpat[1])
315 return tostandin(kindpat[1])
316
316
317 if m._cwd:
317 if m._cwd:
318 hglf = lfutil.shortname
318 hglf = lfutil.shortname
319 back = util.pconvert(m.rel(hglf)[:-len(hglf)])
319 back = util.pconvert(m.rel(hglf)[:-len(hglf)])
320
320
321 def tostandin(f):
321 def tostandin(f):
322 # The file may already be a standin, so trucate the back
322 # The file may already be a standin, so trucate the back
323 # prefix and test before mangling it. This avoids turning
323 # prefix and test before mangling it. This avoids turning
324 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
324 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
325 if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
325 if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
326 return f
326 return f
327
327
328 # An absolute path is from outside the repo, so truncate the
328 # An absolute path is from outside the repo, so truncate the
329 # path to the root before building the standin. Otherwise cwd
329 # path to the root before building the standin. Otherwise cwd
330 # is somewhere in the repo, relative to root, and needs to be
330 # is somewhere in the repo, relative to root, and needs to be
331 # prepended before building the standin.
331 # prepended before building the standin.
332 if os.path.isabs(m._cwd):
332 if os.path.isabs(m._cwd):
333 f = f[len(back):]
333 f = f[len(back):]
334 else:
334 else:
335 f = m._cwd + '/' + f
335 f = m._cwd + '/' + f
336 return back + lfutil.standin(f)
336 return back + lfutil.standin(f)
337
337
338 pats.update(fixpats(f, tostandin) for f in p)
338 pats.update(fixpats(f, tostandin) for f in p)
339 else:
339 else:
340 def tostandin(f):
340 def tostandin(f):
341 if lfutil.splitstandin(f):
341 if lfutil.splitstandin(f):
342 return f
342 return f
343 return lfutil.standin(f)
343 return lfutil.standin(f)
344 pats.update(fixpats(f, tostandin) for f in p)
344 pats.update(fixpats(f, tostandin) for f in p)
345
345
346 for i in range(0, len(m._files)):
346 for i in range(0, len(m._files)):
347 # Don't add '.hglf' to m.files, since that is already covered by '.'
347 # Don't add '.hglf' to m.files, since that is already covered by '.'
348 if m._files[i] == '.':
348 if m._files[i] == '.':
349 continue
349 continue
350 standin = lfutil.standin(m._files[i])
350 standin = lfutil.standin(m._files[i])
351 # If the "standin" is a directory, append instead of replace to
351 # If the "standin" is a directory, append instead of replace to
352 # support naming a directory on the command line with only
352 # support naming a directory on the command line with only
353 # largefiles. The original directory is kept to support normal
353 # largefiles. The original directory is kept to support normal
354 # files.
354 # files.
355 if standin in repo[ctx.node()]:
355 if standin in repo[ctx.node()]:
356 m._files[i] = standin
356 m._files[i] = standin
357 elif m._files[i] not in repo[ctx.node()] \
357 elif m._files[i] not in repo[ctx.node()] \
358 and repo.wvfs.isdir(standin):
358 and repo.wvfs.isdir(standin):
359 m._files.append(standin)
359 m._files.append(standin)
360
360
361 m._fmap = set(m._files)
361 m._fileroots = set(m._files)
362 m._always = False
362 m._always = False
363 origmatchfn = m.matchfn
363 origmatchfn = m.matchfn
364 def lfmatchfn(f):
364 def lfmatchfn(f):
365 lf = lfutil.splitstandin(f)
365 lf = lfutil.splitstandin(f)
366 if lf is not None and origmatchfn(lf):
366 if lf is not None and origmatchfn(lf):
367 return True
367 return True
368 r = origmatchfn(f)
368 r = origmatchfn(f)
369 return r
369 return r
370 m.matchfn = lfmatchfn
370 m.matchfn = lfmatchfn
371
371
372 ui.debug('updated patterns: %s\n' % sorted(pats))
372 ui.debug('updated patterns: %s\n' % sorted(pats))
373 return m, pats
373 return m, pats
374
374
375 # For hg log --patch, the match object is used in two different senses:
375 # For hg log --patch, the match object is used in two different senses:
376 # (1) to determine what revisions should be printed out, and
376 # (1) to determine what revisions should be printed out, and
377 # (2) to determine what files to print out diffs for.
377 # (2) to determine what files to print out diffs for.
378 # The magic matchandpats override should be used for case (1) but not for
378 # The magic matchandpats override should be used for case (1) but not for
379 # case (2).
379 # case (2).
380 def overridemakelogfilematcher(repo, pats, opts):
380 def overridemakelogfilematcher(repo, pats, opts):
381 wctx = repo[None]
381 wctx = repo[None]
382 match, pats = oldmatchandpats(wctx, pats, opts)
382 match, pats = oldmatchandpats(wctx, pats, opts)
383 return lambda rev: match
383 return lambda rev: match
384
384
385 oldmatchandpats = installmatchandpatsfn(overridematchandpats)
385 oldmatchandpats = installmatchandpatsfn(overridematchandpats)
386 oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
386 oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
387 setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)
387 setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)
388
388
389 try:
389 try:
390 return orig(ui, repo, *pats, **opts)
390 return orig(ui, repo, *pats, **opts)
391 finally:
391 finally:
392 restorematchandpatsfn()
392 restorematchandpatsfn()
393 setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
393 setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
394
394
395 def overrideverify(orig, ui, repo, *pats, **opts):
395 def overrideverify(orig, ui, repo, *pats, **opts):
396 large = opts.pop('large', False)
396 large = opts.pop('large', False)
397 all = opts.pop('lfa', False)
397 all = opts.pop('lfa', False)
398 contents = opts.pop('lfc', False)
398 contents = opts.pop('lfc', False)
399
399
400 result = orig(ui, repo, *pats, **opts)
400 result = orig(ui, repo, *pats, **opts)
401 if large or all or contents:
401 if large or all or contents:
402 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
402 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
403 return result
403 return result
404
404
405 def overridedebugstate(orig, ui, repo, *pats, **opts):
405 def overridedebugstate(orig, ui, repo, *pats, **opts):
406 large = opts.pop('large', False)
406 large = opts.pop('large', False)
407 if large:
407 if large:
408 class fakerepo(object):
408 class fakerepo(object):
409 dirstate = lfutil.openlfdirstate(ui, repo)
409 dirstate = lfutil.openlfdirstate(ui, repo)
410 orig(ui, fakerepo, *pats, **opts)
410 orig(ui, fakerepo, *pats, **opts)
411 else:
411 else:
412 orig(ui, repo, *pats, **opts)
412 orig(ui, repo, *pats, **opts)
413
413
414 # Before starting the manifest merge, merge.updates will call
414 # Before starting the manifest merge, merge.updates will call
415 # _checkunknownfile to check if there are any files in the merged-in
415 # _checkunknownfile to check if there are any files in the merged-in
416 # changeset that collide with unknown files in the working copy.
416 # changeset that collide with unknown files in the working copy.
417 #
417 #
418 # The largefiles are seen as unknown, so this prevents us from merging
418 # The largefiles are seen as unknown, so this prevents us from merging
419 # in a file 'foo' if we already have a largefile with the same name.
419 # in a file 'foo' if we already have a largefile with the same name.
420 #
420 #
421 # The overridden function filters the unknown files by removing any
421 # The overridden function filters the unknown files by removing any
422 # largefiles. This makes the merge proceed and we can then handle this
422 # largefiles. This makes the merge proceed and we can then handle this
423 # case further in the overridden calculateupdates function below.
423 # case further in the overridden calculateupdates function below.
424 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
424 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
425 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
425 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
426 return False
426 return False
427 return origfn(repo, wctx, mctx, f, f2)
427 return origfn(repo, wctx, mctx, f, f2)
428
428
429 # The manifest merge handles conflicts on the manifest level. We want
429 # The manifest merge handles conflicts on the manifest level. We want
430 # to handle changes in largefile-ness of files at this level too.
430 # to handle changes in largefile-ness of files at this level too.
431 #
431 #
432 # The strategy is to run the original calculateupdates and then process
432 # The strategy is to run the original calculateupdates and then process
433 # the action list it outputs. There are two cases we need to deal with:
433 # the action list it outputs. There are two cases we need to deal with:
434 #
434 #
435 # 1. Normal file in p1, largefile in p2. Here the largefile is
435 # 1. Normal file in p1, largefile in p2. Here the largefile is
436 # detected via its standin file, which will enter the working copy
436 # detected via its standin file, which will enter the working copy
437 # with a "get" action. It is not "merge" since the standin is all
437 # with a "get" action. It is not "merge" since the standin is all
438 # Mercurial is concerned with at this level -- the link to the
438 # Mercurial is concerned with at this level -- the link to the
439 # existing normal file is not relevant here.
439 # existing normal file is not relevant here.
440 #
440 #
441 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
441 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
442 # since the largefile will be present in the working copy and
442 # since the largefile will be present in the working copy and
443 # different from the normal file in p2. Mercurial therefore
443 # different from the normal file in p2. Mercurial therefore
444 # triggers a merge action.
444 # triggers a merge action.
445 #
445 #
446 # In both cases, we prompt the user and emit new actions to either
446 # In both cases, we prompt the user and emit new actions to either
447 # remove the standin (if the normal file was kept) or to remove the
447 # remove the standin (if the normal file was kept) or to remove the
448 # normal file and get the standin (if the largefile was kept). The
448 # normal file and get the standin (if the largefile was kept). The
449 # default prompt answer is to use the largefile version since it was
449 # default prompt answer is to use the largefile version since it was
450 # presumably changed on purpose.
450 # presumably changed on purpose.
451 #
451 #
452 # Finally, the merge.applyupdates function will then take care of
452 # Finally, the merge.applyupdates function will then take care of
453 # writing the files into the working copy and lfcommands.updatelfiles
453 # writing the files into the working copy and lfcommands.updatelfiles
454 # will update the largefiles.
454 # will update the largefiles.
455 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
455 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
456 partial, acceptremote, followcopies):
456 partial, acceptremote, followcopies):
457 overwrite = force and not branchmerge
457 overwrite = force and not branchmerge
458 actions, diverge, renamedelete = origfn(
458 actions, diverge, renamedelete = origfn(
459 repo, p1, p2, pas, branchmerge, force, partial, acceptremote,
459 repo, p1, p2, pas, branchmerge, force, partial, acceptremote,
460 followcopies)
460 followcopies)
461
461
462 if overwrite:
462 if overwrite:
463 return actions, diverge, renamedelete
463 return actions, diverge, renamedelete
464
464
465 # Convert to dictionary with filename as key and action as value.
465 # Convert to dictionary with filename as key and action as value.
466 lfiles = set()
466 lfiles = set()
467 for f in actions:
467 for f in actions:
468 splitstandin = f and lfutil.splitstandin(f)
468 splitstandin = f and lfutil.splitstandin(f)
469 if splitstandin in p1:
469 if splitstandin in p1:
470 lfiles.add(splitstandin)
470 lfiles.add(splitstandin)
471 elif lfutil.standin(f) in p1:
471 elif lfutil.standin(f) in p1:
472 lfiles.add(f)
472 lfiles.add(f)
473
473
474 for lfile in lfiles:
474 for lfile in lfiles:
475 standin = lfutil.standin(lfile)
475 standin = lfutil.standin(lfile)
476 (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
476 (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
477 (sm, sargs, smsg) = actions.get(standin, (None, None, None))
477 (sm, sargs, smsg) = actions.get(standin, (None, None, None))
478 if sm in ('g', 'dc') and lm != 'r':
478 if sm in ('g', 'dc') and lm != 'r':
479 # Case 1: normal file in the working copy, largefile in
479 # Case 1: normal file in the working copy, largefile in
480 # the second parent
480 # the second parent
481 usermsg = _('remote turned local normal file %s into a largefile\n'
481 usermsg = _('remote turned local normal file %s into a largefile\n'
482 'use (l)argefile or keep (n)ormal file?'
482 'use (l)argefile or keep (n)ormal file?'
483 '$$ &Largefile $$ &Normal file') % lfile
483 '$$ &Largefile $$ &Normal file') % lfile
484 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
484 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
485 actions[lfile] = ('r', None, 'replaced by standin')
485 actions[lfile] = ('r', None, 'replaced by standin')
486 actions[standin] = ('g', sargs, 'replaces standin')
486 actions[standin] = ('g', sargs, 'replaces standin')
487 else: # keep local normal file
487 else: # keep local normal file
488 actions[lfile] = ('k', None, 'replaces standin')
488 actions[lfile] = ('k', None, 'replaces standin')
489 if branchmerge:
489 if branchmerge:
490 actions[standin] = ('k', None, 'replaced by non-standin')
490 actions[standin] = ('k', None, 'replaced by non-standin')
491 else:
491 else:
492 actions[standin] = ('r', None, 'replaced by non-standin')
492 actions[standin] = ('r', None, 'replaced by non-standin')
493 elif lm in ('g', 'dc') and sm != 'r':
493 elif lm in ('g', 'dc') and sm != 'r':
494 # Case 2: largefile in the working copy, normal file in
494 # Case 2: largefile in the working copy, normal file in
495 # the second parent
495 # the second parent
496 usermsg = _('remote turned local largefile %s into a normal file\n'
496 usermsg = _('remote turned local largefile %s into a normal file\n'
497 'keep (l)argefile or use (n)ormal file?'
497 'keep (l)argefile or use (n)ormal file?'
498 '$$ &Largefile $$ &Normal file') % lfile
498 '$$ &Largefile $$ &Normal file') % lfile
499 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
499 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
500 if branchmerge:
500 if branchmerge:
501 # largefile can be restored from standin safely
501 # largefile can be restored from standin safely
502 actions[lfile] = ('k', None, 'replaced by standin')
502 actions[lfile] = ('k', None, 'replaced by standin')
503 actions[standin] = ('k', None, 'replaces standin')
503 actions[standin] = ('k', None, 'replaces standin')
504 else:
504 else:
505 # "lfile" should be marked as "removed" without
505 # "lfile" should be marked as "removed" without
506 # removal of itself
506 # removal of itself
507 actions[lfile] = ('lfmr', None,
507 actions[lfile] = ('lfmr', None,
508 'forget non-standin largefile')
508 'forget non-standin largefile')
509
509
510 # linear-merge should treat this largefile as 're-added'
510 # linear-merge should treat this largefile as 're-added'
511 actions[standin] = ('a', None, 'keep standin')
511 actions[standin] = ('a', None, 'keep standin')
512 else: # pick remote normal file
512 else: # pick remote normal file
513 actions[lfile] = ('g', largs, 'replaces standin')
513 actions[lfile] = ('g', largs, 'replaces standin')
514 actions[standin] = ('r', None, 'replaced by non-standin')
514 actions[standin] = ('r', None, 'replaced by non-standin')
515
515
516 return actions, diverge, renamedelete
516 return actions, diverge, renamedelete
517
517
518 def mergerecordupdates(orig, repo, actions, branchmerge):
518 def mergerecordupdates(orig, repo, actions, branchmerge):
519 if 'lfmr' in actions:
519 if 'lfmr' in actions:
520 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
520 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
521 for lfile, args, msg in actions['lfmr']:
521 for lfile, args, msg in actions['lfmr']:
522 # this should be executed before 'orig', to execute 'remove'
522 # this should be executed before 'orig', to execute 'remove'
523 # before all other actions
523 # before all other actions
524 repo.dirstate.remove(lfile)
524 repo.dirstate.remove(lfile)
525 # make sure lfile doesn't get synclfdirstate'd as normal
525 # make sure lfile doesn't get synclfdirstate'd as normal
526 lfdirstate.add(lfile)
526 lfdirstate.add(lfile)
527 lfdirstate.write()
527 lfdirstate.write()
528
528
529 return orig(repo, actions, branchmerge)
529 return orig(repo, actions, branchmerge)
530
530
531
531
532 # Override filemerge to prompt the user about how they wish to merge
532 # Override filemerge to prompt the user about how they wish to merge
533 # largefiles. This will handle identical edits without prompting the user.
533 # largefiles. This will handle identical edits without prompting the user.
534 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca, labels=None):
534 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca, labels=None):
535 if not lfutil.isstandin(orig):
535 if not lfutil.isstandin(orig):
536 return origfn(repo, mynode, orig, fcd, fco, fca, labels=labels)
536 return origfn(repo, mynode, orig, fcd, fco, fca, labels=labels)
537
537
538 ahash = fca.data().strip().lower()
538 ahash = fca.data().strip().lower()
539 dhash = fcd.data().strip().lower()
539 dhash = fcd.data().strip().lower()
540 ohash = fco.data().strip().lower()
540 ohash = fco.data().strip().lower()
541 if (ohash != ahash and
541 if (ohash != ahash and
542 ohash != dhash and
542 ohash != dhash and
543 (dhash == ahash or
543 (dhash == ahash or
544 repo.ui.promptchoice(
544 repo.ui.promptchoice(
545 _('largefile %s has a merge conflict\nancestor was %s\n'
545 _('largefile %s has a merge conflict\nancestor was %s\n'
546 'keep (l)ocal %s or\ntake (o)ther %s?'
546 'keep (l)ocal %s or\ntake (o)ther %s?'
547 '$$ &Local $$ &Other') %
547 '$$ &Local $$ &Other') %
548 (lfutil.splitstandin(orig), ahash, dhash, ohash),
548 (lfutil.splitstandin(orig), ahash, dhash, ohash),
549 0) == 1)):
549 0) == 1)):
550 repo.wwrite(fcd.path(), fco.data(), fco.flags())
550 repo.wwrite(fcd.path(), fco.data(), fco.flags())
551 return 0
551 return 0
552
552
553 def copiespathcopies(orig, ctx1, ctx2, match=None):
553 def copiespathcopies(orig, ctx1, ctx2, match=None):
554 copies = orig(ctx1, ctx2, match=match)
554 copies = orig(ctx1, ctx2, match=match)
555 updated = {}
555 updated = {}
556
556
557 for k, v in copies.iteritems():
557 for k, v in copies.iteritems():
558 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
558 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
559
559
560 return updated
560 return updated
561
561
562 # Copy first changes the matchers to match standins instead of
562 # Copy first changes the matchers to match standins instead of
563 # largefiles. Then it overrides util.copyfile in that function it
563 # largefiles. Then it overrides util.copyfile in that function it
564 # checks if the destination largefile already exists. It also keeps a
564 # checks if the destination largefile already exists. It also keeps a
565 # list of copied files so that the largefiles can be copied and the
565 # list of copied files so that the largefiles can be copied and the
566 # dirstate updated.
566 # dirstate updated.
567 def overridecopy(orig, ui, repo, pats, opts, rename=False):
567 def overridecopy(orig, ui, repo, pats, opts, rename=False):
568 # doesn't remove largefile on rename
568 # doesn't remove largefile on rename
569 if len(pats) < 2:
569 if len(pats) < 2:
570 # this isn't legal, let the original function deal with it
570 # this isn't legal, let the original function deal with it
571 return orig(ui, repo, pats, opts, rename)
571 return orig(ui, repo, pats, opts, rename)
572
572
573 # This could copy both lfiles and normal files in one command,
573 # This could copy both lfiles and normal files in one command,
574 # but we don't want to do that. First replace their matcher to
574 # but we don't want to do that. First replace their matcher to
575 # only match normal files and run it, then replace it to just
575 # only match normal files and run it, then replace it to just
576 # match largefiles and run it again.
576 # match largefiles and run it again.
577 nonormalfiles = False
577 nonormalfiles = False
578 nolfiles = False
578 nolfiles = False
579 installnormalfilesmatchfn(repo[None].manifest())
579 installnormalfilesmatchfn(repo[None].manifest())
580 try:
580 try:
581 result = orig(ui, repo, pats, opts, rename)
581 result = orig(ui, repo, pats, opts, rename)
582 except util.Abort, e:
582 except util.Abort, e:
583 if str(e) != _('no files to copy'):
583 if str(e) != _('no files to copy'):
584 raise e
584 raise e
585 else:
585 else:
586 nonormalfiles = True
586 nonormalfiles = True
587 result = 0
587 result = 0
588 finally:
588 finally:
589 restorematchfn()
589 restorematchfn()
590
590
591 # The first rename can cause our current working directory to be removed.
591 # The first rename can cause our current working directory to be removed.
592 # In that case there is nothing left to copy/rename so just quit.
592 # In that case there is nothing left to copy/rename so just quit.
593 try:
593 try:
594 repo.getcwd()
594 repo.getcwd()
595 except OSError:
595 except OSError:
596 return result
596 return result
597
597
598 def makestandin(relpath):
598 def makestandin(relpath):
599 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
599 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
600 return os.path.join(repo.wjoin(lfutil.standin(path)))
600 return os.path.join(repo.wjoin(lfutil.standin(path)))
601
601
602 fullpats = scmutil.expandpats(pats)
602 fullpats = scmutil.expandpats(pats)
603 dest = fullpats[-1]
603 dest = fullpats[-1]
604
604
605 if os.path.isdir(dest):
605 if os.path.isdir(dest):
606 if not os.path.isdir(makestandin(dest)):
606 if not os.path.isdir(makestandin(dest)):
607 os.makedirs(makestandin(dest))
607 os.makedirs(makestandin(dest))
608
608
609 try:
609 try:
610 # When we call orig below it creates the standins but we don't add
610 # When we call orig below it creates the standins but we don't add
611 # them to the dir state until later so lock during that time.
611 # them to the dir state until later so lock during that time.
612 wlock = repo.wlock()
612 wlock = repo.wlock()
613
613
614 manifest = repo[None].manifest()
614 manifest = repo[None].manifest()
615 def overridematch(ctx, pats=[], opts={}, globbed=False,
615 def overridematch(ctx, pats=[], opts={}, globbed=False,
616 default='relpath'):
616 default='relpath'):
617 newpats = []
617 newpats = []
618 # The patterns were previously mangled to add the standin
618 # The patterns were previously mangled to add the standin
619 # directory; we need to remove that now
619 # directory; we need to remove that now
620 for pat in pats:
620 for pat in pats:
621 if match_.patkind(pat) is None and lfutil.shortname in pat:
621 if match_.patkind(pat) is None and lfutil.shortname in pat:
622 newpats.append(pat.replace(lfutil.shortname, ''))
622 newpats.append(pat.replace(lfutil.shortname, ''))
623 else:
623 else:
624 newpats.append(pat)
624 newpats.append(pat)
625 match = oldmatch(ctx, newpats, opts, globbed, default)
625 match = oldmatch(ctx, newpats, opts, globbed, default)
626 m = copy.copy(match)
626 m = copy.copy(match)
627 lfile = lambda f: lfutil.standin(f) in manifest
627 lfile = lambda f: lfutil.standin(f) in manifest
628 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
628 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
629 m._fmap = set(m._files)
629 m._fileroots = set(m._files)
630 origmatchfn = m.matchfn
630 origmatchfn = m.matchfn
631 m.matchfn = lambda f: (lfutil.isstandin(f) and
631 m.matchfn = lambda f: (lfutil.isstandin(f) and
632 (f in manifest) and
632 (f in manifest) and
633 origmatchfn(lfutil.splitstandin(f)) or
633 origmatchfn(lfutil.splitstandin(f)) or
634 None)
634 None)
635 return m
635 return m
636 oldmatch = installmatchfn(overridematch)
636 oldmatch = installmatchfn(overridematch)
637 listpats = []
637 listpats = []
638 for pat in pats:
638 for pat in pats:
639 if match_.patkind(pat) is not None:
639 if match_.patkind(pat) is not None:
640 listpats.append(pat)
640 listpats.append(pat)
641 else:
641 else:
642 listpats.append(makestandin(pat))
642 listpats.append(makestandin(pat))
643
643
644 try:
644 try:
645 origcopyfile = util.copyfile
645 origcopyfile = util.copyfile
646 copiedfiles = []
646 copiedfiles = []
647 def overridecopyfile(src, dest):
647 def overridecopyfile(src, dest):
648 if (lfutil.shortname in src and
648 if (lfutil.shortname in src and
649 dest.startswith(repo.wjoin(lfutil.shortname))):
649 dest.startswith(repo.wjoin(lfutil.shortname))):
650 destlfile = dest.replace(lfutil.shortname, '')
650 destlfile = dest.replace(lfutil.shortname, '')
651 if not opts['force'] and os.path.exists(destlfile):
651 if not opts['force'] and os.path.exists(destlfile):
652 raise IOError('',
652 raise IOError('',
653 _('destination largefile already exists'))
653 _('destination largefile already exists'))
654 copiedfiles.append((src, dest))
654 copiedfiles.append((src, dest))
655 origcopyfile(src, dest)
655 origcopyfile(src, dest)
656
656
657 util.copyfile = overridecopyfile
657 util.copyfile = overridecopyfile
658 result += orig(ui, repo, listpats, opts, rename)
658 result += orig(ui, repo, listpats, opts, rename)
659 finally:
659 finally:
660 util.copyfile = origcopyfile
660 util.copyfile = origcopyfile
661
661
662 lfdirstate = lfutil.openlfdirstate(ui, repo)
662 lfdirstate = lfutil.openlfdirstate(ui, repo)
663 for (src, dest) in copiedfiles:
663 for (src, dest) in copiedfiles:
664 if (lfutil.shortname in src and
664 if (lfutil.shortname in src and
665 dest.startswith(repo.wjoin(lfutil.shortname))):
665 dest.startswith(repo.wjoin(lfutil.shortname))):
666 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
666 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
667 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
667 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
668 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
668 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
669 if not os.path.isdir(destlfiledir):
669 if not os.path.isdir(destlfiledir):
670 os.makedirs(destlfiledir)
670 os.makedirs(destlfiledir)
671 if rename:
671 if rename:
672 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
672 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
673
673
674 # The file is gone, but this deletes any empty parent
674 # The file is gone, but this deletes any empty parent
675 # directories as a side-effect.
675 # directories as a side-effect.
676 util.unlinkpath(repo.wjoin(srclfile), True)
676 util.unlinkpath(repo.wjoin(srclfile), True)
677 lfdirstate.remove(srclfile)
677 lfdirstate.remove(srclfile)
678 else:
678 else:
679 util.copyfile(repo.wjoin(srclfile),
679 util.copyfile(repo.wjoin(srclfile),
680 repo.wjoin(destlfile))
680 repo.wjoin(destlfile))
681
681
682 lfdirstate.add(destlfile)
682 lfdirstate.add(destlfile)
683 lfdirstate.write()
683 lfdirstate.write()
684 except util.Abort, e:
684 except util.Abort, e:
685 if str(e) != _('no files to copy'):
685 if str(e) != _('no files to copy'):
686 raise e
686 raise e
687 else:
687 else:
688 nolfiles = True
688 nolfiles = True
689 finally:
689 finally:
690 restorematchfn()
690 restorematchfn()
691 wlock.release()
691 wlock.release()
692
692
693 if nolfiles and nonormalfiles:
693 if nolfiles and nonormalfiles:
694 raise util.Abort(_('no files to copy'))
694 raise util.Abort(_('no files to copy'))
695
695
696 return result
696 return result
697
697
698 # When the user calls revert, we have to be careful to not revert any
698 # When the user calls revert, we have to be careful to not revert any
699 # changes to other largefiles accidentally. This means we have to keep
699 # changes to other largefiles accidentally. This means we have to keep
700 # track of the largefiles that are being reverted so we only pull down
700 # track of the largefiles that are being reverted so we only pull down
701 # the necessary largefiles.
701 # the necessary largefiles.
702 #
702 #
703 # Standins are only updated (to match the hash of largefiles) before
703 # Standins are only updated (to match the hash of largefiles) before
704 # commits. Update the standins then run the original revert, changing
704 # commits. Update the standins then run the original revert, changing
705 # the matcher to hit standins instead of largefiles. Based on the
705 # the matcher to hit standins instead of largefiles. Based on the
706 # resulting standins update the largefiles.
706 # resulting standins update the largefiles.
707 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
707 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
708 # Because we put the standins in a bad state (by updating them)
708 # Because we put the standins in a bad state (by updating them)
709 # and then return them to a correct state we need to lock to
709 # and then return them to a correct state we need to lock to
710 # prevent others from changing them in their incorrect state.
710 # prevent others from changing them in their incorrect state.
711 wlock = repo.wlock()
711 wlock = repo.wlock()
712 try:
712 try:
713 lfdirstate = lfutil.openlfdirstate(ui, repo)
713 lfdirstate = lfutil.openlfdirstate(ui, repo)
714 s = lfutil.lfdirstatestatus(lfdirstate, repo)
714 s = lfutil.lfdirstatestatus(lfdirstate, repo)
715 lfdirstate.write()
715 lfdirstate.write()
716 for lfile in s.modified:
716 for lfile in s.modified:
717 lfutil.updatestandin(repo, lfutil.standin(lfile))
717 lfutil.updatestandin(repo, lfutil.standin(lfile))
718 for lfile in s.deleted:
718 for lfile in s.deleted:
719 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
719 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
720 os.unlink(repo.wjoin(lfutil.standin(lfile)))
720 os.unlink(repo.wjoin(lfutil.standin(lfile)))
721
721
722 oldstandins = lfutil.getstandinsstate(repo)
722 oldstandins = lfutil.getstandinsstate(repo)
723
723
724 def overridematch(mctx, pats=[], opts={}, globbed=False,
724 def overridematch(mctx, pats=[], opts={}, globbed=False,
725 default='relpath'):
725 default='relpath'):
726 match = oldmatch(mctx, pats, opts, globbed, default)
726 match = oldmatch(mctx, pats, opts, globbed, default)
727 m = copy.copy(match)
727 m = copy.copy(match)
728
728
729 # revert supports recursing into subrepos, and though largefiles
729 # revert supports recursing into subrepos, and though largefiles
730 # currently doesn't work correctly in that case, this match is
730 # currently doesn't work correctly in that case, this match is
731 # called, so the lfdirstate above may not be the correct one for
731 # called, so the lfdirstate above may not be the correct one for
732 # this invocation of match.
732 # this invocation of match.
733 lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
733 lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
734 False)
734 False)
735
735
736 def tostandin(f):
736 def tostandin(f):
737 standin = lfutil.standin(f)
737 standin = lfutil.standin(f)
738 if standin in ctx or standin in mctx:
738 if standin in ctx or standin in mctx:
739 return standin
739 return standin
740 elif standin in repo[None] or lfdirstate[f] == 'r':
740 elif standin in repo[None] or lfdirstate[f] == 'r':
741 return None
741 return None
742 return f
742 return f
743 m._files = [tostandin(f) for f in m._files]
743 m._files = [tostandin(f) for f in m._files]
744 m._files = [f for f in m._files if f is not None]
744 m._files = [f for f in m._files if f is not None]
745 m._fmap = set(m._files)
745 m._fileroots = set(m._files)
746 origmatchfn = m.matchfn
746 origmatchfn = m.matchfn
747 def matchfn(f):
747 def matchfn(f):
748 if lfutil.isstandin(f):
748 if lfutil.isstandin(f):
749 return (origmatchfn(lfutil.splitstandin(f)) and
749 return (origmatchfn(lfutil.splitstandin(f)) and
750 (f in ctx or f in mctx))
750 (f in ctx or f in mctx))
751 return origmatchfn(f)
751 return origmatchfn(f)
752 m.matchfn = matchfn
752 m.matchfn = matchfn
753 return m
753 return m
754 oldmatch = installmatchfn(overridematch)
754 oldmatch = installmatchfn(overridematch)
755 try:
755 try:
756 orig(ui, repo, ctx, parents, *pats, **opts)
756 orig(ui, repo, ctx, parents, *pats, **opts)
757 finally:
757 finally:
758 restorematchfn()
758 restorematchfn()
759
759
760 newstandins = lfutil.getstandinsstate(repo)
760 newstandins = lfutil.getstandinsstate(repo)
761 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
761 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
762 # lfdirstate should be 'normallookup'-ed for updated files,
762 # lfdirstate should be 'normallookup'-ed for updated files,
763 # because reverting doesn't touch dirstate for 'normal' files
763 # because reverting doesn't touch dirstate for 'normal' files
764 # when target revision is explicitly specified: in such case,
764 # when target revision is explicitly specified: in such case,
765 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
765 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
766 # of target (standin) file.
766 # of target (standin) file.
767 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
767 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
768 normallookup=True)
768 normallookup=True)
769
769
770 finally:
770 finally:
771 wlock.release()
771 wlock.release()
772
772
773 # after pulling changesets, we need to take some extra care to get
773 # after pulling changesets, we need to take some extra care to get
774 # largefiles updated remotely
774 # largefiles updated remotely
775 def overridepull(orig, ui, repo, source=None, **opts):
775 def overridepull(orig, ui, repo, source=None, **opts):
776 revsprepull = len(repo)
776 revsprepull = len(repo)
777 if not source:
777 if not source:
778 source = 'default'
778 source = 'default'
779 repo.lfpullsource = source
779 repo.lfpullsource = source
780 result = orig(ui, repo, source, **opts)
780 result = orig(ui, repo, source, **opts)
781 revspostpull = len(repo)
781 revspostpull = len(repo)
782 lfrevs = opts.get('lfrev', [])
782 lfrevs = opts.get('lfrev', [])
783 if opts.get('all_largefiles'):
783 if opts.get('all_largefiles'):
784 lfrevs.append('pulled()')
784 lfrevs.append('pulled()')
785 if lfrevs and revspostpull > revsprepull:
785 if lfrevs and revspostpull > revsprepull:
786 numcached = 0
786 numcached = 0
787 repo.firstpulled = revsprepull # for pulled() revset expression
787 repo.firstpulled = revsprepull # for pulled() revset expression
788 try:
788 try:
789 for rev in scmutil.revrange(repo, lfrevs):
789 for rev in scmutil.revrange(repo, lfrevs):
790 ui.note(_('pulling largefiles for revision %s\n') % rev)
790 ui.note(_('pulling largefiles for revision %s\n') % rev)
791 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
791 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
792 numcached += len(cached)
792 numcached += len(cached)
793 finally:
793 finally:
794 del repo.firstpulled
794 del repo.firstpulled
795 ui.status(_("%d largefiles cached\n") % numcached)
795 ui.status(_("%d largefiles cached\n") % numcached)
796 return result
796 return result
797
797
798 def pulledrevsetsymbol(repo, subset, x):
798 def pulledrevsetsymbol(repo, subset, x):
799 """``pulled()``
799 """``pulled()``
800 Changesets that just has been pulled.
800 Changesets that just has been pulled.
801
801
802 Only available with largefiles from pull --lfrev expressions.
802 Only available with largefiles from pull --lfrev expressions.
803
803
804 .. container:: verbose
804 .. container:: verbose
805
805
806 Some examples:
806 Some examples:
807
807
808 - pull largefiles for all new changesets::
808 - pull largefiles for all new changesets::
809
809
810 hg pull --lfrev "pulled()"
810 hg pull --lfrev "pulled()"
811
811
812 - pull largefiles for all new branch heads::
812 - pull largefiles for all new branch heads::
813
813
814 hg pull --lfrev "head(pulled()) and not closed()"
814 hg pull --lfrev "head(pulled()) and not closed()"
815
815
816 """
816 """
817
817
818 try:
818 try:
819 firstpulled = repo.firstpulled
819 firstpulled = repo.firstpulled
820 except AttributeError:
820 except AttributeError:
821 raise util.Abort(_("pulled() only available in --lfrev"))
821 raise util.Abort(_("pulled() only available in --lfrev"))
822 return revset.baseset([r for r in subset if r >= firstpulled])
822 return revset.baseset([r for r in subset if r >= firstpulled])
823
823
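The pulled() predicate above hinges on one number: how many revisions the repository had before the pull (stashed on the repo as firstpulled). A minimal, self-contained sketch of that gating idea in plain Python follows; FakeRepo is purely illustrative and stands in for a repository whose len() is its revision count.

    class FakeRepo(object):
        '''Illustrative stand-in: len() is the revision count.'''
        def __init__(self, nrevs):
            self.nrevs = nrevs
        def __len__(self):
            return self.nrevs

    repo = FakeRepo(5)
    revsprepull = len(repo)        # 5 revisions exist before the pull
    repo.nrevs = 8                 # pretend the pull added revisions 5, 6 and 7
    revspostpull = len(repo)
    firstpulled = revsprepull      # what overridepull stores on the repo
    pulled = [r for r in range(revspostpull) if r >= firstpulled]
    assert pulled == [5, 6, 7]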
824 def overrideclone(orig, ui, source, dest=None, **opts):
824 def overrideclone(orig, ui, source, dest=None, **opts):
825 d = dest
825 d = dest
826 if d is None:
826 if d is None:
827 d = hg.defaultdest(source)
827 d = hg.defaultdest(source)
828 if opts.get('all_largefiles') and not hg.islocal(d):
828 if opts.get('all_largefiles') and not hg.islocal(d):
829 raise util.Abort(_(
829 raise util.Abort(_(
830 '--all-largefiles is incompatible with non-local destination %s') %
830 '--all-largefiles is incompatible with non-local destination %s') %
831 d)
831 d)
832
832
833 return orig(ui, source, dest, **opts)
833 return orig(ui, source, dest, **opts)
834
834
835 def hgclone(orig, ui, opts, *args, **kwargs):
835 def hgclone(orig, ui, opts, *args, **kwargs):
836 result = orig(ui, opts, *args, **kwargs)
836 result = orig(ui, opts, *args, **kwargs)
837
837
838 if result is not None:
838 if result is not None:
839 sourcerepo, destrepo = result
839 sourcerepo, destrepo = result
840 repo = destrepo.local()
840 repo = destrepo.local()
841
841
842 # When cloning to a remote repo (like through SSH), no repo is available
842 # When cloning to a remote repo (like through SSH), no repo is available
843 # from the peer. Therefore the largefiles can't be downloaded and the
843 # from the peer. Therefore the largefiles can't be downloaded and the
844 # hgrc can't be updated.
844 # hgrc can't be updated.
845 if not repo:
845 if not repo:
846 return result
846 return result
847
847
848 # If largefiles is required for this repo, permanently enable it locally
848 # If largefiles is required for this repo, permanently enable it locally
849 if 'largefiles' in repo.requirements:
849 if 'largefiles' in repo.requirements:
850 fp = repo.vfs('hgrc', 'a', text=True)
850 fp = repo.vfs('hgrc', 'a', text=True)
851 try:
851 try:
852 fp.write('\n[extensions]\nlargefiles=\n')
852 fp.write('\n[extensions]\nlargefiles=\n')
853 finally:
853 finally:
854 fp.close()
854 fp.close()
855
855
856 # Caching is implicitly limited to 'rev' option, since the dest repo was
856 # Caching is implicitly limited to 'rev' option, since the dest repo was
857 # truncated at that point. The user may expect a download count with
857 # truncated at that point. The user may expect a download count with
858 # this option, so attempt the download whether or not this is a largefile repo.
858 # this option, so attempt the download whether or not this is a largefile repo.
859 if opts.get('all_largefiles'):
859 if opts.get('all_largefiles'):
860 success, missing = lfcommands.downloadlfiles(ui, repo, None)
860 success, missing = lfcommands.downloadlfiles(ui, repo, None)
861
861
862 if missing != 0:
862 if missing != 0:
863 return None
863 return None
864
864
865 return result
865 return result
866
866
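The hgrc step in hgclone() just appends an [extensions] stanza so the clone keeps largefiles enabled. A small sketch of what that append produces, writing to a scratch file instead of a real .hg/hgrc:

    import os
    import tempfile

    hgrc = os.path.join(tempfile.mkdtemp(), 'hgrc')
    fp = open(hgrc, 'a')
    try:
        fp.write('\n[extensions]\nlargefiles=\n')
    finally:
        fp.close()
    fp = open(hgrc)
    try:
        assert fp.read().endswith('[extensions]\nlargefiles=\n')
    finally:
        fp.close()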
867 def overriderebase(orig, ui, repo, **opts):
867 def overriderebase(orig, ui, repo, **opts):
868 if not util.safehasattr(repo, '_largefilesenabled'):
868 if not util.safehasattr(repo, '_largefilesenabled'):
869 return orig(ui, repo, **opts)
869 return orig(ui, repo, **opts)
870
870
871 resuming = opts.get('continue')
871 resuming = opts.get('continue')
872 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
872 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
873 repo._lfstatuswriters.append(lambda *msg, **opts: None)
873 repo._lfstatuswriters.append(lambda *msg, **opts: None)
874 try:
874 try:
875 return orig(ui, repo, **opts)
875 return orig(ui, repo, **opts)
876 finally:
876 finally:
877 repo._lfstatuswriters.pop()
877 repo._lfstatuswriters.pop()
878 repo._lfcommithooks.pop()
878 repo._lfcommithooks.pop()
879
879
880 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
880 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
881 prefix='', mtime=None, subrepos=None):
881 prefix='', mtime=None, subrepos=None):
882 # No need to lock because we are only reading history and
882 # No need to lock because we are only reading history and
883 # largefile caches, neither of which are modified.
883 # largefile caches, neither of which are modified.
884 lfcommands.cachelfiles(repo.ui, repo, node)
884 lfcommands.cachelfiles(repo.ui, repo, node)
885
885
886 if kind not in archival.archivers:
886 if kind not in archival.archivers:
887 raise util.Abort(_("unknown archive type '%s'") % kind)
887 raise util.Abort(_("unknown archive type '%s'") % kind)
888
888
889 ctx = repo[node]
889 ctx = repo[node]
890
890
891 if kind == 'files':
891 if kind == 'files':
892 if prefix:
892 if prefix:
893 raise util.Abort(
893 raise util.Abort(
894 _('cannot give prefix when archiving to files'))
894 _('cannot give prefix when archiving to files'))
895 else:
895 else:
896 prefix = archival.tidyprefix(dest, kind, prefix)
896 prefix = archival.tidyprefix(dest, kind, prefix)
897
897
898 def write(name, mode, islink, getdata):
898 def write(name, mode, islink, getdata):
899 if matchfn and not matchfn(name):
899 if matchfn and not matchfn(name):
900 return
900 return
901 data = getdata()
901 data = getdata()
902 if decode:
902 if decode:
903 data = repo.wwritedata(name, data)
903 data = repo.wwritedata(name, data)
904 archiver.addfile(prefix + name, mode, islink, data)
904 archiver.addfile(prefix + name, mode, islink, data)
905
905
906 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
906 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
907
907
908 if repo.ui.configbool("ui", "archivemeta", True):
908 if repo.ui.configbool("ui", "archivemeta", True):
909 write('.hg_archival.txt', 0644, False,
909 write('.hg_archival.txt', 0644, False,
910 lambda: archival.buildmetadata(ctx))
910 lambda: archival.buildmetadata(ctx))
911
911
912 for f in ctx:
912 for f in ctx:
913 ff = ctx.flags(f)
913 ff = ctx.flags(f)
914 getdata = ctx[f].data
914 getdata = ctx[f].data
915 if lfutil.isstandin(f):
915 if lfutil.isstandin(f):
916 path = lfutil.findfile(repo, getdata().strip())
916 path = lfutil.findfile(repo, getdata().strip())
917 if path is None:
917 if path is None:
918 raise util.Abort(
918 raise util.Abort(
919 _('largefile %s not found in repo store or system cache')
919 _('largefile %s not found in repo store or system cache')
920 % lfutil.splitstandin(f))
920 % lfutil.splitstandin(f))
921 f = lfutil.splitstandin(f)
921 f = lfutil.splitstandin(f)
922
922
923 def getdatafn():
923 def getdatafn():
924 fd = None
924 fd = None
925 try:
925 try:
926 fd = open(path, 'rb')
926 fd = open(path, 'rb')
927 return fd.read()
927 return fd.read()
928 finally:
928 finally:
929 if fd:
929 if fd:
930 fd.close()
930 fd.close()
931
931
932 getdata = getdatafn
932 getdata = getdatafn
933 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
933 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
934
934
935 if subrepos:
935 if subrepos:
936 for subpath in sorted(ctx.substate):
936 for subpath in sorted(ctx.substate):
937 sub = ctx.sub(subpath)
937 sub = ctx.sub(subpath)
938 submatch = match_.narrowmatcher(subpath, matchfn)
938 submatch = match_.narrowmatcher(subpath, matchfn)
939 sub.archive(archiver, prefix, submatch)
939 sub.archive(archiver, prefix, submatch)
940
940
941 archiver.done()
941 archiver.done()
942
942
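overridearchive() relies on the standin convention: a tracked '.hglf/<name>' file holds the largefile's hash, and findfile() resolves that hash to real content in the store or user cache. A standalone illustration of the path mapping follows; SHORTNAME and these small helpers are illustrative rather than lfutil's actual API.

    SHORTNAME = '.hglf'

    def standin(f):
        return SHORTNAME + '/' + f

    def isstandin(f):
        return f.startswith(SHORTNAME + '/')

    def splitstandin(f):
        if isstandin(f):
            return f[len(SHORTNAME) + 1:]
        return None

    assert standin('videos/intro.mov') == '.hglf/videos/intro.mov'
    assert splitstandin('.hglf/videos/intro.mov') == 'videos/intro.mov'
    assert splitstandin('videos/intro.mov') is None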
943 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
943 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
944 repo._get(repo._state + ('hg',))
944 repo._get(repo._state + ('hg',))
945 rev = repo._state[1]
945 rev = repo._state[1]
946 ctx = repo._repo[rev]
946 ctx = repo._repo[rev]
947
947
948 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
948 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
949
949
950 def write(name, mode, islink, getdata):
950 def write(name, mode, islink, getdata):
951 # At this point, the standin has been replaced with the largefile name,
951 # At this point, the standin has been replaced with the largefile name,
952 # so the normal matcher works here without the lfutil variants.
952 # so the normal matcher works here without the lfutil variants.
953 if match and not match(f):
953 if match and not match(f):
954 return
954 return
955 data = getdata()
955 data = getdata()
956
956
957 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
957 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
958
958
959 for f in ctx:
959 for f in ctx:
960 ff = ctx.flags(f)
960 ff = ctx.flags(f)
961 getdata = ctx[f].data
961 getdata = ctx[f].data
962 if lfutil.isstandin(f):
962 if lfutil.isstandin(f):
963 path = lfutil.findfile(repo._repo, getdata().strip())
963 path = lfutil.findfile(repo._repo, getdata().strip())
964 if path is None:
964 if path is None:
965 raise util.Abort(
965 raise util.Abort(
966 _('largefile %s not found in repo store or system cache')
966 _('largefile %s not found in repo store or system cache')
967 % lfutil.splitstandin(f))
967 % lfutil.splitstandin(f))
968 f = lfutil.splitstandin(f)
968 f = lfutil.splitstandin(f)
969
969
970 def getdatafn():
970 def getdatafn():
971 fd = None
971 fd = None
972 try:
972 try:
973 fd = open(os.path.join(prefix, path), 'rb')
973 fd = open(os.path.join(prefix, path), 'rb')
974 return fd.read()
974 return fd.read()
975 finally:
975 finally:
976 if fd:
976 if fd:
977 fd.close()
977 fd.close()
978
978
979 getdata = getdatafn
979 getdata = getdatafn
980
980
981 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
981 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
982
982
983 for subpath in sorted(ctx.substate):
983 for subpath in sorted(ctx.substate):
984 sub = ctx.sub(subpath)
984 sub = ctx.sub(subpath)
985 submatch = match_.narrowmatcher(subpath, match)
985 submatch = match_.narrowmatcher(subpath, match)
986 sub.archive(archiver, prefix + repo._path + '/', submatch)
986 sub.archive(archiver, prefix + repo._path + '/', submatch)
987
987
988 # If a largefile is modified, the change is not reflected in its
988 # If a largefile is modified, the change is not reflected in its
989 # standin until a commit. cmdutil.bailifchanged() raises an exception
989 # standin until a commit. cmdutil.bailifchanged() raises an exception
990 # if the repo has uncommitted changes. Wrap it to also check if
990 # if the repo has uncommitted changes. Wrap it to also check if
991 # largefiles were changed. This is used by bisect, backout and fetch.
991 # largefiles were changed. This is used by bisect, backout and fetch.
992 def overridebailifchanged(orig, repo, *args, **kwargs):
992 def overridebailifchanged(orig, repo, *args, **kwargs):
993 orig(repo, *args, **kwargs)
993 orig(repo, *args, **kwargs)
994 repo.lfstatus = True
994 repo.lfstatus = True
995 s = repo.status()
995 s = repo.status()
996 repo.lfstatus = False
996 repo.lfstatus = False
997 if s.modified or s.added or s.removed or s.deleted:
997 if s.modified or s.added or s.removed or s.deleted:
998 raise util.Abort(_('uncommitted changes'))
998 raise util.Abort(_('uncommitted changes'))
999
999
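Every override in this file follows the same convention: the wrapper receives the original function as its first argument ('orig') and decides when to defer to it. Mercurial wires the wrappers up through extensions.wrapfunction in the extension's setup code (not shown here); the self-contained sketch below, with a hypothetical cmdutilstub, only illustrates the shape of that mechanism.

    def wrapfunction(container, name, wrapper):
        '''Illustrative re-implementation: swap container.name for a closure
        that passes the original callable to the wrapper as 'orig'.'''
        origfn = getattr(container, name)
        def wrapped(*args, **kwargs):
            return wrapper(origfn, *args, **kwargs)
        setattr(container, name, wrapped)
        return origfn

    class _module(object):
        pass

    cmdutilstub = _module()          # hypothetical stand-in for a module
    cmdutilstub.bailifchanged = lambda repo: 'original checked %s' % repo

    def overridestub(orig, repo):
        # extension-specific checks would run here before deferring to orig
        return orig(repo)

    wrapfunction(cmdutilstub, 'bailifchanged', overridestub)
    assert cmdutilstub.bailifchanged('myrepo') == 'original checked myrepo'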
1000 def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
1000 def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
1001 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1001 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1002 bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
1002 bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
1003 m = composelargefilematcher(match, repo[None].manifest())
1003 m = composelargefilematcher(match, repo[None].manifest())
1004
1004
1005 try:
1005 try:
1006 repo.lfstatus = True
1006 repo.lfstatus = True
1007 s = repo.status(match=m, clean=True)
1007 s = repo.status(match=m, clean=True)
1008 finally:
1008 finally:
1009 repo.lfstatus = False
1009 repo.lfstatus = False
1010 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1010 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1011 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
1011 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
1012
1012
1013 for f in forget:
1013 for f in forget:
1014 if lfutil.standin(f) not in repo.dirstate and not \
1014 if lfutil.standin(f) not in repo.dirstate and not \
1015 repo.wvfs.isdir(lfutil.standin(f)):
1015 repo.wvfs.isdir(lfutil.standin(f)):
1016 ui.warn(_('not removing %s: file is already untracked\n')
1016 ui.warn(_('not removing %s: file is already untracked\n')
1017 % m.rel(f))
1017 % m.rel(f))
1018 bad.append(f)
1018 bad.append(f)
1019
1019
1020 for f in forget:
1020 for f in forget:
1021 if ui.verbose or not m.exact(f):
1021 if ui.verbose or not m.exact(f):
1022 ui.status(_('removing %s\n') % m.rel(f))
1022 ui.status(_('removing %s\n') % m.rel(f))
1023
1023
1024 # Need to lock because standin files are deleted then removed from the
1024 # Need to lock because standin files are deleted then removed from the
1025 # repository and we could race in-between.
1025 # repository and we could race in-between.
1026 wlock = repo.wlock()
1026 wlock = repo.wlock()
1027 try:
1027 try:
1028 lfdirstate = lfutil.openlfdirstate(ui, repo)
1028 lfdirstate = lfutil.openlfdirstate(ui, repo)
1029 for f in forget:
1029 for f in forget:
1030 if lfdirstate[f] == 'a':
1030 if lfdirstate[f] == 'a':
1031 lfdirstate.drop(f)
1031 lfdirstate.drop(f)
1032 else:
1032 else:
1033 lfdirstate.remove(f)
1033 lfdirstate.remove(f)
1034 lfdirstate.write()
1034 lfdirstate.write()
1035 standins = [lfutil.standin(f) for f in forget]
1035 standins = [lfutil.standin(f) for f in forget]
1036 for f in standins:
1036 for f in standins:
1037 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
1037 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
1038 rejected = repo[None].forget(standins)
1038 rejected = repo[None].forget(standins)
1039 finally:
1039 finally:
1040 wlock.release()
1040 wlock.release()
1041
1041
1042 bad.extend(f for f in rejected if f in m.files())
1042 bad.extend(f for f in rejected if f in m.files())
1043 forgot.extend(f for f in forget if f not in rejected)
1043 forgot.extend(f for f in forget if f not in rejected)
1044 return bad, forgot
1044 return bad, forgot
1045
1045
1046 def _getoutgoings(repo, other, missing, addfunc):
1046 def _getoutgoings(repo, other, missing, addfunc):
1047 """get pairs of filename and largefile hash in outgoing revisions
1047 """get pairs of filename and largefile hash in outgoing revisions
1048 in 'missing'.
1048 in 'missing'.
1049
1049
1050 largefiles already existing on the 'other' repository are ignored.
1050 largefiles already existing on the 'other' repository are ignored.
1051
1051
1052 'addfunc' is invoked with each unique pair of filename and
1052 'addfunc' is invoked with each unique pair of filename and
1053 largefile hash value.
1053 largefile hash value.
1054 """
1054 """
1055 knowns = set()
1055 knowns = set()
1056 lfhashes = set()
1056 lfhashes = set()
1057 def dedup(fn, lfhash):
1057 def dedup(fn, lfhash):
1058 k = (fn, lfhash)
1058 k = (fn, lfhash)
1059 if k not in knowns:
1059 if k not in knowns:
1060 knowns.add(k)
1060 knowns.add(k)
1061 lfhashes.add(lfhash)
1061 lfhashes.add(lfhash)
1062 lfutil.getlfilestoupload(repo, missing, dedup)
1062 lfutil.getlfilestoupload(repo, missing, dedup)
1063 if lfhashes:
1063 if lfhashes:
1064 lfexists = basestore._openstore(repo, other).exists(lfhashes)
1064 lfexists = basestore._openstore(repo, other).exists(lfhashes)
1065 for fn, lfhash in knowns:
1065 for fn, lfhash in knowns:
1066 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1066 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1067 addfunc(fn, lfhash)
1067 addfunc(fn, lfhash)
1068
1068
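A plain-Python illustration of the two-step pattern in _getoutgoings(): first collect each unique (filename, hash) pair, then batch-check the hashes against the other side and report only the pairs it is missing. The lfexists dict is a fake answer standing in for the store's exists() call.

    knowns = set()
    lfhashes = set()

    def dedup(fn, lfhash):
        k = (fn, lfhash)
        if k not in knowns:
            knowns.add(k)
            lfhashes.add(lfhash)

    for fn, h in [('a.bin', 'h1'), ('a.bin', 'h1'), ('b.bin', 'h2')]:
        dedup(fn, h)

    lfexists = {'h1': False, 'h2': True}   # pretend the remote already has h2
    toupload = sorted(fn for fn, h in knowns if not lfexists[h])
    assert toupload == ['a.bin']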
1069 def outgoinghook(ui, repo, other, opts, missing):
1069 def outgoinghook(ui, repo, other, opts, missing):
1070 if opts.pop('large', None):
1070 if opts.pop('large', None):
1071 lfhashes = set()
1071 lfhashes = set()
1072 if ui.debugflag:
1072 if ui.debugflag:
1073 toupload = {}
1073 toupload = {}
1074 def addfunc(fn, lfhash):
1074 def addfunc(fn, lfhash):
1075 if fn not in toupload:
1075 if fn not in toupload:
1076 toupload[fn] = []
1076 toupload[fn] = []
1077 toupload[fn].append(lfhash)
1077 toupload[fn].append(lfhash)
1078 lfhashes.add(lfhash)
1078 lfhashes.add(lfhash)
1079 def showhashes(fn):
1079 def showhashes(fn):
1080 for lfhash in sorted(toupload[fn]):
1080 for lfhash in sorted(toupload[fn]):
1081 ui.debug(' %s\n' % (lfhash))
1081 ui.debug(' %s\n' % (lfhash))
1082 else:
1082 else:
1083 toupload = set()
1083 toupload = set()
1084 def addfunc(fn, lfhash):
1084 def addfunc(fn, lfhash):
1085 toupload.add(fn)
1085 toupload.add(fn)
1086 lfhashes.add(lfhash)
1086 lfhashes.add(lfhash)
1087 def showhashes(fn):
1087 def showhashes(fn):
1088 pass
1088 pass
1089 _getoutgoings(repo, other, missing, addfunc)
1089 _getoutgoings(repo, other, missing, addfunc)
1090
1090
1091 if not toupload:
1091 if not toupload:
1092 ui.status(_('largefiles: no files to upload\n'))
1092 ui.status(_('largefiles: no files to upload\n'))
1093 else:
1093 else:
1094 ui.status(_('largefiles to upload (%d entities):\n')
1094 ui.status(_('largefiles to upload (%d entities):\n')
1095 % (len(lfhashes)))
1095 % (len(lfhashes)))
1096 for file in sorted(toupload):
1096 for file in sorted(toupload):
1097 ui.status(lfutil.splitstandin(file) + '\n')
1097 ui.status(lfutil.splitstandin(file) + '\n')
1098 showhashes(file)
1098 showhashes(file)
1099 ui.status('\n')
1099 ui.status('\n')
1100
1100
1101 def summaryremotehook(ui, repo, opts, changes):
1101 def summaryremotehook(ui, repo, opts, changes):
1102 largeopt = opts.get('large', False)
1102 largeopt = opts.get('large', False)
1103 if changes is None:
1103 if changes is None:
1104 if largeopt:
1104 if largeopt:
1105 return (False, True) # only outgoing check is needed
1105 return (False, True) # only outgoing check is needed
1106 else:
1106 else:
1107 return (False, False)
1107 return (False, False)
1108 elif largeopt:
1108 elif largeopt:
1109 url, branch, peer, outgoing = changes[1]
1109 url, branch, peer, outgoing = changes[1]
1110 if peer is None:
1110 if peer is None:
1111 # i18n: column positioning for "hg summary"
1111 # i18n: column positioning for "hg summary"
1112 ui.status(_('largefiles: (no remote repo)\n'))
1112 ui.status(_('largefiles: (no remote repo)\n'))
1113 return
1113 return
1114
1114
1115 toupload = set()
1115 toupload = set()
1116 lfhashes = set()
1116 lfhashes = set()
1117 def addfunc(fn, lfhash):
1117 def addfunc(fn, lfhash):
1118 toupload.add(fn)
1118 toupload.add(fn)
1119 lfhashes.add(lfhash)
1119 lfhashes.add(lfhash)
1120 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1120 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1121
1121
1122 if not toupload:
1122 if not toupload:
1123 # i18n: column positioning for "hg summary"
1123 # i18n: column positioning for "hg summary"
1124 ui.status(_('largefiles: (no files to upload)\n'))
1124 ui.status(_('largefiles: (no files to upload)\n'))
1125 else:
1125 else:
1126 # i18n: column positioning for "hg summary"
1126 # i18n: column positioning for "hg summary"
1127 ui.status(_('largefiles: %d entities for %d files to upload\n')
1127 ui.status(_('largefiles: %d entities for %d files to upload\n')
1128 % (len(lfhashes), len(toupload)))
1128 % (len(lfhashes), len(toupload)))
1129
1129
1130 def overridesummary(orig, ui, repo, *pats, **opts):
1130 def overridesummary(orig, ui, repo, *pats, **opts):
1131 try:
1131 try:
1132 repo.lfstatus = True
1132 repo.lfstatus = True
1133 orig(ui, repo, *pats, **opts)
1133 orig(ui, repo, *pats, **opts)
1134 finally:
1134 finally:
1135 repo.lfstatus = False
1135 repo.lfstatus = False
1136
1136
1137 def scmutiladdremove(orig, repo, matcher, prefix, opts={}, dry_run=None,
1137 def scmutiladdremove(orig, repo, matcher, prefix, opts={}, dry_run=None,
1138 similarity=None):
1138 similarity=None):
1139 if not lfutil.islfilesrepo(repo):
1139 if not lfutil.islfilesrepo(repo):
1140 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1140 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1141 # Get the list of missing largefiles so we can remove them
1141 # Get the list of missing largefiles so we can remove them
1142 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1142 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1143 unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
1143 unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
1144 False, False, False)
1144 False, False, False)
1145
1145
1146 # Call into the normal remove code, but let the original addremove handle
1146 # Call into the normal remove code, but let the original addremove handle
1147 # removing the standin. Monkey patching here makes sure we don't remove
1147 # removing the standin. Monkey patching here makes sure we don't remove
1148 # the standin in the largefiles code, preventing a very confused state
1148 # the standin in the largefiles code, preventing a very confused state
1149 # later.
1149 # later.
1150 if s.deleted:
1150 if s.deleted:
1151 m = copy.copy(matcher)
1151 m = copy.copy(matcher)
1152
1152
1153 # The m._files and m._map attributes are not changed to the deleted list
1153 # The m._files and m._map attributes are not changed to the deleted list
1154 # because that affects the m.exact() test, which in turn governs whether
1154 # because that affects the m.exact() test, which in turn governs whether
1155 # or not the file name is printed, and how. Simply limit the original
1155 # or not the file name is printed, and how. Simply limit the original
1156 # matches to those in the deleted status list.
1156 # matches to those in the deleted status list.
1157 matchfn = m.matchfn
1157 matchfn = m.matchfn
1158 m.matchfn = lambda f: f in s.deleted and matchfn(f)
1158 m.matchfn = lambda f: f in s.deleted and matchfn(f)
1159
1159
1160 removelargefiles(repo.ui, repo, True, m, **opts)
1160 removelargefiles(repo.ui, repo, True, m, **opts)
1161 # Call into the normal add code, and any files that *should* be added as
1161 # Call into the normal add code, and any files that *should* be added as
1162 # largefiles will be
1162 # largefiles will be
1163 added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
1163 added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
1164 # Now that we've handled largefiles, hand off to the original addremove
1164 # Now that we've handled largefiles, hand off to the original addremove
1165 # function to take care of the rest. Make sure it doesn't do anything with
1165 # function to take care of the rest. Make sure it doesn't do anything with
1166 # largefiles by passing a matcher that will ignore them.
1166 # largefiles by passing a matcher that will ignore them.
1167 matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
1167 matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
1168 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1168 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1169
1169
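The matcher trick in scmutiladdremove() keeps the user's matcher object intact (so exact() and message formatting behave normally) and only ANDs its matchfn with membership in the deleted set. A tiny standalone version of that narrowing, with FakeMatcher standing in for a real matcher:

    class FakeMatcher(object):
        def __init__(self):
            self.matchfn = lambda f: f.endswith('.bin')

    deleted = set(['big.bin'])
    m = FakeMatcher()
    origmatchfn = m.matchfn
    m.matchfn = lambda f: f in deleted and origmatchfn(f)

    assert m.matchfn('big.bin')            # deleted and matched by the pattern
    assert not m.matchfn('other.bin')      # matched by the pattern, not deleted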
1170 # Calling purge with --all will cause the largefiles to be deleted.
1170 # Calling purge with --all will cause the largefiles to be deleted.
1171 # Override repo.status to prevent this from happening.
1171 # Override repo.status to prevent this from happening.
1172 def overridepurge(orig, ui, repo, *dirs, **opts):
1172 def overridepurge(orig, ui, repo, *dirs, **opts):
1173 # XXX Monkey patching a repoview will not work. The assigned attribute will
1173 # XXX Monkey patching a repoview will not work. The assigned attribute will
1174 # be set on the unfiltered repo, but we will only lookup attributes in the
1174 # be set on the unfiltered repo, but we will only lookup attributes in the
1175 # unfiltered repo if the lookup in the repoview object itself fails. As the
1175 # unfiltered repo if the lookup in the repoview object itself fails. As the
1176 # monkey patched method exists on the repoview class the lookup will not
1176 # monkey patched method exists on the repoview class the lookup will not
1177 # fail. As a result, the original version will shadow the monkey patched
1177 # fail. As a result, the original version will shadow the monkey patched
1178 # one, defeating the monkey patch.
1178 # one, defeating the monkey patch.
1179 #
1179 #
1180 # As a work around we use an unfiltered repo here. We should do something
1180 # As a work around we use an unfiltered repo here. We should do something
1181 # cleaner instead.
1181 # cleaner instead.
1182 repo = repo.unfiltered()
1182 repo = repo.unfiltered()
1183 oldstatus = repo.status
1183 oldstatus = repo.status
1184 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1184 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1185 clean=False, unknown=False, listsubrepos=False):
1185 clean=False, unknown=False, listsubrepos=False):
1186 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1186 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1187 listsubrepos)
1187 listsubrepos)
1188 lfdirstate = lfutil.openlfdirstate(ui, repo)
1188 lfdirstate = lfutil.openlfdirstate(ui, repo)
1189 unknown = [f for f in r.unknown if lfdirstate[f] == '?']
1189 unknown = [f for f in r.unknown if lfdirstate[f] == '?']
1190 ignored = [f for f in r.ignored if lfdirstate[f] == '?']
1190 ignored = [f for f in r.ignored if lfdirstate[f] == '?']
1191 return scmutil.status(r.modified, r.added, r.removed, r.deleted,
1191 return scmutil.status(r.modified, r.added, r.removed, r.deleted,
1192 unknown, ignored, r.clean)
1192 unknown, ignored, r.clean)
1193 repo.status = overridestatus
1193 repo.status = overridestatus
1194 orig(ui, repo, *dirs, **opts)
1194 orig(ui, repo, *dirs, **opts)
1195 repo.status = oldstatus
1195 repo.status = oldstatus
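The XXX comment in overridepurge() is easier to see with a concrete toy. In the sketch below (not Mercurial's actual repoview), attribute writes are forwarded to the wrapped repo while reads still find the method defined on the view class, so a monkey patch applied through the view never takes effect; hence the repo.unfiltered() workaround above.

    class Repo(object):
        def status(self):
            return 'unfiltered status'

    class RepoView(object):
        def __init__(self, repo):
            object.__setattr__(self, '_repo', repo)
        def status(self):
            return 'view status'
        def __getattr__(self, name):         # only reached when lookup fails
            return getattr(self._repo, name)
        def __setattr__(self, name, value):  # writes land on the wrapped repo
            setattr(self._repo, name, value)

    view = RepoView(Repo())
    view.status = lambda: 'patched status'   # stored on the wrapped repo...
    assert view.status() == 'view status'    # ...but the class method shadows it
    assert view._repo.status() == 'patched status'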
1196 def overriderollback(orig, ui, repo, **opts):
1196 def overriderollback(orig, ui, repo, **opts):
1197 wlock = repo.wlock()
1197 wlock = repo.wlock()
1198 try:
1198 try:
1199 before = repo.dirstate.parents()
1199 before = repo.dirstate.parents()
1200 orphans = set(f for f in repo.dirstate
1200 orphans = set(f for f in repo.dirstate
1201 if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
1201 if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
1202 result = orig(ui, repo, **opts)
1202 result = orig(ui, repo, **opts)
1203 after = repo.dirstate.parents()
1203 after = repo.dirstate.parents()
1204 if before == after:
1204 if before == after:
1205 return result # no need to restore standins
1205 return result # no need to restore standins
1206
1206
1207 pctx = repo['.']
1207 pctx = repo['.']
1208 for f in repo.dirstate:
1208 for f in repo.dirstate:
1209 if lfutil.isstandin(f):
1209 if lfutil.isstandin(f):
1210 orphans.discard(f)
1210 orphans.discard(f)
1211 if repo.dirstate[f] == 'r':
1211 if repo.dirstate[f] == 'r':
1212 repo.wvfs.unlinkpath(f, ignoremissing=True)
1212 repo.wvfs.unlinkpath(f, ignoremissing=True)
1213 elif f in pctx:
1213 elif f in pctx:
1214 fctx = pctx[f]
1214 fctx = pctx[f]
1215 repo.wwrite(f, fctx.data(), fctx.flags())
1215 repo.wwrite(f, fctx.data(), fctx.flags())
1216 else:
1216 else:
1217 # content of standin is not so important in 'a',
1217 # content of standin is not so important in 'a',
1218 # 'm' or 'n' (coming from the 2nd parent) cases
1218 # 'm' or 'n' (coming from the 2nd parent) cases
1219 lfutil.writestandin(repo, f, '', False)
1219 lfutil.writestandin(repo, f, '', False)
1220 for standin in orphans:
1220 for standin in orphans:
1221 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1221 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1222
1222
1223 lfdirstate = lfutil.openlfdirstate(ui, repo)
1223 lfdirstate = lfutil.openlfdirstate(ui, repo)
1224 orphans = set(lfdirstate)
1224 orphans = set(lfdirstate)
1225 lfiles = lfutil.listlfiles(repo)
1225 lfiles = lfutil.listlfiles(repo)
1226 for file in lfiles:
1226 for file in lfiles:
1227 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1227 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1228 orphans.discard(file)
1228 orphans.discard(file)
1229 for lfile in orphans:
1229 for lfile in orphans:
1230 lfdirstate.drop(lfile)
1230 lfdirstate.drop(lfile)
1231 lfdirstate.write()
1231 lfdirstate.write()
1232 finally:
1232 finally:
1233 wlock.release()
1233 wlock.release()
1234 return result
1234 return result
1235
1235
1236 def overridetransplant(orig, ui, repo, *revs, **opts):
1236 def overridetransplant(orig, ui, repo, *revs, **opts):
1237 resuming = opts.get('continue')
1237 resuming = opts.get('continue')
1238 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1238 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1239 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1239 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1240 try:
1240 try:
1241 result = orig(ui, repo, *revs, **opts)
1241 result = orig(ui, repo, *revs, **opts)
1242 finally:
1242 finally:
1243 repo._lfstatuswriters.pop()
1243 repo._lfstatuswriters.pop()
1244 repo._lfcommithooks.pop()
1244 repo._lfcommithooks.pop()
1245 return result
1245 return result
1246
1246
1247 def overridecat(orig, ui, repo, file1, *pats, **opts):
1247 def overridecat(orig, ui, repo, file1, *pats, **opts):
1248 ctx = scmutil.revsingle(repo, opts.get('rev'))
1248 ctx = scmutil.revsingle(repo, opts.get('rev'))
1249 err = 1
1249 err = 1
1250 notbad = set()
1250 notbad = set()
1251 m = scmutil.match(ctx, (file1,) + pats, opts)
1251 m = scmutil.match(ctx, (file1,) + pats, opts)
1252 origmatchfn = m.matchfn
1252 origmatchfn = m.matchfn
1253 def lfmatchfn(f):
1253 def lfmatchfn(f):
1254 if origmatchfn(f):
1254 if origmatchfn(f):
1255 return True
1255 return True
1256 lf = lfutil.splitstandin(f)
1256 lf = lfutil.splitstandin(f)
1257 if lf is None:
1257 if lf is None:
1258 return False
1258 return False
1259 notbad.add(lf)
1259 notbad.add(lf)
1260 return origmatchfn(lf)
1260 return origmatchfn(lf)
1261 m.matchfn = lfmatchfn
1261 m.matchfn = lfmatchfn
1262 origbadfn = m.bad
1262 origbadfn = m.bad
1263 def lfbadfn(f, msg):
1263 def lfbadfn(f, msg):
1264 if not f in notbad:
1264 if not f in notbad:
1265 origbadfn(f, msg)
1265 origbadfn(f, msg)
1266 m.bad = lfbadfn
1266 m.bad = lfbadfn
1267
1267
1268 origvisitdirfn = m.visitdir
1268 origvisitdirfn = m.visitdir
1269 def lfvisitdirfn(dir):
1269 def lfvisitdirfn(dir):
1270 if dir == lfutil.shortname:
1270 if dir == lfutil.shortname:
1271 return True
1271 return True
1272 ret = origvisitdirfn(dir)
1272 ret = origvisitdirfn(dir)
1273 if ret:
1273 if ret:
1274 return ret
1274 return ret
1275 lf = lfutil.splitstandin(dir)
1275 lf = lfutil.splitstandin(dir)
1276 if lf is None:
1276 if lf is None:
1277 return False
1277 return False
1278 return origvisitdirfn(lf)
1278 return origvisitdirfn(lf)
1279 m.visitdir = lfvisitdirfn
1279 m.visitdir = lfvisitdirfn
1280
1280
1281 for f in ctx.walk(m):
1281 for f in ctx.walk(m):
1282 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1282 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1283 pathname=f)
1283 pathname=f)
1284 lf = lfutil.splitstandin(f)
1284 lf = lfutil.splitstandin(f)
1285 if lf is None or origmatchfn(f):
1285 if lf is None or origmatchfn(f):
1286 # duplicating unreachable code from commands.cat
1286 # duplicating unreachable code from commands.cat
1287 data = ctx[f].data()
1287 data = ctx[f].data()
1288 if opts.get('decode'):
1288 if opts.get('decode'):
1289 data = repo.wwritedata(f, data)
1289 data = repo.wwritedata(f, data)
1290 fp.write(data)
1290 fp.write(data)
1291 else:
1291 else:
1292 hash = lfutil.readstandin(repo, lf, ctx.rev())
1292 hash = lfutil.readstandin(repo, lf, ctx.rev())
1293 if not lfutil.inusercache(repo.ui, hash):
1293 if not lfutil.inusercache(repo.ui, hash):
1294 store = basestore._openstore(repo)
1294 store = basestore._openstore(repo)
1295 success, missing = store.get([(lf, hash)])
1295 success, missing = store.get([(lf, hash)])
1296 if len(success) != 1:
1296 if len(success) != 1:
1297 raise util.Abort(
1297 raise util.Abort(
1298 _('largefile %s is not in cache and could not be '
1298 _('largefile %s is not in cache and could not be '
1299 'downloaded') % lf)
1299 'downloaded') % lf)
1300 path = lfutil.usercachepath(repo.ui, hash)
1300 path = lfutil.usercachepath(repo.ui, hash)
1301 fpin = open(path, "rb")
1301 fpin = open(path, "rb")
1302 for chunk in util.filechunkiter(fpin, 128 * 1024):
1302 for chunk in util.filechunkiter(fpin, 128 * 1024):
1303 fp.write(chunk)
1303 fp.write(chunk)
1304 fpin.close()
1304 fpin.close()
1305 fp.close()
1305 fp.close()
1306 err = 0
1306 err = 0
1307 return err
1307 return err
1308
1308
1309 def mergeupdate(orig, repo, node, branchmerge, force, partial,
1309 def mergeupdate(orig, repo, node, branchmerge, force, partial,
1310 *args, **kwargs):
1310 *args, **kwargs):
1311 wlock = repo.wlock()
1311 wlock = repo.wlock()
1312 try:
1312 try:
1313 # branch | | |
1313 # branch | | |
1314 # merge | force | partial | action
1314 # merge | force | partial | action
1315 # -------+-------+---------+--------------
1315 # -------+-------+---------+--------------
1316 # x | x | x | linear-merge
1316 # x | x | x | linear-merge
1317 # o | x | x | branch-merge
1317 # o | x | x | branch-merge
1318 # x | o | x | overwrite (as clean update)
1318 # x | o | x | overwrite (as clean update)
1319 # o | o | x | force-branch-merge (*1)
1319 # o | o | x | force-branch-merge (*1)
1320 # x | x | o | (*)
1320 # x | x | o | (*)
1321 # o | x | o | (*)
1321 # o | x | o | (*)
1322 # x | o | o | overwrite (as revert)
1322 # x | o | o | overwrite (as revert)
1323 # o | o | o | (*)
1323 # o | o | o | (*)
1324 #
1324 #
1325 # (*) don't care
1325 # (*) don't care
1326 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1326 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1327
1327
1328 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1328 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1329 unsure, s = lfdirstate.status(match_.always(repo.root,
1329 unsure, s = lfdirstate.status(match_.always(repo.root,
1330 repo.getcwd()),
1330 repo.getcwd()),
1331 [], False, False, False)
1331 [], False, False, False)
1332 pctx = repo['.']
1332 pctx = repo['.']
1333 for lfile in unsure + s.modified:
1333 for lfile in unsure + s.modified:
1334 lfileabs = repo.wvfs.join(lfile)
1334 lfileabs = repo.wvfs.join(lfile)
1335 if not os.path.exists(lfileabs):
1335 if not os.path.exists(lfileabs):
1336 continue
1336 continue
1337 lfhash = lfutil.hashrepofile(repo, lfile)
1337 lfhash = lfutil.hashrepofile(repo, lfile)
1338 standin = lfutil.standin(lfile)
1338 standin = lfutil.standin(lfile)
1339 lfutil.writestandin(repo, standin, lfhash,
1339 lfutil.writestandin(repo, standin, lfhash,
1340 lfutil.getexecutable(lfileabs))
1340 lfutil.getexecutable(lfileabs))
1341 if (standin in pctx and
1341 if (standin in pctx and
1342 lfhash == lfutil.readstandin(repo, lfile, '.')):
1342 lfhash == lfutil.readstandin(repo, lfile, '.')):
1343 lfdirstate.normal(lfile)
1343 lfdirstate.normal(lfile)
1344 for lfile in s.added:
1344 for lfile in s.added:
1345 lfutil.updatestandin(repo, lfutil.standin(lfile))
1345 lfutil.updatestandin(repo, lfutil.standin(lfile))
1346 lfdirstate.write()
1346 lfdirstate.write()
1347
1347
1348 oldstandins = lfutil.getstandinsstate(repo)
1348 oldstandins = lfutil.getstandinsstate(repo)
1349
1349
1350 result = orig(repo, node, branchmerge, force, partial, *args, **kwargs)
1350 result = orig(repo, node, branchmerge, force, partial, *args, **kwargs)
1351
1351
1352 newstandins = lfutil.getstandinsstate(repo)
1352 newstandins = lfutil.getstandinsstate(repo)
1353 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1353 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1354 if branchmerge or force or partial:
1354 if branchmerge or force or partial:
1355 filelist.extend(s.deleted + s.removed)
1355 filelist.extend(s.deleted + s.removed)
1356
1356
1357 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1357 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1358 normallookup=partial)
1358 normallookup=partial)
1359
1359
1360 return result
1360 return result
1361 finally:
1361 finally:
1362 wlock.release()
1362 wlock.release()
1363
1363
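mergeupdate() decides which largefiles need refreshing by snapshotting the standin state before and after the underlying update and diffing the two snapshots. The helper below only illustrates that diffing idea; it is not lfutil.getlfilestoupdate() itself.

    def difflfiles(old, new):
        '''Return files whose standin appeared, disappeared or changed hash.'''
        oldmap, newmap = dict(old), dict(new)
        return sorted(f for f in set(oldmap) | set(newmap)
                      if oldmap.get(f) != newmap.get(f))

    old = [('.hglf/a.bin', 'h1'), ('.hglf/b.bin', 'h2')]
    new = [('.hglf/a.bin', 'h1'), ('.hglf/b.bin', 'h3'), ('.hglf/c.bin', 'h4')]
    assert difflfiles(old, new) == ['.hglf/b.bin', '.hglf/c.bin']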
1364 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1364 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1365 result = orig(repo, files, *args, **kwargs)
1365 result = orig(repo, files, *args, **kwargs)
1366
1366
1367 filelist = [lfutil.splitstandin(f) for f in files if lfutil.isstandin(f)]
1367 filelist = [lfutil.splitstandin(f) for f in files if lfutil.isstandin(f)]
1368 if filelist:
1368 if filelist:
1369 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1369 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1370 printmessage=False, normallookup=True)
1370 printmessage=False, normallookup=True)
1371
1371
1372 return result
1372 return result
@@ -1,542 +1,542
1 # match.py - filename matching
1 # match.py - filename matching
2 #
2 #
3 # Copyright 2008, 2009 Matt Mackall <mpm@selenic.com> and others
3 # Copyright 2008, 2009 Matt Mackall <mpm@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import re
8 import re
9 import util, pathutil
9 import util, pathutil
10 from i18n import _
10 from i18n import _
11
11
12 propertycache = util.propertycache
12 propertycache = util.propertycache
13
13
14 def _rematcher(regex):
14 def _rematcher(regex):
15 '''compile the regexp with the best available regexp engine and return a
15 '''compile the regexp with the best available regexp engine and return a
16 matcher function'''
16 matcher function'''
17 m = util.re.compile(regex)
17 m = util.re.compile(regex)
18 try:
18 try:
19 # slightly faster, provided by facebook's re2 bindings
19 # slightly faster, provided by facebook's re2 bindings
20 return m.test_match
20 return m.test_match
21 except AttributeError:
21 except AttributeError:
22 return m.match
22 return m.match
23
23
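A rough standalone equivalent of what _rematcher() hands back, using the stdlib re module directly; the real helper goes through util.re so it can pick up the faster re2 bindings when they are available.

    import re

    matchfn = re.compile(r'foo/.*\.py$').match
    assert matchfn('foo/bar.py')
    assert not matchfn('baz/bar.py')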
24 def _expandsets(kindpats, ctx, listsubrepos):
24 def _expandsets(kindpats, ctx, listsubrepos):
25 '''Returns the kindpats list with the 'set' patterns expanded.'''
25 '''Returns the kindpats list with the 'set' patterns expanded.'''
26 fset = set()
26 fset = set()
27 other = []
27 other = []
28
28
29 for kind, pat in kindpats:
29 for kind, pat in kindpats:
30 if kind == 'set':
30 if kind == 'set':
31 if not ctx:
31 if not ctx:
32 raise util.Abort("fileset expression with no context")
32 raise util.Abort("fileset expression with no context")
33 s = ctx.getfileset(pat)
33 s = ctx.getfileset(pat)
34 fset.update(s)
34 fset.update(s)
35
35
36 if listsubrepos:
36 if listsubrepos:
37 for subpath in ctx.substate:
37 for subpath in ctx.substate:
38 s = ctx.sub(subpath).getfileset(pat)
38 s = ctx.sub(subpath).getfileset(pat)
39 fset.update(subpath + '/' + f for f in s)
39 fset.update(subpath + '/' + f for f in s)
40
40
41 continue
41 continue
42 other.append((kind, pat))
42 other.append((kind, pat))
43 return fset, other
43 return fset, other
44
44
45 def _kindpatsalwaysmatch(kindpats):
45 def _kindpatsalwaysmatch(kindpats):
46 """"Checks whether the kindspats match everything, as e.g.
46 """"Checks whether the kindspats match everything, as e.g.
47 'relpath:.' does.
47 'relpath:.' does.
48 """
48 """
49 for kind, pat in kindpats:
49 for kind, pat in kindpats:
50 if pat != '' or kind not in ['relpath', 'glob']:
50 if pat != '' or kind not in ['relpath', 'glob']:
51 return False
51 return False
52 return True
52 return True
53
53
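For example, run against the helper just defined, only empty 'relpath' or 'glob' patterns count as matching everything:

    assert _kindpatsalwaysmatch([('relpath', ''), ('glob', '')])
    assert not _kindpatsalwaysmatch([('relpath', ''), ('glob', '*.c')])
    assert not _kindpatsalwaysmatch([('re', '')])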
54 class match(object):
54 class match(object):
55 def __init__(self, root, cwd, patterns, include=[], exclude=[],
55 def __init__(self, root, cwd, patterns, include=[], exclude=[],
56 default='glob', exact=False, auditor=None, ctx=None,
56 default='glob', exact=False, auditor=None, ctx=None,
57 listsubrepos=False):
57 listsubrepos=False):
58 """build an object to match a set of file patterns
58 """build an object to match a set of file patterns
59
59
60 arguments:
60 arguments:
61 root - the canonical root of the tree you're matching against
61 root - the canonical root of the tree you're matching against
62 cwd - the current working directory, if relevant
62 cwd - the current working directory, if relevant
63 patterns - patterns to find
63 patterns - patterns to find
64 include - patterns to include (unless they are excluded)
64 include - patterns to include (unless they are excluded)
65 exclude - patterns to exclude (even if they are included)
65 exclude - patterns to exclude (even if they are included)
66 default - if a pattern in patterns has no explicit type, assume this one
66 default - if a pattern in patterns has no explicit type, assume this one
67 exact - patterns are actually filenames (include/exclude still apply)
67 exact - patterns are actually filenames (include/exclude still apply)
68
68
69 a pattern is one of:
69 a pattern is one of:
70 'glob:<glob>' - a glob relative to cwd
70 'glob:<glob>' - a glob relative to cwd
71 're:<regexp>' - a regular expression
71 're:<regexp>' - a regular expression
72 'path:<path>' - a path relative to repository root
72 'path:<path>' - a path relative to repository root
73 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
73 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
74 'relpath:<path>' - a path relative to cwd
74 'relpath:<path>' - a path relative to cwd
75 'relre:<regexp>' - a regexp that needn't match the start of a name
75 'relre:<regexp>' - a regexp that needn't match the start of a name
76 'set:<fileset>' - a fileset expression
76 'set:<fileset>' - a fileset expression
77 '<something>' - a pattern of the specified default type
77 '<something>' - a pattern of the specified default type
78 """
78 """
79
79
80 self._root = root
80 self._root = root
81 self._cwd = cwd
81 self._cwd = cwd
82 self._files = [] # exact files and roots of patterns
82 self._files = [] # exact files and roots of patterns
83 self._anypats = bool(include or exclude)
83 self._anypats = bool(include or exclude)
84 self._always = False
84 self._always = False
85 self._pathrestricted = bool(include or exclude or patterns)
85 self._pathrestricted = bool(include or exclude or patterns)
86
86
87 matchfns = []
87 matchfns = []
88 if include:
88 if include:
89 kindpats = self._normalize(include, 'glob', root, cwd, auditor)
89 kindpats = self._normalize(include, 'glob', root, cwd, auditor)
90 self.includepat, im = _buildmatch(ctx, kindpats, '(?:/|$)',
90 self.includepat, im = _buildmatch(ctx, kindpats, '(?:/|$)',
91 listsubrepos)
91 listsubrepos)
92 matchfns.append(im)
92 matchfns.append(im)
93 if exclude:
93 if exclude:
94 kindpats = self._normalize(exclude, 'glob', root, cwd, auditor)
94 kindpats = self._normalize(exclude, 'glob', root, cwd, auditor)
95 self.excludepat, em = _buildmatch(ctx, kindpats, '(?:/|$)',
95 self.excludepat, em = _buildmatch(ctx, kindpats, '(?:/|$)',
96 listsubrepos)
96 listsubrepos)
97 matchfns.append(lambda f: not em(f))
97 matchfns.append(lambda f: not em(f))
98 if exact:
98 if exact:
99 if isinstance(patterns, list):
99 if isinstance(patterns, list):
100 self._files = patterns
100 self._files = patterns
101 else:
101 else:
102 self._files = list(patterns)
102 self._files = list(patterns)
103 matchfns.append(self.exact)
103 matchfns.append(self.exact)
104 elif patterns:
104 elif patterns:
105 kindpats = self._normalize(patterns, default, root, cwd, auditor)
105 kindpats = self._normalize(patterns, default, root, cwd, auditor)
106 if not _kindpatsalwaysmatch(kindpats):
106 if not _kindpatsalwaysmatch(kindpats):
107 self._files = _roots(kindpats)
107 self._files = _roots(kindpats)
108 self._anypats = self._anypats or _anypats(kindpats)
108 self._anypats = self._anypats or _anypats(kindpats)
109 self.patternspat, pm = _buildmatch(ctx, kindpats, '$',
109 self.patternspat, pm = _buildmatch(ctx, kindpats, '$',
110 listsubrepos)
110 listsubrepos)
111 matchfns.append(pm)
111 matchfns.append(pm)
112
112
113 if not matchfns:
113 if not matchfns:
114 m = util.always
114 m = util.always
115 self._always = True
115 self._always = True
116 elif len(matchfns) == 1:
116 elif len(matchfns) == 1:
117 m = matchfns[0]
117 m = matchfns[0]
118 else:
118 else:
119 def m(f):
119 def m(f):
120 for matchfn in matchfns:
120 for matchfn in matchfns:
121 if not matchfn(f):
121 if not matchfn(f):
122 return False
122 return False
123 return True
123 return True
124
124
125 self.matchfn = m
125 self.matchfn = m
126 self._fmap = set(self._files)
126 self._fileroots = set(self._files)
127
127
128 def __call__(self, fn):
128 def __call__(self, fn):
129 return self.matchfn(fn)
129 return self.matchfn(fn)
130 def __iter__(self):
130 def __iter__(self):
131 for f in self._files:
131 for f in self._files:
132 yield f
132 yield f
133
133
134 # Callbacks related to how the matcher is used by dirstate.walk.
134 # Callbacks related to how the matcher is used by dirstate.walk.
135 # Subscribers to these events must monkeypatch the matcher object.
135 # Subscribers to these events must monkeypatch the matcher object.
136 def bad(self, f, msg):
136 def bad(self, f, msg):
137 '''Callback from dirstate.walk for each explicit file that can't be
137 '''Callback from dirstate.walk for each explicit file that can't be
138 found/accessed, with an error message.'''
138 found/accessed, with an error message.'''
139 pass
139 pass
140
140
141 # If an explicitdir is set, it will be called when an explicitly listed
141 # If an explicitdir is set, it will be called when an explicitly listed
142 # directory is visited.
142 # directory is visited.
143 explicitdir = None
143 explicitdir = None
144
144
145 # If a traversedir is set, it will be called when a directory discovered
145 # If a traversedir is set, it will be called when a directory discovered
146 # by recursive traversal is visited.
146 # by recursive traversal is visited.
147 traversedir = None
147 traversedir = None
148
148
149 def abs(self, f):
149 def abs(self, f):
150 '''Convert a repo path back to path that is relative to the root of the
150 '''Convert a repo path back to path that is relative to the root of the
151 matcher.'''
151 matcher.'''
152 return f
152 return f
153
153
154 def rel(self, f):
154 def rel(self, f):
155 '''Convert repo path back to path that is relative to cwd of matcher.'''
155 '''Convert repo path back to path that is relative to cwd of matcher.'''
156 return util.pathto(self._root, self._cwd, f)
156 return util.pathto(self._root, self._cwd, f)
157
157
158 def uipath(self, f):
158 def uipath(self, f):
159 '''Convert repo path to a display path. If patterns or -I/-X were used
159 '''Convert repo path to a display path. If patterns or -I/-X were used
160 to create this matcher, the display path will be relative to cwd.
160 to create this matcher, the display path will be relative to cwd.
161 Otherwise it is relative to the root of the repo.'''
161 Otherwise it is relative to the root of the repo.'''
162 return (self._pathrestricted and self.rel(f)) or self.abs(f)
162 return (self._pathrestricted and self.rel(f)) or self.abs(f)
163
163
164 def files(self):
164 def files(self):
165 '''Explicitly listed files or patterns or roots:
165 '''Explicitly listed files or patterns or roots:
166 if no patterns or .always(): empty list,
166 if no patterns or .always(): empty list,
167 if exact: list exact files,
167 if exact: list exact files,
168 if not .anypats(): list all files and dirs,
168 if not .anypats(): list all files and dirs,
169 else: optimal roots'''
169 else: optimal roots'''
170 return self._files
170 return self._files
171
171
172 @propertycache
172 @propertycache
173 def _dirs(self):
173 def _dirs(self):
174 return set(util.dirs(self._fmap)) | set(['.'])
174 return set(util.dirs(self._fileroots)) | set(['.'])
175
175
176 def visitdir(self, dir):
176 def visitdir(self, dir):
177 return (not self._fmap or '.' in self._fmap or
177 return (not self._fileroots or '.' in self._fileroots or
178 dir in self._fmap or dir in self._dirs or
178 dir in self._fileroots or dir in self._dirs or
179 any(parentdir in self._fmap
179 any(parentdir in self._fileroots
180 for parentdir in util.finddirs(dir)))
180 for parentdir in util.finddirs(dir)))
181
181
182 def exact(self, f):
182 def exact(self, f):
183 '''Returns True if f is in .files().'''
183 '''Returns True if f is in .files().'''
184 return f in self._fmap
184 return f in self._fileroots
185
185
186 def anypats(self):
186 def anypats(self):
187 '''Matcher uses patterns or include/exclude.'''
187 '''Matcher uses patterns or include/exclude.'''
188 return self._anypats
188 return self._anypats
189
189
190 def always(self):
190 def always(self):
191 '''Matcher will match everything and .files() will be empty
191 '''Matcher will match everything and .files() will be empty
192 - optimization might be possible and necessary.'''
192 - optimization might be possible and necessary.'''
193 return self._always
193 return self._always
194
194
195 def ispartial(self):
195 def ispartial(self):
196 '''True if the matcher won't always match.
196 '''True if the matcher won't always match.
197
197
198 Although it's just the inverse of _always in this implementation,
198 Although it's just the inverse of _always in this implementation,
199 an extension such as narrowhg might make it return something
199 an extension such as narrowhg might make it return something
200 slightly different.'''
200 slightly different.'''
201 return not self._always
201 return not self._always
202
202
203 def isexact(self):
203 def isexact(self):
204 return self.matchfn == self.exact
204 return self.matchfn == self.exact
205
205
206 def _normalize(self, patterns, default, root, cwd, auditor):
206 def _normalize(self, patterns, default, root, cwd, auditor):
207 '''Convert 'kind:pat' from the patterns list to tuples with kind and
207 '''Convert 'kind:pat' from the patterns list to tuples with kind and
208 normalized and rooted patterns and with listfiles expanded.'''
208 normalized and rooted patterns and with listfiles expanded.'''
209 kindpats = []
209 kindpats = []
210 for kind, pat in [_patsplit(p, default) for p in patterns]:
210 for kind, pat in [_patsplit(p, default) for p in patterns]:
211 if kind in ('glob', 'relpath'):
211 if kind in ('glob', 'relpath'):
212 pat = pathutil.canonpath(root, cwd, pat, auditor)
212 pat = pathutil.canonpath(root, cwd, pat, auditor)
213 elif kind in ('relglob', 'path'):
213 elif kind in ('relglob', 'path'):
214 pat = util.normpath(pat)
214 pat = util.normpath(pat)
215 elif kind in ('listfile', 'listfile0'):
215 elif kind in ('listfile', 'listfile0'):
216 try:
216 try:
217 files = util.readfile(pat)
217 files = util.readfile(pat)
218 if kind == 'listfile0':
218 if kind == 'listfile0':
219 files = files.split('\0')
219 files = files.split('\0')
220 else:
220 else:
221 files = files.splitlines()
221 files = files.splitlines()
222 files = [f for f in files if f]
222 files = [f for f in files if f]
223 except EnvironmentError:
223 except EnvironmentError:
224 raise util.Abort(_("unable to read file list (%s)") % pat)
224 raise util.Abort(_("unable to read file list (%s)") % pat)
225 kindpats += self._normalize(files, default, root, cwd, auditor)
225 kindpats += self._normalize(files, default, root, cwd, auditor)
226 continue
226 continue
227 # else: re or relre - which cannot be normalized
227 # else: re or relre - which cannot be normalized
228 kindpats.append((kind, pat))
228 kindpats.append((kind, pat))
229 return kindpats
229 return kindpats
230
230
231 def exact(root, cwd, files):
231 def exact(root, cwd, files):
232 return match(root, cwd, files, exact=True)
232 return match(root, cwd, files, exact=True)
233
233
234 def always(root, cwd):
234 def always(root, cwd):
235 return match(root, cwd, [])
235 return match(root, cwd, [])
236
236
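A hedged usage sketch of the two module-level constructors above, assuming Mercurial is importable so that this module is reachable as mercurial.match; the repo root path is made up.

    from mercurial import match as matchmod

    m = matchmod.exact('/repo', '/repo', ['README', 'src/a.c'])
    assert m('src/a.c') and not m('src/b.c')
    assert m.exact('README') and m.files() == ['README', 'src/a.c']

    alwaysm = matchmod.always('/repo', '/repo')
    assert alwaysm('anything/at/all') and alwaysm.always()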
237 class narrowmatcher(match):
237 class narrowmatcher(match):
238 """Adapt a matcher to work on a subdirectory only.
238 """Adapt a matcher to work on a subdirectory only.
239
239
240 The paths are remapped to remove/insert the path as needed:
240 The paths are remapped to remove/insert the path as needed:
241
241
242 >>> m1 = match('root', '', ['a.txt', 'sub/b.txt'])
242 >>> m1 = match('root', '', ['a.txt', 'sub/b.txt'])
243 >>> m2 = narrowmatcher('sub', m1)
243 >>> m2 = narrowmatcher('sub', m1)
244 >>> bool(m2('a.txt'))
244 >>> bool(m2('a.txt'))
245 False
245 False
246 >>> bool(m2('b.txt'))
246 >>> bool(m2('b.txt'))
247 True
247 True
248 >>> bool(m2.matchfn('a.txt'))
248 >>> bool(m2.matchfn('a.txt'))
249 False
249 False
250 >>> bool(m2.matchfn('b.txt'))
250 >>> bool(m2.matchfn('b.txt'))
251 True
251 True
252 >>> m2.files()
252 >>> m2.files()
253 ['b.txt']
253 ['b.txt']
254 >>> m2.exact('b.txt')
254 >>> m2.exact('b.txt')
255 True
255 True
256 >>> util.pconvert(m2.rel('b.txt'))
256 >>> util.pconvert(m2.rel('b.txt'))
257 'sub/b.txt'
257 'sub/b.txt'
258 >>> def bad(f, msg):
258 >>> def bad(f, msg):
259 ... print "%s: %s" % (f, msg)
259 ... print "%s: %s" % (f, msg)
260 >>> m1.bad = bad
260 >>> m1.bad = bad
261 >>> m2.bad('x.txt', 'No such file')
261 >>> m2.bad('x.txt', 'No such file')
262 sub/x.txt: No such file
262 sub/x.txt: No such file
263 >>> m2.abs('c.txt')
263 >>> m2.abs('c.txt')
264 'sub/c.txt'
264 'sub/c.txt'
265 """
265 """
266
266
267 def __init__(self, path, matcher):
267 def __init__(self, path, matcher):
268 self._root = matcher._root
268 self._root = matcher._root
269 self._cwd = matcher._cwd
269 self._cwd = matcher._cwd
270 self._path = path
270 self._path = path
271 self._matcher = matcher
271 self._matcher = matcher
272 self._always = matcher._always
272 self._always = matcher._always
273 self._pathrestricted = matcher._pathrestricted
273 self._pathrestricted = matcher._pathrestricted
274
274
275 self._files = [f[len(path) + 1:] for f in matcher._files
275 self._files = [f[len(path) + 1:] for f in matcher._files
276 if f.startswith(path + "/")]
276 if f.startswith(path + "/")]
277 self._anypats = matcher._anypats
277 self._anypats = matcher._anypats
278 self.matchfn = lambda fn: matcher.matchfn(self._path + "/" + fn)
278 self.matchfn = lambda fn: matcher.matchfn(self._path + "/" + fn)
279 self._fmap = set(self._files)
279 self._fileroots = set(self._files)
280
280
281 def abs(self, f):
281 def abs(self, f):
282 return self._matcher.abs(self._path + "/" + f)
282 return self._matcher.abs(self._path + "/" + f)
283
283
284 def bad(self, f, msg):
284 def bad(self, f, msg):
285 self._matcher.bad(self._path + "/" + f, msg)
285 self._matcher.bad(self._path + "/" + f, msg)
286
286
287 def rel(self, f):
287 def rel(self, f):
288 return self._matcher.rel(self._path + "/" + f)
288 return self._matcher.rel(self._path + "/" + f)
289
289
290 class icasefsmatcher(match):
290 class icasefsmatcher(match):
291 """A matcher for wdir on case insensitive filesystems, which normalizes the
291 """A matcher for wdir on case insensitive filesystems, which normalizes the
292 given patterns to the case in the filesystem.
292 given patterns to the case in the filesystem.
293 """
293 """
294
294
295 def __init__(self, root, cwd, patterns, include, exclude, default, auditor,
295 def __init__(self, root, cwd, patterns, include, exclude, default, auditor,
296 ctx, listsubrepos=False):
296 ctx, listsubrepos=False):
297 init = super(icasefsmatcher, self).__init__
297 init = super(icasefsmatcher, self).__init__
298 self._dsnormalize = ctx.repo().dirstate.normalize
298 self._dsnormalize = ctx.repo().dirstate.normalize
299
299
300 init(root, cwd, patterns, include, exclude, default, auditor=auditor,
300 init(root, cwd, patterns, include, exclude, default, auditor=auditor,
301 ctx=ctx, listsubrepos=listsubrepos)
301 ctx=ctx, listsubrepos=listsubrepos)
302
302
303 # m.exact(file) must be based on the actual user input, otherwise
303 # m.exact(file) must be based on the actual user input, otherwise
304 # inexact case matches are treated as exact, and not noted without -v.
304 # inexact case matches are treated as exact, and not noted without -v.
305 if self._files:
305 if self._files:
306 self._fmap = set(_roots(self._kp))
306 self._fileroots = set(_roots(self._kp))
307
307
308 def _normalize(self, patterns, default, root, cwd, auditor):
308 def _normalize(self, patterns, default, root, cwd, auditor):
309 self._kp = super(icasefsmatcher, self)._normalize(patterns, default,
309 self._kp = super(icasefsmatcher, self)._normalize(patterns, default,
310 root, cwd, auditor)
310 root, cwd, auditor)
311 kindpats = []
311 kindpats = []
312 for kind, pats in self._kp:
312 for kind, pats in self._kp:
313 if kind not in ('re', 'relre'): # regex can't be normalized
313 if kind not in ('re', 'relre'): # regex can't be normalized
314 pats = self._dsnormalize(pats)
314 pats = self._dsnormalize(pats)
315 kindpats.append((kind, pats))
315 kindpats.append((kind, pats))
316 return kindpats
316 return kindpats
317
317
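The normalization loop in _normalize can be pictured with a plain dictionary standing in for dirstate.normalize; the ondisk mapping below is an assumption for illustration only, not how the dirstate actually stores names:

# Stand-in for dirstate.normalize: map a case-insensitive lookup to the
# case actually recorded on disk.
ondisk = {'readme.txt': 'README.txt'}
normalize = lambda p: ondisk.get(p.lower(), p)

kindpats = [('glob', 'readme.TXT'), ('re', 'READ.*')]
normalized = []
for kind, pat in kindpats:
    if kind not in ('re', 'relre'):   # regexes cannot be case-normalized
        pat = normalize(pat)
    normalized.append((kind, pat))
print normalized   # [('glob', 'README.txt'), ('re', 'READ.*')]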
318 def patkind(pattern, default=None):
318 def patkind(pattern, default=None):
319 '''If pattern is 'kind:pat' with a known kind, return kind.'''
319 '''If pattern is 'kind:pat' with a known kind, return kind.'''
320 return _patsplit(pattern, default)[0]
320 return _patsplit(pattern, default)[0]
321
321
322 def _patsplit(pattern, default):
322 def _patsplit(pattern, default):
323 """Split a string into the optional pattern kind prefix and the actual
323 """Split a string into the optional pattern kind prefix and the actual
324 pattern."""
324 pattern."""
325 if ':' in pattern:
325 if ':' in pattern:
326 kind, pat = pattern.split(':', 1)
326 kind, pat = pattern.split(':', 1)
327 if kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre',
327 if kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre',
328 'listfile', 'listfile0', 'set'):
328 'listfile', 'listfile0', 'set'):
329 return kind, pat
329 return kind, pat
330 return default, pattern
330 return default, pattern
331
331
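A quick sketch of the two helpers above, assuming mercurial.match can be imported as matchmod in a Python 2 environment:

from mercurial import match as matchmod

print matchmod._patsplit(r're:\.orig$', 'glob')   # ('re', '\\.orig$')
print matchmod._patsplit('foo.c', 'glob')         # ('glob', 'foo.c')
print matchmod.patkind('relpath:docs')            # relpath
print matchmod.patkind('nosuchkind:x')            # None (unknown kind -> default)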
332 def _globre(pat):
332 def _globre(pat):
333 r'''Convert an extended glob string to a regexp string.
333 r'''Convert an extended glob string to a regexp string.
334
334
335 >>> print _globre(r'?')
335 >>> print _globre(r'?')
336 .
336 .
337 >>> print _globre(r'*')
337 >>> print _globre(r'*')
338 [^/]*
338 [^/]*
339 >>> print _globre(r'**')
339 >>> print _globre(r'**')
340 .*
340 .*
341 >>> print _globre(r'**/a')
341 >>> print _globre(r'**/a')
342 (?:.*/)?a
342 (?:.*/)?a
343 >>> print _globre(r'a/**/b')
343 >>> print _globre(r'a/**/b')
344 a\/(?:.*/)?b
344 a\/(?:.*/)?b
345 >>> print _globre(r'[a*?!^][^b][!c]')
345 >>> print _globre(r'[a*?!^][^b][!c]')
346 [a*?!^][\^b][^c]
346 [a*?!^][\^b][^c]
347 >>> print _globre(r'{a,b}')
347 >>> print _globre(r'{a,b}')
348 (?:a|b)
348 (?:a|b)
349 >>> print _globre(r'.\*\?')
349 >>> print _globre(r'.\*\?')
350 \.\*\?
350 \.\*\?
351 '''
351 '''
352 i, n = 0, len(pat)
352 i, n = 0, len(pat)
353 res = ''
353 res = ''
354 group = 0
354 group = 0
355 escape = util.re.escape
355 escape = util.re.escape
356 def peek():
356 def peek():
357 return i < n and pat[i]
357 return i < n and pat[i]
358 while i < n:
358 while i < n:
359 c = pat[i]
359 c = pat[i]
360 i += 1
360 i += 1
361 if c not in '*?[{},\\':
361 if c not in '*?[{},\\':
362 res += escape(c)
362 res += escape(c)
363 elif c == '*':
363 elif c == '*':
364 if peek() == '*':
364 if peek() == '*':
365 i += 1
365 i += 1
366 if peek() == '/':
366 if peek() == '/':
367 i += 1
367 i += 1
368 res += '(?:.*/)?'
368 res += '(?:.*/)?'
369 else:
369 else:
370 res += '.*'
370 res += '.*'
371 else:
371 else:
372 res += '[^/]*'
372 res += '[^/]*'
373 elif c == '?':
373 elif c == '?':
374 res += '.'
374 res += '.'
375 elif c == '[':
375 elif c == '[':
376 j = i
376 j = i
377 if j < n and pat[j] in '!]':
377 if j < n and pat[j] in '!]':
378 j += 1
378 j += 1
379 while j < n and pat[j] != ']':
379 while j < n and pat[j] != ']':
380 j += 1
380 j += 1
381 if j >= n:
381 if j >= n:
382 res += '\\['
382 res += '\\['
383 else:
383 else:
384 stuff = pat[i:j].replace('\\','\\\\')
384 stuff = pat[i:j].replace('\\','\\\\')
385 i = j + 1
385 i = j + 1
386 if stuff[0] == '!':
386 if stuff[0] == '!':
387 stuff = '^' + stuff[1:]
387 stuff = '^' + stuff[1:]
388 elif stuff[0] == '^':
388 elif stuff[0] == '^':
389 stuff = '\\' + stuff
389 stuff = '\\' + stuff
390 res = '%s[%s]' % (res, stuff)
390 res = '%s[%s]' % (res, stuff)
391 elif c == '{':
391 elif c == '{':
392 group += 1
392 group += 1
393 res += '(?:'
393 res += '(?:'
394 elif c == '}' and group:
394 elif c == '}' and group:
395 res += ')'
395 res += ')'
396 group -= 1
396 group -= 1
397 elif c == ',' and group:
397 elif c == ',' and group:
398 res += '|'
398 res += '|'
399 elif c == '\\':
399 elif c == '\\':
400 p = peek()
400 p = peek()
401 if p:
401 if p:
402 i += 1
402 i += 1
403 res += escape(p)
403 res += escape(p)
404 else:
404 else:
405 res += escape(c)
405 res += escape(c)
406 else:
406 else:
407 res += escape(c)
407 res += escape(c)
408 return res
408 return res
409
409
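Beyond the doctests, the translated pattern is only useful once compiled; a small sketch, again assuming mercurial.match imports as matchmod:

import re
from mercurial import match as matchmod

pat = matchmod._globre('**/*.py') + '$'      # '$' plays the role of globsuffix
print pat                                    # (?:.*/)?[^/]*\.py$
print bool(re.match(pat, 'a/b/c.py'))        # True
print bool(re.match(pat, 'a/b/c.pyc'))       # False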
410 def _regex(kind, pat, globsuffix):
410 def _regex(kind, pat, globsuffix):
411 '''Convert a (normalized) pattern of any kind into a regular expression.
411 '''Convert a (normalized) pattern of any kind into a regular expression.
412 globsuffix is appended to the regexp of globs.'''
412 globsuffix is appended to the regexp of globs.'''
413 if not pat:
413 if not pat:
414 return ''
414 return ''
415 if kind == 're':
415 if kind == 're':
416 return pat
416 return pat
417 if kind == 'path':
417 if kind == 'path':
418 return '^' + util.re.escape(pat) + '(?:/|$)'
418 return '^' + util.re.escape(pat) + '(?:/|$)'
419 if kind == 'relglob':
419 if kind == 'relglob':
420 return '(?:|.*/)' + _globre(pat) + globsuffix
420 return '(?:|.*/)' + _globre(pat) + globsuffix
421 if kind == 'relpath':
421 if kind == 'relpath':
422 return util.re.escape(pat) + '(?:/|$)'
422 return util.re.escape(pat) + '(?:/|$)'
423 if kind == 'relre':
423 if kind == 'relre':
424 if pat.startswith('^'):
424 if pat.startswith('^'):
425 return pat
425 return pat
426 return '.*' + pat
426 return '.*' + pat
427 return _globre(pat) + globsuffix
427 return _globre(pat) + globsuffix
428
428
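The effect of the kind prefix on the generated regexp, sketched with matchmod assumed to be mercurial.match and '$' standing in for globsuffix:

from mercurial import match as matchmod

print matchmod._regex('path', 'src/lib', '$')    # ^src\/lib(?:/|$)
print matchmod._regex('relglob', '*.o', '$')     # (?:|.*/)[^/]*\.o$
print matchmod._regex('relre', 'TODO', '$')      # .*TODO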
429 def _buildmatch(ctx, kindpats, globsuffix, listsubrepos):
429 def _buildmatch(ctx, kindpats, globsuffix, listsubrepos):
430 '''Return regexp string and a matcher function for kindpats.
430 '''Return regexp string and a matcher function for kindpats.
431 globsuffix is appended to the regexp of globs.'''
431 globsuffix is appended to the regexp of globs.'''
432 fset, kindpats = _expandsets(kindpats, ctx, listsubrepos)
432 fset, kindpats = _expandsets(kindpats, ctx, listsubrepos)
433 if not kindpats:
433 if not kindpats:
434 return "", fset.__contains__
434 return "", fset.__contains__
435
435
436 regex, mf = _buildregexmatch(kindpats, globsuffix)
436 regex, mf = _buildregexmatch(kindpats, globsuffix)
437 if fset:
437 if fset:
438 return regex, lambda f: f in fset or mf(f)
438 return regex, lambda f: f in fset or mf(f)
439 return regex, mf
439 return regex, mf
440
440
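When a fileset is present, the final match function is just the union of set membership and the regexp matcher; a stand-in sketch with invented sample data:

import re

fset = set(['hgext/extdiff.py'])              # pretend fileset expansion
mf = re.compile(r'[^/]*\.txt$').match         # pretend pattern matcher
matchfn = lambda f: f in fset or bool(mf(f))

print matchfn('hgext/extdiff.py')   # True, via the fileset
print matchfn('notes.txt')          # True, via the regexp
print matchfn('setup.py')           # False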
441 def _buildregexmatch(kindpats, globsuffix):
441 def _buildregexmatch(kindpats, globsuffix):
442 """Build a match function from a list of kinds and kindpats,
442 """Build a match function from a list of kinds and kindpats,
443 return regexp string and a matcher function."""
443 return regexp string and a matcher function."""
444 try:
444 try:
445 regex = '(?:%s)' % '|'.join([_regex(k, p, globsuffix)
445 regex = '(?:%s)' % '|'.join([_regex(k, p, globsuffix)
446 for (k, p) in kindpats])
446 for (k, p) in kindpats])
447 if len(regex) > 20000:
447 if len(regex) > 20000:
448 raise OverflowError
448 raise OverflowError
449 return regex, _rematcher(regex)
449 return regex, _rematcher(regex)
450 except OverflowError:
450 except OverflowError:
451 # We're using a Python with a tiny regex engine and we
451 # We're using a Python with a tiny regex engine and we
452 # made it explode, so we'll divide the pattern list in two
452 # made it explode, so we'll divide the pattern list in two
453 # until it works
453 # until it works
454 l = len(kindpats)
454 l = len(kindpats)
455 if l < 2:
455 if l < 2:
456 raise
456 raise
457 regexa, a = _buildregexmatch(kindpats[:l//2], globsuffix)
457 regexa, a = _buildregexmatch(kindpats[:l//2], globsuffix)
458 regexb, b = _buildregexmatch(kindpats[l//2:], globsuffix)
458 regexb, b = _buildregexmatch(kindpats[l//2:], globsuffix)
459 return regex, lambda s: a(s) or b(s)
459 return regex, lambda s: a(s) or b(s)
460 except re.error:
460 except re.error:
461 for k, p in kindpats:
461 for k, p in kindpats:
462 try:
462 try:
463 _rematcher('(?:%s)' % _regex(k, p, globsuffix))
463 _rematcher('(?:%s)' % _regex(k, p, globsuffix))
464 except re.error:
464 except re.error:
465 raise util.Abort(_("invalid pattern (%s): %s") % (k, p))
465 raise util.Abort(_("invalid pattern (%s): %s") % (k, p))
466 raise util.Abort(_("invalid pattern"))
466 raise util.Abort(_("invalid pattern"))
467
467
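The OverflowError fallback above can be pictured with two hand-built halves OR-ed together; the patterns here are invented for illustration, but the composition mirrors the recursive split:

import re

a = re.compile(r'(?:foo/.*)').match       # matcher for the first half
b = re.compile(r'(?:.*\.tmp$)').match     # matcher for the second half
combined = lambda s: bool(a(s)) or bool(b(s))

print combined('foo/bar.c')   # True  (first half)
print combined('x/y.tmp')     # True  (second half)
print combined('x/y.c')       # False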
468 def _roots(kindpats):
468 def _roots(kindpats):
469 '''return roots and exact explicitly listed files from patterns
469 '''return roots and exact explicitly listed files from patterns
470
470
471 >>> _roots([('glob', 'g/*'), ('glob', 'g'), ('glob', 'g*')])
471 >>> _roots([('glob', 'g/*'), ('glob', 'g'), ('glob', 'g*')])
472 ['g', 'g', '.']
472 ['g', 'g', '.']
473 >>> _roots([('relpath', 'r'), ('path', 'p/p'), ('path', '')])
473 >>> _roots([('relpath', 'r'), ('path', 'p/p'), ('path', '')])
474 ['r', 'p/p', '.']
474 ['r', 'p/p', '.']
475 >>> _roots([('relglob', 'rg*'), ('re', 're/'), ('relre', 'rr')])
475 >>> _roots([('relglob', 'rg*'), ('re', 're/'), ('relre', 'rr')])
476 ['.', '.', '.']
476 ['.', '.', '.']
477 '''
477 '''
478 r = []
478 r = []
479 for kind, pat in kindpats:
479 for kind, pat in kindpats:
480 if kind == 'glob': # find the non-glob prefix
480 if kind == 'glob': # find the non-glob prefix
481 root = []
481 root = []
482 for p in pat.split('/'):
482 for p in pat.split('/'):
483 if '[' in p or '{' in p or '*' in p or '?' in p:
483 if '[' in p or '{' in p or '*' in p or '?' in p:
484 break
484 break
485 root.append(p)
485 root.append(p)
486 r.append('/'.join(root) or '.')
486 r.append('/'.join(root) or '.')
487 elif kind in ('relpath', 'path'):
487 elif kind in ('relpath', 'path'):
488 r.append(pat or '.')
488 r.append(pat or '.')
489 else: # relglob, re, relre
489 else: # relglob, re, relre
490 r.append('.')
490 r.append('.')
491 return r
491 return r
492
492
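One more case worth noting, assuming matchmod is mercurial.match: a nested glob keeps its literal directory prefix as the walk root, while a relglob has no usable prefix at all:

from mercurial import match as matchmod

print matchmod._roots([('glob', 'src/**/*.c'), ('relglob', '*.c')])
# ['src', '.']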
493 def _anypats(kindpats):
493 def _anypats(kindpats):
494 for kind, pat in kindpats:
494 for kind, pat in kindpats:
495 if kind in ('glob', 're', 'relglob', 'relre', 'set'):
495 if kind in ('glob', 're', 'relglob', 'relre', 'set'):
496 return True
496 return True
497
497
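_anypats only reports True for kinds that can match outside their literal root; otherwise it falls through and returns None. A tiny sketch, with matchmod assumed to be mercurial.match:

from mercurial import match as matchmod

print matchmod._anypats([('glob', '*.c'), ('path', 'src')])     # True
print matchmod._anypats([('path', 'src'), ('relpath', 'doc')])  # None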
498 _commentre = None
498 _commentre = None
499
499
500 def readpatternfile(filepath, warn):
500 def readpatternfile(filepath, warn):
501 '''parse a pattern file, returning a list of
501 '''parse a pattern file, returning a list of
502 patterns. These patterns should be given to compile()
502 patterns. These patterns should be given to compile()
503 to be validated and converted into a match function.'''
503 to be validated and converted into a match function.'''
504 syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:'}
504 syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:'}
505 syntax = 'relre:'
505 syntax = 'relre:'
506 patterns = []
506 patterns = []
507
507
508 fp = open(filepath)
508 fp = open(filepath)
509 for line in fp:
509 for line in fp:
510 if "#" in line:
510 if "#" in line:
511 global _commentre
511 global _commentre
512 if not _commentre:
512 if not _commentre:
513 _commentre = re.compile(r'((^|[^\\])(\\\\)*)#.*')
513 _commentre = re.compile(r'((^|[^\\])(\\\\)*)#.*')
514 # remove comments prefixed by an even number of escapes
514 # remove comments prefixed by an even number of escapes
515 line = _commentre.sub(r'\1', line)
515 line = _commentre.sub(r'\1', line)
516 # fixup properly escaped comments that survived the above
516 # fixup properly escaped comments that survived the above
517 line = line.replace("\\#", "#")
517 line = line.replace("\\#", "#")
518 line = line.rstrip()
518 line = line.rstrip()
519 if not line:
519 if not line:
520 continue
520 continue
521
521
522 if line.startswith('syntax:'):
522 if line.startswith('syntax:'):
523 s = line[7:].strip()
523 s = line[7:].strip()
524 try:
524 try:
525 syntax = syntaxes[s]
525 syntax = syntaxes[s]
526 except KeyError:
526 except KeyError:
527 warn(_("%s: ignoring invalid syntax '%s'\n") % (filepath, s))
527 warn(_("%s: ignoring invalid syntax '%s'\n") % (filepath, s))
528 continue
528 continue
529
529
530 linesyntax = syntax
530 linesyntax = syntax
531 for s, rels in syntaxes.iteritems():
531 for s, rels in syntaxes.iteritems():
532 if line.startswith(rels):
532 if line.startswith(rels):
533 linesyntax = rels
533 linesyntax = rels
534 line = line[len(rels):]
534 line = line[len(rels):]
535 break
535 break
536 elif line.startswith(s+':'):
536 elif line.startswith(s+':'):
537 linesyntax = rels
537 linesyntax = rels
538 line = line[len(s) + 1:]
538 line = line[len(s) + 1:]
539 break
539 break
540 patterns.append(linesyntax + line)
540 patterns.append(linesyntax + line)
541 fp.close()
541 fp.close()
542 return patterns
542 return patterns
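An end-to-end sketch of readpatternfile on a small .hgignore-style file; the temp-file plumbing and the sample contents are only for illustration, and matchmod is assumed to be mercurial.match:

import os
import tempfile
from mercurial import match as matchmod

tmp = tempfile.NamedTemporaryFile(delete=False)
tmp.write("# build products\n"
          "syntax: glob\n"
          "*.pyc\n"
          "re:^dist/\n")
tmp.close()

warnings = []
print matchmod.readpatternfile(tmp.name, warnings.append)
# ['relglob:*.pyc', 'relre:^dist/']
print warnings   # [] -- warn() only fires for an unknown syntax line
os.unlink(tmp.name)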