largefiles: use try/except/finally
Matt Mackall
r25079:bee00e0c default
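This changeset folds the nested try/try blocks that Python 2.4 required into single try/except/finally blocks (allowed since Python 2.5); in each changed hunk only an inner 'try:' line is removed and its body is dedented one level. Below is a minimal, hypothetical sketch of the before/after shape, using illustrative helper names rather than the extension's real functions:

    # Hedged sketch: install_hook/restore_hook stand in for the extension's
    # matchfn patching; only the try-block shape mirrors the changeset.
    def install_hook():
        print('hook installed')

    def restore_hook():
        print('hook restored')

    def run_old(operation):
        install_hook()
        try:
            try:                      # Python 2.4 style: nested blocks
                return operation()
            except ValueError:
                return 0
        finally:
            restore_hook()

    def run_new(operation):
        install_hook()
        try:                          # Python 2.5+: one try/except/finally
            return operation()
        except ValueError:
            return 0
        finally:
            restore_hook()

    if __name__ == '__main__':
        assert run_old(lambda: int('7')) == run_new(lambda: int('7')) == 7
        assert run_old(lambda: int('x')) == run_new(lambda: int('x')) == 0

Both variants behave identically; the flat form simply drops one level of nesting, which is all the changed hunks in this diff do.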
@@ -1,1374 +1,1372 @@
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10
10
11 import os
11 import os
12 import copy
12 import copy
13
13
14 from mercurial import hg, util, cmdutil, scmutil, match as match_, \
14 from mercurial import hg, util, cmdutil, scmutil, match as match_, \
15 archival, pathutil, revset
15 archival, pathutil, revset
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17
17
18 import lfutil
18 import lfutil
19 import lfcommands
19 import lfcommands
20 import basestore
20 import basestore
21
21
22 # -- Utility functions: commonly/repeatedly needed functionality ---------------
22 # -- Utility functions: commonly/repeatedly needed functionality ---------------
23
23
24 def composelargefilematcher(match, manifest):
24 def composelargefilematcher(match, manifest):
25 '''create a matcher that matches only the largefiles in the original
25 '''create a matcher that matches only the largefiles in the original
26 matcher'''
26 matcher'''
27 m = copy.copy(match)
27 m = copy.copy(match)
28 lfile = lambda f: lfutil.standin(f) in manifest
28 lfile = lambda f: lfutil.standin(f) in manifest
29 m._files = filter(lfile, m._files)
29 m._files = filter(lfile, m._files)
30 m._fmap = set(m._files)
30 m._fmap = set(m._files)
31 m._always = False
31 m._always = False
32 origmatchfn = m.matchfn
32 origmatchfn = m.matchfn
33 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
33 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
34 return m
34 return m
35
35
36 def composenormalfilematcher(match, manifest, exclude=None):
36 def composenormalfilematcher(match, manifest, exclude=None):
37 excluded = set()
37 excluded = set()
38 if exclude is not None:
38 if exclude is not None:
39 excluded.update(exclude)
39 excluded.update(exclude)
40
40
41 m = copy.copy(match)
41 m = copy.copy(match)
42 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
42 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
43 manifest or f in excluded)
43 manifest or f in excluded)
44 m._files = filter(notlfile, m._files)
44 m._files = filter(notlfile, m._files)
45 m._fmap = set(m._files)
45 m._fmap = set(m._files)
46 m._always = False
46 m._always = False
47 origmatchfn = m.matchfn
47 origmatchfn = m.matchfn
48 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
48 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
49 return m
49 return m
50
50
51 def installnormalfilesmatchfn(manifest):
51 def installnormalfilesmatchfn(manifest):
52 '''installmatchfn with a matchfn that ignores all largefiles'''
52 '''installmatchfn with a matchfn that ignores all largefiles'''
53 def overridematch(ctx, pats=[], opts={}, globbed=False,
53 def overridematch(ctx, pats=[], opts={}, globbed=False,
54 default='relpath'):
54 default='relpath'):
55 match = oldmatch(ctx, pats, opts, globbed, default)
55 match = oldmatch(ctx, pats, opts, globbed, default)
56 return composenormalfilematcher(match, manifest)
56 return composenormalfilematcher(match, manifest)
57 oldmatch = installmatchfn(overridematch)
57 oldmatch = installmatchfn(overridematch)
58
58
59 def installmatchfn(f):
59 def installmatchfn(f):
60 '''monkey patch the scmutil module with a custom match function.
60 '''monkey patch the scmutil module with a custom match function.
61 Warning: it is monkey patching the _module_ at runtime! Not thread safe!'''
61 Warning: it is monkey patching the _module_ at runtime! Not thread safe!'''
62 oldmatch = scmutil.match
62 oldmatch = scmutil.match
63 setattr(f, 'oldmatch', oldmatch)
63 setattr(f, 'oldmatch', oldmatch)
64 scmutil.match = f
64 scmutil.match = f
65 return oldmatch
65 return oldmatch
66
66
67 def restorematchfn():
67 def restorematchfn():
68 '''restores scmutil.match to what it was before installmatchfn
68 '''restores scmutil.match to what it was before installmatchfn
69 was called. no-op if scmutil.match is its original function.
69 was called. no-op if scmutil.match is its original function.
70
70
71 Note that n calls to installmatchfn will require n calls to
71 Note that n calls to installmatchfn will require n calls to
72 restore the original matchfn.'''
72 restore the original matchfn.'''
73 scmutil.match = getattr(scmutil.match, 'oldmatch')
73 scmutil.match = getattr(scmutil.match, 'oldmatch')
74
74
75 def installmatchandpatsfn(f):
75 def installmatchandpatsfn(f):
76 oldmatchandpats = scmutil.matchandpats
76 oldmatchandpats = scmutil.matchandpats
77 setattr(f, 'oldmatchandpats', oldmatchandpats)
77 setattr(f, 'oldmatchandpats', oldmatchandpats)
78 scmutil.matchandpats = f
78 scmutil.matchandpats = f
79 return oldmatchandpats
79 return oldmatchandpats
80
80
81 def restorematchandpatsfn():
81 def restorematchandpatsfn():
82 '''restores scmutil.matchandpats to what it was before
82 '''restores scmutil.matchandpats to what it was before
83 installmatchandpatsfn was called. No-op if scmutil.matchandpats
83 installmatchandpatsfn was called. No-op if scmutil.matchandpats
84 is its original function.
84 is its original function.
85
85
86 Note that n calls to installmatchandpatsfn will require n calls
86 Note that n calls to installmatchandpatsfn will require n calls
87 to restore the original matchfn.'''
87 to restore the original matchfn.'''
88 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
88 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
89 scmutil.matchandpats)
89 scmutil.matchandpats)
90
90
91 def addlargefiles(ui, repo, isaddremove, matcher, **opts):
91 def addlargefiles(ui, repo, isaddremove, matcher, **opts):
92 large = opts.get('large')
92 large = opts.get('large')
93 lfsize = lfutil.getminsize(
93 lfsize = lfutil.getminsize(
94 ui, lfutil.islfilesrepo(repo), opts.get('lfsize'))
94 ui, lfutil.islfilesrepo(repo), opts.get('lfsize'))
95
95
96 lfmatcher = None
96 lfmatcher = None
97 if lfutil.islfilesrepo(repo):
97 if lfutil.islfilesrepo(repo):
98 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
98 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
99 if lfpats:
99 if lfpats:
100 lfmatcher = match_.match(repo.root, '', list(lfpats))
100 lfmatcher = match_.match(repo.root, '', list(lfpats))
101
101
102 lfnames = []
102 lfnames = []
103 m = copy.copy(matcher)
103 m = copy.copy(matcher)
104 m.bad = lambda x, y: None
104 m.bad = lambda x, y: None
105 wctx = repo[None]
105 wctx = repo[None]
106 for f in repo.walk(m):
106 for f in repo.walk(m):
107 exact = m.exact(f)
107 exact = m.exact(f)
108 lfile = lfutil.standin(f) in wctx
108 lfile = lfutil.standin(f) in wctx
109 nfile = f in wctx
109 nfile = f in wctx
110 exists = lfile or nfile
110 exists = lfile or nfile
111
111
112 # addremove in core gets fancy with the name, add doesn't
112 # addremove in core gets fancy with the name, add doesn't
113 if isaddremove:
113 if isaddremove:
114 name = m.uipath(f)
114 name = m.uipath(f)
115 else:
115 else:
116 name = m.rel(f)
116 name = m.rel(f)
117
117
118 # Don't warn the user when they attempt to add a normal tracked file.
118 # Don't warn the user when they attempt to add a normal tracked file.
119 # The normal add code will do that for us.
119 # The normal add code will do that for us.
120 if exact and exists:
120 if exact and exists:
121 if lfile:
121 if lfile:
122 ui.warn(_('%s already a largefile\n') % name)
122 ui.warn(_('%s already a largefile\n') % name)
123 continue
123 continue
124
124
125 if (exact or not exists) and not lfutil.isstandin(f):
125 if (exact or not exists) and not lfutil.isstandin(f):
126 # In case the file was removed previously, but not committed
126 # In case the file was removed previously, but not committed
127 # (issue3507)
127 # (issue3507)
128 if not repo.wvfs.exists(f):
128 if not repo.wvfs.exists(f):
129 continue
129 continue
130
130
131 abovemin = (lfsize and
131 abovemin = (lfsize and
132 repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
132 repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
133 if large or abovemin or (lfmatcher and lfmatcher(f)):
133 if large or abovemin or (lfmatcher and lfmatcher(f)):
134 lfnames.append(f)
134 lfnames.append(f)
135 if ui.verbose or not exact:
135 if ui.verbose or not exact:
136 ui.status(_('adding %s as a largefile\n') % name)
136 ui.status(_('adding %s as a largefile\n') % name)
137
137
138 bad = []
138 bad = []
139
139
140 # Need to lock, otherwise there could be a race condition between
140 # Need to lock, otherwise there could be a race condition between
141 # when standins are created and added to the repo.
141 # when standins are created and added to the repo.
142 wlock = repo.wlock()
142 wlock = repo.wlock()
143 try:
143 try:
144 if not opts.get('dry_run'):
144 if not opts.get('dry_run'):
145 standins = []
145 standins = []
146 lfdirstate = lfutil.openlfdirstate(ui, repo)
146 lfdirstate = lfutil.openlfdirstate(ui, repo)
147 for f in lfnames:
147 for f in lfnames:
148 standinname = lfutil.standin(f)
148 standinname = lfutil.standin(f)
149 lfutil.writestandin(repo, standinname, hash='',
149 lfutil.writestandin(repo, standinname, hash='',
150 executable=lfutil.getexecutable(repo.wjoin(f)))
150 executable=lfutil.getexecutable(repo.wjoin(f)))
151 standins.append(standinname)
151 standins.append(standinname)
152 if lfdirstate[f] == 'r':
152 if lfdirstate[f] == 'r':
153 lfdirstate.normallookup(f)
153 lfdirstate.normallookup(f)
154 else:
154 else:
155 lfdirstate.add(f)
155 lfdirstate.add(f)
156 lfdirstate.write()
156 lfdirstate.write()
157 bad += [lfutil.splitstandin(f)
157 bad += [lfutil.splitstandin(f)
158 for f in repo[None].add(standins)
158 for f in repo[None].add(standins)
159 if f in m.files()]
159 if f in m.files()]
160
160
161 added = [f for f in lfnames if f not in bad]
161 added = [f for f in lfnames if f not in bad]
162 finally:
162 finally:
163 wlock.release()
163 wlock.release()
164 return added, bad
164 return added, bad
165
165
166 def removelargefiles(ui, repo, isaddremove, matcher, **opts):
166 def removelargefiles(ui, repo, isaddremove, matcher, **opts):
167 after = opts.get('after')
167 after = opts.get('after')
168 m = composelargefilematcher(matcher, repo[None].manifest())
168 m = composelargefilematcher(matcher, repo[None].manifest())
169 try:
169 try:
170 repo.lfstatus = True
170 repo.lfstatus = True
171 s = repo.status(match=m, clean=not isaddremove)
171 s = repo.status(match=m, clean=not isaddremove)
172 finally:
172 finally:
173 repo.lfstatus = False
173 repo.lfstatus = False
174 manifest = repo[None].manifest()
174 manifest = repo[None].manifest()
175 modified, added, deleted, clean = [[f for f in list
175 modified, added, deleted, clean = [[f for f in list
176 if lfutil.standin(f) in manifest]
176 if lfutil.standin(f) in manifest]
177 for list in (s.modified, s.added,
177 for list in (s.modified, s.added,
178 s.deleted, s.clean)]
178 s.deleted, s.clean)]
179
179
180 def warn(files, msg):
180 def warn(files, msg):
181 for f in files:
181 for f in files:
182 ui.warn(msg % m.rel(f))
182 ui.warn(msg % m.rel(f))
183 return int(len(files) > 0)
183 return int(len(files) > 0)
184
184
185 result = 0
185 result = 0
186
186
187 if after:
187 if after:
188 remove = deleted
188 remove = deleted
189 result = warn(modified + added + clean,
189 result = warn(modified + added + clean,
190 _('not removing %s: file still exists\n'))
190 _('not removing %s: file still exists\n'))
191 else:
191 else:
192 remove = deleted + clean
192 remove = deleted + clean
193 result = warn(modified, _('not removing %s: file is modified (use -f'
193 result = warn(modified, _('not removing %s: file is modified (use -f'
194 ' to force removal)\n'))
194 ' to force removal)\n'))
195 result = warn(added, _('not removing %s: file has been marked for add'
195 result = warn(added, _('not removing %s: file has been marked for add'
196 ' (use forget to undo)\n')) or result
196 ' (use forget to undo)\n')) or result
197
197
198 # Need to lock because standin files are deleted then removed from the
198 # Need to lock because standin files are deleted then removed from the
199 # repository and we could race in-between.
199 # repository and we could race in-between.
200 wlock = repo.wlock()
200 wlock = repo.wlock()
201 try:
201 try:
202 lfdirstate = lfutil.openlfdirstate(ui, repo)
202 lfdirstate = lfutil.openlfdirstate(ui, repo)
203 for f in sorted(remove):
203 for f in sorted(remove):
204 if ui.verbose or not m.exact(f):
204 if ui.verbose or not m.exact(f):
205 # addremove in core gets fancy with the name, remove doesn't
205 # addremove in core gets fancy with the name, remove doesn't
206 if isaddremove:
206 if isaddremove:
207 name = m.uipath(f)
207 name = m.uipath(f)
208 else:
208 else:
209 name = m.rel(f)
209 name = m.rel(f)
210 ui.status(_('removing %s\n') % name)
210 ui.status(_('removing %s\n') % name)
211
211
212 if not opts.get('dry_run'):
212 if not opts.get('dry_run'):
213 if not after:
213 if not after:
214 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
214 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
215
215
216 if opts.get('dry_run'):
216 if opts.get('dry_run'):
217 return result
217 return result
218
218
219 remove = [lfutil.standin(f) for f in remove]
219 remove = [lfutil.standin(f) for f in remove]
220 # If this is being called by addremove, let the original addremove
220 # If this is being called by addremove, let the original addremove
221 # function handle this.
221 # function handle this.
222 if not isaddremove:
222 if not isaddremove:
223 for f in remove:
223 for f in remove:
224 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
224 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
225 repo[None].forget(remove)
225 repo[None].forget(remove)
226
226
227 for f in remove:
227 for f in remove:
228 lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
228 lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
229 False)
229 False)
230
230
231 lfdirstate.write()
231 lfdirstate.write()
232 finally:
232 finally:
233 wlock.release()
233 wlock.release()
234
234
235 return result
235 return result
236
236
237 # For overriding mercurial.hgweb.webcommands so that largefiles will
237 # For overriding mercurial.hgweb.webcommands so that largefiles will
238 # appear at their right place in the manifests.
238 # appear at their right place in the manifests.
239 def decodepath(orig, path):
239 def decodepath(orig, path):
240 return lfutil.splitstandin(path) or path
240 return lfutil.splitstandin(path) or path
241
241
242 # -- Wrappers: modify existing commands --------------------------------
242 # -- Wrappers: modify existing commands --------------------------------
243
243
244 def overrideadd(orig, ui, repo, *pats, **opts):
244 def overrideadd(orig, ui, repo, *pats, **opts):
245 if opts.get('normal') and opts.get('large'):
245 if opts.get('normal') and opts.get('large'):
246 raise util.Abort(_('--normal cannot be used with --large'))
246 raise util.Abort(_('--normal cannot be used with --large'))
247 return orig(ui, repo, *pats, **opts)
247 return orig(ui, repo, *pats, **opts)
248
248
249 def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
249 def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
250 # The --normal flag short circuits this override
250 # The --normal flag short circuits this override
251 if opts.get('normal'):
251 if opts.get('normal'):
252 return orig(ui, repo, matcher, prefix, explicitonly, **opts)
252 return orig(ui, repo, matcher, prefix, explicitonly, **opts)
253
253
254 ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
254 ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
255 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
255 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
256 ladded)
256 ladded)
257 bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)
257 bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)
258
258
259 bad.extend(f for f in lbad)
259 bad.extend(f for f in lbad)
260 return bad
260 return bad
261
261
262 def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
262 def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
263 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
263 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
264 result = orig(ui, repo, normalmatcher, prefix, after, force, subrepos)
264 result = orig(ui, repo, normalmatcher, prefix, after, force, subrepos)
265 return removelargefiles(ui, repo, False, matcher, after=after,
265 return removelargefiles(ui, repo, False, matcher, after=after,
266 force=force) or result
266 force=force) or result
267
267
268 def overridestatusfn(orig, repo, rev2, **opts):
268 def overridestatusfn(orig, repo, rev2, **opts):
269 try:
269 try:
270 repo._repo.lfstatus = True
270 repo._repo.lfstatus = True
271 return orig(repo, rev2, **opts)
271 return orig(repo, rev2, **opts)
272 finally:
272 finally:
273 repo._repo.lfstatus = False
273 repo._repo.lfstatus = False
274
274
275 def overridestatus(orig, ui, repo, *pats, **opts):
275 def overridestatus(orig, ui, repo, *pats, **opts):
276 try:
276 try:
277 repo.lfstatus = True
277 repo.lfstatus = True
278 return orig(ui, repo, *pats, **opts)
278 return orig(ui, repo, *pats, **opts)
279 finally:
279 finally:
280 repo.lfstatus = False
280 repo.lfstatus = False
281
281
282 def overridedirty(orig, repo, ignoreupdate=False):
282 def overridedirty(orig, repo, ignoreupdate=False):
283 try:
283 try:
284 repo._repo.lfstatus = True
284 repo._repo.lfstatus = True
285 return orig(repo, ignoreupdate)
285 return orig(repo, ignoreupdate)
286 finally:
286 finally:
287 repo._repo.lfstatus = False
287 repo._repo.lfstatus = False
288
288
289 def overridelog(orig, ui, repo, *pats, **opts):
289 def overridelog(orig, ui, repo, *pats, **opts):
290 def overridematchandpats(ctx, pats=[], opts={}, globbed=False,
290 def overridematchandpats(ctx, pats=[], opts={}, globbed=False,
291 default='relpath'):
291 default='relpath'):
292 """Matcher that merges root directory with .hglf, suitable for log.
292 """Matcher that merges root directory with .hglf, suitable for log.
293 It is still possible to match .hglf directly.
293 It is still possible to match .hglf directly.
294 For any listed files run log on the standin too.
294 For any listed files run log on the standin too.
295 matchfn tries both the given filename and with .hglf stripped.
295 matchfn tries both the given filename and with .hglf stripped.
296 """
296 """
297 matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default)
297 matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default)
298 m, p = copy.copy(matchandpats)
298 m, p = copy.copy(matchandpats)
299
299
300 if m.always():
300 if m.always():
301 # We want to match everything anyway, so there's no benefit trying
301 # We want to match everything anyway, so there's no benefit trying
302 # to add standins.
302 # to add standins.
303 return matchandpats
303 return matchandpats
304
304
305 pats = set(p)
305 pats = set(p)
306
306
307 def fixpats(pat, tostandin=lfutil.standin):
307 def fixpats(pat, tostandin=lfutil.standin):
308 if pat.startswith('set:'):
308 if pat.startswith('set:'):
309 return pat
309 return pat
310
310
311 kindpat = match_._patsplit(pat, None)
311 kindpat = match_._patsplit(pat, None)
312
312
313 if kindpat[0] is not None:
313 if kindpat[0] is not None:
314 return kindpat[0] + ':' + tostandin(kindpat[1])
314 return kindpat[0] + ':' + tostandin(kindpat[1])
315 return tostandin(kindpat[1])
315 return tostandin(kindpat[1])
316
316
317 if m._cwd:
317 if m._cwd:
318 hglf = lfutil.shortname
318 hglf = lfutil.shortname
319 back = util.pconvert(m.rel(hglf)[:-len(hglf)])
319 back = util.pconvert(m.rel(hglf)[:-len(hglf)])
320
320
321 def tostandin(f):
321 def tostandin(f):
322 # The file may already be a standin, so truncate the back
322 # The file may already be a standin, so truncate the back
323 # prefix and test before mangling it. This avoids turning
323 # prefix and test before mangling it. This avoids turning
324 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
324 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
325 if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
325 if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
326 return f
326 return f
327
327
328 # An absolute path is from outside the repo, so truncate the
328 # An absolute path is from outside the repo, so truncate the
329 # path to the root before building the standin. Otherwise cwd
329 # path to the root before building the standin. Otherwise cwd
330 # is somewhere in the repo, relative to root, and needs to be
330 # is somewhere in the repo, relative to root, and needs to be
331 # prepended before building the standin.
331 # prepended before building the standin.
332 if os.path.isabs(m._cwd):
332 if os.path.isabs(m._cwd):
333 f = f[len(back):]
333 f = f[len(back):]
334 else:
334 else:
335 f = m._cwd + '/' + f
335 f = m._cwd + '/' + f
336 return back + lfutil.standin(f)
336 return back + lfutil.standin(f)
337
337
338 pats.update(fixpats(f, tostandin) for f in p)
338 pats.update(fixpats(f, tostandin) for f in p)
339 else:
339 else:
340 def tostandin(f):
340 def tostandin(f):
341 if lfutil.splitstandin(f):
341 if lfutil.splitstandin(f):
342 return f
342 return f
343 return lfutil.standin(f)
343 return lfutil.standin(f)
344 pats.update(fixpats(f, tostandin) for f in p)
344 pats.update(fixpats(f, tostandin) for f in p)
345
345
346 for i in range(0, len(m._files)):
346 for i in range(0, len(m._files)):
347 # Don't add '.hglf' to m.files, since that is already covered by '.'
347 # Don't add '.hglf' to m.files, since that is already covered by '.'
348 if m._files[i] == '.':
348 if m._files[i] == '.':
349 continue
349 continue
350 standin = lfutil.standin(m._files[i])
350 standin = lfutil.standin(m._files[i])
351 # If the "standin" is a directory, append instead of replace to
351 # If the "standin" is a directory, append instead of replace to
352 # support naming a directory on the command line with only
352 # support naming a directory on the command line with only
353 # largefiles. The original directory is kept to support normal
353 # largefiles. The original directory is kept to support normal
354 # files.
354 # files.
355 if standin in repo[ctx.node()]:
355 if standin in repo[ctx.node()]:
356 m._files[i] = standin
356 m._files[i] = standin
357 elif m._files[i] not in repo[ctx.node()] \
357 elif m._files[i] not in repo[ctx.node()] \
358 and repo.wvfs.isdir(standin):
358 and repo.wvfs.isdir(standin):
359 m._files.append(standin)
359 m._files.append(standin)
360
360
361 m._fmap = set(m._files)
361 m._fmap = set(m._files)
362 m._always = False
362 m._always = False
363 origmatchfn = m.matchfn
363 origmatchfn = m.matchfn
364 def lfmatchfn(f):
364 def lfmatchfn(f):
365 lf = lfutil.splitstandin(f)
365 lf = lfutil.splitstandin(f)
366 if lf is not None and origmatchfn(lf):
366 if lf is not None and origmatchfn(lf):
367 return True
367 return True
368 r = origmatchfn(f)
368 r = origmatchfn(f)
369 return r
369 return r
370 m.matchfn = lfmatchfn
370 m.matchfn = lfmatchfn
371
371
372 ui.debug('updated patterns: %s\n' % sorted(pats))
372 ui.debug('updated patterns: %s\n' % sorted(pats))
373 return m, pats
373 return m, pats
374
374
375 # For hg log --patch, the match object is used in two different senses:
375 # For hg log --patch, the match object is used in two different senses:
376 # (1) to determine what revisions should be printed out, and
376 # (1) to determine what revisions should be printed out, and
377 # (2) to determine what files to print out diffs for.
377 # (2) to determine what files to print out diffs for.
378 # The magic matchandpats override should be used for case (1) but not for
378 # The magic matchandpats override should be used for case (1) but not for
379 # case (2).
379 # case (2).
380 def overridemakelogfilematcher(repo, pats, opts):
380 def overridemakelogfilematcher(repo, pats, opts):
381 wctx = repo[None]
381 wctx = repo[None]
382 match, pats = oldmatchandpats(wctx, pats, opts)
382 match, pats = oldmatchandpats(wctx, pats, opts)
383 return lambda rev: match
383 return lambda rev: match
384
384
385 oldmatchandpats = installmatchandpatsfn(overridematchandpats)
385 oldmatchandpats = installmatchandpatsfn(overridematchandpats)
386 oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
386 oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
387 setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)
387 setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)
388
388
389 try:
389 try:
390 return orig(ui, repo, *pats, **opts)
390 return orig(ui, repo, *pats, **opts)
391 finally:
391 finally:
392 restorematchandpatsfn()
392 restorematchandpatsfn()
393 setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
393 setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
394
394
395 def overrideverify(orig, ui, repo, *pats, **opts):
395 def overrideverify(orig, ui, repo, *pats, **opts):
396 large = opts.pop('large', False)
396 large = opts.pop('large', False)
397 all = opts.pop('lfa', False)
397 all = opts.pop('lfa', False)
398 contents = opts.pop('lfc', False)
398 contents = opts.pop('lfc', False)
399
399
400 result = orig(ui, repo, *pats, **opts)
400 result = orig(ui, repo, *pats, **opts)
401 if large or all or contents:
401 if large or all or contents:
402 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
402 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
403 return result
403 return result
404
404
405 def overridedebugstate(orig, ui, repo, *pats, **opts):
405 def overridedebugstate(orig, ui, repo, *pats, **opts):
406 large = opts.pop('large', False)
406 large = opts.pop('large', False)
407 if large:
407 if large:
408 class fakerepo(object):
408 class fakerepo(object):
409 dirstate = lfutil.openlfdirstate(ui, repo)
409 dirstate = lfutil.openlfdirstate(ui, repo)
410 orig(ui, fakerepo, *pats, **opts)
410 orig(ui, fakerepo, *pats, **opts)
411 else:
411 else:
412 orig(ui, repo, *pats, **opts)
412 orig(ui, repo, *pats, **opts)
413
413
414 # Before starting the manifest merge, merge.updates will call
414 # Before starting the manifest merge, merge.updates will call
415 # _checkunknownfile to check if there are any files in the merged-in
415 # _checkunknownfile to check if there are any files in the merged-in
416 # changeset that collide with unknown files in the working copy.
416 # changeset that collide with unknown files in the working copy.
417 #
417 #
418 # The largefiles are seen as unknown, so this prevents us from merging
418 # The largefiles are seen as unknown, so this prevents us from merging
419 # in a file 'foo' if we already have a largefile with the same name.
419 # in a file 'foo' if we already have a largefile with the same name.
420 #
420 #
421 # The overridden function filters the unknown files by removing any
421 # The overridden function filters the unknown files by removing any
422 # largefiles. This makes the merge proceed and we can then handle this
422 # largefiles. This makes the merge proceed and we can then handle this
423 # case further in the overridden calculateupdates function below.
423 # case further in the overridden calculateupdates function below.
424 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
424 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
425 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
425 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
426 return False
426 return False
427 return origfn(repo, wctx, mctx, f, f2)
427 return origfn(repo, wctx, mctx, f, f2)
428
428
429 # The manifest merge handles conflicts on the manifest level. We want
429 # The manifest merge handles conflicts on the manifest level. We want
430 # to handle changes in largefile-ness of files at this level too.
430 # to handle changes in largefile-ness of files at this level too.
431 #
431 #
432 # The strategy is to run the original calculateupdates and then process
432 # The strategy is to run the original calculateupdates and then process
433 # the action list it outputs. There are two cases we need to deal with:
433 # the action list it outputs. There are two cases we need to deal with:
434 #
434 #
435 # 1. Normal file in p1, largefile in p2. Here the largefile is
435 # 1. Normal file in p1, largefile in p2. Here the largefile is
436 # detected via its standin file, which will enter the working copy
436 # detected via its standin file, which will enter the working copy
437 # with a "get" action. It is not "merge" since the standin is all
437 # with a "get" action. It is not "merge" since the standin is all
438 # Mercurial is concerned with at this level -- the link to the
438 # Mercurial is concerned with at this level -- the link to the
439 # existing normal file is not relevant here.
439 # existing normal file is not relevant here.
440 #
440 #
441 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
441 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
442 # since the largefile will be present in the working copy and
442 # since the largefile will be present in the working copy and
443 # different from the normal file in p2. Mercurial therefore
443 # different from the normal file in p2. Mercurial therefore
444 # triggers a merge action.
444 # triggers a merge action.
445 #
445 #
446 # In both cases, we prompt the user and emit new actions to either
446 # In both cases, we prompt the user and emit new actions to either
447 # remove the standin (if the normal file was kept) or to remove the
447 # remove the standin (if the normal file was kept) or to remove the
448 # normal file and get the standin (if the largefile was kept). The
448 # normal file and get the standin (if the largefile was kept). The
449 # default prompt answer is to use the largefile version since it was
449 # default prompt answer is to use the largefile version since it was
450 # presumably changed on purpose.
450 # presumably changed on purpose.
451 #
451 #
452 # Finally, the merge.applyupdates function will then take care of
452 # Finally, the merge.applyupdates function will then take care of
453 # writing the files into the working copy and lfcommands.updatelfiles
453 # writing the files into the working copy and lfcommands.updatelfiles
454 # will update the largefiles.
454 # will update the largefiles.
455 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
455 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
456 partial, acceptremote, followcopies):
456 partial, acceptremote, followcopies):
457 overwrite = force and not branchmerge
457 overwrite = force and not branchmerge
458 actions, diverge, renamedelete = origfn(
458 actions, diverge, renamedelete = origfn(
459 repo, p1, p2, pas, branchmerge, force, partial, acceptremote,
459 repo, p1, p2, pas, branchmerge, force, partial, acceptremote,
460 followcopies)
460 followcopies)
461
461
462 if overwrite:
462 if overwrite:
463 return actions, diverge, renamedelete
463 return actions, diverge, renamedelete
464
464
465 # Convert to dictionary with filename as key and action as value.
465 # Convert to dictionary with filename as key and action as value.
466 lfiles = set()
466 lfiles = set()
467 for f in actions:
467 for f in actions:
468 splitstandin = f and lfutil.splitstandin(f)
468 splitstandin = f and lfutil.splitstandin(f)
469 if splitstandin in p1:
469 if splitstandin in p1:
470 lfiles.add(splitstandin)
470 lfiles.add(splitstandin)
471 elif lfutil.standin(f) in p1:
471 elif lfutil.standin(f) in p1:
472 lfiles.add(f)
472 lfiles.add(f)
473
473
474 for lfile in lfiles:
474 for lfile in lfiles:
475 standin = lfutil.standin(lfile)
475 standin = lfutil.standin(lfile)
476 (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
476 (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
477 (sm, sargs, smsg) = actions.get(standin, (None, None, None))
477 (sm, sargs, smsg) = actions.get(standin, (None, None, None))
478 if sm in ('g', 'dc') and lm != 'r':
478 if sm in ('g', 'dc') and lm != 'r':
479 # Case 1: normal file in the working copy, largefile in
479 # Case 1: normal file in the working copy, largefile in
480 # the second parent
480 # the second parent
481 usermsg = _('remote turned local normal file %s into a largefile\n'
481 usermsg = _('remote turned local normal file %s into a largefile\n'
482 'use (l)argefile or keep (n)ormal file?'
482 'use (l)argefile or keep (n)ormal file?'
483 '$$ &Largefile $$ &Normal file') % lfile
483 '$$ &Largefile $$ &Normal file') % lfile
484 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
484 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
485 actions[lfile] = ('r', None, 'replaced by standin')
485 actions[lfile] = ('r', None, 'replaced by standin')
486 actions[standin] = ('g', sargs, 'replaces standin')
486 actions[standin] = ('g', sargs, 'replaces standin')
487 else: # keep local normal file
487 else: # keep local normal file
488 actions[lfile] = ('k', None, 'replaces standin')
488 actions[lfile] = ('k', None, 'replaces standin')
489 if branchmerge:
489 if branchmerge:
490 actions[standin] = ('k', None, 'replaced by non-standin')
490 actions[standin] = ('k', None, 'replaced by non-standin')
491 else:
491 else:
492 actions[standin] = ('r', None, 'replaced by non-standin')
492 actions[standin] = ('r', None, 'replaced by non-standin')
493 elif lm in ('g', 'dc') and sm != 'r':
493 elif lm in ('g', 'dc') and sm != 'r':
494 # Case 2: largefile in the working copy, normal file in
494 # Case 2: largefile in the working copy, normal file in
495 # the second parent
495 # the second parent
496 usermsg = _('remote turned local largefile %s into a normal file\n'
496 usermsg = _('remote turned local largefile %s into a normal file\n'
497 'keep (l)argefile or use (n)ormal file?'
497 'keep (l)argefile or use (n)ormal file?'
498 '$$ &Largefile $$ &Normal file') % lfile
498 '$$ &Largefile $$ &Normal file') % lfile
499 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
499 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
500 if branchmerge:
500 if branchmerge:
501 # largefile can be restored from standin safely
501 # largefile can be restored from standin safely
502 actions[lfile] = ('k', None, 'replaced by standin')
502 actions[lfile] = ('k', None, 'replaced by standin')
503 actions[standin] = ('k', None, 'replaces standin')
503 actions[standin] = ('k', None, 'replaces standin')
504 else:
504 else:
505 # "lfile" should be marked as "removed" without
505 # "lfile" should be marked as "removed" without
506 # removal of itself
506 # removal of itself
507 actions[lfile] = ('lfmr', None,
507 actions[lfile] = ('lfmr', None,
508 'forget non-standin largefile')
508 'forget non-standin largefile')
509
509
510 # linear-merge should treat this largefile as 're-added'
510 # linear-merge should treat this largefile as 're-added'
511 actions[standin] = ('a', None, 'keep standin')
511 actions[standin] = ('a', None, 'keep standin')
512 else: # pick remote normal file
512 else: # pick remote normal file
513 actions[lfile] = ('g', largs, 'replaces standin')
513 actions[lfile] = ('g', largs, 'replaces standin')
514 actions[standin] = ('r', None, 'replaced by non-standin')
514 actions[standin] = ('r', None, 'replaced by non-standin')
515
515
516 return actions, diverge, renamedelete
516 return actions, diverge, renamedelete
517
517
518 def mergerecordupdates(orig, repo, actions, branchmerge):
518 def mergerecordupdates(orig, repo, actions, branchmerge):
519 if 'lfmr' in actions:
519 if 'lfmr' in actions:
520 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
520 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
521 for lfile, args, msg in actions['lfmr']:
521 for lfile, args, msg in actions['lfmr']:
522 # this should be executed before 'orig', to execute 'remove'
522 # this should be executed before 'orig', to execute 'remove'
523 # before all other actions
523 # before all other actions
524 repo.dirstate.remove(lfile)
524 repo.dirstate.remove(lfile)
525 # make sure lfile doesn't get synclfdirstate'd as normal
525 # make sure lfile doesn't get synclfdirstate'd as normal
526 lfdirstate.add(lfile)
526 lfdirstate.add(lfile)
527 lfdirstate.write()
527 lfdirstate.write()
528
528
529 return orig(repo, actions, branchmerge)
529 return orig(repo, actions, branchmerge)
530
530
531
531
532 # Override filemerge to prompt the user about how they wish to merge
532 # Override filemerge to prompt the user about how they wish to merge
533 # largefiles. This will handle identical edits without prompting the user.
533 # largefiles. This will handle identical edits without prompting the user.
534 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca, labels=None):
534 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca, labels=None):
535 if not lfutil.isstandin(orig):
535 if not lfutil.isstandin(orig):
536 return origfn(repo, mynode, orig, fcd, fco, fca, labels=labels)
536 return origfn(repo, mynode, orig, fcd, fco, fca, labels=labels)
537
537
538 ahash = fca.data().strip().lower()
538 ahash = fca.data().strip().lower()
539 dhash = fcd.data().strip().lower()
539 dhash = fcd.data().strip().lower()
540 ohash = fco.data().strip().lower()
540 ohash = fco.data().strip().lower()
541 if (ohash != ahash and
541 if (ohash != ahash and
542 ohash != dhash and
542 ohash != dhash and
543 (dhash == ahash or
543 (dhash == ahash or
544 repo.ui.promptchoice(
544 repo.ui.promptchoice(
545 _('largefile %s has a merge conflict\nancestor was %s\n'
545 _('largefile %s has a merge conflict\nancestor was %s\n'
546 'keep (l)ocal %s or\ntake (o)ther %s?'
546 'keep (l)ocal %s or\ntake (o)ther %s?'
547 '$$ &Local $$ &Other') %
547 '$$ &Local $$ &Other') %
548 (lfutil.splitstandin(orig), ahash, dhash, ohash),
548 (lfutil.splitstandin(orig), ahash, dhash, ohash),
549 0) == 1)):
549 0) == 1)):
550 repo.wwrite(fcd.path(), fco.data(), fco.flags())
550 repo.wwrite(fcd.path(), fco.data(), fco.flags())
551 return 0
551 return 0
552
552
553 def copiespathcopies(orig, ctx1, ctx2, match=None):
553 def copiespathcopies(orig, ctx1, ctx2, match=None):
554 copies = orig(ctx1, ctx2, match=match)
554 copies = orig(ctx1, ctx2, match=match)
555 updated = {}
555 updated = {}
556
556
557 for k, v in copies.iteritems():
557 for k, v in copies.iteritems():
558 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
558 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
559
559
560 return updated
560 return updated
561
561
562 # Copy first changes the matchers to match standins instead of
562 # Copy first changes the matchers to match standins instead of
563 # largefiles. Then it overrides util.copyfile in that function it
563 # largefiles. Then it overrides util.copyfile in that function it
564 # checks if the destination largefile already exists. It also keeps a
564 # checks if the destination largefile already exists. It also keeps a
565 # list of copied files so that the largefiles can be copied and the
565 # list of copied files so that the largefiles can be copied and the
566 # dirstate updated.
566 # dirstate updated.
567 def overridecopy(orig, ui, repo, pats, opts, rename=False):
567 def overridecopy(orig, ui, repo, pats, opts, rename=False):
568 # doesn't remove largefile on rename
568 # doesn't remove largefile on rename
569 if len(pats) < 2:
569 if len(pats) < 2:
570 # this isn't legal, let the original function deal with it
570 # this isn't legal, let the original function deal with it
571 return orig(ui, repo, pats, opts, rename)
571 return orig(ui, repo, pats, opts, rename)
572
572
573 # This could copy both lfiles and normal files in one command,
573 # This could copy both lfiles and normal files in one command,
574 # but we don't want to do that. First replace their matcher to
574 # but we don't want to do that. First replace their matcher to
575 # only match normal files and run it, then replace it to just
575 # only match normal files and run it, then replace it to just
576 # match largefiles and run it again.
576 # match largefiles and run it again.
577 nonormalfiles = False
577 nonormalfiles = False
578 nolfiles = False
578 nolfiles = False
579 installnormalfilesmatchfn(repo[None].manifest())
579 installnormalfilesmatchfn(repo[None].manifest())
580 try:
580 try:
-581 try:
-582 result = orig(ui, repo, pats, opts, rename)
-583 except util.Abort, e:
-584 if str(e) != _('no files to copy'):
-585 raise e
-586 else:
-587 nonormalfiles = True
-588 result = 0
+581 result = orig(ui, repo, pats, opts, rename)
+582 except util.Abort, e:
+583 if str(e) != _('no files to copy'):
+584 raise e
+585 else:
+586 nonormalfiles = True
+587 result = 0
589 finally:
588 finally:
590 restorematchfn()
589 restorematchfn()
591
590
592 # The first rename can cause our current working directory to be removed.
591 # The first rename can cause our current working directory to be removed.
593 # In that case there is nothing left to copy/rename so just quit.
592 # In that case there is nothing left to copy/rename so just quit.
594 try:
593 try:
595 repo.getcwd()
594 repo.getcwd()
596 except OSError:
595 except OSError:
597 return result
596 return result
598
597
599 def makestandin(relpath):
598 def makestandin(relpath):
600 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
599 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
601 return os.path.join(repo.wjoin(lfutil.standin(path)))
600 return os.path.join(repo.wjoin(lfutil.standin(path)))
602
601
603 fullpats = scmutil.expandpats(pats)
602 fullpats = scmutil.expandpats(pats)
604 dest = fullpats[-1]
603 dest = fullpats[-1]
605
604
606 if os.path.isdir(dest):
605 if os.path.isdir(dest):
607 if not os.path.isdir(makestandin(dest)):
606 if not os.path.isdir(makestandin(dest)):
608 os.makedirs(makestandin(dest))
607 os.makedirs(makestandin(dest))
609
608
610 try:
609 try:
-611 try:
-612 # When we call orig below it creates the standins but we don't add
-613 # them to the dir state until later so lock during that time.
-614 wlock = repo.wlock()
-615
-616 manifest = repo[None].manifest()
-617 def overridematch(ctx, pats=[], opts={}, globbed=False,
-618 default='relpath'):
-619 newpats = []
-620 # The patterns were previously mangled to add the standin
-621 # directory; we need to remove that now
-622 for pat in pats:
-623 if match_.patkind(pat) is None and lfutil.shortname in pat:
-624 newpats.append(pat.replace(lfutil.shortname, ''))
-625 else:
-626 newpats.append(pat)
-627 match = oldmatch(ctx, newpats, opts, globbed, default)
-628 m = copy.copy(match)
-629 lfile = lambda f: lfutil.standin(f) in manifest
-630 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
-631 m._fmap = set(m._files)
-632 origmatchfn = m.matchfn
-633 m.matchfn = lambda f: (lfutil.isstandin(f) and
-634 (f in manifest) and
-635 origmatchfn(lfutil.splitstandin(f)) or
-636 None)
-637 return m
-638 oldmatch = installmatchfn(overridematch)
-639 listpats = []
-640 for pat in pats:
-641 if match_.patkind(pat) is not None:
-642 listpats.append(pat)
-643 else:
-644 listpats.append(makestandin(pat))
-645
-646 try:
-647 origcopyfile = util.copyfile
-648 copiedfiles = []
-649 def overridecopyfile(src, dest):
-650 if (lfutil.shortname in src and
-651 dest.startswith(repo.wjoin(lfutil.shortname))):
-652 destlfile = dest.replace(lfutil.shortname, '')
-653 if not opts['force'] and os.path.exists(destlfile):
-654 raise IOError('',
-655 _('destination largefile already exists'))
-656 copiedfiles.append((src, dest))
-657 origcopyfile(src, dest)
-658
-659 util.copyfile = overridecopyfile
-660 result += orig(ui, repo, listpats, opts, rename)
-661 finally:
-662 util.copyfile = origcopyfile
-663
-664 lfdirstate = lfutil.openlfdirstate(ui, repo)
-665 for (src, dest) in copiedfiles:
-666 if (lfutil.shortname in src and
-667 dest.startswith(repo.wjoin(lfutil.shortname))):
-668 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
-669 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
-670 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
-671 if not os.path.isdir(destlfiledir):
-672 os.makedirs(destlfiledir)
-673 if rename:
-674 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
-675
-676 # The file is gone, but this deletes any empty parent
-677 # directories as a side-effect.
-678 util.unlinkpath(repo.wjoin(srclfile), True)
-679 lfdirstate.remove(srclfile)
-680 else:
-681 util.copyfile(repo.wjoin(srclfile),
-682 repo.wjoin(destlfile))
-683
-684 lfdirstate.add(destlfile)
-685 lfdirstate.write()
-686 except util.Abort, e:
-687 if str(e) != _('no files to copy'):
-688 raise e
-689 else:
-690 nolfiles = True
+610 # When we call orig below it creates the standins but we don't add
+611 # them to the dir state until later so lock during that time.
+612 wlock = repo.wlock()
+613
+614 manifest = repo[None].manifest()
+615 def overridematch(ctx, pats=[], opts={}, globbed=False,
+616 default='relpath'):
+617 newpats = []
+618 # The patterns were previously mangled to add the standin
+619 # directory; we need to remove that now
+620 for pat in pats:
+621 if match_.patkind(pat) is None and lfutil.shortname in pat:
+622 newpats.append(pat.replace(lfutil.shortname, ''))
+623 else:
+624 newpats.append(pat)
+625 match = oldmatch(ctx, newpats, opts, globbed, default)
+626 m = copy.copy(match)
+627 lfile = lambda f: lfutil.standin(f) in manifest
+628 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
+629 m._fmap = set(m._files)
+630 origmatchfn = m.matchfn
+631 m.matchfn = lambda f: (lfutil.isstandin(f) and
+632 (f in manifest) and
+633 origmatchfn(lfutil.splitstandin(f)) or
+634 None)
+635 return m
+636 oldmatch = installmatchfn(overridematch)
+637 listpats = []
+638 for pat in pats:
+639 if match_.patkind(pat) is not None:
+640 listpats.append(pat)
+641 else:
+642 listpats.append(makestandin(pat))
+643
+644 try:
+645 origcopyfile = util.copyfile
+646 copiedfiles = []
+647 def overridecopyfile(src, dest):
+648 if (lfutil.shortname in src and
+649 dest.startswith(repo.wjoin(lfutil.shortname))):
+650 destlfile = dest.replace(lfutil.shortname, '')
+651 if not opts['force'] and os.path.exists(destlfile):
+652 raise IOError('',
+653 _('destination largefile already exists'))
+654 copiedfiles.append((src, dest))
+655 origcopyfile(src, dest)
+656
+657 util.copyfile = overridecopyfile
+658 result += orig(ui, repo, listpats, opts, rename)
+659 finally:
+660 util.copyfile = origcopyfile
+661
+662 lfdirstate = lfutil.openlfdirstate(ui, repo)
+663 for (src, dest) in copiedfiles:
+664 if (lfutil.shortname in src and
+665 dest.startswith(repo.wjoin(lfutil.shortname))):
+666 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
+667 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
+668 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
+669 if not os.path.isdir(destlfiledir):
+670 os.makedirs(destlfiledir)
+671 if rename:
+672 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
+673
+674 # The file is gone, but this deletes any empty parent
+675 # directories as a side-effect.
+676 util.unlinkpath(repo.wjoin(srclfile), True)
+677 lfdirstate.remove(srclfile)
+678 else:
+679 util.copyfile(repo.wjoin(srclfile),
+680 repo.wjoin(destlfile))
+681
+682 lfdirstate.add(destlfile)
+683 lfdirstate.write()
+684 except util.Abort, e:
+685 if str(e) != _('no files to copy'):
+686 raise e
+687 else:
+688 nolfiles = True
691 finally:
689 finally:
692 restorematchfn()
690 restorematchfn()
693 wlock.release()
691 wlock.release()
694
692
695 if nolfiles and nonormalfiles:
693 if nolfiles and nonormalfiles:
696 raise util.Abort(_('no files to copy'))
694 raise util.Abort(_('no files to copy'))
697
695
698 return result
696 return result
699
697
700 # When the user calls revert, we have to be careful to not revert any
698 # When the user calls revert, we have to be careful to not revert any
701 # changes to other largefiles accidentally. This means we have to keep
699 # changes to other largefiles accidentally. This means we have to keep
702 # track of the largefiles that are being reverted so we only pull down
700 # track of the largefiles that are being reverted so we only pull down
703 # the necessary largefiles.
701 # the necessary largefiles.
704 #
702 #
705 # Standins are only updated (to match the hash of largefiles) before
703 # Standins are only updated (to match the hash of largefiles) before
706 # commits. Update the standins then run the original revert, changing
704 # commits. Update the standins then run the original revert, changing
707 # the matcher to hit standins instead of largefiles. Based on the
705 # the matcher to hit standins instead of largefiles. Based on the
708 # resulting standins update the largefiles.
706 # resulting standins update the largefiles.
709 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
707 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
710 # Because we put the standins in a bad state (by updating them)
708 # Because we put the standins in a bad state (by updating them)
711 # and then return them to a correct state we need to lock to
709 # and then return them to a correct state we need to lock to
712 # prevent others from changing them in their incorrect state.
710 # prevent others from changing them in their incorrect state.
713 wlock = repo.wlock()
711 wlock = repo.wlock()
714 try:
712 try:
715 lfdirstate = lfutil.openlfdirstate(ui, repo)
713 lfdirstate = lfutil.openlfdirstate(ui, repo)
716 s = lfutil.lfdirstatestatus(lfdirstate, repo)
714 s = lfutil.lfdirstatestatus(lfdirstate, repo)
717 lfdirstate.write()
715 lfdirstate.write()
718 for lfile in s.modified:
716 for lfile in s.modified:
719 lfutil.updatestandin(repo, lfutil.standin(lfile))
717 lfutil.updatestandin(repo, lfutil.standin(lfile))
720 for lfile in s.deleted:
718 for lfile in s.deleted:
721 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
719 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
722 os.unlink(repo.wjoin(lfutil.standin(lfile)))
720 os.unlink(repo.wjoin(lfutil.standin(lfile)))
723
721
724 oldstandins = lfutil.getstandinsstate(repo)
722 oldstandins = lfutil.getstandinsstate(repo)
725
723
726 def overridematch(mctx, pats=[], opts={}, globbed=False,
724 def overridematch(mctx, pats=[], opts={}, globbed=False,
727 default='relpath'):
725 default='relpath'):
728 match = oldmatch(mctx, pats, opts, globbed, default)
726 match = oldmatch(mctx, pats, opts, globbed, default)
729 m = copy.copy(match)
727 m = copy.copy(match)
730
728
731 # revert supports recursing into subrepos, and though largefiles
729 # revert supports recursing into subrepos, and though largefiles
732 # currently doesn't work correctly in that case, this match is
730 # currently doesn't work correctly in that case, this match is
733 # called, so the lfdirstate above may not be the correct one for
731 # called, so the lfdirstate above may not be the correct one for
734 # this invocation of match.
732 # this invocation of match.
735 lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
733 lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
736 False)
734 False)
737
735
738 def tostandin(f):
736 def tostandin(f):
739 standin = lfutil.standin(f)
737 standin = lfutil.standin(f)
740 if standin in ctx or standin in mctx:
738 if standin in ctx or standin in mctx:
741 return standin
739 return standin
742 elif standin in repo[None] or lfdirstate[f] == 'r':
740 elif standin in repo[None] or lfdirstate[f] == 'r':
743 return None
741 return None
744 return f
742 return f
745 m._files = [tostandin(f) for f in m._files]
743 m._files = [tostandin(f) for f in m._files]
746 m._files = [f for f in m._files if f is not None]
744 m._files = [f for f in m._files if f is not None]
747 m._fmap = set(m._files)
745 m._fmap = set(m._files)
748 origmatchfn = m.matchfn
746 origmatchfn = m.matchfn
749 def matchfn(f):
747 def matchfn(f):
750 if lfutil.isstandin(f):
748 if lfutil.isstandin(f):
751 return (origmatchfn(lfutil.splitstandin(f)) and
749 return (origmatchfn(lfutil.splitstandin(f)) and
752 (f in ctx or f in mctx))
750 (f in ctx or f in mctx))
753 return origmatchfn(f)
751 return origmatchfn(f)
754 m.matchfn = matchfn
752 m.matchfn = matchfn
755 return m
753 return m
756 oldmatch = installmatchfn(overridematch)
754 oldmatch = installmatchfn(overridematch)
757 try:
755 try:
758 orig(ui, repo, ctx, parents, *pats, **opts)
756 orig(ui, repo, ctx, parents, *pats, **opts)
759 finally:
757 finally:
760 restorematchfn()
758 restorematchfn()
761
759
762 newstandins = lfutil.getstandinsstate(repo)
760 newstandins = lfutil.getstandinsstate(repo)
763 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
761 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
764 # lfdirstate should be 'normallookup'-ed for updated files,
762 # lfdirstate should be 'normallookup'-ed for updated files,
765 # because reverting doesn't touch dirstate for 'normal' files
763 # because reverting doesn't touch dirstate for 'normal' files
766 # when target revision is explicitly specified: in such case,
764 # when target revision is explicitly specified: in such case,
767 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
765 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
768 # of target (standin) file.
766 # of target (standin) file.
769 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
767 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
770 normallookup=True)
768 normallookup=True)
771
769
772 finally:
770 finally:
773 wlock.release()
771 wlock.release()
774
772
775 # after pulling changesets, we need to take some extra care to get
773 # after pulling changesets, we need to take some extra care to get
776 # largefiles updated remotely
774 # largefiles updated remotely
777 def overridepull(orig, ui, repo, source=None, **opts):
775 def overridepull(orig, ui, repo, source=None, **opts):
778 revsprepull = len(repo)
776 revsprepull = len(repo)
779 if not source:
777 if not source:
780 source = 'default'
778 source = 'default'
781 repo.lfpullsource = source
779 repo.lfpullsource = source
782 result = orig(ui, repo, source, **opts)
780 result = orig(ui, repo, source, **opts)
783 revspostpull = len(repo)
781 revspostpull = len(repo)
784 lfrevs = opts.get('lfrev', [])
782 lfrevs = opts.get('lfrev', [])
785 if opts.get('all_largefiles'):
783 if opts.get('all_largefiles'):
786 lfrevs.append('pulled()')
784 lfrevs.append('pulled()')
787 if lfrevs and revspostpull > revsprepull:
785 if lfrevs and revspostpull > revsprepull:
788 numcached = 0
786 numcached = 0
789 repo.firstpulled = revsprepull # for pulled() revset expression
787 repo.firstpulled = revsprepull # for pulled() revset expression
790 try:
788 try:
791 for rev in scmutil.revrange(repo, lfrevs):
789 for rev in scmutil.revrange(repo, lfrevs):
792 ui.note(_('pulling largefiles for revision %s\n') % rev)
790 ui.note(_('pulling largefiles for revision %s\n') % rev)
793 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
791 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
794 numcached += len(cached)
792 numcached += len(cached)
795 finally:
793 finally:
796 del repo.firstpulled
794 del repo.firstpulled
797 ui.status(_("%d largefiles cached\n") % numcached)
795 ui.status(_("%d largefiles cached\n") % numcached)
798 return result
796 return result
799
797
800 def pulledrevsetsymbol(repo, subset, x):
798 def pulledrevsetsymbol(repo, subset, x):
801 """``pulled()``
799 """``pulled()``
802 Changesets that have just been pulled.
800 Changesets that have just been pulled.
803
801
804 Only available with largefiles from pull --lfrev expressions.
802 Only available with largefiles from pull --lfrev expressions.
805
803
806 .. container:: verbose
804 .. container:: verbose
807
805
808 Some examples:
806 Some examples:
809
807
810 - pull largefiles for all new changesets::
808 - pull largefiles for all new changesets::
811
809
812 hg pull --lfrev "pulled()"
810 hg pull --lfrev "pulled()"
813
811
814 - pull largefiles for all new branch heads::
812 - pull largefiles for all new branch heads::
815
813
816 hg pull --lfrev "head(pulled()) and not closed()"
814 hg pull --lfrev "head(pulled()) and not closed()"
817
815
818 """
816 """
819
817
820 try:
818 try:
821 firstpulled = repo.firstpulled
819 firstpulled = repo.firstpulled
822 except AttributeError:
820 except AttributeError:
823 raise util.Abort(_("pulled() only available in --lfrev"))
821 raise util.Abort(_("pulled() only available in --lfrev"))
824 return revset.baseset([r for r in subset if r >= firstpulled])
822 return revset.baseset([r for r in subset if r >= firstpulled])
825
823
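For context, the predicate defined above only becomes usable once it is registered in Mercurial's revset table at extension setup time. A minimal sketch of that wiring, assuming a uisetup-style hook (the module and function names below are illustrative, not a verbatim copy of the extension's setup code):

    # Illustrative sketch: make 'pulled()' resolvable by the revset parser.
    from mercurial import revset
    import overrides

    def uisetup(ui):
        # revset predicates take (repo, subset, x), the same signature
        # as pulledrevsetsymbol above
        revset.symbols['pulled'] = overrides.pulledrevsetsymbol
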
826 def overrideclone(orig, ui, source, dest=None, **opts):
824 def overrideclone(orig, ui, source, dest=None, **opts):
827 d = dest
825 d = dest
828 if d is None:
826 if d is None:
829 d = hg.defaultdest(source)
827 d = hg.defaultdest(source)
830 if opts.get('all_largefiles') and not hg.islocal(d):
828 if opts.get('all_largefiles') and not hg.islocal(d):
831 raise util.Abort(_(
829 raise util.Abort(_(
832 '--all-largefiles is incompatible with non-local destination %s') %
830 '--all-largefiles is incompatible with non-local destination %s') %
833 d)
831 d)
834
832
835 return orig(ui, source, dest, **opts)
833 return orig(ui, source, dest, **opts)
836
834
837 def hgclone(orig, ui, opts, *args, **kwargs):
835 def hgclone(orig, ui, opts, *args, **kwargs):
838 result = orig(ui, opts, *args, **kwargs)
836 result = orig(ui, opts, *args, **kwargs)
839
837
840 if result is not None:
838 if result is not None:
841 sourcerepo, destrepo = result
839 sourcerepo, destrepo = result
842 repo = destrepo.local()
840 repo = destrepo.local()
843
841
844 # When cloning to a remote repo (like through SSH), no repo is available
842 # When cloning to a remote repo (like through SSH), no repo is available
845 # from the peer. Therefore the largefiles can't be downloaded and the
843 # from the peer. Therefore the largefiles can't be downloaded and the
846 # hgrc can't be updated.
844 # hgrc can't be updated.
847 if not repo:
845 if not repo:
848 return result
846 return result
849
847
850 # If largefiles is required for this repo, permanently enable it locally
848 # If largefiles is required for this repo, permanently enable it locally
851 if 'largefiles' in repo.requirements:
849 if 'largefiles' in repo.requirements:
852 fp = repo.vfs('hgrc', 'a', text=True)
850 fp = repo.vfs('hgrc', 'a', text=True)
853 try:
851 try:
854 fp.write('\n[extensions]\nlargefiles=\n')
852 fp.write('\n[extensions]\nlargefiles=\n')
855 finally:
853 finally:
856 fp.close()
854 fp.close()
857
855
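The append above means the cloned repository's .hg/hgrc ends up with a section equivalent to the following, keeping the extension enabled for that clone:

    [extensions]
    largefiles=
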
858 # Caching is implicitly limited to 'rev' option, since the dest repo was
856 # Caching is implicitly limited to 'rev' option, since the dest repo was
859 # truncated at that point. The user may expect a download count with
857 # truncated at that point. The user may expect a download count with
860 # this option, so attempt the download whether or not this is a largefile repo.
858 # this option, so attempt the download whether or not this is a largefile repo.
861 if opts.get('all_largefiles'):
859 if opts.get('all_largefiles'):
862 success, missing = lfcommands.downloadlfiles(ui, repo, None)
860 success, missing = lfcommands.downloadlfiles(ui, repo, None)
863
861
864 if missing != 0:
862 if missing != 0:
865 return None
863 return None
866
864
867 return result
865 return result
868
866
869 def overriderebase(orig, ui, repo, **opts):
867 def overriderebase(orig, ui, repo, **opts):
870 if not util.safehasattr(repo, '_largefilesenabled'):
868 if not util.safehasattr(repo, '_largefilesenabled'):
871 return orig(ui, repo, **opts)
869 return orig(ui, repo, **opts)
872
870
873 resuming = opts.get('continue')
871 resuming = opts.get('continue')
874 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
872 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
875 repo._lfstatuswriters.append(lambda *msg, **opts: None)
873 repo._lfstatuswriters.append(lambda *msg, **opts: None)
876 try:
874 try:
877 return orig(ui, repo, **opts)
875 return orig(ui, repo, **opts)
878 finally:
876 finally:
879 repo._lfstatuswriters.pop()
877 repo._lfstatuswriters.pop()
880 repo._lfcommithooks.pop()
878 repo._lfcommithooks.pop()
881
879
882 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
880 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
883 prefix='', mtime=None, subrepos=None):
881 prefix='', mtime=None, subrepos=None):
884 # No need to lock because we are only reading history and
882 # No need to lock because we are only reading history and
885 # largefile caches, neither of which is modified.
883 # largefile caches, neither of which is modified.
886 lfcommands.cachelfiles(repo.ui, repo, node)
884 lfcommands.cachelfiles(repo.ui, repo, node)
887
885
888 if kind not in archival.archivers:
886 if kind not in archival.archivers:
889 raise util.Abort(_("unknown archive type '%s'") % kind)
887 raise util.Abort(_("unknown archive type '%s'") % kind)
890
888
891 ctx = repo[node]
889 ctx = repo[node]
892
890
893 if kind == 'files':
891 if kind == 'files':
894 if prefix:
892 if prefix:
895 raise util.Abort(
893 raise util.Abort(
896 _('cannot give prefix when archiving to files'))
894 _('cannot give prefix when archiving to files'))
897 else:
895 else:
898 prefix = archival.tidyprefix(dest, kind, prefix)
896 prefix = archival.tidyprefix(dest, kind, prefix)
899
897
900 def write(name, mode, islink, getdata):
898 def write(name, mode, islink, getdata):
901 if matchfn and not matchfn(name):
899 if matchfn and not matchfn(name):
902 return
900 return
903 data = getdata()
901 data = getdata()
904 if decode:
902 if decode:
905 data = repo.wwritedata(name, data)
903 data = repo.wwritedata(name, data)
906 archiver.addfile(prefix + name, mode, islink, data)
904 archiver.addfile(prefix + name, mode, islink, data)
907
905
908 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
906 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
909
907
910 if repo.ui.configbool("ui", "archivemeta", True):
908 if repo.ui.configbool("ui", "archivemeta", True):
911 write('.hg_archival.txt', 0644, False,
909 write('.hg_archival.txt', 0644, False,
912 lambda: archival.buildmetadata(ctx))
910 lambda: archival.buildmetadata(ctx))
913
911
914 for f in ctx:
912 for f in ctx:
915 ff = ctx.flags(f)
913 ff = ctx.flags(f)
916 getdata = ctx[f].data
914 getdata = ctx[f].data
917 if lfutil.isstandin(f):
915 if lfutil.isstandin(f):
918 path = lfutil.findfile(repo, getdata().strip())
916 path = lfutil.findfile(repo, getdata().strip())
919 if path is None:
917 if path is None:
920 raise util.Abort(
918 raise util.Abort(
921 _('largefile %s not found in repo store or system cache')
919 _('largefile %s not found in repo store or system cache')
922 % lfutil.splitstandin(f))
920 % lfutil.splitstandin(f))
923 f = lfutil.splitstandin(f)
921 f = lfutil.splitstandin(f)
924
922
925 def getdatafn():
923 def getdatafn():
926 fd = None
924 fd = None
927 try:
925 try:
928 fd = open(path, 'rb')
926 fd = open(path, 'rb')
929 return fd.read()
927 return fd.read()
930 finally:
928 finally:
931 if fd:
929 if fd:
932 fd.close()
930 fd.close()
933
931
934 getdata = getdatafn
932 getdata = getdatafn
935 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
933 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
936
934
937 if subrepos:
935 if subrepos:
938 for subpath in sorted(ctx.substate):
936 for subpath in sorted(ctx.substate):
939 sub = ctx.sub(subpath)
937 sub = ctx.sub(subpath)
940 submatch = match_.narrowmatcher(subpath, matchfn)
938 submatch = match_.narrowmatcher(subpath, matchfn)
941 sub.archive(archiver, prefix, submatch)
939 sub.archive(archiver, prefix, submatch)
942
940
943 archiver.done()
941 archiver.done()
944
942
945 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
943 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
946 repo._get(repo._state + ('hg',))
944 repo._get(repo._state + ('hg',))
947 rev = repo._state[1]
945 rev = repo._state[1]
948 ctx = repo._repo[rev]
946 ctx = repo._repo[rev]
949
947
950 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
948 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
951
949
952 def write(name, mode, islink, getdata):
950 def write(name, mode, islink, getdata):
953 # At this point, the standin has been replaced with the largefile name,
951 # At this point, the standin has been replaced with the largefile name,
954 # so the normal matcher works here without the lfutil variants.
952 # so the normal matcher works here without the lfutil variants.
955 if match and not match(f):
953 if match and not match(f):
956 return
954 return
957 data = getdata()
955 data = getdata()
958
956
959 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
957 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
960
958
961 for f in ctx:
959 for f in ctx:
962 ff = ctx.flags(f)
960 ff = ctx.flags(f)
963 getdata = ctx[f].data
961 getdata = ctx[f].data
964 if lfutil.isstandin(f):
962 if lfutil.isstandin(f):
965 path = lfutil.findfile(repo._repo, getdata().strip())
963 path = lfutil.findfile(repo._repo, getdata().strip())
966 if path is None:
964 if path is None:
967 raise util.Abort(
965 raise util.Abort(
968 _('largefile %s not found in repo store or system cache')
966 _('largefile %s not found in repo store or system cache')
969 % lfutil.splitstandin(f))
967 % lfutil.splitstandin(f))
970 f = lfutil.splitstandin(f)
968 f = lfutil.splitstandin(f)
971
969
972 def getdatafn():
970 def getdatafn():
973 fd = None
971 fd = None
974 try:
972 try:
975 fd = open(os.path.join(prefix, path), 'rb')
973 fd = open(os.path.join(prefix, path), 'rb')
976 return fd.read()
974 return fd.read()
977 finally:
975 finally:
978 if fd:
976 if fd:
979 fd.close()
977 fd.close()
980
978
981 getdata = getdatafn
979 getdata = getdatafn
982
980
983 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
981 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
984
982
985 for subpath in sorted(ctx.substate):
983 for subpath in sorted(ctx.substate):
986 sub = ctx.sub(subpath)
984 sub = ctx.sub(subpath)
987 submatch = match_.narrowmatcher(subpath, match)
985 submatch = match_.narrowmatcher(subpath, match)
988 sub.archive(archiver, prefix + repo._path + '/', submatch)
986 sub.archive(archiver, prefix + repo._path + '/', submatch)
989
987
990 # If a largefile is modified, the change is not reflected in its
988 # If a largefile is modified, the change is not reflected in its
991 # standin until a commit. cmdutil.bailifchanged() raises an exception
989 # standin until a commit. cmdutil.bailifchanged() raises an exception
992 # if the repo has uncommitted changes. Wrap it to also check if
990 # if the repo has uncommitted changes. Wrap it to also check if
993 # largefiles were changed. This is used by bisect, backout and fetch.
991 # largefiles were changed. This is used by bisect, backout and fetch.
994 def overridebailifchanged(orig, repo, *args, **kwargs):
992 def overridebailifchanged(orig, repo, *args, **kwargs):
995 orig(repo, *args, **kwargs)
993 orig(repo, *args, **kwargs)
996 repo.lfstatus = True
994 repo.lfstatus = True
997 s = repo.status()
995 s = repo.status()
998 repo.lfstatus = False
996 repo.lfstatus = False
999 if s.modified or s.added or s.removed or s.deleted:
997 if s.modified or s.added or s.removed or s.deleted:
1000 raise util.Abort(_('uncommitted changes'))
998 raise util.Abort(_('uncommitted changes'))
1001
999
1002 def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
1000 def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
1003 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1001 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1004 bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
1002 bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
1005 m = composelargefilematcher(match, repo[None].manifest())
1003 m = composelargefilematcher(match, repo[None].manifest())
1006
1004
1007 try:
1005 try:
1008 repo.lfstatus = True
1006 repo.lfstatus = True
1009 s = repo.status(match=m, clean=True)
1007 s = repo.status(match=m, clean=True)
1010 finally:
1008 finally:
1011 repo.lfstatus = False
1009 repo.lfstatus = False
1012 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1010 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1013 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
1011 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
1014
1012
1015 for f in forget:
1013 for f in forget:
1016 if lfutil.standin(f) not in repo.dirstate and not \
1014 if lfutil.standin(f) not in repo.dirstate and not \
1017 repo.wvfs.isdir(lfutil.standin(f)):
1015 repo.wvfs.isdir(lfutil.standin(f)):
1018 ui.warn(_('not removing %s: file is already untracked\n')
1016 ui.warn(_('not removing %s: file is already untracked\n')
1019 % m.rel(f))
1017 % m.rel(f))
1020 bad.append(f)
1018 bad.append(f)
1021
1019
1022 for f in forget:
1020 for f in forget:
1023 if ui.verbose or not m.exact(f):
1021 if ui.verbose or not m.exact(f):
1024 ui.status(_('removing %s\n') % m.rel(f))
1022 ui.status(_('removing %s\n') % m.rel(f))
1025
1023
1026 # Need to lock because standin files are deleted then removed from the
1024 # Need to lock because standin files are deleted then removed from the
1027 # repository and we could race in-between.
1025 # repository and we could race in-between.
1028 wlock = repo.wlock()
1026 wlock = repo.wlock()
1029 try:
1027 try:
1030 lfdirstate = lfutil.openlfdirstate(ui, repo)
1028 lfdirstate = lfutil.openlfdirstate(ui, repo)
1031 for f in forget:
1029 for f in forget:
1032 if lfdirstate[f] == 'a':
1030 if lfdirstate[f] == 'a':
1033 lfdirstate.drop(f)
1031 lfdirstate.drop(f)
1034 else:
1032 else:
1035 lfdirstate.remove(f)
1033 lfdirstate.remove(f)
1036 lfdirstate.write()
1034 lfdirstate.write()
1037 standins = [lfutil.standin(f) for f in forget]
1035 standins = [lfutil.standin(f) for f in forget]
1038 for f in standins:
1036 for f in standins:
1039 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
1037 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
1040 rejected = repo[None].forget(standins)
1038 rejected = repo[None].forget(standins)
1041 finally:
1039 finally:
1042 wlock.release()
1040 wlock.release()
1043
1041
1044 bad.extend(f for f in rejected if f in m.files())
1042 bad.extend(f for f in rejected if f in m.files())
1045 forgot.extend(f for f in forget if f not in rejected)
1043 forgot.extend(f for f in forget if f not in rejected)
1046 return bad, forgot
1044 return bad, forgot
1047
1045
1048 def _getoutgoings(repo, other, missing, addfunc):
1046 def _getoutgoings(repo, other, missing, addfunc):
1049 """get pairs of filename and largefile hash in outgoing revisions
1047 """get pairs of filename and largefile hash in outgoing revisions
1050 in 'missing'.
1048 in 'missing'.
1051
1049
1052 largefiles already existing in the 'other' repository are ignored.
1050 largefiles already existing in the 'other' repository are ignored.
1053
1051
1054 'addfunc' is invoked with each unique pair of filename and
1052 'addfunc' is invoked with each unique pair of filename and
1055 largefile hash value.
1053 largefile hash value.
1056 """
1054 """
1057 knowns = set()
1055 knowns = set()
1058 lfhashes = set()
1056 lfhashes = set()
1059 def dedup(fn, lfhash):
1057 def dedup(fn, lfhash):
1060 k = (fn, lfhash)
1058 k = (fn, lfhash)
1061 if k not in knowns:
1059 if k not in knowns:
1062 knowns.add(k)
1060 knowns.add(k)
1063 lfhashes.add(lfhash)
1061 lfhashes.add(lfhash)
1064 lfutil.getlfilestoupload(repo, missing, dedup)
1062 lfutil.getlfilestoupload(repo, missing, dedup)
1065 if lfhashes:
1063 if lfhashes:
1066 lfexists = basestore._openstore(repo, other).exists(lfhashes)
1064 lfexists = basestore._openstore(repo, other).exists(lfhashes)
1067 for fn, lfhash in knowns:
1065 for fn, lfhash in knowns:
1068 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1066 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1069 addfunc(fn, lfhash)
1067 addfunc(fn, lfhash)
1070
1068
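As a usage sketch (mirroring what the debug branch of outgoinghook below does, so no new API is assumed), a caller with a local repo, a peer 'other', and the list of missing changesets gathers the pending uploads through the callback:

    # Illustrative only: collect outgoing largefile hashes per filename.
    toupload = {}
    def addfunc(fn, lfhash):
        toupload.setdefault(fn, []).append(lfhash)
    _getoutgoings(repo, other, missing, addfunc)
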
1071 def outgoinghook(ui, repo, other, opts, missing):
1069 def outgoinghook(ui, repo, other, opts, missing):
1072 if opts.pop('large', None):
1070 if opts.pop('large', None):
1073 lfhashes = set()
1071 lfhashes = set()
1074 if ui.debugflag:
1072 if ui.debugflag:
1075 toupload = {}
1073 toupload = {}
1076 def addfunc(fn, lfhash):
1074 def addfunc(fn, lfhash):
1077 if fn not in toupload:
1075 if fn not in toupload:
1078 toupload[fn] = []
1076 toupload[fn] = []
1079 toupload[fn].append(lfhash)
1077 toupload[fn].append(lfhash)
1080 lfhashes.add(lfhash)
1078 lfhashes.add(lfhash)
1081 def showhashes(fn):
1079 def showhashes(fn):
1082 for lfhash in sorted(toupload[fn]):
1080 for lfhash in sorted(toupload[fn]):
1083 ui.debug(' %s\n' % (lfhash))
1081 ui.debug(' %s\n' % (lfhash))
1084 else:
1082 else:
1085 toupload = set()
1083 toupload = set()
1086 def addfunc(fn, lfhash):
1084 def addfunc(fn, lfhash):
1087 toupload.add(fn)
1085 toupload.add(fn)
1088 lfhashes.add(lfhash)
1086 lfhashes.add(lfhash)
1089 def showhashes(fn):
1087 def showhashes(fn):
1090 pass
1088 pass
1091 _getoutgoings(repo, other, missing, addfunc)
1089 _getoutgoings(repo, other, missing, addfunc)
1092
1090
1093 if not toupload:
1091 if not toupload:
1094 ui.status(_('largefiles: no files to upload\n'))
1092 ui.status(_('largefiles: no files to upload\n'))
1095 else:
1093 else:
1096 ui.status(_('largefiles to upload (%d entities):\n')
1094 ui.status(_('largefiles to upload (%d entities):\n')
1097 % (len(lfhashes)))
1095 % (len(lfhashes)))
1098 for file in sorted(toupload):
1096 for file in sorted(toupload):
1099 ui.status(lfutil.splitstandin(file) + '\n')
1097 ui.status(lfutil.splitstandin(file) + '\n')
1100 showhashes(file)
1098 showhashes(file)
1101 ui.status('\n')
1099 ui.status('\n')
1102
1100
1103 def summaryremotehook(ui, repo, opts, changes):
1101 def summaryremotehook(ui, repo, opts, changes):
1104 largeopt = opts.get('large', False)
1102 largeopt = opts.get('large', False)
1105 if changes is None:
1103 if changes is None:
1106 if largeopt:
1104 if largeopt:
1107 return (False, True) # only outgoing check is needed
1105 return (False, True) # only outgoing check is needed
1108 else:
1106 else:
1109 return (False, False)
1107 return (False, False)
1110 elif largeopt:
1108 elif largeopt:
1111 url, branch, peer, outgoing = changes[1]
1109 url, branch, peer, outgoing = changes[1]
1112 if peer is None:
1110 if peer is None:
1113 # i18n: column positioning for "hg summary"
1111 # i18n: column positioning for "hg summary"
1114 ui.status(_('largefiles: (no remote repo)\n'))
1112 ui.status(_('largefiles: (no remote repo)\n'))
1115 return
1113 return
1116
1114
1117 toupload = set()
1115 toupload = set()
1118 lfhashes = set()
1116 lfhashes = set()
1119 def addfunc(fn, lfhash):
1117 def addfunc(fn, lfhash):
1120 toupload.add(fn)
1118 toupload.add(fn)
1121 lfhashes.add(lfhash)
1119 lfhashes.add(lfhash)
1122 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1120 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1123
1121
1124 if not toupload:
1122 if not toupload:
1125 # i18n: column positioning for "hg summary"
1123 # i18n: column positioning for "hg summary"
1126 ui.status(_('largefiles: (no files to upload)\n'))
1124 ui.status(_('largefiles: (no files to upload)\n'))
1127 else:
1125 else:
1128 # i18n: column positioning for "hg summary"
1126 # i18n: column positioning for "hg summary"
1129 ui.status(_('largefiles: %d entities for %d files to upload\n')
1127 ui.status(_('largefiles: %d entities for %d files to upload\n')
1130 % (len(lfhashes), len(toupload)))
1128 % (len(lfhashes), len(toupload)))
1131
1129
1132 def overridesummary(orig, ui, repo, *pats, **opts):
1130 def overridesummary(orig, ui, repo, *pats, **opts):
1133 try:
1131 try:
1134 repo.lfstatus = True
1132 repo.lfstatus = True
1135 orig(ui, repo, *pats, **opts)
1133 orig(ui, repo, *pats, **opts)
1136 finally:
1134 finally:
1137 repo.lfstatus = False
1135 repo.lfstatus = False
1138
1136
1139 def scmutiladdremove(orig, repo, matcher, prefix, opts={}, dry_run=None,
1137 def scmutiladdremove(orig, repo, matcher, prefix, opts={}, dry_run=None,
1140 similarity=None):
1138 similarity=None):
1141 if not lfutil.islfilesrepo(repo):
1139 if not lfutil.islfilesrepo(repo):
1142 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1140 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1143 # Get the list of missing largefiles so we can remove them
1141 # Get the list of missing largefiles so we can remove them
1144 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1142 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1145 unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
1143 unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
1146 False, False, False)
1144 False, False, False)
1147
1145
1148 # Call into the normal remove code, but let the original addremove handle
1146 # Call into the normal remove code, but let the original addremove handle
1149 # the removal of the standin. Monkey patching here makes sure
1147 # the removal of the standin. Monkey patching here makes sure
1150 # we don't remove the standin in the largefiles code, preventing a very
1148 # we don't remove the standin in the largefiles code, preventing a very
1151 # confused state later.
1149 # confused state later.
1152 if s.deleted:
1150 if s.deleted:
1153 m = copy.copy(matcher)
1151 m = copy.copy(matcher)
1154
1152
1155 # The m._files and m._map attributes are not changed to the deleted list
1153 # The m._files and m._map attributes are not changed to the deleted list
1156 # because that affects the m.exact() test, which in turn governs whether
1154 # because that affects the m.exact() test, which in turn governs whether
1157 # or not the file name is printed, and how. Simply limit the original
1155 # or not the file name is printed, and how. Simply limit the original
1158 # matches to those in the deleted status list.
1156 # matches to those in the deleted status list.
1159 matchfn = m.matchfn
1157 matchfn = m.matchfn
1160 m.matchfn = lambda f: f in s.deleted and matchfn(f)
1158 m.matchfn = lambda f: f in s.deleted and matchfn(f)
1161
1159
1162 removelargefiles(repo.ui, repo, True, m, **opts)
1160 removelargefiles(repo.ui, repo, True, m, **opts)
1163 # Call into the normal add code, and any files that *should* be added as
1161 # Call into the normal add code, and any files that *should* be added as
1164 # largefiles will be
1162 # largefiles will be
1165 added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
1163 added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
1166 # Now that we've handled largefiles, hand off to the original addremove
1164 # Now that we've handled largefiles, hand off to the original addremove
1167 # function to take care of the rest. Make sure it doesn't do anything with
1165 # function to take care of the rest. Make sure it doesn't do anything with
1168 # largefiles by passing a matcher that will ignore them.
1166 # largefiles by passing a matcher that will ignore them.
1169 matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
1167 matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
1170 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1168 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1171
1169
1172 # Calling purge with --all will cause the largefiles to be deleted.
1170 # Calling purge with --all will cause the largefiles to be deleted.
1173 # Override repo.status to prevent this from happening.
1171 # Override repo.status to prevent this from happening.
1174 def overridepurge(orig, ui, repo, *dirs, **opts):
1172 def overridepurge(orig, ui, repo, *dirs, **opts):
1175 # XXX Monkey patching a repoview will not work. The assigned attribute will
1173 # XXX Monkey patching a repoview will not work. The assigned attribute will
1176 # be set on the unfiltered repo, but we will only look up attributes in the
1174 # be set on the unfiltered repo, but we will only look up attributes in the
1177 # unfiltered repo if the lookup in the repoview object itself fails. As the
1175 # unfiltered repo if the lookup in the repoview object itself fails. As the
1178 # monkey patched method exists on the repoview class the lookup will not
1176 # monkey patched method exists on the repoview class the lookup will not
1179 # fail. As a result, the original version will shadow the monkey patched
1177 # fail. As a result, the original version will shadow the monkey patched
1180 # one, defeating the monkey patch.
1178 # one, defeating the monkey patch.
1181 #
1179 #
1182 # As a workaround we use an unfiltered repo here. We should do something
1180 # As a workaround we use an unfiltered repo here. We should do something
1183 # cleaner instead.
1181 # cleaner instead.
1184 repo = repo.unfiltered()
1182 repo = repo.unfiltered()
1185 oldstatus = repo.status
1183 oldstatus = repo.status
1186 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1184 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1187 clean=False, unknown=False, listsubrepos=False):
1185 clean=False, unknown=False, listsubrepos=False):
1188 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1186 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1189 listsubrepos)
1187 listsubrepos)
1190 lfdirstate = lfutil.openlfdirstate(ui, repo)
1188 lfdirstate = lfutil.openlfdirstate(ui, repo)
1191 unknown = [f for f in r.unknown if lfdirstate[f] == '?']
1189 unknown = [f for f in r.unknown if lfdirstate[f] == '?']
1192 ignored = [f for f in r.ignored if lfdirstate[f] == '?']
1190 ignored = [f for f in r.ignored if lfdirstate[f] == '?']
1193 return scmutil.status(r.modified, r.added, r.removed, r.deleted,
1191 return scmutil.status(r.modified, r.added, r.removed, r.deleted,
1194 unknown, ignored, r.clean)
1192 unknown, ignored, r.clean)
1195 repo.status = overridestatus
1193 repo.status = overridestatus
1196 orig(ui, repo, *dirs, **opts)
1194 orig(ui, repo, *dirs, **opts)
1197 repo.status = oldstatus
1195 repo.status = oldstatus
1198 def overriderollback(orig, ui, repo, **opts):
1196 def overriderollback(orig, ui, repo, **opts):
1199 wlock = repo.wlock()
1197 wlock = repo.wlock()
1200 try:
1198 try:
1201 before = repo.dirstate.parents()
1199 before = repo.dirstate.parents()
1202 orphans = set(f for f in repo.dirstate
1200 orphans = set(f for f in repo.dirstate
1203 if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
1201 if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
1204 result = orig(ui, repo, **opts)
1202 result = orig(ui, repo, **opts)
1205 after = repo.dirstate.parents()
1203 after = repo.dirstate.parents()
1206 if before == after:
1204 if before == after:
1207 return result # no need to restore standins
1205 return result # no need to restore standins
1208
1206
1209 pctx = repo['.']
1207 pctx = repo['.']
1210 for f in repo.dirstate:
1208 for f in repo.dirstate:
1211 if lfutil.isstandin(f):
1209 if lfutil.isstandin(f):
1212 orphans.discard(f)
1210 orphans.discard(f)
1213 if repo.dirstate[f] == 'r':
1211 if repo.dirstate[f] == 'r':
1214 repo.wvfs.unlinkpath(f, ignoremissing=True)
1212 repo.wvfs.unlinkpath(f, ignoremissing=True)
1215 elif f in pctx:
1213 elif f in pctx:
1216 fctx = pctx[f]
1214 fctx = pctx[f]
1217 repo.wwrite(f, fctx.data(), fctx.flags())
1215 repo.wwrite(f, fctx.data(), fctx.flags())
1218 else:
1216 else:
1219 # content of standin is not so important in 'a',
1217 # content of standin is not so important in 'a',
1220 # 'm' or 'n' (coming from the 2nd parent) cases
1218 # 'm' or 'n' (coming from the 2nd parent) cases
1221 lfutil.writestandin(repo, f, '', False)
1219 lfutil.writestandin(repo, f, '', False)
1222 for standin in orphans:
1220 for standin in orphans:
1223 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1221 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1224
1222
1225 lfdirstate = lfutil.openlfdirstate(ui, repo)
1223 lfdirstate = lfutil.openlfdirstate(ui, repo)
1226 orphans = set(lfdirstate)
1224 orphans = set(lfdirstate)
1227 lfiles = lfutil.listlfiles(repo)
1225 lfiles = lfutil.listlfiles(repo)
1228 for file in lfiles:
1226 for file in lfiles:
1229 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1227 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1230 orphans.discard(file)
1228 orphans.discard(file)
1231 for lfile in orphans:
1229 for lfile in orphans:
1232 lfdirstate.drop(lfile)
1230 lfdirstate.drop(lfile)
1233 lfdirstate.write()
1231 lfdirstate.write()
1234 finally:
1232 finally:
1235 wlock.release()
1233 wlock.release()
1236 return result
1234 return result
1237
1235
1238 def overridetransplant(orig, ui, repo, *revs, **opts):
1236 def overridetransplant(orig, ui, repo, *revs, **opts):
1239 resuming = opts.get('continue')
1237 resuming = opts.get('continue')
1240 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1238 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1241 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1239 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1242 try:
1240 try:
1243 result = orig(ui, repo, *revs, **opts)
1241 result = orig(ui, repo, *revs, **opts)
1244 finally:
1242 finally:
1245 repo._lfstatuswriters.pop()
1243 repo._lfstatuswriters.pop()
1246 repo._lfcommithooks.pop()
1244 repo._lfcommithooks.pop()
1247 return result
1245 return result
1248
1246
1249 def overridecat(orig, ui, repo, file1, *pats, **opts):
1247 def overridecat(orig, ui, repo, file1, *pats, **opts):
1250 ctx = scmutil.revsingle(repo, opts.get('rev'))
1248 ctx = scmutil.revsingle(repo, opts.get('rev'))
1251 err = 1
1249 err = 1
1252 notbad = set()
1250 notbad = set()
1253 m = scmutil.match(ctx, (file1,) + pats, opts)
1251 m = scmutil.match(ctx, (file1,) + pats, opts)
1254 origmatchfn = m.matchfn
1252 origmatchfn = m.matchfn
1255 def lfmatchfn(f):
1253 def lfmatchfn(f):
1256 if origmatchfn(f):
1254 if origmatchfn(f):
1257 return True
1255 return True
1258 lf = lfutil.splitstandin(f)
1256 lf = lfutil.splitstandin(f)
1259 if lf is None:
1257 if lf is None:
1260 return False
1258 return False
1261 notbad.add(lf)
1259 notbad.add(lf)
1262 return origmatchfn(lf)
1260 return origmatchfn(lf)
1263 m.matchfn = lfmatchfn
1261 m.matchfn = lfmatchfn
1264 origbadfn = m.bad
1262 origbadfn = m.bad
1265 def lfbadfn(f, msg):
1263 def lfbadfn(f, msg):
1266 if f not in notbad:
1264 if f not in notbad:
1267 origbadfn(f, msg)
1265 origbadfn(f, msg)
1268 m.bad = lfbadfn
1266 m.bad = lfbadfn
1269
1267
1270 origvisitdirfn = m.visitdir
1268 origvisitdirfn = m.visitdir
1271 def lfvisitdirfn(dir):
1269 def lfvisitdirfn(dir):
1272 if dir == lfutil.shortname:
1270 if dir == lfutil.shortname:
1273 return True
1271 return True
1274 ret = origvisitdirfn(dir)
1272 ret = origvisitdirfn(dir)
1275 if ret:
1273 if ret:
1276 return ret
1274 return ret
1277 lf = lfutil.splitstandin(dir)
1275 lf = lfutil.splitstandin(dir)
1278 if lf is None:
1276 if lf is None:
1279 return False
1277 return False
1280 return origvisitdirfn(lf)
1278 return origvisitdirfn(lf)
1281 m.visitdir = lfvisitdirfn
1279 m.visitdir = lfvisitdirfn
1282
1280
1283 for f in ctx.walk(m):
1281 for f in ctx.walk(m):
1284 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1282 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1285 pathname=f)
1283 pathname=f)
1286 lf = lfutil.splitstandin(f)
1284 lf = lfutil.splitstandin(f)
1287 if lf is None or origmatchfn(f):
1285 if lf is None or origmatchfn(f):
1288 # duplicating unreachable code from commands.cat
1286 # duplicating unreachable code from commands.cat
1289 data = ctx[f].data()
1287 data = ctx[f].data()
1290 if opts.get('decode'):
1288 if opts.get('decode'):
1291 data = repo.wwritedata(f, data)
1289 data = repo.wwritedata(f, data)
1292 fp.write(data)
1290 fp.write(data)
1293 else:
1291 else:
1294 hash = lfutil.readstandin(repo, lf, ctx.rev())
1292 hash = lfutil.readstandin(repo, lf, ctx.rev())
1295 if not lfutil.inusercache(repo.ui, hash):
1293 if not lfutil.inusercache(repo.ui, hash):
1296 store = basestore._openstore(repo)
1294 store = basestore._openstore(repo)
1297 success, missing = store.get([(lf, hash)])
1295 success, missing = store.get([(lf, hash)])
1298 if len(success) != 1:
1296 if len(success) != 1:
1299 raise util.Abort(
1297 raise util.Abort(
1300 _('largefile %s is not in cache and could not be '
1298 _('largefile %s is not in cache and could not be '
1301 'downloaded') % lf)
1299 'downloaded') % lf)
1302 path = lfutil.usercachepath(repo.ui, hash)
1300 path = lfutil.usercachepath(repo.ui, hash)
1303 fpin = open(path, "rb")
1301 fpin = open(path, "rb")
1304 for chunk in util.filechunkiter(fpin, 128 * 1024):
1302 for chunk in util.filechunkiter(fpin, 128 * 1024):
1305 fp.write(chunk)
1303 fp.write(chunk)
1306 fpin.close()
1304 fpin.close()
1307 fp.close()
1305 fp.close()
1308 err = 0
1306 err = 0
1309 return err
1307 return err
1310
1308
1311 def mergeupdate(orig, repo, node, branchmerge, force, partial,
1309 def mergeupdate(orig, repo, node, branchmerge, force, partial,
1312 *args, **kwargs):
1310 *args, **kwargs):
1313 wlock = repo.wlock()
1311 wlock = repo.wlock()
1314 try:
1312 try:
1315 # branch | | |
1313 # branch | | |
1316 # merge | force | partial | action
1314 # merge | force | partial | action
1317 # -------+-------+---------+--------------
1315 # -------+-------+---------+--------------
1318 # x | x | x | linear-merge
1316 # x | x | x | linear-merge
1319 # o | x | x | branch-merge
1317 # o | x | x | branch-merge
1320 # x | o | x | overwrite (as clean update)
1318 # x | o | x | overwrite (as clean update)
1321 # o | o | x | force-branch-merge (*1)
1319 # o | o | x | force-branch-merge (*1)
1322 # x | x | o | (*)
1320 # x | x | o | (*)
1323 # o | x | o | (*)
1321 # o | x | o | (*)
1324 # x | o | o | overwrite (as revert)
1322 # x | o | o | overwrite (as revert)
1325 # o | o | o | (*)
1323 # o | o | o | (*)
1326 #
1324 #
1327 # (*) don't care
1325 # (*) don't care
1328 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1326 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1329
1327
1330 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1328 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1331 unsure, s = lfdirstate.status(match_.always(repo.root,
1329 unsure, s = lfdirstate.status(match_.always(repo.root,
1332 repo.getcwd()),
1330 repo.getcwd()),
1333 [], False, False, False)
1331 [], False, False, False)
1334 pctx = repo['.']
1332 pctx = repo['.']
1335 for lfile in unsure + s.modified:
1333 for lfile in unsure + s.modified:
1336 lfileabs = repo.wvfs.join(lfile)
1334 lfileabs = repo.wvfs.join(lfile)
1337 if not os.path.exists(lfileabs):
1335 if not os.path.exists(lfileabs):
1338 continue
1336 continue
1339 lfhash = lfutil.hashrepofile(repo, lfile)
1337 lfhash = lfutil.hashrepofile(repo, lfile)
1340 standin = lfutil.standin(lfile)
1338 standin = lfutil.standin(lfile)
1341 lfutil.writestandin(repo, standin, lfhash,
1339 lfutil.writestandin(repo, standin, lfhash,
1342 lfutil.getexecutable(lfileabs))
1340 lfutil.getexecutable(lfileabs))
1343 if (standin in pctx and
1341 if (standin in pctx and
1344 lfhash == lfutil.readstandin(repo, lfile, '.')):
1342 lfhash == lfutil.readstandin(repo, lfile, '.')):
1345 lfdirstate.normal(lfile)
1343 lfdirstate.normal(lfile)
1346 for lfile in s.added:
1344 for lfile in s.added:
1347 lfutil.updatestandin(repo, lfutil.standin(lfile))
1345 lfutil.updatestandin(repo, lfutil.standin(lfile))
1348 lfdirstate.write()
1346 lfdirstate.write()
1349
1347
1350 oldstandins = lfutil.getstandinsstate(repo)
1348 oldstandins = lfutil.getstandinsstate(repo)
1351
1349
1352 result = orig(repo, node, branchmerge, force, partial, *args, **kwargs)
1350 result = orig(repo, node, branchmerge, force, partial, *args, **kwargs)
1353
1351
1354 newstandins = lfutil.getstandinsstate(repo)
1352 newstandins = lfutil.getstandinsstate(repo)
1355 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1353 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1356 if branchmerge or force or partial:
1354 if branchmerge or force or partial:
1357 filelist.extend(s.deleted + s.removed)
1355 filelist.extend(s.deleted + s.removed)
1358
1356
1359 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1357 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1360 normallookup=partial)
1358 normallookup=partial)
1361
1359
1362 return result
1360 return result
1363 finally:
1361 finally:
1364 wlock.release()
1362 wlock.release()
1365
1363
1366 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1364 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1367 result = orig(repo, files, *args, **kwargs)
1365 result = orig(repo, files, *args, **kwargs)
1368
1366
1369 filelist = [lfutil.splitstandin(f) for f in files if lfutil.isstandin(f)]
1367 filelist = [lfutil.splitstandin(f) for f in files if lfutil.isstandin(f)]
1370 if filelist:
1368 if filelist:
1371 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1369 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1372 printmessage=False, normallookup=True)
1370 printmessage=False, normallookup=True)
1373
1371
1374 return result
1372 return result
@@ -1,176 +1,175
1 # Copyright 2011 Fog Creek Software
1 # Copyright 2011 Fog Creek Software
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 import os
6 import os
7 import urllib2
7 import urllib2
8 import re
8 import re
9
9
10 from mercurial import error, httppeer, util, wireproto
10 from mercurial import error, httppeer, util, wireproto
11 from mercurial.i18n import _
11 from mercurial.i18n import _
12
12
13 import lfutil
13 import lfutil
14
14
15 LARGEFILES_REQUIRED_MSG = ('\nThis repository uses the largefiles extension.'
15 LARGEFILES_REQUIRED_MSG = ('\nThis repository uses the largefiles extension.'
16 '\n\nPlease enable it in your Mercurial config '
16 '\n\nPlease enable it in your Mercurial config '
17 'file.\n')
17 'file.\n')
18
18
19 # these will all be replaced by largefiles.uisetup
19 # these will all be replaced by largefiles.uisetup
20 capabilitiesorig = None
20 capabilitiesorig = None
21 ssholdcallstream = None
21 ssholdcallstream = None
22 httpoldcallstream = None
22 httpoldcallstream = None
23
23
24 def putlfile(repo, proto, sha):
24 def putlfile(repo, proto, sha):
25 '''Put a largefile into a repository's local store and into the
25 '''Put a largefile into a repository's local store and into the
26 user cache.'''
26 user cache.'''
27 proto.redirect()
27 proto.redirect()
28
28
29 path = lfutil.storepath(repo, sha)
29 path = lfutil.storepath(repo, sha)
30 util.makedirs(os.path.dirname(path))
30 util.makedirs(os.path.dirname(path))
31 tmpfp = util.atomictempfile(path, createmode=repo.store.createmode)
31 tmpfp = util.atomictempfile(path, createmode=repo.store.createmode)
32
32
33 try:
33 try:
34 try:
34 proto.getfile(tmpfp)
35 proto.getfile(tmpfp)
35 tmpfp._fp.seek(0)
36 tmpfp._fp.seek(0)
36 if sha != lfutil.hexsha1(tmpfp._fp):
37 if sha != lfutil.hexsha1(tmpfp._fp):
37 raise IOError(0, _('largefile contents do not match hash'))
38 raise IOError(0, _('largefile contents do not match hash'))
38 tmpfp.close()
39 tmpfp.close()
39 lfutil.linktousercache(repo, sha)
40 lfutil.linktousercache(repo, sha)
40 except IOError, e:
41 except IOError, e:
41 repo.ui.warn(_('largefiles: failed to put %s into store: %s\n') %
42 repo.ui.warn(_('largefiles: failed to put %s into store: %s\n') %
42 (sha, e.strerror))
43 (sha, e.strerror))
43 return wireproto.pushres(1)
44 return wireproto.pushres(1)
45 finally:
44 finally:
46 tmpfp.discard()
45 tmpfp.discard()
47
46
48 return wireproto.pushres(0)
47 return wireproto.pushres(0)
49
48
50 def getlfile(repo, proto, sha):
49 def getlfile(repo, proto, sha):
51 '''Retrieve a largefile from the repository-local cache or system
50 '''Retrieve a largefile from the repository-local cache or system
52 cache.'''
51 cache.'''
53 filename = lfutil.findfile(repo, sha)
52 filename = lfutil.findfile(repo, sha)
54 if not filename:
53 if not filename:
55 raise util.Abort(_('requested largefile %s not present in cache') % sha)
54 raise util.Abort(_('requested largefile %s not present in cache') % sha)
56 f = open(filename, 'rb')
55 f = open(filename, 'rb')
57 length = os.fstat(f.fileno())[6]
56 length = os.fstat(f.fileno())[6]
58
57
59 # Since we can't set an HTTP content-length header here, and
58 # Since we can't set an HTTP content-length header here, and
60 # Mercurial core provides no way to give the length of a streamres
59 # Mercurial core provides no way to give the length of a streamres
61 # (and reading the entire file into RAM would be ill-advised), we
60 # (and reading the entire file into RAM would be ill-advised), we
62 # just send the length on the first line of the response, like the
61 # just send the length on the first line of the response, like the
63 # ssh proto does for string responses.
62 # ssh proto does for string responses.
64 def generator():
63 def generator():
65 yield '%d\n' % length
64 yield '%d\n' % length
66 for chunk in util.filechunkiter(f):
65 for chunk in util.filechunkiter(f):
67 yield chunk
66 yield chunk
68 return wireproto.streamres(generator())
67 return wireproto.streamres(generator())
69
68
70 def statlfile(repo, proto, sha):
69 def statlfile(repo, proto, sha):
71 '''Return '2\n' if the largefile is missing, '0\n' if it seems to be in
70 '''Return '2\n' if the largefile is missing, '0\n' if it seems to be in
72 good condition.
71 good condition.
73
72
74 The value 1 is reserved for mismatched checksum, but that is too expensive
73 The value 1 is reserved for mismatched checksum, but that is too expensive
75 to be verified on every stat and must be caught by running 'hg verify'
74 to be verified on every stat and must be caught by running 'hg verify'
76 server side.'''
75 server side.'''
77 filename = lfutil.findfile(repo, sha)
76 filename = lfutil.findfile(repo, sha)
78 if not filename:
77 if not filename:
79 return '2\n'
78 return '2\n'
80 return '0\n'
79 return '0\n'
81
80
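For readers following the wire protocol, the single-character results returned above are interpreted on the client side roughly as follows (a hedged sketch; the helper below is hypothetical and not part of the extension):

    # Hypothetical helper mapping statlfile result codes to their meaning.
    def interpretstat(code):
        return {
            0: 'present and assumed good',
            1: 'reserved: checksum mismatch (only caught by hg verify)',
            2: 'missing from the server',
        }.get(code, 'unexpected response')
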
82 def wirereposetup(ui, repo):
81 def wirereposetup(ui, repo):
83 class lfileswirerepository(repo.__class__):
82 class lfileswirerepository(repo.__class__):
84 def putlfile(self, sha, fd):
83 def putlfile(self, sha, fd):
85 # unfortunately, httprepository._callpush tries to convert its
84 # unfortunately, httprepository._callpush tries to convert its
86 # input file-like into a bundle before sending it, so we can't use
85 # input file-like into a bundle before sending it, so we can't use
87 # it ...
86 # it ...
88 if issubclass(self.__class__, httppeer.httppeer):
87 if issubclass(self.__class__, httppeer.httppeer):
89 res = None
88 res = None
90 try:
89 try:
91 res = self._call('putlfile', data=fd, sha=sha,
90 res = self._call('putlfile', data=fd, sha=sha,
92 headers={'content-type':'application/mercurial-0.1'})
91 headers={'content-type':'application/mercurial-0.1'})
93 d, output = res.split('\n', 1)
92 d, output = res.split('\n', 1)
94 for l in output.splitlines(True):
93 for l in output.splitlines(True):
95 self.ui.warn(_('remote: '), l) # assume l ends with \n
94 self.ui.warn(_('remote: '), l) # assume l ends with \n
96 return int(d)
95 return int(d)
97 except (ValueError, urllib2.HTTPError):
96 except (ValueError, urllib2.HTTPError):
98 self.ui.warn(_('unexpected putlfile response: %r\n') % res)
97 self.ui.warn(_('unexpected putlfile response: %r\n') % res)
99 return 1
98 return 1
100 # ... but we can't use sshrepository._call because the data=
99 # ... but we can't use sshrepository._call because the data=
101 # argument won't get sent, and _callpush does exactly what we want
100 # argument won't get sent, and _callpush does exactly what we want
102 # in this case: send the data straight through
101 # in this case: send the data straight through
103 else:
102 else:
104 try:
103 try:
105 ret, output = self._callpush("putlfile", fd, sha=sha)
104 ret, output = self._callpush("putlfile", fd, sha=sha)
106 if ret == "":
105 if ret == "":
107 raise error.ResponseError(_('putlfile failed:'),
106 raise error.ResponseError(_('putlfile failed:'),
108 output)
107 output)
109 return int(ret)
108 return int(ret)
110 except IOError:
109 except IOError:
111 return 1
110 return 1
112 except ValueError:
111 except ValueError:
113 raise error.ResponseError(
112 raise error.ResponseError(
114 _('putlfile failed (unexpected response):'), ret)
113 _('putlfile failed (unexpected response):'), ret)
115
114
116 def getlfile(self, sha):
115 def getlfile(self, sha):
117 """returns an iterable with the chunks of the file with sha sha"""
116 """returns an iterable with the chunks of the file with sha sha"""
118 stream = self._callstream("getlfile", sha=sha)
117 stream = self._callstream("getlfile", sha=sha)
119 length = stream.readline()
118 length = stream.readline()
120 try:
119 try:
121 length = int(length)
120 length = int(length)
122 except ValueError:
121 except ValueError:
123 self._abort(error.ResponseError(_("unexpected response:"),
122 self._abort(error.ResponseError(_("unexpected response:"),
124 length))
123 length))
125
124
126 # SSH streams will block if reading more than length
125 # SSH streams will block if reading more than length
127 for chunk in util.filechunkiter(stream, 128 * 1024, length):
126 for chunk in util.filechunkiter(stream, 128 * 1024, length):
128 yield chunk
127 yield chunk
129 # HTTP streams must hit the end to process the last empty
128 # HTTP streams must hit the end to process the last empty
130 # chunk of Chunked-Encoding so the connection can be reused.
129 # chunk of Chunked-Encoding so the connection can be reused.
131 if issubclass(self.__class__, httppeer.httppeer):
130 if issubclass(self.__class__, httppeer.httppeer):
132 chunk = stream.read(1)
131 chunk = stream.read(1)
133 if chunk:
132 if chunk:
134 self._abort(error.ResponseError(_("unexpected response:"),
133 self._abort(error.ResponseError(_("unexpected response:"),
135 chunk))
134 chunk))
136
135
137 @wireproto.batchable
136 @wireproto.batchable
138 def statlfile(self, sha):
137 def statlfile(self, sha):
139 f = wireproto.future()
138 f = wireproto.future()
140 result = {'sha': sha}
139 result = {'sha': sha}
141 yield result, f
140 yield result, f
142 try:
141 try:
143 yield int(f.value)
142 yield int(f.value)
144 except (ValueError, urllib2.HTTPError):
143 except (ValueError, urllib2.HTTPError):
145 # If the server returns anything but an integer followed by a
144 # If the server returns anything but an integer followed by a
146 # newline, it's not speaking our language; if we get
145 # newline, it's not speaking our language; if we get
147 # an HTTP error, we can't be sure the largefile is present;
146 # an HTTP error, we can't be sure the largefile is present;
148 # either way, consider it missing.
147 # either way, consider it missing.
149 yield 2
148 yield 2
150
149
151 repo.__class__ = lfileswirerepository
150 repo.__class__ = lfileswirerepository
152
151
153 # advertise the largefiles=serve capability
152 # advertise the largefiles=serve capability
154 def capabilities(repo, proto):
153 def capabilities(repo, proto):
155 return capabilitiesorig(repo, proto) + ' largefiles=serve'
154 return capabilitiesorig(repo, proto) + ' largefiles=serve'
156
155
157 def heads(repo, proto):
156 def heads(repo, proto):
158 if lfutil.islfilesrepo(repo):
157 if lfutil.islfilesrepo(repo):
159 return wireproto.ooberror(LARGEFILES_REQUIRED_MSG)
158 return wireproto.ooberror(LARGEFILES_REQUIRED_MSG)
160 return wireproto.heads(repo, proto)
159 return wireproto.heads(repo, proto)
161
160
162 def sshrepocallstream(self, cmd, **args):
161 def sshrepocallstream(self, cmd, **args):
163 if cmd == 'heads' and self.capable('largefiles'):
162 if cmd == 'heads' and self.capable('largefiles'):
164 cmd = 'lheads'
163 cmd = 'lheads'
165 if cmd == 'batch' and self.capable('largefiles'):
164 if cmd == 'batch' and self.capable('largefiles'):
166 args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
165 args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
167 return ssholdcallstream(self, cmd, **args)
166 return ssholdcallstream(self, cmd, **args)
168
167
169 headsre = re.compile(r'(^|;)heads\b')
168 headsre = re.compile(r'(^|;)heads\b')
170
169
171 def httprepocallstream(self, cmd, **args):
170 def httprepocallstream(self, cmd, **args):
172 if cmd == 'heads' and self.capable('largefiles'):
171 if cmd == 'heads' and self.capable('largefiles'):
173 cmd = 'lheads'
172 cmd = 'lheads'
174 if cmd == 'batch' and self.capable('largefiles'):
173 if cmd == 'batch' and self.capable('largefiles'):
175 args['cmds'] = headsre.sub('lheads', args['cmds'])
174 args['cmds'] = headsre.sub('lheads', args['cmds'])
176 return httpoldcallstream(self, cmd, **args)
175 return httpoldcallstream(self, cmd, **args)
@@ -1,99 +1,98
1 # Copyright 2010-2011 Fog Creek Software
1 # Copyright 2010-2011 Fog Creek Software
2 # Copyright 2010-2011 Unity Technologies
2 # Copyright 2010-2011 Unity Technologies
3 #
3 #
4 # This software may be used and distributed according to the terms of the
4 # This software may be used and distributed according to the terms of the
5 # GNU General Public License version 2 or any later version.
5 # GNU General Public License version 2 or any later version.
6
6
7 '''remote largefile store; the base class for wirestore'''
7 '''remote largefile store; the base class for wirestore'''
8
8
9 import urllib2
9 import urllib2
10
10
11 from mercurial import util, wireproto
11 from mercurial import util, wireproto
12 from mercurial.i18n import _
12 from mercurial.i18n import _
13
13
14 import lfutil
14 import lfutil
15 import basestore
15 import basestore
16
16
17 class remotestore(basestore.basestore):
17 class remotestore(basestore.basestore):
18 '''a largefile store accessed over a network'''
18 '''a largefile store accessed over a network'''
19 def __init__(self, ui, repo, url):
19 def __init__(self, ui, repo, url):
20 super(remotestore, self).__init__(ui, repo, url)
20 super(remotestore, self).__init__(ui, repo, url)
21
21
22 def put(self, source, hash):
22 def put(self, source, hash):
23 if self.sendfile(source, hash):
23 if self.sendfile(source, hash):
24 raise util.Abort(
24 raise util.Abort(
25 _('remotestore: could not put %s to remote store %s')
25 _('remotestore: could not put %s to remote store %s')
26 % (source, util.hidepassword(self.url)))
26 % (source, util.hidepassword(self.url)))
27 self.ui.debug(
27 self.ui.debug(
28 _('remotestore: put %s to remote store %s\n')
28 _('remotestore: put %s to remote store %s\n')
29 % (source, util.hidepassword(self.url)))
29 % (source, util.hidepassword(self.url)))
30
30
31 def exists(self, hashes):
31 def exists(self, hashes):
32 return dict((h, s == 0) for (h, s) in # dict-from-generator
32 return dict((h, s == 0) for (h, s) in # dict-from-generator
33 self._stat(hashes).iteritems())
33 self._stat(hashes).iteritems())
34
34
35 def sendfile(self, filename, hash):
35 def sendfile(self, filename, hash):
36 self.ui.debug('remotestore: sendfile(%s, %s)\n' % (filename, hash))
36 self.ui.debug('remotestore: sendfile(%s, %s)\n' % (filename, hash))
37 fd = None
37 fd = None
38 try:
38 try:
39 try:
39 fd = lfutil.httpsendfile(self.ui, filename)
40 fd = lfutil.httpsendfile(self.ui, filename)
41 except IOError, e:
42 raise util.Abort(
43 _('remotestore: could not open file %s: %s')
44 % (filename, str(e)))
45 return self._put(hash, fd)
40 return self._put(hash, fd)
41 except IOError, e:
42 raise util.Abort(
43 _('remotestore: could not open file %s: %s')
44 % (filename, str(e)))
46 finally:
45 finally:
47 if fd:
46 if fd:
48 fd.close()
47 fd.close()
49
48
50 def _getfile(self, tmpfile, filename, hash):
49 def _getfile(self, tmpfile, filename, hash):
51 try:
50 try:
52 chunks = self._get(hash)
51 chunks = self._get(hash)
53 except urllib2.HTTPError, e:
52 except urllib2.HTTPError, e:
54 # 401s get converted to util.Aborts; everything else is fine being
53 # 401s get converted to util.Aborts; everything else is fine being
55 # turned into a StoreError
54 # turned into a StoreError
56 raise basestore.StoreError(filename, hash, self.url, str(e))
55 raise basestore.StoreError(filename, hash, self.url, str(e))
57 except urllib2.URLError, e:
56 except urllib2.URLError, e:
58 # This usually indicates a connection problem, so don't
57 # This usually indicates a connection problem, so don't
59 # keep trying with the other files... they will probably
58 # keep trying with the other files... they will probably
60 # all fail too.
59 # all fail too.
61 raise util.Abort('%s: %s' %
60 raise util.Abort('%s: %s' %
62 (util.hidepassword(self.url), e.reason))
61 (util.hidepassword(self.url), e.reason))
63 except IOError, e:
62 except IOError, e:
64 raise basestore.StoreError(filename, hash, self.url, str(e))
63 raise basestore.StoreError(filename, hash, self.url, str(e))
65
64
66 return lfutil.copyandhash(chunks, tmpfile)
65 return lfutil.copyandhash(chunks, tmpfile)
67
66
68 def _verifyfile(self, cctx, cset, contents, standin, verified):
67 def _verifyfile(self, cctx, cset, contents, standin, verified):
69 filename = lfutil.splitstandin(standin)
68 filename = lfutil.splitstandin(standin)
70 if not filename:
69 if not filename:
71 return False
70 return False
72 fctx = cctx[standin]
71 fctx = cctx[standin]
73 key = (filename, fctx.filenode())
72 key = (filename, fctx.filenode())
74 if key in verified:
73 if key in verified:
75 return False
74 return False
76
75
77 verified.add(key)
76 verified.add(key)
78
77
79 expecthash = fctx.data()[0:40]
78 expecthash = fctx.data()[0:40]
80 stat = self._stat([expecthash])[expecthash]
79 stat = self._stat([expecthash])[expecthash]
81 if not stat:
80 if not stat:
82 return False
81 return False
83 elif stat == 1:
82 elif stat == 1:
84 self.ui.warn(
83 self.ui.warn(
85 _('changeset %s: %s: contents differ\n')
84 _('changeset %s: %s: contents differ\n')
86 % (cset, filename))
85 % (cset, filename))
87 return True # failed
86 return True # failed
88 elif stat == 2:
87 elif stat == 2:
89 self.ui.warn(
88 self.ui.warn(
90 _('changeset %s: %s missing\n')
89 _('changeset %s: %s missing\n')
91 % (cset, filename))
90 % (cset, filename))
92 return True # failed
91 return True # failed
93 else:
92 else:
94 raise RuntimeError('verify failed: unexpected response from '
93 raise RuntimeError('verify failed: unexpected response from '
95 'statlfile (%r)' % stat)
94 'statlfile (%r)' % stat)
96
95
97 def batch(self):
96 def batch(self):
98 '''Support for remote batching.'''
97 '''Support for remote batching.'''
99 return wireproto.remotebatch(self)
98 return wireproto.remotebatch(self)