largefiles: restore archiving largefiles with hgweb (issue4859)...
Matt Harbison, r26417:9a466b9f (3.5.2 stable)
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''Overridden Mercurial commands and functions for the largefiles extension'''

import os
import copy

from mercurial import hg, util, cmdutil, scmutil, match as match_, \
        archival, pathutil, revset
from mercurial.i18n import _

import lfutil
import lfcommands
import basestore

# -- Utility functions: commonly/repeatedly needed functionality ---------------

def composelargefilematcher(match, manifest):
    '''create a matcher that matches only the largefiles in the original
    matcher'''
    m = copy.copy(match)
    lfile = lambda f: lfutil.standin(f) in manifest
    m._files = filter(lfile, m._files)
    m._fileroots = set(m._files)
    m._always = False
    origmatchfn = m.matchfn
    m.matchfn = lambda f: lfile(f) and origmatchfn(f)
    return m

def composenormalfilematcher(match, manifest, exclude=None):
    excluded = set()
    if exclude is not None:
        excluded.update(exclude)

    m = copy.copy(match)
    notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
                              manifest or f in excluded)
    m._files = filter(notlfile, m._files)
    m._fileroots = set(m._files)
    m._always = False
    origmatchfn = m.matchfn
    m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
    return m

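# For illustration: a "standin" is the small tracking file kept under
# lfutil.shortname ('.hglf/') in place of the real content, e.g.
# lfutil.standin('data/big.bin') -> '.hglf/data/big.bin' and
# lfutil.splitstandin('.hglf/data/big.bin') -> 'data/big.bin'.  The two
# helpers above merely restrict an existing matcher to one side of that
# mapping.
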
def installnormalfilesmatchfn(manifest):
    '''installmatchfn with a matchfn that ignores all largefiles'''
    def overridematch(ctx, pats=[], opts={}, globbed=False,
                      default='relpath', badfn=None):
        match = oldmatch(ctx, pats, opts, globbed, default, badfn=badfn)
        return composenormalfilematcher(match, manifest)
    oldmatch = installmatchfn(overridematch)

def installmatchfn(f):
    '''monkey patch the scmutil module with a custom match function.
    Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
    oldmatch = scmutil.match
    setattr(f, 'oldmatch', oldmatch)
    scmutil.match = f
    return oldmatch

def restorematchfn():
    '''restores scmutil.match to what it was before installmatchfn
    was called. no-op if scmutil.match is its original function.

    Note that n calls to installmatchfn will require n calls to
    restore the original matchfn.'''
    scmutil.match = getattr(scmutil.match, 'oldmatch')

def installmatchandpatsfn(f):
    oldmatchandpats = scmutil.matchandpats
    setattr(f, 'oldmatchandpats', oldmatchandpats)
    scmutil.matchandpats = f
    return oldmatchandpats

def restorematchandpatsfn():
    '''restores scmutil.matchandpats to what it was before
    installmatchandpatsfn was called. No-op if scmutil.matchandpats
    is its original function.

    Note that n calls to installmatchandpatsfn will require n calls
    to restore the original matchfn.'''
    scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
                                   scmutil.matchandpats)

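# The install*/restore* helpers above are intended to be used in matched
# pairs around a call into the original command, as the wrappers later in
# this file do, for example:
#
#   oldmatch = installmatchfn(overridematch)
#   try:
#       result = orig(ui, repo, pats, opts)
#   finally:
#       restorematchfn()
#
# Because they patch the scmutil module itself, they are not thread safe
# (see the warning in installmatchfn).
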
def addlargefiles(ui, repo, isaddremove, matcher, **opts):
    large = opts.get('large')
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.get('lfsize'))

    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
        if lfpats:
            lfmatcher = match_.match(repo.root, '', list(lfpats))

    lfnames = []
    m = matcher

    wctx = repo[None]
    for f in repo.walk(match_.badmatch(m, lambda x, y: None)):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # addremove in core gets fancy with the name, add doesn't
        if isaddremove:
            name = m.uipath(f)
        else:
            name = m.rel(f)

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_('%s already a largefile\n') % name)
            continue

        if (exact or not exists) and not lfutil.isstandin(f):
            # In case the file was removed previously, but not committed
            # (issue3507)
            if not repo.wvfs.exists(f):
                continue

            abovemin = (lfsize and
                        repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_('adding %s as a largefile\n') % name)

    bad = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    wlock = repo.wlock()
    try:
        if not opts.get('dry_run'):
            standins = []
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                lfutil.writestandin(repo, standinname, hash='',
                    executable=lfutil.getexecutable(repo.wjoin(f)))
                standins.append(standinname)
                if lfdirstate[f] == 'r':
                    lfdirstate.normallookup(f)
                else:
                    lfdirstate.add(f)
            lfdirstate.write()
            bad += [lfutil.splitstandin(f)
                    for f in repo[None].add(standins)
                    if f in m.files()]

        added = [f for f in lfnames if f not in bad]
    finally:
        wlock.release()
    return added, bad

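# For illustration, a file becomes a largefile here when --large is given,
# when its size reaches the configured minimum (lfsize is in megabytes,
# hence the "lfsize * 1024 * 1024" comparison above), or when it matches a
# configured pattern.  A hypothetical configuration could look like:
#
#   [largefiles]
#   minsize = 2
#   patterns = *.iso re:.*\.zip$
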
def removelargefiles(ui, repo, isaddremove, matcher, **opts):
    after = opts.get('after')
    m = composelargefilematcher(matcher, repo[None].manifest())
    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=not isaddremove)
    finally:
        repo.lfstatus = False
    manifest = repo[None].manifest()
    modified, added, deleted, clean = [[f for f in list
                                        if lfutil.standin(f) in manifest]
                                       for list in (s.modified, s.added,
                                                    s.deleted, s.clean)]

    def warn(files, msg):
        for f in files:
            ui.warn(msg % m.rel(f))
        return int(len(files) > 0)

    result = 0

    if after:
        remove = deleted
        result = warn(modified + added + clean,
                      _('not removing %s: file still exists\n'))
    else:
        remove = deleted + clean
        result = warn(modified, _('not removing %s: file is modified (use -f'
                                  ' to force removal)\n'))
        result = warn(added, _('not removing %s: file has been marked for add'
                               ' (use forget to undo)\n')) or result

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in sorted(remove):
            if ui.verbose or not m.exact(f):
                # addremove in core gets fancy with the name, remove doesn't
                if isaddremove:
                    name = m.uipath(f)
                else:
                    name = m.rel(f)
                ui.status(_('removing %s\n') % name)

            if not opts.get('dry_run'):
                if not after:
                    util.unlinkpath(repo.wjoin(f), ignoremissing=True)

        if opts.get('dry_run'):
            return result

        remove = [lfutil.standin(f) for f in remove]
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not isaddremove:
            for f in remove:
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
            repo[None].forget(remove)

        for f in remove:
            lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
                                  False)

        lfdirstate.write()
    finally:
        wlock.release()

    return result

# For overriding mercurial.hgweb.webcommands so that largefiles will
# appear at their right place in the manifests.
def decodepath(orig, path):
    return lfutil.splitstandin(path) or path

# -- Wrappers: modify existing commands --------------------------------

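# Each wrapper below receives the wrapped function as its first argument
# ('orig').  The wrappers are presumably installed from the extension's
# setup code (not shown here), e.g. via extensions.wrapcommand() and
# extensions.wrapfunction(), so the original command still handles normal
# files while largefile handling is layered on top.
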
def overrideadd(orig, ui, repo, *pats, **opts):
    if opts.get('normal') and opts.get('large'):
        raise util.Abort(_('--normal cannot be used with --large'))
    return orig(ui, repo, *pats, **opts)

def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
    # The --normal flag short circuits this override
    if opts.get('normal'):
        return orig(ui, repo, matcher, prefix, explicitonly, **opts)

    ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
                                             ladded)
    bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)

    bad.extend(f for f in lbad)
    return bad

def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
    result = orig(ui, repo, normalmatcher, prefix, after, force, subrepos)
    return removelargefiles(ui, repo, False, matcher, after=after,
                            force=force) or result

def overridestatusfn(orig, repo, rev2, **opts):
    try:
        repo._repo.lfstatus = True
        return orig(repo, rev2, **opts)
    finally:
        repo._repo.lfstatus = False

def overridestatus(orig, ui, repo, *pats, **opts):
    try:
        repo.lfstatus = True
        return orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False

def overridedirty(orig, repo, ignoreupdate=False):
    try:
        repo._repo.lfstatus = True
        return orig(repo, ignoreupdate)
    finally:
        repo._repo.lfstatus = False

def overridelog(orig, ui, repo, *pats, **opts):
    def overridematchandpats(ctx, pats=[], opts={}, globbed=False,
            default='relpath', badfn=None):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default,
                                       badfn=badfn)
        m, p = copy.copy(matchandpats)

        if m.always():
            # We want to match everything anyway, so there's no benefit trying
            # to add standins.
            return matchandpats

        pats = set(p)

        def fixpats(pat, tostandin=lfutil.standin):
            if pat.startswith('set:'):
                return pat

            kindpat = match_._patsplit(pat, None)

            if kindpat[0] is not None:
                return kindpat[0] + ':' + tostandin(kindpat[1])
            return tostandin(kindpat[1])

        if m._cwd:
            hglf = lfutil.shortname
            back = util.pconvert(m.rel(hglf)[:-len(hglf)])

            def tostandin(f):
                # The file may already be a standin, so truncate the back
                # prefix and test before mangling it. This avoids turning
                # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
                if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
                    return f

                # An absolute path is from outside the repo, so truncate the
                # path to the root before building the standin. Otherwise cwd
                # is somewhere in the repo, relative to root, and needs to be
                # prepended before building the standin.
                if os.path.isabs(m._cwd):
                    f = f[len(back):]
                else:
                    f = m._cwd + '/' + f
                return back + lfutil.standin(f)

            pats.update(fixpats(f, tostandin) for f in p)
        else:
            def tostandin(f):
                if lfutil.splitstandin(f):
                    return f
                return lfutil.standin(f)
            pats.update(fixpats(f, tostandin) for f in p)

        for i in range(0, len(m._files)):
            # Don't add '.hglf' to m.files, since that is already covered by '.'
            if m._files[i] == '.':
                continue
            standin = lfutil.standin(m._files[i])
            # If the "standin" is a directory, append instead of replace to
            # support naming a directory on the command line with only
            # largefiles. The original directory is kept to support normal
            # files.
            if standin in repo[ctx.node()]:
                m._files[i] = standin
            elif m._files[i] not in repo[ctx.node()] \
                    and repo.wvfs.isdir(standin):
                m._files.append(standin)

        m._fileroots = set(m._files)
        m._always = False
        origmatchfn = m.matchfn
        def lfmatchfn(f):
            lf = lfutil.splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            r = origmatchfn(f)
            return r
        m.matchfn = lfmatchfn

        ui.debug('updated patterns: %s\n' % sorted(pats))
        return m, pats

    # For hg log --patch, the match object is used in two different senses:
    # (1) to determine what revisions should be printed out, and
    # (2) to determine what files to print out diffs for.
    # The magic matchandpats override should be used for case (1) but not for
    # case (2).
    def overridemakelogfilematcher(repo, pats, opts, badfn=None):
        wctx = repo[None]
        match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
        return lambda rev: match

    oldmatchandpats = installmatchandpatsfn(overridematchandpats)
    oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
    setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)

    try:
        return orig(ui, repo, *pats, **opts)
    finally:
        restorematchandpatsfn()
        setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)

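# For illustration: with the matcher above, a command such as
# "hg log data/big.bin" also follows the history of the standin
# ".hglf/data/big.bin", which is where commits touching a largefile are
# actually recorded.
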
def overrideverify(orig, ui, repo, *pats, **opts):
    large = opts.pop('large', False)
    all = opts.pop('lfa', False)
    contents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if large or all or contents:
        result = result or lfcommands.verifylfiles(ui, repo, all, contents)
    return result

def overridedebugstate(orig, ui, repo, *pats, **opts):
    large = opts.pop('large', False)
    if large:
        class fakerepo(object):
            dirstate = lfutil.openlfdirstate(ui, repo)
        orig(ui, fakerepo, *pats, **opts)
    else:
        orig(ui, repo, *pats, **opts)

# Before starting the manifest merge, merge.updates will call
# _checkunknownfile to check if there are any files in the merged-in
# changeset that collide with unknown files in the working copy.
#
# The largefiles are seen as unknown, so this prevents us from merging
# in a file 'foo' if we already have a largefile with the same name.
#
# The overridden function filters the unknown files by removing any
# largefiles. This makes the merge proceed and we can then handle this
# case further in the overridden calculateupdates function below.
def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
    if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
        return False
    return origfn(repo, wctx, mctx, f, f2)

# The manifest merge handles conflicts on the manifest level. We want
# to handle changes in largefile-ness of files at this level too.
#
# The strategy is to run the original calculateupdates and then process
# the action list it outputs. There are two cases we need to deal with:
#
# 1. Normal file in p1, largefile in p2. Here the largefile is
#    detected via its standin file, which will enter the working copy
#    with a "get" action. It is not "merge" since the standin is all
#    Mercurial is concerned with at this level -- the link to the
#    existing normal file is not relevant here.
#
# 2. Largefile in p1, normal file in p2. Here we get a "merge" action
#    since the largefile will be present in the working copy and
#    different from the normal file in p2. Mercurial therefore
#    triggers a merge action.
#
# In both cases, we prompt the user and emit new actions to either
# remove the standin (if the normal file was kept) or to remove the
# normal file and get the standin (if the largefile was kept). The
# default prompt answer is to use the largefile version since it was
# presumably changed on purpose.
#
# Finally, the merge.applyupdates function will then take care of
# writing the files into the working copy and lfcommands.updatelfiles
# will update the largefiles.
def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
                             partial, acceptremote, followcopies):
    overwrite = force and not branchmerge
    actions, diverge, renamedelete = origfn(
        repo, p1, p2, pas, branchmerge, force, partial, acceptremote,
        followcopies)

    if overwrite:
        return actions, diverge, renamedelete

    # Convert to dictionary with filename as key and action as value.
    lfiles = set()
    for f in actions:
        splitstandin = f and lfutil.splitstandin(f)
        if splitstandin in p1:
            lfiles.add(splitstandin)
        elif lfutil.standin(f) in p1:
            lfiles.add(f)

    for lfile in lfiles:
        standin = lfutil.standin(lfile)
        (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
        (sm, sargs, smsg) = actions.get(standin, (None, None, None))
        if sm in ('g', 'dc') and lm != 'r':
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            usermsg = _('remote turned local normal file %s into a largefile\n'
                        'use (l)argefile or keep (n)ormal file?'
                        '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
                actions[lfile] = ('r', None, 'replaced by standin')
                actions[standin] = ('g', sargs, 'replaces standin')
            else: # keep local normal file
                actions[lfile] = ('k', None, 'replaces standin')
                if branchmerge:
                    actions[standin] = ('k', None, 'replaced by non-standin')
                else:
                    actions[standin] = ('r', None, 'replaced by non-standin')
        elif lm in ('g', 'dc') and sm != 'r':
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            usermsg = _('remote turned local largefile %s into a normal file\n'
                        'keep (l)argefile or use (n)ormal file?'
                        '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
                if branchmerge:
                    # largefile can be restored from standin safely
                    actions[lfile] = ('k', None, 'replaced by standin')
                    actions[standin] = ('k', None, 'replaces standin')
                else:
                    # "lfile" should be marked as "removed" without
                    # removal of itself
                    actions[lfile] = ('lfmr', None,
                                      'forget non-standin largefile')

                    # linear-merge should treat this largefile as 're-added'
                    actions[standin] = ('a', None, 'keep standin')
            else: # pick remote normal file
                actions[lfile] = ('g', largs, 'replaces standin')
                actions[standin] = ('r', None, 'replaced by non-standin')

    return actions, diverge, renamedelete

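# A rough reading of the merge action codes used above (inferred from their
# use here, not defined in this file): 'g' gets the file from the other
# side, 'r' removes it, 'k' keeps the local version, 'a' marks it added,
# and 'dc' is a prompt for a file deleted on one side and changed on the
# other.  'lfmr' is not a core action; it is emitted only by
# overridecalculateupdates and consumed by mergerecordupdates below to
# forget a largefile without deleting it from the working directory.
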
def mergerecordupdates(orig, repo, actions, branchmerge):
    if 'lfmr' in actions:
        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        for lfile, args, msg in actions['lfmr']:
            # this should be executed before 'orig', to execute 'remove'
            # before all other actions
            repo.dirstate.remove(lfile)
            # make sure lfile doesn't get synclfdirstate'd as normal
            lfdirstate.add(lfile)
        lfdirstate.write()

    return orig(repo, actions, branchmerge)


# Override filemerge to prompt the user about how they wish to merge
# largefiles. This will handle identical edits without prompting the user.
def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca, labels=None):
    if not lfutil.isstandin(orig):
        return origfn(repo, mynode, orig, fcd, fco, fca, labels=labels)

    ahash = fca.data().strip().lower()
    dhash = fcd.data().strip().lower()
    ohash = fco.data().strip().lower()
    if (ohash != ahash and
        ohash != dhash and
        (dhash == ahash or
         repo.ui.promptchoice(
             _('largefile %s has a merge conflict\nancestor was %s\n'
               'keep (l)ocal %s or\ntake (o)ther %s?'
               '$$ &Local $$ &Other') %
               (lfutil.splitstandin(orig), ahash, dhash, ohash),
             0) == 1)):
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return 0

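# Note that a standin contains only the hash of the largefile's content, so
# the fca/fcd/fco data compared above are hashes rather than file contents.
# The other side is taken without prompting when only it changed
# (dhash == ahash), and no prompt is needed either when the other side is
# unchanged or both sides made the identical change (ohash == ahash or
# ohash == dhash).
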
def copiespathcopies(orig, ctx1, ctx2, match=None):
    copies = orig(ctx1, ctx2, match=match)
    updated = {}

    for k, v in copies.iteritems():
        updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v

    return updated

# Copy first changes the matchers to match standins instead of
# largefiles. Then it overrides util.copyfile in that function it
# checks if the destination largefile already exists. It also keeps a
# list of copied files so that the largefiles can be copied and the
# dirstate updated.
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    installnormalfilesmatchfn(repo[None].manifest())
    try:
        result = orig(ui, repo, pats, opts, rename)
    except util.Abort as e:
        if str(e) != _('no files to copy'):
            raise e
        else:
            nonormalfiles = True
            result = 0
    finally:
        restorematchfn()

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    def makestandin(relpath):
        path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
        return os.path.join(repo.wjoin(lfutil.standin(path)))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))

    try:
        # When we call orig below it creates the standins but we don't add
        # them to the dir state until later so lock during that time.
        wlock = repo.wlock()

        manifest = repo[None].manifest()
        def overridematch(ctx, pats=[], opts={}, globbed=False,
                default='relpath', badfn=None):
            newpats = []
            # The patterns were previously mangled to add the standin
            # directory; we need to remove that now
            for pat in pats:
                if match_.patkind(pat) is None and lfutil.shortname in pat:
                    newpats.append(pat.replace(lfutil.shortname, ''))
                else:
                    newpats.append(pat)
            match = oldmatch(ctx, newpats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)
            lfile = lambda f: lfutil.standin(f) in manifest
            m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
            m._fileroots = set(m._files)
            origmatchfn = m.matchfn
            m.matchfn = lambda f: (lfutil.isstandin(f) and
                                   (f in manifest) and
                                   origmatchfn(lfutil.splitstandin(f)) or
                                   None)
            return m
        oldmatch = installmatchfn(overridematch)
        listpats = []
        for pat in pats:
            if match_.patkind(pat) is not None:
                listpats.append(pat)
            else:
                listpats.append(makestandin(pat))

        try:
            origcopyfile = util.copyfile
            copiedfiles = []
            def overridecopyfile(src, dest):
                if (lfutil.shortname in src and
                    dest.startswith(repo.wjoin(lfutil.shortname))):
                    destlfile = dest.replace(lfutil.shortname, '')
                    if not opts['force'] and os.path.exists(destlfile):
                        raise IOError('',
                            _('destination largefile already exists'))
                copiedfiles.append((src, dest))
                origcopyfile(src, dest)

            util.copyfile = overridecopyfile
            result += orig(ui, repo, listpats, opts, rename)
        finally:
            util.copyfile = origcopyfile

        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for (src, dest) in copiedfiles:
            if (lfutil.shortname in src and
                dest.startswith(repo.wjoin(lfutil.shortname))):
                srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
                destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
                destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
                if not os.path.isdir(destlfiledir):
                    os.makedirs(destlfiledir)
                if rename:
                    os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))

                    # The file is gone, but this deletes any empty parent
                    # directories as a side-effect.
                    util.unlinkpath(repo.wjoin(srclfile), True)
                    lfdirstate.remove(srclfile)
                else:
                    util.copyfile(repo.wjoin(srclfile),
                                  repo.wjoin(destlfile))

                lfdirstate.add(destlfile)
        lfdirstate.write()
    except util.Abort as e:
        if str(e) != _('no files to copy'):
            raise e
        else:
            nolfiles = True
    finally:
        restorematchfn()
        wlock.release()

    if nolfiles and nonormalfiles:
        raise util.Abort(_('no files to copy'))

    return result

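# For illustration: for "hg rename big.bin new.bin" the override above first
# lets the original copy/rename run against the standins
# ('.hglf/big.bin' -> '.hglf/new.bin') and then replays the same operation
# on the largefiles themselves, updating the largefiles dirstate as it goes.
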
# When the user calls revert, we have to be careful to not revert any
# changes to other largefiles accidentally. This means we have to keep
# track of the largefiles that are being reverted so we only pull down
# the necessary largefiles.
#
# Standins are only updated (to match the hash of largefiles) before
# commits. Update the standins then run the original revert, changing
# the matcher to hit standins instead of largefiles. Based on the
# resulting standins update the largefiles.
def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        s = lfutil.lfdirstatestatus(lfdirstate, repo)
        lfdirstate.write()
        for lfile in s.modified:
            lfutil.updatestandin(repo, lfutil.standin(lfile))
        for lfile in s.deleted:
            if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
                os.unlink(repo.wjoin(lfutil.standin(lfile)))

        oldstandins = lfutil.getstandinsstate(repo)

        def overridematch(mctx, pats=[], opts={}, globbed=False,
                default='relpath', badfn=None):
            match = oldmatch(mctx, pats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)

            # revert supports recursing into subrepos, and though largefiles
            # currently doesn't work correctly in that case, this match is
            # called, so the lfdirstate above may not be the correct one for
            # this invocation of match.
            lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
                                               False)

            def tostandin(f):
                standin = lfutil.standin(f)
                if standin in ctx or standin in mctx:
                    return standin
                elif standin in repo[None] or lfdirstate[f] == 'r':
                    return None
                return f
            m._files = [tostandin(f) for f in m._files]
            m._files = [f for f in m._files if f is not None]
            m._fileroots = set(m._files)
            origmatchfn = m.matchfn
            def matchfn(f):
                if lfutil.isstandin(f):
                    return (origmatchfn(lfutil.splitstandin(f)) and
                            (f in ctx or f in mctx))
                return origmatchfn(f)
            m.matchfn = matchfn
            return m
        oldmatch = installmatchfn(overridematch)
        try:
            orig(ui, repo, ctx, parents, *pats, **opts)
        finally:
            restorematchfn()

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        # lfdirstate should be 'normallookup'-ed for updated files,
        # because reverting doesn't touch dirstate for 'normal' files
        # when target revision is explicitly specified: in such case,
        # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
        # of target (standin) file.
        lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
                                normallookup=True)

    finally:
        wlock.release()

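# getstandinsstate()/getlfilestoupdate() above act as a before/after
# snapshot: only largefiles whose standins were actually changed by the
# revert are handed to lfcommands.updatelfiles, so unrelated largefiles are
# not touched or re-downloaded.
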
# after pulling changesets, we need to take some extra care to get
# largefiles updated remotely
def overridepull(orig, ui, repo, source=None, **opts):
    revsprepull = len(repo)
    if not source:
        source = 'default'
    repo.lfpullsource = source
    result = orig(ui, repo, source, **opts)
    revspostpull = len(repo)
    lfrevs = opts.get('lfrev', [])
    if opts.get('all_largefiles'):
        lfrevs.append('pulled()')
    if lfrevs and revspostpull > revsprepull:
        numcached = 0
        repo.firstpulled = revsprepull # for pulled() revset expression
        try:
            for rev in scmutil.revrange(repo, lfrevs):
                ui.note(_('pulling largefiles for revision %s\n') % rev)
                (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
                numcached += len(cached)
        finally:
            del repo.firstpulled
        ui.status(_("%d largefiles cached\n") % numcached)
    return result

799 def pulledrevsetsymbol(repo, subset, x):
799 def pulledrevsetsymbol(repo, subset, x):
800 """``pulled()``
800 """``pulled()``
801 Changesets that have just been pulled.
801 Changesets that have just been pulled.
802
802
803 Only available with the largefiles extension, in pull --lfrev expressions.
803 Only available with the largefiles extension, in pull --lfrev expressions.
804
804
805 .. container:: verbose
805 .. container:: verbose
806
806
807 Some examples:
807 Some examples:
808
808
809 - pull largefiles for all new changesets::
809 - pull largefiles for all new changesets::
810
810
811 hg pull --lfrev "pulled()"
811 hg pull --lfrev "pulled()"
812
812
813 - pull largefiles for all new branch heads::
813 - pull largefiles for all new branch heads::
814
814
815 hg pull --lfrev "head(pulled()) and not closed()"
815 hg pull --lfrev "head(pulled()) and not closed()"
816
816
817 """
817 """
818
818
819 try:
819 try:
820 firstpulled = repo.firstpulled
820 firstpulled = repo.firstpulled
821 except AttributeError:
821 except AttributeError:
822 raise util.Abort(_("pulled() only available in --lfrev"))
822 raise util.Abort(_("pulled() only available in --lfrev"))
823 return revset.baseset([r for r in subset if r >= firstpulled])
823 return revset.baseset([r for r in subset if r >= firstpulled])
824
824
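For orientation, the filtering above keeps every revision numbered at or after the count recorded just before the pull. A minimal standalone sketch of that idea, with plain integers standing in for repository revisions and a list standing in for the revset baseset (all names hypothetical, not the real revset API)::

    # Sketch of the pulled() filtering idea only; not the real revset API.
    def pulled_like(subset, firstpulled):
        # keep revisions that arrived with the pull that just finished
        return [r for r in subset if r >= firstpulled]

    revsprepull = 5                 # len(repo) before the pull (hypothetical)
    candidates = [2, 3, 5, 6, 7]    # revisions being considered
    print(pulled_like(candidates, revsprepull))   # -> [5, 6, 7]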
825 def overrideclone(orig, ui, source, dest=None, **opts):
825 def overrideclone(orig, ui, source, dest=None, **opts):
826 d = dest
826 d = dest
827 if d is None:
827 if d is None:
828 d = hg.defaultdest(source)
828 d = hg.defaultdest(source)
829 if opts.get('all_largefiles') and not hg.islocal(d):
829 if opts.get('all_largefiles') and not hg.islocal(d):
830 raise util.Abort(_(
830 raise util.Abort(_(
831 '--all-largefiles is incompatible with non-local destination %s') %
831 '--all-largefiles is incompatible with non-local destination %s') %
832 d)
832 d)
833
833
834 return orig(ui, source, dest, **opts)
834 return orig(ui, source, dest, **opts)
835
835
836 def hgclone(orig, ui, opts, *args, **kwargs):
836 def hgclone(orig, ui, opts, *args, **kwargs):
837 result = orig(ui, opts, *args, **kwargs)
837 result = orig(ui, opts, *args, **kwargs)
838
838
839 if result is not None:
839 if result is not None:
840 sourcerepo, destrepo = result
840 sourcerepo, destrepo = result
841 repo = destrepo.local()
841 repo = destrepo.local()
842
842
843 # When cloning to a remote repo (like through SSH), no repo is available
843 # When cloning to a remote repo (like through SSH), no repo is available
844 # from the peer. Therefore the largefiles can't be downloaded and the
844 # from the peer. Therefore the largefiles can't be downloaded and the
845 # hgrc can't be updated.
845 # hgrc can't be updated.
846 if not repo:
846 if not repo:
847 return result
847 return result
848
848
849 # If largefiles is required for this repo, permanently enable it locally
849 # If largefiles is required for this repo, permanently enable it locally
850 if 'largefiles' in repo.requirements:
850 if 'largefiles' in repo.requirements:
851 fp = repo.vfs('hgrc', 'a', text=True)
851 fp = repo.vfs('hgrc', 'a', text=True)
852 try:
852 try:
853 fp.write('\n[extensions]\nlargefiles=\n')
853 fp.write('\n[extensions]\nlargefiles=\n')
854 finally:
854 finally:
855 fp.close()
855 fp.close()
856
856
857 # Caching is implicitly limited to 'rev' option, since the dest repo was
857 # Caching is implicitly limited to 'rev' option, since the dest repo was
858 # truncated at that point. The user may expect a download count with
858 # truncated at that point. The user may expect a download count with
859 # this option, so attempt the download even if this isn't a largefiles repo.
859 # this option, so attempt the download even if this isn't a largefiles repo.
860 if opts.get('all_largefiles'):
860 if opts.get('all_largefiles'):
861 success, missing = lfcommands.downloadlfiles(ui, repo, None)
861 success, missing = lfcommands.downloadlfiles(ui, repo, None)
862
862
863 if missing != 0:
863 if missing != 0:
864 return None
864 return None
865
865
866 return result
866 return result
867
867
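The hgrc fragment appended by the clone hook above just enables the extension for the fresh local clone. A small sketch of the resulting section, using a hypothetical temporary file rather than the clone's real .hg/hgrc handling::

    # Sketch only: show the section the clone hook appends (hypothetical path).
    import os
    import tempfile

    fragment = '\n[extensions]\nlargefiles=\n'
    hgrc = os.path.join(tempfile.mkdtemp(), 'hgrc')
    with open(hgrc, 'a') as fp:
        fp.write(fragment)
    print(open(hgrc).read())   # prints an [extensions] section enabling largefiles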
868 def overriderebase(orig, ui, repo, **opts):
868 def overriderebase(orig, ui, repo, **opts):
869 if not util.safehasattr(repo, '_largefilesenabled'):
869 if not util.safehasattr(repo, '_largefilesenabled'):
870 return orig(ui, repo, **opts)
870 return orig(ui, repo, **opts)
871
871
872 resuming = opts.get('continue')
872 resuming = opts.get('continue')
873 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
873 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
874 repo._lfstatuswriters.append(lambda *msg, **opts: None)
874 repo._lfstatuswriters.append(lambda *msg, **opts: None)
875 try:
875 try:
876 return orig(ui, repo, **opts)
876 return orig(ui, repo, **opts)
877 finally:
877 finally:
878 repo._lfstatuswriters.pop()
878 repo._lfstatuswriters.pop()
879 repo._lfcommithooks.pop()
879 repo._lfcommithooks.pop()
880
880
881 def overridearchivecmd(orig, ui, repo, dest, **opts):
881 def overridearchivecmd(orig, ui, repo, dest, **opts):
882 repo.unfiltered().lfstatus = True
882 repo.unfiltered().lfstatus = True
883
883
884 try:
884 try:
885 return orig(ui, repo.unfiltered(), dest, **opts)
885 return orig(ui, repo.unfiltered(), dest, **opts)
886 finally:
886 finally:
887 repo.unfiltered().lfstatus = False
887 repo.unfiltered().lfstatus = False
888
888
889 def hgwebarchive(orig, web, req, tmpl):
890 web.repo.lfstatus = True
891
892 try:
893 return orig(web, req, tmpl)
894 finally:
895 web.repo.lfstatus = False
896
889 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
897 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
890 prefix='', mtime=None, subrepos=None):
898 prefix='', mtime=None, subrepos=None):
891 if not repo.lfstatus:
899 # For some reason setting repo.lfstatus in hgwebarchive only changes the
900 # unfiltered repo's attr, so check that as well.
901 if not repo.lfstatus and not repo.unfiltered().lfstatus:
892 return orig(repo, dest, node, kind, decode, matchfn, prefix, mtime,
902 return orig(repo, dest, node, kind, decode, matchfn, prefix, mtime,
893 subrepos)
903 subrepos)
894
904
895 # No need to lock because we are only reading history and
905 # No need to lock because we are only reading history and
896 # largefile caches, neither of which are modified.
906 # largefile caches, neither of which are modified.
897 if node is not None:
907 if node is not None:
898 lfcommands.cachelfiles(repo.ui, repo, node)
908 lfcommands.cachelfiles(repo.ui, repo, node)
899
909
900 if kind not in archival.archivers:
910 if kind not in archival.archivers:
901 raise util.Abort(_("unknown archive type '%s'") % kind)
911 raise util.Abort(_("unknown archive type '%s'") % kind)
902
912
903 ctx = repo[node]
913 ctx = repo[node]
904
914
905 if kind == 'files':
915 if kind == 'files':
906 if prefix:
916 if prefix:
907 raise util.Abort(
917 raise util.Abort(
908 _('cannot give prefix when archiving to files'))
918 _('cannot give prefix when archiving to files'))
909 else:
919 else:
910 prefix = archival.tidyprefix(dest, kind, prefix)
920 prefix = archival.tidyprefix(dest, kind, prefix)
911
921
912 def write(name, mode, islink, getdata):
922 def write(name, mode, islink, getdata):
913 if matchfn and not matchfn(name):
923 if matchfn and not matchfn(name):
914 return
924 return
915 data = getdata()
925 data = getdata()
916 if decode:
926 if decode:
917 data = repo.wwritedata(name, data)
927 data = repo.wwritedata(name, data)
918 archiver.addfile(prefix + name, mode, islink, data)
928 archiver.addfile(prefix + name, mode, islink, data)
919
929
920 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
930 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
921
931
922 if repo.ui.configbool("ui", "archivemeta", True):
932 if repo.ui.configbool("ui", "archivemeta", True):
923 write('.hg_archival.txt', 0o644, False,
933 write('.hg_archival.txt', 0o644, False,
924 lambda: archival.buildmetadata(ctx))
934 lambda: archival.buildmetadata(ctx))
925
935
926 for f in ctx:
936 for f in ctx:
927 ff = ctx.flags(f)
937 ff = ctx.flags(f)
928 getdata = ctx[f].data
938 getdata = ctx[f].data
929 if lfutil.isstandin(f):
939 if lfutil.isstandin(f):
930 if node is not None:
940 if node is not None:
931 path = lfutil.findfile(repo, getdata().strip())
941 path = lfutil.findfile(repo, getdata().strip())
932
942
933 if path is None:
943 if path is None:
934 raise util.Abort(
944 raise util.Abort(
935 _('largefile %s not found in repo store or system cache')
945 _('largefile %s not found in repo store or system cache')
936 % lfutil.splitstandin(f))
946 % lfutil.splitstandin(f))
937 else:
947 else:
938 path = lfutil.splitstandin(f)
948 path = lfutil.splitstandin(f)
939
949
940 f = lfutil.splitstandin(f)
950 f = lfutil.splitstandin(f)
941
951
942 def getdatafn():
952 def getdatafn():
943 fd = None
953 fd = None
944 try:
954 try:
945 fd = open(path, 'rb')
955 fd = open(path, 'rb')
946 return fd.read()
956 return fd.read()
947 finally:
957 finally:
948 if fd:
958 if fd:
949 fd.close()
959 fd.close()
950
960
951 getdata = getdatafn
961 getdata = getdatafn
952 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
962 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
953
963
954 if subrepos:
964 if subrepos:
955 for subpath in sorted(ctx.substate):
965 for subpath in sorted(ctx.substate):
956 sub = ctx.workingsub(subpath)
966 sub = ctx.workingsub(subpath)
957 submatch = match_.narrowmatcher(subpath, matchfn)
967 submatch = match_.narrowmatcher(subpath, matchfn)
958 sub._repo.lfstatus = True
968 sub._repo.lfstatus = True
959 sub.archive(archiver, prefix, submatch)
969 sub.archive(archiver, prefix, submatch)
960
970
961 archiver.done()
971 archiver.done()
962
972
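Both the archive command wrapper and the new hgweb hook above follow the same toggle-and-delegate shape: set the lfstatus flag, call the original, and always clear the flag again. A stripped-down sketch of that pattern with a fake repo object (not the real hgweb or archival API)::

    # Stripped-down sketch of the lfstatus toggle used by the archive wrappers.
    class FakeRepo(object):
        lfstatus = False

    def archive_with_largefiles(repo, do_archive):
        repo.lfstatus = True          # ask the wrapped code to expand standins
        try:
            return do_archive(repo)
        finally:
            repo.lfstatus = False     # never leave the flag set

    repo = FakeRepo()
    print(archive_with_largefiles(repo, lambda r: 'lfstatus=%s' % r.lfstatus))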
963 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
973 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
964 if not repo._repo.lfstatus:
974 if not repo._repo.lfstatus:
965 return orig(repo, archiver, prefix, match)
975 return orig(repo, archiver, prefix, match)
966
976
967 repo._get(repo._state + ('hg',))
977 repo._get(repo._state + ('hg',))
968 rev = repo._state[1]
978 rev = repo._state[1]
969 ctx = repo._repo[rev]
979 ctx = repo._repo[rev]
970
980
971 if ctx.node() is not None:
981 if ctx.node() is not None:
972 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
982 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
973
983
974 def write(name, mode, islink, getdata):
984 def write(name, mode, islink, getdata):
975 # At this point, the standin has been replaced with the largefile name,
985 # At this point, the standin has been replaced with the largefile name,
976 # so the normal matcher works here without the lfutil variants.
986 # so the normal matcher works here without the lfutil variants.
977 if match and not match(f):
987 if match and not match(f):
978 return
988 return
979 data = getdata()
989 data = getdata()
980
990
981 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
991 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
982
992
983 for f in ctx:
993 for f in ctx:
984 ff = ctx.flags(f)
994 ff = ctx.flags(f)
985 getdata = ctx[f].data
995 getdata = ctx[f].data
986 if lfutil.isstandin(f):
996 if lfutil.isstandin(f):
987 if ctx.node() is not None:
997 if ctx.node() is not None:
988 path = lfutil.findfile(repo._repo, getdata().strip())
998 path = lfutil.findfile(repo._repo, getdata().strip())
989
999
990 if path is None:
1000 if path is None:
991 raise util.Abort(
1001 raise util.Abort(
992 _('largefile %s not found in repo store or system cache')
1002 _('largefile %s not found in repo store or system cache')
993 % lfutil.splitstandin(f))
1003 % lfutil.splitstandin(f))
994 else:
1004 else:
995 path = lfutil.splitstandin(f)
1005 path = lfutil.splitstandin(f)
996
1006
997 f = lfutil.splitstandin(f)
1007 f = lfutil.splitstandin(f)
998
1008
999 def getdatafn():
1009 def getdatafn():
1000 fd = None
1010 fd = None
1001 try:
1011 try:
1002 fd = open(os.path.join(prefix, path), 'rb')
1012 fd = open(os.path.join(prefix, path), 'rb')
1003 return fd.read()
1013 return fd.read()
1004 finally:
1014 finally:
1005 if fd:
1015 if fd:
1006 fd.close()
1016 fd.close()
1007
1017
1008 getdata = getdatafn
1018 getdata = getdatafn
1009
1019
1010 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
1020 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
1011
1021
1012 for subpath in sorted(ctx.substate):
1022 for subpath in sorted(ctx.substate):
1013 sub = ctx.workingsub(subpath)
1023 sub = ctx.workingsub(subpath)
1014 submatch = match_.narrowmatcher(subpath, match)
1024 submatch = match_.narrowmatcher(subpath, match)
1015 sub._repo.lfstatus = True
1025 sub._repo.lfstatus = True
1016 sub.archive(archiver, prefix + repo._path + '/', submatch)
1026 sub.archive(archiver, prefix + repo._path + '/', submatch)
1017
1027
1018 # If a largefile is modified, the change is not reflected in its
1028 # If a largefile is modified, the change is not reflected in its
1019 # standin until a commit. cmdutil.bailifchanged() raises an exception
1029 # standin until a commit. cmdutil.bailifchanged() raises an exception
1020 # if the repo has uncommitted changes. Wrap it to also check if
1030 # if the repo has uncommitted changes. Wrap it to also check if
1021 # largefiles were changed. This is used by bisect, backout and fetch.
1031 # largefiles were changed. This is used by bisect, backout and fetch.
1022 def overridebailifchanged(orig, repo, *args, **kwargs):
1032 def overridebailifchanged(orig, repo, *args, **kwargs):
1023 orig(repo, *args, **kwargs)
1033 orig(repo, *args, **kwargs)
1024 repo.lfstatus = True
1034 repo.lfstatus = True
1025 s = repo.status()
1035 s = repo.status()
1026 repo.lfstatus = False
1036 repo.lfstatus = False
1027 if s.modified or s.added or s.removed or s.deleted:
1037 if s.modified or s.added or s.removed or s.deleted:
1028 raise util.Abort(_('uncommitted changes'))
1038 raise util.Abort(_('uncommitted changes'))
1029
1039
1030 def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
1040 def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
1031 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1041 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1032 bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
1042 bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
1033 m = composelargefilematcher(match, repo[None].manifest())
1043 m = composelargefilematcher(match, repo[None].manifest())
1034
1044
1035 try:
1045 try:
1036 repo.lfstatus = True
1046 repo.lfstatus = True
1037 s = repo.status(match=m, clean=True)
1047 s = repo.status(match=m, clean=True)
1038 finally:
1048 finally:
1039 repo.lfstatus = False
1049 repo.lfstatus = False
1040 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1050 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1041 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
1051 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
1042
1052
1043 for f in forget:
1053 for f in forget:
1044 if lfutil.standin(f) not in repo.dirstate and not \
1054 if lfutil.standin(f) not in repo.dirstate and not \
1045 repo.wvfs.isdir(lfutil.standin(f)):
1055 repo.wvfs.isdir(lfutil.standin(f)):
1046 ui.warn(_('not removing %s: file is already untracked\n')
1056 ui.warn(_('not removing %s: file is already untracked\n')
1047 % m.rel(f))
1057 % m.rel(f))
1048 bad.append(f)
1058 bad.append(f)
1049
1059
1050 for f in forget:
1060 for f in forget:
1051 if ui.verbose or not m.exact(f):
1061 if ui.verbose or not m.exact(f):
1052 ui.status(_('removing %s\n') % m.rel(f))
1062 ui.status(_('removing %s\n') % m.rel(f))
1053
1063
1054 # Need to lock because standin files are deleted then removed from the
1064 # Need to lock because standin files are deleted then removed from the
1055 # repository and we could race in-between.
1065 # repository and we could race in-between.
1056 wlock = repo.wlock()
1066 wlock = repo.wlock()
1057 try:
1067 try:
1058 lfdirstate = lfutil.openlfdirstate(ui, repo)
1068 lfdirstate = lfutil.openlfdirstate(ui, repo)
1059 for f in forget:
1069 for f in forget:
1060 if lfdirstate[f] == 'a':
1070 if lfdirstate[f] == 'a':
1061 lfdirstate.drop(f)
1071 lfdirstate.drop(f)
1062 else:
1072 else:
1063 lfdirstate.remove(f)
1073 lfdirstate.remove(f)
1064 lfdirstate.write()
1074 lfdirstate.write()
1065 standins = [lfutil.standin(f) for f in forget]
1075 standins = [lfutil.standin(f) for f in forget]
1066 for f in standins:
1076 for f in standins:
1067 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
1077 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
1068 rejected = repo[None].forget(standins)
1078 rejected = repo[None].forget(standins)
1069 finally:
1079 finally:
1070 wlock.release()
1080 wlock.release()
1071
1081
1072 bad.extend(f for f in rejected if f in m.files())
1082 bad.extend(f for f in rejected if f in m.files())
1073 forgot.extend(f for f in forget if f not in rejected)
1083 forgot.extend(f for f in forget if f not in rejected)
1074 return bad, forgot
1084 return bad, forgot
1075
1085
1076 def _getoutgoings(repo, other, missing, addfunc):
1086 def _getoutgoings(repo, other, missing, addfunc):
1077 """get pairs of filename and largefile hash in outgoing revisions
1087 """get pairs of filename and largefile hash in outgoing revisions
1078 in 'missing'.
1088 in 'missing'.
1079
1089
1080 largefiles already existing on the 'other' repository are ignored.
1090 largefiles already existing on the 'other' repository are ignored.
1081
1091
1082 'addfunc' is invoked with each unique pair of filename and
1092 'addfunc' is invoked with each unique pair of filename and
1083 largefile hash value.
1093 largefile hash value.
1084 """
1094 """
1085 knowns = set()
1095 knowns = set()
1086 lfhashes = set()
1096 lfhashes = set()
1087 def dedup(fn, lfhash):
1097 def dedup(fn, lfhash):
1088 k = (fn, lfhash)
1098 k = (fn, lfhash)
1089 if k not in knowns:
1099 if k not in knowns:
1090 knowns.add(k)
1100 knowns.add(k)
1091 lfhashes.add(lfhash)
1101 lfhashes.add(lfhash)
1092 lfutil.getlfilestoupload(repo, missing, dedup)
1102 lfutil.getlfilestoupload(repo, missing, dedup)
1093 if lfhashes:
1103 if lfhashes:
1094 lfexists = basestore._openstore(repo, other).exists(lfhashes)
1104 lfexists = basestore._openstore(repo, other).exists(lfhashes)
1095 for fn, lfhash in knowns:
1105 for fn, lfhash in knowns:
1096 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1106 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1097 addfunc(fn, lfhash)
1107 addfunc(fn, lfhash)
1098
1108
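The helper above funnels every outgoing (filename, hash) pair through a caller-supplied callback, dropping duplicates before the existence check against the other repository. A toy illustration of just the de-duplicating callback protocol, with fabricated file names and hashes::

    # Toy version of the addfunc/dedup callback protocol (fabricated data).
    knowns = set()
    collected = []

    def addfunc(fn, lfhash):
        collected.append((fn, lfhash))

    def dedup(fn, lfhash):
        if (fn, lfhash) not in knowns:
            knowns.add((fn, lfhash))
            addfunc(fn, lfhash)

    for fn, h in [('big.dat', 'a' * 40), ('big.dat', 'a' * 40),
                  ('iso.img', 'b' * 40)]:
        dedup(fn, h)
    print(collected)    # only the two unique pairs survive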
1099 def outgoinghook(ui, repo, other, opts, missing):
1109 def outgoinghook(ui, repo, other, opts, missing):
1100 if opts.pop('large', None):
1110 if opts.pop('large', None):
1101 lfhashes = set()
1111 lfhashes = set()
1102 if ui.debugflag:
1112 if ui.debugflag:
1103 toupload = {}
1113 toupload = {}
1104 def addfunc(fn, lfhash):
1114 def addfunc(fn, lfhash):
1105 if fn not in toupload:
1115 if fn not in toupload:
1106 toupload[fn] = []
1116 toupload[fn] = []
1107 toupload[fn].append(lfhash)
1117 toupload[fn].append(lfhash)
1108 lfhashes.add(lfhash)
1118 lfhashes.add(lfhash)
1109 def showhashes(fn):
1119 def showhashes(fn):
1110 for lfhash in sorted(toupload[fn]):
1120 for lfhash in sorted(toupload[fn]):
1111 ui.debug(' %s\n' % (lfhash))
1121 ui.debug(' %s\n' % (lfhash))
1112 else:
1122 else:
1113 toupload = set()
1123 toupload = set()
1114 def addfunc(fn, lfhash):
1124 def addfunc(fn, lfhash):
1115 toupload.add(fn)
1125 toupload.add(fn)
1116 lfhashes.add(lfhash)
1126 lfhashes.add(lfhash)
1117 def showhashes(fn):
1127 def showhashes(fn):
1118 pass
1128 pass
1119 _getoutgoings(repo, other, missing, addfunc)
1129 _getoutgoings(repo, other, missing, addfunc)
1120
1130
1121 if not toupload:
1131 if not toupload:
1122 ui.status(_('largefiles: no files to upload\n'))
1132 ui.status(_('largefiles: no files to upload\n'))
1123 else:
1133 else:
1124 ui.status(_('largefiles to upload (%d entities):\n')
1134 ui.status(_('largefiles to upload (%d entities):\n')
1125 % (len(lfhashes)))
1135 % (len(lfhashes)))
1126 for file in sorted(toupload):
1136 for file in sorted(toupload):
1127 ui.status(lfutil.splitstandin(file) + '\n')
1137 ui.status(lfutil.splitstandin(file) + '\n')
1128 showhashes(file)
1138 showhashes(file)
1129 ui.status('\n')
1139 ui.status('\n')
1130
1140
1131 def summaryremotehook(ui, repo, opts, changes):
1141 def summaryremotehook(ui, repo, opts, changes):
1132 largeopt = opts.get('large', False)
1142 largeopt = opts.get('large', False)
1133 if changes is None:
1143 if changes is None:
1134 if largeopt:
1144 if largeopt:
1135 return (False, True) # only outgoing check is needed
1145 return (False, True) # only outgoing check is needed
1136 else:
1146 else:
1137 return (False, False)
1147 return (False, False)
1138 elif largeopt:
1148 elif largeopt:
1139 url, branch, peer, outgoing = changes[1]
1149 url, branch, peer, outgoing = changes[1]
1140 if peer is None:
1150 if peer is None:
1141 # i18n: column positioning for "hg summary"
1151 # i18n: column positioning for "hg summary"
1142 ui.status(_('largefiles: (no remote repo)\n'))
1152 ui.status(_('largefiles: (no remote repo)\n'))
1143 return
1153 return
1144
1154
1145 toupload = set()
1155 toupload = set()
1146 lfhashes = set()
1156 lfhashes = set()
1147 def addfunc(fn, lfhash):
1157 def addfunc(fn, lfhash):
1148 toupload.add(fn)
1158 toupload.add(fn)
1149 lfhashes.add(lfhash)
1159 lfhashes.add(lfhash)
1150 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1160 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1151
1161
1152 if not toupload:
1162 if not toupload:
1153 # i18n: column positioning for "hg summary"
1163 # i18n: column positioning for "hg summary"
1154 ui.status(_('largefiles: (no files to upload)\n'))
1164 ui.status(_('largefiles: (no files to upload)\n'))
1155 else:
1165 else:
1156 # i18n: column positioning for "hg summary"
1166 # i18n: column positioning for "hg summary"
1157 ui.status(_('largefiles: %d entities for %d files to upload\n')
1167 ui.status(_('largefiles: %d entities for %d files to upload\n')
1158 % (len(lfhashes), len(toupload)))
1168 % (len(lfhashes), len(toupload)))
1159
1169
1160 def overridesummary(orig, ui, repo, *pats, **opts):
1170 def overridesummary(orig, ui, repo, *pats, **opts):
1161 try:
1171 try:
1162 repo.lfstatus = True
1172 repo.lfstatus = True
1163 orig(ui, repo, *pats, **opts)
1173 orig(ui, repo, *pats, **opts)
1164 finally:
1174 finally:
1165 repo.lfstatus = False
1175 repo.lfstatus = False
1166
1176
1167 def scmutiladdremove(orig, repo, matcher, prefix, opts={}, dry_run=None,
1177 def scmutiladdremove(orig, repo, matcher, prefix, opts={}, dry_run=None,
1168 similarity=None):
1178 similarity=None):
1169 if not lfutil.islfilesrepo(repo):
1179 if not lfutil.islfilesrepo(repo):
1170 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1180 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1171 # Get the list of missing largefiles so we can remove them
1181 # Get the list of missing largefiles so we can remove them
1172 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1182 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1173 unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
1183 unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
1174 False, False, False)
1184 False, False, False)
1175
1185
1176 # Call into the normal remove code, but we want the original addremove
1186 # Call into the normal remove code, but we want the original addremove
1177 # to handle removing the standin. Monkey patching here makes sure
1187 # to handle removing the standin. Monkey patching here makes sure
1178 # we don't remove the standin in the largefiles code, preventing a very
1188 # we don't remove the standin in the largefiles code, preventing a very
1179 # confused state later.
1189 # confused state later.
1180 if s.deleted:
1190 if s.deleted:
1181 m = copy.copy(matcher)
1191 m = copy.copy(matcher)
1182
1192
1183 # The m._files and m._map attributes are not changed to the deleted list
1193 # The m._files and m._map attributes are not changed to the deleted list
1184 # because that affects the m.exact() test, which in turn governs whether
1194 # because that affects the m.exact() test, which in turn governs whether
1185 # or not the file name is printed, and how. Simply limit the original
1195 # or not the file name is printed, and how. Simply limit the original
1186 # matches to those in the deleted status list.
1196 # matches to those in the deleted status list.
1187 matchfn = m.matchfn
1197 matchfn = m.matchfn
1188 m.matchfn = lambda f: f in s.deleted and matchfn(f)
1198 m.matchfn = lambda f: f in s.deleted and matchfn(f)
1189
1199
1190 removelargefiles(repo.ui, repo, True, m, **opts)
1200 removelargefiles(repo.ui, repo, True, m, **opts)
1191 # Call into the normal add code, and any files that *should* be added as
1201 # Call into the normal add code, and any files that *should* be added as
1192 # largefiles will be
1202 # largefiles will be
1193 added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
1203 added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
1194 # Now that we've handled largefiles, hand off to the original addremove
1204 # Now that we've handled largefiles, hand off to the original addremove
1195 # function to take care of the rest. Make sure it doesn't do anything with
1205 # function to take care of the rest. Make sure it doesn't do anything with
1196 # largefiles by passing a matcher that will ignore them.
1206 # largefiles by passing a matcher that will ignore them.
1197 matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
1207 matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
1198 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1208 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1199
1209
1200 # Calling purge with --all will cause the largefiles to be deleted.
1210 # Calling purge with --all will cause the largefiles to be deleted.
1201 # Override repo.status to prevent this from happening.
1211 # Override repo.status to prevent this from happening.
1202 def overridepurge(orig, ui, repo, *dirs, **opts):
1212 def overridepurge(orig, ui, repo, *dirs, **opts):
1203 # XXX Monkey patching a repoview will not work. The assigned attribute will
1213 # XXX Monkey patching a repoview will not work. The assigned attribute will
1204 # be set on the unfiltered repo, but we will only lookup attributes in the
1214 # be set on the unfiltered repo, but we will only lookup attributes in the
1205 # unfiltered repo if the lookup in the repoview object itself fails. As the
1215 # unfiltered repo if the lookup in the repoview object itself fails. As the
1206 # monkey patched method exists on the repoview class the lookup will not
1216 # monkey patched method exists on the repoview class the lookup will not
1207 # fail. As a result, the original version will shadow the monkey patched
1217 # fail. As a result, the original version will shadow the monkey patched
1208 # one, defeating the monkey patch.
1218 # one, defeating the monkey patch.
1209 #
1219 #
1210 # As a workaround we use an unfiltered repo here. We should do something
1220 # As a workaround we use an unfiltered repo here. We should do something
1211 # cleaner instead.
1221 # cleaner instead.
1212 repo = repo.unfiltered()
1222 repo = repo.unfiltered()
1213 oldstatus = repo.status
1223 oldstatus = repo.status
1214 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1224 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1215 clean=False, unknown=False, listsubrepos=False):
1225 clean=False, unknown=False, listsubrepos=False):
1216 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1226 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1217 listsubrepos)
1227 listsubrepos)
1218 lfdirstate = lfutil.openlfdirstate(ui, repo)
1228 lfdirstate = lfutil.openlfdirstate(ui, repo)
1219 unknown = [f for f in r.unknown if lfdirstate[f] == '?']
1229 unknown = [f for f in r.unknown if lfdirstate[f] == '?']
1220 ignored = [f for f in r.ignored if lfdirstate[f] == '?']
1230 ignored = [f for f in r.ignored if lfdirstate[f] == '?']
1221 return scmutil.status(r.modified, r.added, r.removed, r.deleted,
1231 return scmutil.status(r.modified, r.added, r.removed, r.deleted,
1222 unknown, ignored, r.clean)
1232 unknown, ignored, r.clean)
1223 repo.status = overridestatus
1233 repo.status = overridestatus
1224 orig(ui, repo, *dirs, **opts)
1234 orig(ui, repo, *dirs, **opts)
1225 repo.status = oldstatus
1235 repo.status = oldstatus
1226 def overriderollback(orig, ui, repo, **opts):
1236 def overriderollback(orig, ui, repo, **opts):
1227 wlock = repo.wlock()
1237 wlock = repo.wlock()
1228 try:
1238 try:
1229 before = repo.dirstate.parents()
1239 before = repo.dirstate.parents()
1230 orphans = set(f for f in repo.dirstate
1240 orphans = set(f for f in repo.dirstate
1231 if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
1241 if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
1232 result = orig(ui, repo, **opts)
1242 result = orig(ui, repo, **opts)
1233 after = repo.dirstate.parents()
1243 after = repo.dirstate.parents()
1234 if before == after:
1244 if before == after:
1235 return result # no need to restore standins
1245 return result # no need to restore standins
1236
1246
1237 pctx = repo['.']
1247 pctx = repo['.']
1238 for f in repo.dirstate:
1248 for f in repo.dirstate:
1239 if lfutil.isstandin(f):
1249 if lfutil.isstandin(f):
1240 orphans.discard(f)
1250 orphans.discard(f)
1241 if repo.dirstate[f] == 'r':
1251 if repo.dirstate[f] == 'r':
1242 repo.wvfs.unlinkpath(f, ignoremissing=True)
1252 repo.wvfs.unlinkpath(f, ignoremissing=True)
1243 elif f in pctx:
1253 elif f in pctx:
1244 fctx = pctx[f]
1254 fctx = pctx[f]
1245 repo.wwrite(f, fctx.data(), fctx.flags())
1255 repo.wwrite(f, fctx.data(), fctx.flags())
1246 else:
1256 else:
1247 # content of standin is not so important in 'a',
1257 # content of standin is not so important in 'a',
1248 # 'm' or 'n' (coming from the 2nd parent) cases
1258 # 'm' or 'n' (coming from the 2nd parent) cases
1249 lfutil.writestandin(repo, f, '', False)
1259 lfutil.writestandin(repo, f, '', False)
1250 for standin in orphans:
1260 for standin in orphans:
1251 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1261 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1252
1262
1253 lfdirstate = lfutil.openlfdirstate(ui, repo)
1263 lfdirstate = lfutil.openlfdirstate(ui, repo)
1254 orphans = set(lfdirstate)
1264 orphans = set(lfdirstate)
1255 lfiles = lfutil.listlfiles(repo)
1265 lfiles = lfutil.listlfiles(repo)
1256 for file in lfiles:
1266 for file in lfiles:
1257 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1267 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1258 orphans.discard(file)
1268 orphans.discard(file)
1259 for lfile in orphans:
1269 for lfile in orphans:
1260 lfdirstate.drop(lfile)
1270 lfdirstate.drop(lfile)
1261 lfdirstate.write()
1271 lfdirstate.write()
1262 finally:
1272 finally:
1263 wlock.release()
1273 wlock.release()
1264 return result
1274 return result
1265
1275
1266 def overridetransplant(orig, ui, repo, *revs, **opts):
1276 def overridetransplant(orig, ui, repo, *revs, **opts):
1267 resuming = opts.get('continue')
1277 resuming = opts.get('continue')
1268 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1278 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1269 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1279 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1270 try:
1280 try:
1271 result = orig(ui, repo, *revs, **opts)
1281 result = orig(ui, repo, *revs, **opts)
1272 finally:
1282 finally:
1273 repo._lfstatuswriters.pop()
1283 repo._lfstatuswriters.pop()
1274 repo._lfcommithooks.pop()
1284 repo._lfcommithooks.pop()
1275 return result
1285 return result
1276
1286
1277 def overridecat(orig, ui, repo, file1, *pats, **opts):
1287 def overridecat(orig, ui, repo, file1, *pats, **opts):
1278 ctx = scmutil.revsingle(repo, opts.get('rev'))
1288 ctx = scmutil.revsingle(repo, opts.get('rev'))
1279 err = 1
1289 err = 1
1280 notbad = set()
1290 notbad = set()
1281 m = scmutil.match(ctx, (file1,) + pats, opts)
1291 m = scmutil.match(ctx, (file1,) + pats, opts)
1282 origmatchfn = m.matchfn
1292 origmatchfn = m.matchfn
1283 def lfmatchfn(f):
1293 def lfmatchfn(f):
1284 if origmatchfn(f):
1294 if origmatchfn(f):
1285 return True
1295 return True
1286 lf = lfutil.splitstandin(f)
1296 lf = lfutil.splitstandin(f)
1287 if lf is None:
1297 if lf is None:
1288 return False
1298 return False
1289 notbad.add(lf)
1299 notbad.add(lf)
1290 return origmatchfn(lf)
1300 return origmatchfn(lf)
1291 m.matchfn = lfmatchfn
1301 m.matchfn = lfmatchfn
1292 origbadfn = m.bad
1302 origbadfn = m.bad
1293 def lfbadfn(f, msg):
1303 def lfbadfn(f, msg):
1294 if not f in notbad:
1304 if not f in notbad:
1295 origbadfn(f, msg)
1305 origbadfn(f, msg)
1296 m.bad = lfbadfn
1306 m.bad = lfbadfn
1297
1307
1298 origvisitdirfn = m.visitdir
1308 origvisitdirfn = m.visitdir
1299 def lfvisitdirfn(dir):
1309 def lfvisitdirfn(dir):
1300 if dir == lfutil.shortname:
1310 if dir == lfutil.shortname:
1301 return True
1311 return True
1302 ret = origvisitdirfn(dir)
1312 ret = origvisitdirfn(dir)
1303 if ret:
1313 if ret:
1304 return ret
1314 return ret
1305 lf = lfutil.splitstandin(dir)
1315 lf = lfutil.splitstandin(dir)
1306 if lf is None:
1316 if lf is None:
1307 return False
1317 return False
1308 return origvisitdirfn(lf)
1318 return origvisitdirfn(lf)
1309 m.visitdir = lfvisitdirfn
1319 m.visitdir = lfvisitdirfn
1310
1320
1311 for f in ctx.walk(m):
1321 for f in ctx.walk(m):
1312 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1322 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1313 pathname=f)
1323 pathname=f)
1314 lf = lfutil.splitstandin(f)
1324 lf = lfutil.splitstandin(f)
1315 if lf is None or origmatchfn(f):
1325 if lf is None or origmatchfn(f):
1316 # duplicating unreachable code from commands.cat
1326 # duplicating unreachable code from commands.cat
1317 data = ctx[f].data()
1327 data = ctx[f].data()
1318 if opts.get('decode'):
1328 if opts.get('decode'):
1319 data = repo.wwritedata(f, data)
1329 data = repo.wwritedata(f, data)
1320 fp.write(data)
1330 fp.write(data)
1321 else:
1331 else:
1322 hash = lfutil.readstandin(repo, lf, ctx.rev())
1332 hash = lfutil.readstandin(repo, lf, ctx.rev())
1323 if not lfutil.inusercache(repo.ui, hash):
1333 if not lfutil.inusercache(repo.ui, hash):
1324 store = basestore._openstore(repo)
1334 store = basestore._openstore(repo)
1325 success, missing = store.get([(lf, hash)])
1335 success, missing = store.get([(lf, hash)])
1326 if len(success) != 1:
1336 if len(success) != 1:
1327 raise util.Abort(
1337 raise util.Abort(
1328 _('largefile %s is not in cache and could not be '
1338 _('largefile %s is not in cache and could not be '
1329 'downloaded') % lf)
1339 'downloaded') % lf)
1330 path = lfutil.usercachepath(repo.ui, hash)
1340 path = lfutil.usercachepath(repo.ui, hash)
1331 fpin = open(path, "rb")
1341 fpin = open(path, "rb")
1332 for chunk in util.filechunkiter(fpin, 128 * 1024):
1342 for chunk in util.filechunkiter(fpin, 128 * 1024):
1333 fp.write(chunk)
1343 fp.write(chunk)
1334 fpin.close()
1344 fpin.close()
1335 fp.close()
1345 fp.close()
1336 err = 0
1346 err = 0
1337 return err
1347 return err
1338
1348
1339 def mergeupdate(orig, repo, node, branchmerge, force, partial,
1349 def mergeupdate(orig, repo, node, branchmerge, force, partial,
1340 *args, **kwargs):
1350 *args, **kwargs):
1341 wlock = repo.wlock()
1351 wlock = repo.wlock()
1342 try:
1352 try:
1343 # branch | | |
1353 # branch | | |
1344 # merge | force | partial | action
1354 # merge | force | partial | action
1345 # -------+-------+---------+--------------
1355 # -------+-------+---------+--------------
1346 # x | x | x | linear-merge
1356 # x | x | x | linear-merge
1347 # o | x | x | branch-merge
1357 # o | x | x | branch-merge
1348 # x | o | x | overwrite (as clean update)
1358 # x | o | x | overwrite (as clean update)
1349 # o | o | x | force-branch-merge (*1)
1359 # o | o | x | force-branch-merge (*1)
1350 # x | x | o | (*)
1360 # x | x | o | (*)
1351 # o | x | o | (*)
1361 # o | x | o | (*)
1352 # x | o | o | overwrite (as revert)
1362 # x | o | o | overwrite (as revert)
1353 # o | o | o | (*)
1363 # o | o | o | (*)
1354 #
1364 #
1355 # (*) don't care
1365 # (*) don't care
1356 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1366 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1357
1367
1358 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1368 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1359 unsure, s = lfdirstate.status(match_.always(repo.root,
1369 unsure, s = lfdirstate.status(match_.always(repo.root,
1360 repo.getcwd()),
1370 repo.getcwd()),
1361 [], False, False, False)
1371 [], False, False, False)
1362 pctx = repo['.']
1372 pctx = repo['.']
1363 for lfile in unsure + s.modified:
1373 for lfile in unsure + s.modified:
1364 lfileabs = repo.wvfs.join(lfile)
1374 lfileabs = repo.wvfs.join(lfile)
1365 if not os.path.exists(lfileabs):
1375 if not os.path.exists(lfileabs):
1366 continue
1376 continue
1367 lfhash = lfutil.hashrepofile(repo, lfile)
1377 lfhash = lfutil.hashrepofile(repo, lfile)
1368 standin = lfutil.standin(lfile)
1378 standin = lfutil.standin(lfile)
1369 lfutil.writestandin(repo, standin, lfhash,
1379 lfutil.writestandin(repo, standin, lfhash,
1370 lfutil.getexecutable(lfileabs))
1380 lfutil.getexecutable(lfileabs))
1371 if (standin in pctx and
1381 if (standin in pctx and
1372 lfhash == lfutil.readstandin(repo, lfile, '.')):
1382 lfhash == lfutil.readstandin(repo, lfile, '.')):
1373 lfdirstate.normal(lfile)
1383 lfdirstate.normal(lfile)
1374 for lfile in s.added:
1384 for lfile in s.added:
1375 lfutil.updatestandin(repo, lfutil.standin(lfile))
1385 lfutil.updatestandin(repo, lfutil.standin(lfile))
1376 lfdirstate.write()
1386 lfdirstate.write()
1377
1387
1378 oldstandins = lfutil.getstandinsstate(repo)
1388 oldstandins = lfutil.getstandinsstate(repo)
1379
1389
1380 result = orig(repo, node, branchmerge, force, partial, *args, **kwargs)
1390 result = orig(repo, node, branchmerge, force, partial, *args, **kwargs)
1381
1391
1382 newstandins = lfutil.getstandinsstate(repo)
1392 newstandins = lfutil.getstandinsstate(repo)
1383 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1393 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1384 if branchmerge or force or partial:
1394 if branchmerge or force or partial:
1385 filelist.extend(s.deleted + s.removed)
1395 filelist.extend(s.deleted + s.removed)
1386
1396
1387 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1397 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1388 normallookup=partial)
1398 normallookup=partial)
1389
1399
1390 return result
1400 return result
1391 finally:
1401 finally:
1392 wlock.release()
1402 wlock.release()
1393
1403
1394 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1404 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1395 result = orig(repo, files, *args, **kwargs)
1405 result = orig(repo, files, *args, **kwargs)
1396
1406
1397 filelist = [lfutil.splitstandin(f) for f in files if lfutil.isstandin(f)]
1407 filelist = [lfutil.splitstandin(f) for f in files if lfutil.isstandin(f)]
1398 if filelist:
1408 if filelist:
1399 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1409 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1400 printmessage=False, normallookup=True)
1410 printmessage=False, normallookup=True)
1401
1411
1402 return result
1412 return result
@@ -1,170 +1,172 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''setup for largefiles extension: uisetup'''
9 '''setup for largefiles extension: uisetup'''
10
10
11 from mercurial import archival, cmdutil, commands, extensions, filemerge, hg, \
11 from mercurial import archival, cmdutil, commands, extensions, filemerge, hg, \
12 httppeer, merge, scmutil, sshpeer, wireproto, revset, subrepo, copies
12 httppeer, merge, scmutil, sshpeer, wireproto, revset, subrepo, copies
13 from mercurial.i18n import _
13 from mercurial.i18n import _
14 from mercurial.hgweb import hgweb_mod, webcommands
14 from mercurial.hgweb import hgweb_mod, webcommands
15
15
16 import overrides
16 import overrides
17 import proto
17 import proto
18
18
19 def uisetup(ui):
19 def uisetup(ui):
20 # Disable auto-status for some commands which assume that all
20 # Disable auto-status for some commands which assume that all
21 # files in the result are under Mercurial's control
21 # files in the result are under Mercurial's control
22
22
23 entry = extensions.wrapcommand(commands.table, 'add',
23 entry = extensions.wrapcommand(commands.table, 'add',
24 overrides.overrideadd)
24 overrides.overrideadd)
25 addopt = [('', 'large', None, _('add as largefile')),
25 addopt = [('', 'large', None, _('add as largefile')),
26 ('', 'normal', None, _('add as normal file')),
26 ('', 'normal', None, _('add as normal file')),
27 ('', 'lfsize', '', _('add all files above this size '
27 ('', 'lfsize', '', _('add all files above this size '
28 '(in megabytes) as largefiles '
28 '(in megabytes) as largefiles '
29 '(default: 10)'))]
29 '(default: 10)'))]
30 entry[1].extend(addopt)
30 entry[1].extend(addopt)
31
31
32 # The scmutil function is called both by the (trivial) addremove command,
32 # The scmutil function is called both by the (trivial) addremove command,
33 # and in the process of handling commit -A (issue3542)
33 # and in the process of handling commit -A (issue3542)
34 entry = extensions.wrapfunction(scmutil, 'addremove',
34 entry = extensions.wrapfunction(scmutil, 'addremove',
35 overrides.scmutiladdremove)
35 overrides.scmutiladdremove)
36 extensions.wrapfunction(cmdutil, 'add', overrides.cmdutiladd)
36 extensions.wrapfunction(cmdutil, 'add', overrides.cmdutiladd)
37 extensions.wrapfunction(cmdutil, 'remove', overrides.cmdutilremove)
37 extensions.wrapfunction(cmdutil, 'remove', overrides.cmdutilremove)
38 extensions.wrapfunction(cmdutil, 'forget', overrides.cmdutilforget)
38 extensions.wrapfunction(cmdutil, 'forget', overrides.cmdutilforget)
39
39
40 extensions.wrapfunction(copies, 'pathcopies', overrides.copiespathcopies)
40 extensions.wrapfunction(copies, 'pathcopies', overrides.copiespathcopies)
41
41
42 # Subrepos call status function
42 # Subrepos call status function
43 entry = extensions.wrapcommand(commands.table, 'status',
43 entry = extensions.wrapcommand(commands.table, 'status',
44 overrides.overridestatus)
44 overrides.overridestatus)
45 entry = extensions.wrapfunction(subrepo.hgsubrepo, 'status',
45 entry = extensions.wrapfunction(subrepo.hgsubrepo, 'status',
46 overrides.overridestatusfn)
46 overrides.overridestatusfn)
47
47
48 entry = extensions.wrapcommand(commands.table, 'log',
48 entry = extensions.wrapcommand(commands.table, 'log',
49 overrides.overridelog)
49 overrides.overridelog)
50 entry = extensions.wrapcommand(commands.table, 'rollback',
50 entry = extensions.wrapcommand(commands.table, 'rollback',
51 overrides.overriderollback)
51 overrides.overriderollback)
52 entry = extensions.wrapcommand(commands.table, 'verify',
52 entry = extensions.wrapcommand(commands.table, 'verify',
53 overrides.overrideverify)
53 overrides.overrideverify)
54
54
55 verifyopt = [('', 'large', None,
55 verifyopt = [('', 'large', None,
56 _('verify that all largefiles in the current revision exist')),
56 _('verify that all largefiles in the current revision exist')),
57 ('', 'lfa', None,
57 ('', 'lfa', None,
58 _('verify largefiles in all revisions, not just current')),
58 _('verify largefiles in all revisions, not just current')),
59 ('', 'lfc', None,
59 ('', 'lfc', None,
60 _('verify local largefile contents, not just existence'))]
60 _('verify local largefile contents, not just existence'))]
61 entry[1].extend(verifyopt)
61 entry[1].extend(verifyopt)
62
62
63 entry = extensions.wrapcommand(commands.table, 'debugstate',
63 entry = extensions.wrapcommand(commands.table, 'debugstate',
64 overrides.overridedebugstate)
64 overrides.overridedebugstate)
65 debugstateopt = [('', 'large', None, _('display largefiles dirstate'))]
65 debugstateopt = [('', 'large', None, _('display largefiles dirstate'))]
66 entry[1].extend(debugstateopt)
66 entry[1].extend(debugstateopt)
67
67
68 outgoing = lambda orgfunc, *arg, **kwargs: orgfunc(*arg, **kwargs)
68 outgoing = lambda orgfunc, *arg, **kwargs: orgfunc(*arg, **kwargs)
69 entry = extensions.wrapcommand(commands.table, 'outgoing', outgoing)
69 entry = extensions.wrapcommand(commands.table, 'outgoing', outgoing)
70 outgoingopt = [('', 'large', None, _('display outgoing largefiles'))]
70 outgoingopt = [('', 'large', None, _('display outgoing largefiles'))]
71 entry[1].extend(outgoingopt)
71 entry[1].extend(outgoingopt)
72 cmdutil.outgoinghooks.add('largefiles', overrides.outgoinghook)
72 cmdutil.outgoinghooks.add('largefiles', overrides.outgoinghook)
73 entry = extensions.wrapcommand(commands.table, 'summary',
73 entry = extensions.wrapcommand(commands.table, 'summary',
74 overrides.overridesummary)
74 overrides.overridesummary)
75 summaryopt = [('', 'large', None, _('display outgoing largefiles'))]
75 summaryopt = [('', 'large', None, _('display outgoing largefiles'))]
76 entry[1].extend(summaryopt)
76 entry[1].extend(summaryopt)
77 cmdutil.summaryremotehooks.add('largefiles', overrides.summaryremotehook)
77 cmdutil.summaryremotehooks.add('largefiles', overrides.summaryremotehook)
78
78
79 entry = extensions.wrapcommand(commands.table, 'pull',
79 entry = extensions.wrapcommand(commands.table, 'pull',
80 overrides.overridepull)
80 overrides.overridepull)
81 pullopt = [('', 'all-largefiles', None,
81 pullopt = [('', 'all-largefiles', None,
82 _('download all pulled versions of largefiles (DEPRECATED)')),
82 _('download all pulled versions of largefiles (DEPRECATED)')),
83 ('', 'lfrev', [],
83 ('', 'lfrev', [],
84 _('download largefiles for these revisions'), _('REV'))]
84 _('download largefiles for these revisions'), _('REV'))]
85 entry[1].extend(pullopt)
85 entry[1].extend(pullopt)
86 revset.symbols['pulled'] = overrides.pulledrevsetsymbol
86 revset.symbols['pulled'] = overrides.pulledrevsetsymbol
87
87
88 entry = extensions.wrapcommand(commands.table, 'clone',
88 entry = extensions.wrapcommand(commands.table, 'clone',
89 overrides.overrideclone)
89 overrides.overrideclone)
90 cloneopt = [('', 'all-largefiles', None,
90 cloneopt = [('', 'all-largefiles', None,
91 _('download all versions of all largefiles'))]
91 _('download all versions of all largefiles'))]
92 entry[1].extend(cloneopt)
92 entry[1].extend(cloneopt)
93 entry = extensions.wrapfunction(hg, 'clone', overrides.hgclone)
93 entry = extensions.wrapfunction(hg, 'clone', overrides.hgclone)
94
94
95 entry = extensions.wrapcommand(commands.table, 'cat',
95 entry = extensions.wrapcommand(commands.table, 'cat',
96 overrides.overridecat)
96 overrides.overridecat)
97 entry = extensions.wrapfunction(merge, '_checkunknownfile',
97 entry = extensions.wrapfunction(merge, '_checkunknownfile',
98 overrides.overridecheckunknownfile)
98 overrides.overridecheckunknownfile)
99 entry = extensions.wrapfunction(merge, 'calculateupdates',
99 entry = extensions.wrapfunction(merge, 'calculateupdates',
100 overrides.overridecalculateupdates)
100 overrides.overridecalculateupdates)
101 entry = extensions.wrapfunction(merge, 'recordupdates',
101 entry = extensions.wrapfunction(merge, 'recordupdates',
102 overrides.mergerecordupdates)
102 overrides.mergerecordupdates)
103 entry = extensions.wrapfunction(merge, 'update',
103 entry = extensions.wrapfunction(merge, 'update',
104 overrides.mergeupdate)
104 overrides.mergeupdate)
105 entry = extensions.wrapfunction(filemerge, 'filemerge',
105 entry = extensions.wrapfunction(filemerge, 'filemerge',
106 overrides.overridefilemerge)
106 overrides.overridefilemerge)
107 entry = extensions.wrapfunction(cmdutil, 'copy',
107 entry = extensions.wrapfunction(cmdutil, 'copy',
108 overrides.overridecopy)
108 overrides.overridecopy)
109
109
110 # Summary calls dirty on the subrepos
110 # Summary calls dirty on the subrepos
111 entry = extensions.wrapfunction(subrepo.hgsubrepo, 'dirty',
111 entry = extensions.wrapfunction(subrepo.hgsubrepo, 'dirty',
112 overrides.overridedirty)
112 overrides.overridedirty)
113
113
114 entry = extensions.wrapfunction(cmdutil, 'revert',
114 entry = extensions.wrapfunction(cmdutil, 'revert',
115 overrides.overriderevert)
115 overrides.overriderevert)
116
116
117 extensions.wrapcommand(commands.table, 'archive',
117 extensions.wrapcommand(commands.table, 'archive',
118 overrides.overridearchivecmd)
118 overrides.overridearchivecmd)
119 extensions.wrapfunction(archival, 'archive', overrides.overridearchive)
119 extensions.wrapfunction(archival, 'archive', overrides.overridearchive)
120 extensions.wrapfunction(subrepo.hgsubrepo, 'archive',
120 extensions.wrapfunction(subrepo.hgsubrepo, 'archive',
121 overrides.hgsubrepoarchive)
121 overrides.hgsubrepoarchive)
122 extensions.wrapfunction(webcommands, 'archive',
123 overrides.hgwebarchive)
122 extensions.wrapfunction(cmdutil, 'bailifchanged',
124 extensions.wrapfunction(cmdutil, 'bailifchanged',
123 overrides.overridebailifchanged)
125 overrides.overridebailifchanged)
124
126
125 extensions.wrapfunction(scmutil, 'marktouched',
127 extensions.wrapfunction(scmutil, 'marktouched',
126 overrides.scmutilmarktouched)
128 overrides.scmutilmarktouched)
127
129
128 # create the new wireproto commands ...
130 # create the new wireproto commands ...
129 wireproto.commands['putlfile'] = (proto.putlfile, 'sha')
131 wireproto.commands['putlfile'] = (proto.putlfile, 'sha')
130 wireproto.commands['getlfile'] = (proto.getlfile, 'sha')
132 wireproto.commands['getlfile'] = (proto.getlfile, 'sha')
131 wireproto.commands['statlfile'] = (proto.statlfile, 'sha')
133 wireproto.commands['statlfile'] = (proto.statlfile, 'sha')
132
134
133 # ... and wrap some existing ones
135 # ... and wrap some existing ones
134 wireproto.commands['capabilities'] = (proto.capabilities, '')
136 wireproto.commands['capabilities'] = (proto.capabilities, '')
135 wireproto.commands['heads'] = (proto.heads, '')
137 wireproto.commands['heads'] = (proto.heads, '')
136 wireproto.commands['lheads'] = (wireproto.heads, '')
138 wireproto.commands['lheads'] = (wireproto.heads, '')
137
139
138 # make putlfile behave the same as push and {get,stat}lfile behave
140 # make putlfile behave the same as push and {get,stat}lfile behave
139 # the same as pull w.r.t. permissions checks
141 # the same as pull w.r.t. permissions checks
140 hgweb_mod.perms['putlfile'] = 'push'
142 hgweb_mod.perms['putlfile'] = 'push'
141 hgweb_mod.perms['getlfile'] = 'pull'
143 hgweb_mod.perms['getlfile'] = 'pull'
142 hgweb_mod.perms['statlfile'] = 'pull'
144 hgweb_mod.perms['statlfile'] = 'pull'
143
145
144 extensions.wrapfunction(webcommands, 'decodepath', overrides.decodepath)
146 extensions.wrapfunction(webcommands, 'decodepath', overrides.decodepath)
145
147
146 # the hello wireproto command uses wireproto.capabilities, so it won't see
148 # the hello wireproto command uses wireproto.capabilities, so it won't see
147 # our largefiles capability unless we replace the actual function as well.
149 # our largefiles capability unless we replace the actual function as well.
148 proto.capabilitiesorig = wireproto.capabilities
150 proto.capabilitiesorig = wireproto.capabilities
149 wireproto.capabilities = proto.capabilities
151 wireproto.capabilities = proto.capabilities
150
152
151 # can't do this in reposetup because it needs to have happened before
153 # can't do this in reposetup because it needs to have happened before
152 # wirerepo.__init__ is called
154 # wirerepo.__init__ is called
153 proto.ssholdcallstream = sshpeer.sshpeer._callstream
155 proto.ssholdcallstream = sshpeer.sshpeer._callstream
154 proto.httpoldcallstream = httppeer.httppeer._callstream
156 proto.httpoldcallstream = httppeer.httppeer._callstream
155 sshpeer.sshpeer._callstream = proto.sshrepocallstream
157 sshpeer.sshpeer._callstream = proto.sshrepocallstream
156 httppeer.httppeer._callstream = proto.httprepocallstream
158 httppeer.httppeer._callstream = proto.httprepocallstream
157
159
158 # override some extensions' stuff as well
160 # override some extensions' stuff as well
159 for name, module in extensions.extensions():
161 for name, module in extensions.extensions():
160 if name == 'purge':
162 if name == 'purge':
161 extensions.wrapcommand(getattr(module, 'cmdtable'), 'purge',
163 extensions.wrapcommand(getattr(module, 'cmdtable'), 'purge',
162 overrides.overridepurge)
164 overrides.overridepurge)
163 if name == 'rebase':
165 if name == 'rebase':
164 extensions.wrapcommand(getattr(module, 'cmdtable'), 'rebase',
166 extensions.wrapcommand(getattr(module, 'cmdtable'), 'rebase',
165 overrides.overriderebase)
167 overrides.overriderebase)
166 extensions.wrapfunction(module, 'rebase',
168 extensions.wrapfunction(module, 'rebase',
167 overrides.overriderebase)
169 overrides.overriderebase)
168 if name == 'transplant':
170 if name == 'transplant':
169 extensions.wrapcommand(getattr(module, 'cmdtable'), 'transplant',
171 extensions.wrapcommand(getattr(module, 'cmdtable'), 'transplant',
170 overrides.overridetransplant)
172 overrides.overridetransplant)
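Every hook registered in this setup file follows the same convention: the wrapper receives the original callable as its first argument and is expected to delegate to it. A minimal sketch of that shape, with hypothetical names and the registration call shown only as a comment::

    # Minimal sketch of the wrapper convention used throughout uisetup().
    def overridefoo(orig, *args, **kwargs):
        # adjust arguments or repository state here, then delegate
        return orig(*args, **kwargs)

    # registration, as done above (not executed in this sketch):
    #     extensions.wrapfunction(somemodule, 'foo', overridefoo)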
@@ -1,296 +1,309 b''
1 This file contains testcases that tend to be related to the wire protocol part
1 This file contains testcases that tend to be related to the wire protocol part
2 of largefiles.
2 of largefiles.
3
3
4 $ USERCACHE="$TESTTMP/cache"; export USERCACHE
4 $ USERCACHE="$TESTTMP/cache"; export USERCACHE
5 $ mkdir "${USERCACHE}"
5 $ mkdir "${USERCACHE}"
6 $ cat >> $HGRCPATH <<EOF
6 $ cat >> $HGRCPATH <<EOF
7 > [extensions]
7 > [extensions]
8 > largefiles=
8 > largefiles=
9 > purge=
9 > purge=
10 > rebase=
10 > rebase=
11 > transplant=
11 > transplant=
12 > [phases]
12 > [phases]
13 > publish=False
13 > publish=False
14 > [largefiles]
14 > [largefiles]
15 > minsize=2
15 > minsize=2
16 > patterns=glob:**.dat
16 > patterns=glob:**.dat
17 > usercache=${USERCACHE}
17 > usercache=${USERCACHE}
18 > [web]
19 > allow_archive = zip
18 > [hooks]
20 > [hooks]
19 > precommit=sh -c "echo \\"Invoking status precommit hook\\"; hg status"
21 > precommit=sh -c "echo \\"Invoking status precommit hook\\"; hg status"
20 > EOF
22 > EOF
21
23
22
24
23 #if serve
25 #if serve
24 vanilla clients not locked out from largefiles servers on vanilla repos
26 vanilla clients not locked out from largefiles servers on vanilla repos
25 $ mkdir r1
27 $ mkdir r1
26 $ cd r1
28 $ cd r1
27 $ hg init
29 $ hg init
28 $ echo c1 > f1
30 $ echo c1 > f1
29 $ hg add f1
31 $ hg add f1
30 $ hg commit -m "m1"
32 $ hg commit -m "m1"
31 Invoking status precommit hook
33 Invoking status precommit hook
32 A f1
34 A f1
33 $ cd ..
35 $ cd ..
34 $ hg serve -R r1 -d -p $HGPORT --pid-file hg.pid
36 $ hg serve -R r1 -d -p $HGPORT --pid-file hg.pid
35 $ cat hg.pid >> $DAEMON_PIDS
37 $ cat hg.pid >> $DAEMON_PIDS
36 $ hg --config extensions.largefiles=! clone http://localhost:$HGPORT r2
38 $ hg --config extensions.largefiles=! clone http://localhost:$HGPORT r2
37 requesting all changes
39 requesting all changes
38 adding changesets
40 adding changesets
39 adding manifests
41 adding manifests
40 adding file changes
42 adding file changes
41 added 1 changesets with 1 changes to 1 files
43 added 1 changesets with 1 changes to 1 files
42 updating to branch default
44 updating to branch default
43 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
45 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
44
46
45 largefiles clients still work with vanilla servers
47 largefiles clients still work with vanilla servers
46 $ hg --config extensions.largefiles=! serve -R r1 -d -p $HGPORT1 --pid-file hg.pid
48 $ hg --config extensions.largefiles=! serve -R r1 -d -p $HGPORT1 --pid-file hg.pid
47 $ cat hg.pid >> $DAEMON_PIDS
49 $ cat hg.pid >> $DAEMON_PIDS
48 $ hg clone http://localhost:$HGPORT1 r3
50 $ hg clone http://localhost:$HGPORT1 r3
49 requesting all changes
51 requesting all changes
50 adding changesets
52 adding changesets
51 adding manifests
53 adding manifests
52 adding file changes
54 adding file changes
53 added 1 changesets with 1 changes to 1 files
55 added 1 changesets with 1 changes to 1 files
54 updating to branch default
56 updating to branch default
55 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
57 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
56 #endif
58 #endif
57
59
58 vanilla clients locked out from largefiles http repos
60 vanilla clients locked out from largefiles http repos
59 $ mkdir r4
61 $ mkdir r4
60 $ cd r4
62 $ cd r4
61 $ hg init
63 $ hg init
62 $ echo c1 > f1
64 $ echo c1 > f1
63 $ hg add --large f1
65 $ hg add --large f1
64 $ hg commit -m "m1"
66 $ hg commit -m "m1"
65 Invoking status precommit hook
67 Invoking status precommit hook
66 A f1
68 A f1
67 $ cd ..
69 $ cd ..
68
70
69 largefiles can be pushed locally (issue3583)
71 largefiles can be pushed locally (issue3583)
70 $ hg init dest
72 $ hg init dest
71 $ cd r4
73 $ cd r4
72 $ hg outgoing ../dest
74 $ hg outgoing ../dest
73 comparing with ../dest
75 comparing with ../dest
74 searching for changes
76 searching for changes
75 changeset: 0:639881c12b4c
77 changeset: 0:639881c12b4c
76 tag: tip
78 tag: tip
77 user: test
79 user: test
78 date: Thu Jan 01 00:00:00 1970 +0000
80 date: Thu Jan 01 00:00:00 1970 +0000
79 summary: m1
81 summary: m1
80
82
81 $ hg push ../dest
83 $ hg push ../dest
82 pushing to ../dest
84 pushing to ../dest
83 searching for changes
85 searching for changes
84 adding changesets
86 adding changesets
85 adding manifests
87 adding manifests
86 adding file changes
88 adding file changes
87 added 1 changesets with 1 changes to 1 files
89 added 1 changesets with 1 changes to 1 files
88
90
89 exit code with nothing outgoing (issue3611)
91 exit code with nothing outgoing (issue3611)
90 $ hg outgoing ../dest
92 $ hg outgoing ../dest
91 comparing with ../dest
93 comparing with ../dest
92 searching for changes
94 searching for changes
93 no changes found
95 no changes found
94 [1]
96 [1]
95 $ cd ..
97 $ cd ..
96
98
97 #if serve
99 #if serve
98 $ hg serve -R r4 -d -p $HGPORT2 --pid-file hg.pid
100 $ hg serve -R r4 -d -p $HGPORT2 --pid-file hg.pid
99 $ cat hg.pid >> $DAEMON_PIDS
101 $ cat hg.pid >> $DAEMON_PIDS
100 $ hg --config extensions.largefiles=! clone http://localhost:$HGPORT2 r5
102 $ hg --config extensions.largefiles=! clone http://localhost:$HGPORT2 r5
101 abort: remote error:
103 abort: remote error:
102
104
103 This repository uses the largefiles extension.
105 This repository uses the largefiles extension.
104
106
105 Please enable it in your Mercurial config file.
107 Please enable it in your Mercurial config file.
106 [255]
108 [255]
107
109
108 used all HGPORTs, kill all daemons
110 used all HGPORTs, kill all daemons
109 $ killdaemons.py
111 $ killdaemons.py
110 #endif
112 #endif
111
113
112 vanilla clients locked out from largefiles ssh repos
114 vanilla clients locked out from largefiles ssh repos
113 $ hg --config extensions.largefiles=! clone -e "python \"$TESTDIR/dummyssh\"" ssh://user@dummy/r4 r5
115 $ hg --config extensions.largefiles=! clone -e "python \"$TESTDIR/dummyssh\"" ssh://user@dummy/r4 r5
114 remote:
116 remote:
115 remote: This repository uses the largefiles extension.
117 remote: This repository uses the largefiles extension.
116 remote:
118 remote:
117 remote: Please enable it in your Mercurial config file.
119 remote: Please enable it in your Mercurial config file.
118 remote:
120 remote:
119 remote: -
121 remote: -
120 abort: remote error
122 abort: remote error
121 (check previous remote output)
123 (check previous remote output)
122 [255]
124 [255]
123
125
124 #if serve
126 #if serve
125
127
126 largefiles clients refuse to push largefiles repos to vanilla servers
128 largefiles clients refuse to push largefiles repos to vanilla servers
127 $ mkdir r6
129 $ mkdir r6
128 $ cd r6
130 $ cd r6
129 $ hg init
131 $ hg init
130 $ echo c1 > f1
132 $ echo c1 > f1
131 $ hg add f1
133 $ hg add f1
132 $ hg commit -m "m1"
134 $ hg commit -m "m1"
133 Invoking status precommit hook
135 Invoking status precommit hook
134 A f1
136 A f1
135 $ cat >> .hg/hgrc <<!
137 $ cat >> .hg/hgrc <<!
136 > [web]
138 > [web]
137 > push_ssl = false
139 > push_ssl = false
138 > allow_push = *
140 > allow_push = *
139 > !
141 > !
140 $ cd ..
142 $ cd ..
141 $ hg clone r6 r7
143 $ hg clone r6 r7
142 updating to branch default
144 updating to branch default
143 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
145 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
144 $ cd r7
146 $ cd r7
145 $ echo c2 > f2
147 $ echo c2 > f2
146 $ hg add --large f2
148 $ hg add --large f2
147 $ hg commit -m "m2"
149 $ hg commit -m "m2"
148 Invoking status precommit hook
150 Invoking status precommit hook
149 A f2
151 A f2
150 $ hg --config extensions.largefiles=! -R ../r6 serve -d -p $HGPORT --pid-file ../hg.pid
152 $ hg --config extensions.largefiles=! -R ../r6 serve -d -p $HGPORT --pid-file ../hg.pid
151 $ cat ../hg.pid >> $DAEMON_PIDS
153 $ cat ../hg.pid >> $DAEMON_PIDS
152 $ hg push http://localhost:$HGPORT
154 $ hg push http://localhost:$HGPORT
153 pushing to http://localhost:$HGPORT/
155 pushing to http://localhost:$HGPORT/
154 searching for changes
156 searching for changes
155 abort: http://localhost:$HGPORT/ does not appear to be a largefile store
157 abort: http://localhost:$HGPORT/ does not appear to be a largefile store
156 [255]
158 [255]
157 $ cd ..
159 $ cd ..
158
160
159 putlfile errors are shown (issue3123)
161 putlfile errors are shown (issue3123)
161 Corrupt the cached largefile in r7 and move it out of the server's usercache
162 Corrupt the cached largefile in r7 and move it out of the server's usercache
161 $ mv r7/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8 .
163 $ mv r7/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8 .
162 $ echo 'client side corruption' > r7/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8
164 $ echo 'client side corruption' > r7/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8
163 $ rm "$USERCACHE/4cdac4d8b084d0b599525cf732437fb337d422a8"
165 $ rm "$USERCACHE/4cdac4d8b084d0b599525cf732437fb337d422a8"
164 $ hg init empty
166 $ hg init empty
165 $ hg serve -R empty -d -p $HGPORT1 --pid-file hg.pid \
167 $ hg serve -R empty -d -p $HGPORT1 --pid-file hg.pid \
166 > --config 'web.allow_push=*' --config web.push_ssl=False
168 > --config 'web.allow_push=*' --config web.push_ssl=False
167 $ cat hg.pid >> $DAEMON_PIDS
169 $ cat hg.pid >> $DAEMON_PIDS
168 $ hg push -R r7 http://localhost:$HGPORT1
170 $ hg push -R r7 http://localhost:$HGPORT1
169 pushing to http://localhost:$HGPORT1/
171 pushing to http://localhost:$HGPORT1/
170 searching for changes
172 searching for changes
171 remote: largefiles: failed to put 4cdac4d8b084d0b599525cf732437fb337d422a8 into store: largefile contents do not match hash
173 remote: largefiles: failed to put 4cdac4d8b084d0b599525cf732437fb337d422a8 into store: largefile contents do not match hash
172 abort: remotestore: could not put $TESTTMP/r7/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8 to remote store http://localhost:$HGPORT1/ (glob)
174 abort: remotestore: could not put $TESTTMP/r7/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8 to remote store http://localhost:$HGPORT1/ (glob)
173 [255]
175 [255]
174 $ mv 4cdac4d8b084d0b599525cf732437fb337d422a8 r7/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8
176 $ mv 4cdac4d8b084d0b599525cf732437fb337d422a8 r7/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8
175 Push of a file that exists on the server but is corrupted - magic healing would be nice ... but too magic
177 Push of a file that exists on the server but is corrupted - magic healing would be nice ... but too magic
176 $ echo "server side corruption" > empty/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8
178 $ echo "server side corruption" > empty/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8
177 $ hg push -R r7 http://localhost:$HGPORT1
179 $ hg push -R r7 http://localhost:$HGPORT1
178 pushing to http://localhost:$HGPORT1/
180 pushing to http://localhost:$HGPORT1/
179 searching for changes
181 searching for changes
180 remote: adding changesets
182 remote: adding changesets
181 remote: adding manifests
183 remote: adding manifests
182 remote: adding file changes
184 remote: adding file changes
183 remote: added 2 changesets with 2 changes to 2 files
185 remote: added 2 changesets with 2 changes to 2 files
184 $ cat empty/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8
186 $ cat empty/.hg/largefiles/4cdac4d8b084d0b599525cf732437fb337d422a8
185 server side corruption
187 server side corruption
186 $ rm -rf empty
188 $ rm -rf empty
187
189
188 Push a largefiles repository to a served empty repository
190 Push a largefiles repository to a served empty repository
189 $ hg init r8
191 $ hg init r8
190 $ echo c3 > r8/f1
192 $ echo c3 > r8/f1
191 $ hg add --large r8/f1 -R r8
193 $ hg add --large r8/f1 -R r8
192 $ hg commit -m "m1" -R r8
194 $ hg commit -m "m1" -R r8
193 Invoking status precommit hook
195 Invoking status precommit hook
194 A f1
196 A f1
195 $ hg init empty
197 $ hg init empty
196 $ hg serve -R empty -d -p $HGPORT2 --pid-file hg.pid \
198 $ hg serve -R empty -d -p $HGPORT2 --pid-file hg.pid \
197 > --config 'web.allow_push=*' --config web.push_ssl=False
199 > --config 'web.allow_push=*' --config web.push_ssl=False
198 $ cat hg.pid >> $DAEMON_PIDS
200 $ cat hg.pid >> $DAEMON_PIDS
199 $ rm "${USERCACHE}"/*
201 $ rm "${USERCACHE}"/*
200 $ hg push -R r8 http://localhost:$HGPORT2/#default
202 $ hg push -R r8 http://localhost:$HGPORT2/#default
201 pushing to http://localhost:$HGPORT2/
203 pushing to http://localhost:$HGPORT2/
202 searching for changes
204 searching for changes
203 remote: adding changesets
205 remote: adding changesets
204 remote: adding manifests
206 remote: adding manifests
205 remote: adding file changes
207 remote: adding file changes
206 remote: added 1 changesets with 1 changes to 1 files
208 remote: added 1 changesets with 1 changes to 1 files
207 $ [ -f "${USERCACHE}"/02a439e5c31c526465ab1a0ca1f431f76b827b90 ]
209 $ [ -f "${USERCACHE}"/02a439e5c31c526465ab1a0ca1f431f76b827b90 ]
208 $ [ -f empty/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90 ]
210 $ [ -f empty/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90 ]
209
211
210 Clone over http, no largefiles pulled on clone.
212 Clone over http, no largefiles pulled on clone.
211
213
212 $ hg clone http://localhost:$HGPORT2/#default http-clone -U
214 $ hg clone http://localhost:$HGPORT2/#default http-clone -U
213 adding changesets
215 adding changesets
214 adding manifests
216 adding manifests
215 adding file changes
217 adding file changes
216 added 1 changesets with 1 changes to 1 files
218 added 1 changesets with 1 changes to 1 files
217
219
220 Archive contains largefiles
221 >>> import urllib2, os
222 >>> u = 'http://localhost:%s/archive/default.zip' % os.environ['HGPORT2']
223 >>> with open('archive.zip', 'w') as f:
224 ... f.write(urllib2.urlopen(u).read())
225 $ unzip -t archive.zip
226 Archive: archive.zip
227 testing: empty-default/.hg_archival.txt OK
228 testing: empty-default/f1 OK
229 No errors detected in compressed data of archive.zip.
230
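The doctest block above downloads /archive/default.zip from the served repository, and the unzip -t run confirms that both .hg_archival.txt and the largefile f1 made it into the archive. Purely as an illustrative alternative (not part of the test), the same archive.zip could be inspected with the standard-library zipfile module instead of the external unzip binary:

    import zipfile
    zf = zipfile.ZipFile('archive.zip')
    print sorted(zf.namelist())     # expect empty-default/.hg_archival.txt and empty-default/f1
    assert zf.testzip() is None     # testzip() returns None when no member is corrupt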
218 test 'verify' with remotestore:
231 test 'verify' with remotestore:
219
232
220 $ rm "${USERCACHE}"/02a439e5c31c526465ab1a0ca1f431f76b827b90
233 $ rm "${USERCACHE}"/02a439e5c31c526465ab1a0ca1f431f76b827b90
221 $ mv empty/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90 .
234 $ mv empty/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90 .
222 $ hg -R http-clone verify --large --lfa
235 $ hg -R http-clone verify --large --lfa
223 checking changesets
236 checking changesets
224 checking manifests
237 checking manifests
225 crosschecking files in changesets and manifests
238 crosschecking files in changesets and manifests
226 checking files
239 checking files
227 1 files, 1 changesets, 1 total revisions
240 1 files, 1 changesets, 1 total revisions
228 searching 1 changesets for largefiles
241 searching 1 changesets for largefiles
229 changeset 0:cf03e5bb9936: f1 missing
242 changeset 0:cf03e5bb9936: f1 missing
230 verified existence of 1 revisions of 1 largefiles
243 verified existence of 1 revisions of 1 largefiles
231 [1]
244 [1]
232 $ mv 02a439e5c31c526465ab1a0ca1f431f76b827b90 empty/.hg/largefiles/
245 $ mv 02a439e5c31c526465ab1a0ca1f431f76b827b90 empty/.hg/largefiles/
233 $ hg -R http-clone -q verify --large --lfa
246 $ hg -R http-clone -q verify --large --lfa
234
247
235 largefiles pulled on update - a largefile missing on the server:
248 largefiles pulled on update - a largefile missing on the server:
236 $ mv empty/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90 .
249 $ mv empty/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90 .
237 $ hg -R http-clone up --config largefiles.usercache=http-clone-usercache
250 $ hg -R http-clone up --config largefiles.usercache=http-clone-usercache
238 getting changed largefiles
251 getting changed largefiles
239 f1: largefile 02a439e5c31c526465ab1a0ca1f431f76b827b90 not available from http://localhost:$HGPORT2/
252 f1: largefile 02a439e5c31c526465ab1a0ca1f431f76b827b90 not available from http://localhost:$HGPORT2/
240 0 largefiles updated, 0 removed
253 0 largefiles updated, 0 removed
241 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
254 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
242 $ hg -R http-clone st
255 $ hg -R http-clone st
243 ! f1
256 ! f1
244 $ hg -R http-clone up -Cqr null
257 $ hg -R http-clone up -Cqr null
245
258
246 largefiles pulled on update - a largefile corrupted on the server:
259 largefiles pulled on update - a largefile corrupted on the server:
247 $ echo corruption > empty/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90
260 $ echo corruption > empty/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90
248 $ hg -R http-clone up --config largefiles.usercache=http-clone-usercache
261 $ hg -R http-clone up --config largefiles.usercache=http-clone-usercache
249 getting changed largefiles
262 getting changed largefiles
250 f1: data corruption (expected 02a439e5c31c526465ab1a0ca1f431f76b827b90, got 6a7bb2556144babe3899b25e5428123735bb1e27)
263 f1: data corruption (expected 02a439e5c31c526465ab1a0ca1f431f76b827b90, got 6a7bb2556144babe3899b25e5428123735bb1e27)
251 0 largefiles updated, 0 removed
264 0 largefiles updated, 0 removed
252 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
265 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
253 $ hg -R http-clone st
266 $ hg -R http-clone st
254 ! f1
267 ! f1
255 $ [ ! -f http-clone/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90 ]
268 $ [ ! -f http-clone/.hg/largefiles/02a439e5c31c526465ab1a0ca1f431f76b827b90 ]
256 $ [ ! -f http-clone/f1 ]
269 $ [ ! -f http-clone/f1 ]
257 $ [ ! -f http-clone-usercache ]
270 $ [ ! -f http-clone-usercache ]
258 $ hg -R http-clone verify --large --lfc
271 $ hg -R http-clone verify --large --lfc
259 checking changesets
272 checking changesets
260 checking manifests
273 checking manifests
261 crosschecking files in changesets and manifests
274 crosschecking files in changesets and manifests
262 checking files
275 checking files
263 1 files, 1 changesets, 1 total revisions
276 1 files, 1 changesets, 1 total revisions
264 searching 1 changesets for largefiles
277 searching 1 changesets for largefiles
265 verified contents of 1 revisions of 1 largefiles
278 verified contents of 1 revisions of 1 largefiles
266 $ hg -R http-clone up -Cqr null
279 $ hg -R http-clone up -Cqr null
267
280
268 largefiles pulled on update - no server side problems:
281 largefiles pulled on update - no server side problems:
269 $ mv 02a439e5c31c526465ab1a0ca1f431f76b827b90 empty/.hg/largefiles/
282 $ mv 02a439e5c31c526465ab1a0ca1f431f76b827b90 empty/.hg/largefiles/
270 $ hg -R http-clone --debug up --config largefiles.usercache=http-clone-usercache --config progress.debug=true
283 $ hg -R http-clone --debug up --config largefiles.usercache=http-clone-usercache --config progress.debug=true
271 resolving manifests
284 resolving manifests
272 branchmerge: False, force: False, partial: False
285 branchmerge: False, force: False, partial: False
273 ancestor: 000000000000, local: 000000000000+, remote: cf03e5bb9936
286 ancestor: 000000000000, local: 000000000000+, remote: cf03e5bb9936
274 .hglf/f1: remote created -> g
287 .hglf/f1: remote created -> g
275 getting .hglf/f1
288 getting .hglf/f1
276 updating: .hglf/f1 1/1 files (100.00%)
289 updating: .hglf/f1 1/1 files (100.00%)
277 getting changed largefiles
290 getting changed largefiles
278 using http://localhost:$HGPORT2/
291 using http://localhost:$HGPORT2/
279 sending capabilities command
292 sending capabilities command
280 sending batch command
293 sending batch command
281 getting largefiles: 0/1 lfile (0.00%)
294 getting largefiles: 0/1 lfile (0.00%)
282 getting f1:02a439e5c31c526465ab1a0ca1f431f76b827b90
295 getting f1:02a439e5c31c526465ab1a0ca1f431f76b827b90
283 sending getlfile command
296 sending getlfile command
284 found 02a439e5c31c526465ab1a0ca1f431f76b827b90 in store
297 found 02a439e5c31c526465ab1a0ca1f431f76b827b90 in store
285 1 largefiles updated, 0 removed
298 1 largefiles updated, 0 removed
286 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
299 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
287
300
288 $ ls http-clone-usercache/*
301 $ ls http-clone-usercache/*
289 http-clone-usercache/02a439e5c31c526465ab1a0ca1f431f76b827b90
302 http-clone-usercache/02a439e5c31c526465ab1a0ca1f431f76b827b90
290
303
291 $ rm -rf empty http-clone*
304 $ rm -rf empty http-clone*
292
305
293 used all HGPORTs, kill all daemons
306 used all HGPORTs, kill all daemons
294 $ killdaemons.py
307 $ killdaemons.py
295
308
296 #endif
309 #endif