##// END OF EJS Templates
largefiles: remove use of underscores that breaks coding convention
Na'Tosha Bard -
r16247:d87d9d8a default
parent child Browse files
Show More
@@ -1,195 +1,195 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''base class for store implementations and store-related utility code'''
10 10
11 11 import binascii
12 12 import re
13 13
14 14 from mercurial import util, node, hg
15 15 from mercurial.i18n import _
16 16
17 17 import lfutil
18 18
class StoreError(Exception):
    '''Signal a failure while getting files from, or putting files to,
    a central largefile store.'''

    def __init__(self, filename, hash, url, detail):
        self.filename = filename
        self.hash = hash
        self.url = url
        self.detail = detail

    def longmessage(self):
        # When a URL is known, report it; otherwise hint that no
        # default/default-push path is configured.
        if self.url:
            return ('%s: %s\n'
                    '(failed URL: %s)\n'
                    % (self.filename, self.detail, self.url))
        return ('%s: %s\n'
                '(no default or default-push path set in hgrc)\n'
                % (self.filename, self.detail))

    def __str__(self):
        return "%s: %s" % (self.url, self.detail)
40 40
class basestore(object):
    '''Abstract base class for largefile stores.

    Concrete subclasses implement put()/exists() and the private
    _getfile()/_verifyfile() hooks; get() and verify() are shared
    drivers built on those hooks.'''

    def __init__(self, ui, repo, url):
        self.ui = ui
        self.repo = repo
        self.url = url

    def put(self, source, hash):
        '''Put source file into the store under <filename>/<hash>.'''
        raise NotImplementedError('abstract method')

    def exists(self, hash):
        '''Check to see if the store contains the given hash.'''
        raise NotImplementedError('abstract method')

    def get(self, files):
        '''Get the specified largefiles from the store and write to local
        files under repo.root. files is a list of (filename, hash)
        tuples. Return (success, missing), lists of files successfuly
        downloaded and those not found in the store. success is a list
        of (filename, hash) tuples; missing is a list of filenames that
        we could not get. (The detailed error message will already have
        been presented to the user, so missing is just supplied as a
        summary.)'''
        success = []
        missing = []
        ui = self.ui

        at = 0
        for filename, hash in files:
            ui.progress(_('getting largefiles'), at, unit='lfile',
                        total=len(files))
            at += 1
            ui.note(_('getting %s:%s\n') % (filename, hash))

            # download through an atomic temp file so a failed or corrupt
            # transfer never leaves a partial file in the store
            storefilename = lfutil.storepath(self.repo, hash)
            tmpfile = util.atomictempfile(storefilename,
                createmode=self.repo.store.createmode)

            try:
                # _getfile hashes as it downloads and returns the binary hash
                hhash = binascii.hexlify(self._getfile(tmpfile, filename, hash))
            except StoreError, err:
                ui.warn(err.longmessage())
                hhash = ""

            if hhash != hash:
                # empty hhash means the download itself failed (warned above)
                if hhash != "":
                    ui.warn(_('%s: data corruption (expected %s, got %s)\n')
                            % (filename, hash, hhash))
                tmpfile.discard() # no-op if it's already closed
                missing.append(filename)
                continue

            tmpfile.close()
            lfutil.linktousercache(self.repo, hash)
            success.append((filename, hhash))

        ui.progress(_('getting largefiles'), None)
        return (success, missing)

    def verify(self, revs, contents=False):
        '''Verify the existence (and, optionally, contents) of every big
        file revision referenced by every changeset in revs.
        Return 0 if all is well, non-zero on any errors.'''
        write = self.ui.write
        failed = False

        write(_('searching %d changesets for largefiles\n') % len(revs))
        verified = set()                # set of (filename, filenode) tuples

        for rev in revs:
            cctx = self.repo[rev]
            cset = "%d:%s" % (cctx.rev(), node.short(cctx.node()))

            # NOTE(review): 'failed' is rebound on every revision, so a
            # failure in an earlier rev is forgotten unless the *last*
            # rev also fails -- this looks like it should accumulate
            # (e.g. 'failed |= ...'); confirm intended behavior.
            failed = util.any(self._verifyfile(
                cctx, cset, contents, standin, verified) for standin in cctx)

        numrevs = len(verified)
        numlfiles = len(set([fname for (fname, fnode) in verified]))
        if contents:
            write(_('verified contents of %d revisions of %d largefiles\n')
                  % (numrevs, numlfiles))
        else:
            write(_('verified existence of %d revisions of %d largefiles\n')
                  % (numrevs, numlfiles))

        return int(failed)

    def _getfile(self, tmpfile, filename, hash):
        '''Fetch one revision of one file from the store and write it
        to tmpfile.  Compute the hash of the file on-the-fly as it
        downloads and return the binary hash.  Close tmpfile.  Raise
        StoreError if unable to download the file (e.g. it does not
        exist in the store).'''
        raise NotImplementedError('abstract method')

    def _verifyfile(self, cctx, cset, contents, standin, verified):
        '''Perform the actual verification of a file in the store.
        '''
        raise NotImplementedError('abstract method')
140 140
import localstore, wirestore

# Map URL scheme -> candidate store classes, tried in order by _openstore()
# until one accepts the remote.
_storeprovider = {
    'file': [localstore.localstore],
    'http': [wirestore.wirestore],
    'https': [wirestore.wirestore],
    'ssh': [wirestore.wirestore],
}

# Matches a leading "scheme://" prefix, capturing the scheme name.
# NOTE(review): '+-.' inside the character class is a range (covers ','
# too); '+.\-' would express "plus, dot, hyphen" exactly -- confirm.
_scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://')
151 151
# During clone this function is passed the src's ui object, but it needs
# the dest's ui object so it can read out of the config file -- so use
# repo.ui, never the caller's ui.
def _openstore(repo, remote=None, put=False):
    '''Return a largefile store object for *remote*, or for the repo's
    configured pull source / default path when no remote is given.
    Aborts on an unsupported URL scheme or when no provider accepts
    the remote.'''
    ui = repo.ui

    if not remote:
        lfpullsource = getattr(repo, 'lfpullsource', None)
        if lfpullsource:
            path = ui.expandpath(lfpullsource)
        else:
            path = ui.expandpath('default-push', 'default')

        # ui.expandpath() leaves 'default-push' and 'default' alone if
        # they cannot be expanded: fall back to the empty string,
        # meaning the current directory.
        if path in ('default-push', 'default'):
            path = ''
            remote = repo
        else:
            remote = hg.peer(repo, {}, path)

    # The path could be a scheme, so use Mercurial's normal machinery
    # to resolve it to a repository and use that repository's path.
    path = util.safehasattr(remote, 'url') and remote.url() or remote.path

    match = _scheme_re.match(path)
    if match:
        scheme = match.group(1)
    else:
        scheme = 'file'         # regular filesystem path

    try:
        storeproviders = _storeprovider[scheme]
    except KeyError:
        raise util.Abort(_('unsupported URL scheme %r') % scheme)

    # first provider that understands the protocol wins
    for classobj in storeproviders:
        try:
            return classobj(ui, repo, remote)
        except lfutil.storeprotonotcapable:
            pass

    raise util.Abort(_('%s does not appear to be a largefile store') % path)
@@ -1,500 +1,500 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''High-level command function for lfconvert, plus the cmdtable.'''
10 10
11 11 import os
12 12 import shutil
13 13
14 14 from mercurial import util, match as match_, hg, node, context, error
15 15 from mercurial.i18n import _
16 16
17 17 import lfutil
18 18 import basestore
19 19
20 20 # -- Commands ----------------------------------------------------------
21 21
def lfconvert(ui, src, dest, *pats, **opts):
    '''convert a normal repository to a largefiles repository

    Convert repository SOURCE to a new repository DEST, identical to
    SOURCE except that certain files will be converted as largefiles:
    specifically, any file that matches any PATTERN *or* whose size is
    above the minimum size threshold is converted as a largefile. The
    size used to determine whether or not to track a file as a
    largefile is the size of the first version of the file. The
    minimum size can be specified either with --size or in
    configuration as ``largefiles.size``.

    After running this command you will need to make sure that
    largefiles is enabled anywhere you intend to push the new
    repository.

    Use --to-normal to convert largefiles back to normal files; after
    this, the DEST repository can be used without largefiles at all.'''

    if opts['to_normal']:
        tolfile = False
    else:
        tolfile = True
        # NOTE(review): 'size' is only bound on this branch; it is passed
        # to _lfconvert_addchangeset below, which is also only reached
        # when tolfile is true, so this is safe but fragile.
        size = lfutil.getminsize(ui, True, opts.get('size'), default=None)

    if not hg.islocal(src):
        raise util.Abort(_('%s is not a local Mercurial repo') % src)
    if not hg.islocal(dest):
        raise util.Abort(_('%s is not a local Mercurial repo') % dest)

    rsrc = hg.repository(ui, src)
    ui.status(_('initializing destination %s\n') % dest)
    rdst = hg.repository(ui, dest, create=True)

    success = False
    try:
        # Lock destination to prevent modification while it is converted to.
        # Don't need to lock src because we are just reading from its history
        # which can't change.
        # NOTE(review): if rdst.lock() itself raises, the finally clause
        # references 'dstlock' before assignment (NameError masking the
        # original error) -- confirm/initialize dstlock = None above.
        dstlock = rdst.lock()

        # Get a list of all changesets in the source.  The easy way to do this
        # is to simply walk the changelog, using changelog.nodesbewteen().
        # Take a look at mercurial/revlog.py:639 for more details.
        # Use a generator instead of a list to decrease memory usage
        ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
            rsrc.heads())[0])
        # revmap maps source nodes to converted destination nodes
        revmap = {node.nullid: node.nullid}
        if tolfile:
            lfiles = set()
            normalfiles = set()
            if not pats:
                pats = ui.configlist(lfutil.longname, 'patterns', default=[])
            if pats:
                matcher = match_.match(rsrc.root, '', list(pats))
            else:
                matcher = None

            lfiletohash = {}
            for ctx in ctxs:
                ui.progress(_('converting revisions'), ctx.rev(),
                    unit=_('revision'), total=rsrc['tip'].rev())
                _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
                    lfiles, normalfiles, matcher, size, lfiletohash)
            ui.progress(_('converting revisions'), None)

            # the conversion wrote largefile payloads into the working
            # directory; clean up the standin dir and the payload files
            if os.path.exists(rdst.wjoin(lfutil.shortname)):
                shutil.rmtree(rdst.wjoin(lfutil.shortname))

            for f in lfiletohash.keys():
                if os.path.isfile(rdst.wjoin(f)):
                    os.unlink(rdst.wjoin(f))
                try:
                    os.removedirs(os.path.dirname(rdst.wjoin(f)))
                except OSError:
                    pass

            # If there were any files converted to largefiles, add largefiles
            # to the destination repository's requirements.
            if lfiles:
                rdst.requirements.add('largefiles')
                rdst._writerequirements()
        else:
            for ctx in ctxs:
                ui.progress(_('converting revisions'), ctx.rev(),
                    unit=_('revision'), total=rsrc['tip'].rev())
                _addchangeset(ui, rsrc, rdst, ctx, revmap)

            ui.progress(_('converting revisions'), None)
        success = True
    finally:
        if not success:
            # we failed, remove the new directory
            shutil.rmtree(rdst.root)
        dstlock.release()
117 117
def _addchangeset(ui, rsrc, rdst, ctx, revmap):
    '''Replay one changeset of a largefiles repo into rdst as a normal
    changeset, replacing each standin with the cached largefile content.
    Records the src->dst node mapping in revmap.'''
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    def getfilectx(repo, memctx, f):
        if lfutil.standin(f) in files:
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            try:
                fctx = ctx.filectx(lfutil.standin(f))
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = lfutil.splitstandin(renamed[0])

            # the standin's content is the hash of the real payload
            hash = fctx.data().strip()
            path = lfutil.findfile(rsrc, hash)
            ### TODO: What if the file is not cached?
            data = ''
            fd = None
            try:
                fd = open(path, 'rb')
                data = fd.read()
            finally:
                if fd:
                    fd.close()
            return context.memfilectx(f, data, 'l' in fctx.flags(),
                                      'x' in fctx.flags(), renamed)
        else:
            return _getnormalcontext(repo.ui, ctx, f, revmap)

    # translate standin names back to their real file names
    dstfiles = []
    for file in files:
        if lfutil.isstandin(file):
            dstfiles.append(lfutil.splitstandin(file))
        else:
            dstfiles.append(file)
    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
161 161
def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
        matcher, size, lfiletohash):
    '''Replay one changeset of a normal repo into rdst, converting files
    that match *matcher* or exceed *size* (MB) into largefiles.

    lfiles/normalfiles accumulate the classification across changesets;
    lfiletohash caches the last-written hash per largefile.  Records the
    src->dst node mapping in revmap.'''
    # Convert src parents to dst parents
    parents = _convertparents(ctx, revmap)

    # Generate list of changed files
    files = _getchangedfiles(ctx, parents)

    dstfiles = []
    for f in files:
        if f not in lfiles and f not in normalfiles:
            islfile = _islfile(f, ctx, matcher, size)
            # If this file was renamed or copied then copy
            # the lfileness of its predecessor
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                renamed = fctx.renamed()
                renamedlfile = renamed and renamed[0] in lfiles
                islfile |= renamedlfile
                if 'l' in fctx.flags():
                    # symlinks cannot be largefiles
                    if renamedlfile:
                        raise util.Abort(
                            _('renamed/copied largefile %s becomes symlink')
                            % f)
                    islfile = False
            if islfile:
                lfiles.add(f)
            else:
                normalfiles.add(f)

        if f in lfiles:
            dstfiles.append(lfutil.standin(f))
            # largefile in manifest if it has not been removed/renamed
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                if 'l' in fctx.flags():
                    renamed = fctx.renamed()
                    if renamed and renamed[0] in lfiles:
                        raise util.Abort(_('largefile %s becomes symlink') % f)

                # largefile was modified, update standins
                fullpath = rdst.wjoin(f)
                util.makedirs(os.path.dirname(fullpath))
                m = util.sha1('')
                m.update(ctx[f].data())
                hash = m.hexdigest()
                if f not in lfiletohash or lfiletohash[f] != hash:
                    # NOTE(review): 'fd' is not initialized before this
                    # try; if open() raises, the finally clause hits a
                    # NameError (or closes a stale fd from a previous
                    # iteration) -- confirm and add 'fd = None'.
                    try:
                        fd = open(fullpath, 'wb')
                        fd.write(ctx[f].data())
                    finally:
                        if fd:
                            fd.close()
                    executable = 'x' in ctx[f].flags()
                    os.chmod(fullpath, lfutil.getmode(executable))
                    lfutil.writestandin(rdst, lfutil.standin(f), hash,
                        executable)
                    lfiletohash[f] = hash
        else:
            # normal file
            dstfiles.append(f)

    def getfilectx(repo, memctx, f):
        if lfutil.isstandin(f):
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            srcfname = lfutil.splitstandin(f)
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                # standin is always a largefile because largefile-ness
                # doesn't change after rename or copy
                renamed = lfutil.standin(renamed[0])

            return context.memfilectx(f, lfiletohash[srcfname] + '\n', 'l' in
                fctx.flags(), 'x' in fctx.flags(), renamed)
        else:
            return _getnormalcontext(repo.ui, ctx, f, revmap)

    # Commit
    _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap)
246 246
def _commitcontext(rdst, parents, ctx, dstfiles, getfilectx, revmap):
    '''Commit a memctx mirroring *ctx* into rdst and record the
    src->dst node mapping in revmap.'''
    mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
                          getfilectx, ctx.user(), ctx.date(), ctx.extra())
    ret = rdst.commitctx(mctx)
    # keep the destination working state in sync with the new tip
    rdst.dirstate.setparents(ret)
    revmap[ctx.node()] = rdst.changelog.tip()
253 253
# Generate list of changed files
def _getchangedfiles(ctx, parents):
    '''Return the set of files touched by *ctx*; for merge changesets
    this also includes files that differ from either mapped parent.'''
    changed = set(ctx.files())
    if node.nullid in parents:
        return changed
    mc = ctx.manifest()
    mp1 = ctx.parents()[0].manifest()
    mp2 = ctx.parents()[1].manifest()
    # files present in a parent but gone from the merge result
    changed |= (set(mp1) | set(mp2)) - set(mc)
    # files whose merged version differs from either parent's version
    for fname in mc:
        if mc[fname] != mp1.get(fname, None) or \
                mc[fname] != mp2.get(fname, None):
            changed.add(fname)
    return changed
266 266
# Convert src parents to dst parents
def _convertparents(ctx, revmap):
    '''Map each parent node of *ctx* through revmap, padding the result
    with nullid so exactly two parents are returned.'''
    mapped = [revmap[p.node()] for p in ctx.parents()]
    while len(mapped) < 2:
        mapped.append(node.nullid)
    return mapped
275 275
# Get memfilectx for a normal file
def _getnormalcontext(ui, ctx, f, revmap):
    '''Return a memfilectx for non-largefile *f* in *ctx*, remapping
    .hgtags node references through revmap.  Raises IOError when the
    file is absent (removed/renamed).'''
    try:
        fctx = ctx.filectx(f)
    except error.LookupError:
        # absent from the manifest: removed or renamed in this changeset
        raise IOError()
    renamed = fctx.renamed()
    if renamed:
        renamed = renamed[0]

    data = fctx.data()
    if f == '.hgtags':
        # tag lines reference source nodes; rewrite them to dst nodes
        data = _converttags (ui, revmap, data)
    return context.memfilectx(f, data, 'l' in fctx.flags(),
                              'x' in fctx.flags(), renamed)
291 291
# Remap tag data using a revision map
def _converttags(ui, revmap, data):
    '''Rewrite a .hgtags blob, mapping each tagged node through revmap.

    Lines that cannot be parsed, carry a malformed node id, or reference
    a node missing from revmap are dropped with a warning.  Returns the
    rewritten tag data as one string.'''
    newdata = []
    for line in data.splitlines():
        try:
            id, name = line.split(' ', 1)
        except ValueError:
            # interpolate outside _() so the message template (not the
            # already-formatted string) is used as the translation key
            ui.warn(_('skipping incorrectly formatted tag %s\n') % line)
            continue
        try:
            newid = node.bin(id)
        except TypeError:
            ui.warn(_('skipping incorrectly formatted id %s\n') % id)
            continue
        try:
            newdata.append('%s %s\n' % (node.hex(revmap[newid]),
                                        name))
        except KeyError:
            ui.warn(_('no mapping for id %s\n') % id)
            continue
    return ''.join(newdata)
315 315
316 316 def _islfile(file, ctx, matcher, size):
317 317 '''Return true if file should be considered a largefile, i.e.
318 318 matcher matches it or it is larger than size.'''
319 319 # never store special .hg* files as largefiles
320 320 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
321 321 return False
322 322 if matcher and matcher(file):
323 323 return True
324 324 try:
325 325 return ctx.filectx(file).size() >= size * 1024 * 1024
326 326 except error.LookupError:
327 327 return False
328 328
def uploadlfiles(ui, rsrc, rdst, files):
    '''upload largefiles to the central store'''

    if not files:
        return

    store = basestore._openstore(rsrc, rdst, put=True)

    at = 0
    # Only transfer hashes the central store lacks.  Use a list
    # comprehension rather than filter()+lambda: it is the idiom, and it
    # guarantees len(files) below keeps working on any Python version.
    files = [h for h in files if not store.exists(h)]
    for hash in files:
        ui.progress(_('uploading largefiles'), at, unit='largefile',
                    total=len(files))
        source = lfutil.findfile(rsrc, hash)
        if not source:
            raise util.Abort(_('largefile %s missing from store'
                               ' (needs to be uploaded)') % hash)
        # XXX check for errors here
        store.put(source, hash)
        at += 1
    ui.progress(_('uploading largefiles'), None)
350 350
def verifylfiles(ui, repo, all=False, contents=False):
    '''Verify that every big file revision in the current changeset
    exists in the central store. With --contents, also verify that
    the contents of each big file revision are correct (SHA-1 hash
    matches the revision ID). With --all, check every changeset in
    this repository.'''
    revs = ['.']
    if all:
        # pass a concrete list rather than an iterator because we know a
        # list works with store.verify()
        revs = range(len(repo))

    return basestore._openstore(repo).verify(revs, contents=contents)
366 366
def cachelfiles(ui, repo, node):
    '''cachelfiles ensures that all largefiles needed by the specified revision
    are present in the repository's largefile cache.

    returns a tuple (cached, missing). cached is the list of files downloaded
    by this operation; missing is the list of files that were needed but could
    not be found.'''
    lfiles = lfutil.listlfiles(repo, node)
    toget = []

    for lfile in lfiles:
        # If we are mid-merge, then we have to trust the standin that is in the
        # working copy to have the correct hashvalue. This is because the
        # original hg.merge() already updated the standin as part of the normal
        # merge process -- we just have to update the largefile to match.
        if (getattr(repo, "_ismerging", False) and
                os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
            expectedhash = lfutil.readstandin(repo, lfile)
        else:
            expectedhash = repo[node][lfutil.standin(lfile)].data().strip()

        # if it exists and its hash matches, it might have been locally
        # modified before updating and the user chose 'local'. in this case,
        # it will not be in any store, so don't look for it.
        if ((not os.path.exists(repo.wjoin(lfile)) or
             expectedhash != lfutil.hashfile(repo.wjoin(lfile))) and
                not lfutil.findfile(repo, expectedhash)):
            toget.append((lfile, expectedhash))

    if toget:
        store = basestore._openstore(repo)
        ret = store.get(toget)
        return ret

    return ([], [])
402 402
def updatelfiles(ui, repo, filelist=None, printmessage=True):
    '''Bring working-copy largefiles in sync with their standins,
    downloading any that are missing from the cache first.  When
    *filelist* is given, only those largefiles are considered.'''
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        # union of tracked largefiles and those known to the lf dirstate
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            lfiles = [f for f in lfiles if f in filelist]

        printed = False
        if printmessage and lfiles:
            ui.status(_('getting changed largefiles\n'))
            printed = True
        cachelfiles(ui, repo, '.')

        updated, removed = 0, 0
        for i in map(lambda f: _updatelfile(repo, lfdirstate, f), lfiles):
            # increment the appropriate counter according to _updatelfile's
            # return value (1 = updated, -1 = removed, 0/None = no change)
            updated += i > 0 and i or 0
            removed -= i < 0 and i or 0
            if printmessage and (removed or updated) and not printed:
                ui.status(_('getting changed largefiles\n'))
                printed = True

        lfdirstate.write()
        if printed and printmessage:
            ui.status(_('%d largefiles updated, %d removed\n') % (updated,
                removed))
    finally:
        wlock.release()
434 434
def _updatelfile(repo, lfdirstate, lfile):
    '''updates a single largefile and copies the state of its standin from
    the repository's dirstate to its state in the lfdirstate.

    returns 1 if the file was modified, -1 if the file was removed, 0 if the
    file was unchanged, and None if the needed largefile was missing from the
    cache.'''
    ret = 0
    abslfile = repo.wjoin(lfile)
    absstandin = repo.wjoin(lfutil.standin(lfile))
    if os.path.exists(absstandin):
        if os.path.exists(absstandin+'.orig'):
            # a .orig standin exists (e.g. after a merge): keep a matching
            # backup of the current largefile content too
            shutil.copyfile(abslfile, abslfile+'.orig')
        expecthash = lfutil.readstandin(repo, lfile)
        if (expecthash != '' and
            (not os.path.exists(abslfile) or
             expecthash != lfutil.hashfile(abslfile))):
            if not lfutil.copyfromcache(repo, expecthash, lfile):
                # use normallookup() to allocate entry in largefiles dirstate,
                # because lack of it misleads lfilesrepo.status() into
                # recognition that such cache missing files are REMOVED.
                lfdirstate.normallookup(lfile)
                return None # don't try to set the mode
            ret = 1
        # propagate the standin's mode bits to the largefile
        mode = os.stat(absstandin).st_mode
        if mode != os.stat(abslfile).st_mode:
            os.chmod(abslfile, mode)
            ret = 1
    else:
        # Remove lfiles for which the standin is deleted, unless the
        # lfile is added to the repository again. This happens when a
        # largefile is converted back to a normal file: the standin
        # disappears, but a new (normal) file appears as the lfile.
        if os.path.exists(abslfile) and lfile not in repo[None]:
            util.unlinkpath(abslfile)
            ret = -1
    # mirror the standin's dirstate status into the largefiles dirstate
    state = repo.dirstate[lfutil.standin(lfile)]
    if state == 'n':
        # When rebasing, we need to synchronize the standin and the largefile,
        # because otherwise the largefile will get reverted. But for commit's
        # sake, we have to mark the file as unclean.
        if getattr(repo, "_isrebasing", False):
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)
    return ret
487 487
# -- hg commands declarations ------------------------------------------------

# command name -> (function, options, synopsis) table registered with hg
cmdtable = {
    'lfconvert': (lfconvert,
                  [('s', 'size', '',
                    _('minimum size (MB) for files to be converted '
                      'as largefiles'),
                    'SIZE'),
                   ('', 'to-normal', False,
                    _('convert from a largefiles repo to a normal repo')),
                   ],
                  _('hg lfconvert SOURCE DEST [FILE ...]')),
}
@@ -1,467 +1,467 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10
11 11 import os
12 12 import errno
13 13 import platform
14 14 import shutil
15 15 import stat
16 16
17 17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 18 from mercurial.i18n import _
19 19
shortname = '.hglf'      # working-copy directory holding standin files
longname = 'largefiles'  # extension name; also used for config/store paths
22 22
23 23
24 24 # -- Portability wrappers ----------------------------------------------
25 25
def dirstatewalk(dirstate, matcher, unknown=False, ignored=False):
    '''Walk *dirstate* with *matcher*; the empty list means no explicit
    file list restricts the walk.  Unknown/ignored files are included
    only when requested.'''
    return dirstate.walk(matcher, [], unknown, ignored)
28 28
def repoadd(repo, list):
    '''Schedule *list* of files for addition via the working context.'''
    wctx = repo[None]
    return wctx.add(list)
32 32
def reporemove(repo, list, unlink=False):
    '''Forget *list* of files in the working context under the wlock,
    optionally deleting them from the working directory first (missing
    files are tolerated when unlinking).'''
    wlock = repo.wlock()
    try:
        if unlink:
            for f in list:
                try:
                    util.unlinkpath(repo.wjoin(f))
                except OSError as inst:
                    # a file already gone is fine; anything else is not
                    if inst.errno != errno.ENOENT:
                        raise
        repo[None].forget(list)
    finally:
        wlock.release()
48 48
def repoforget(repo, list):
    '''Stop tracking *list* of files (they are kept on disk).'''
    wctx = repo[None]
    return wctx.forget(list)
52 52
def findoutgoing(repo, remote, force):
    '''Return the changelog nodes present in repo but missing from
    remote, based on the common set discovered between the two.'''
    # NOTE(review): local import -- presumably deliberate (this module's
    # docstring forbids importing largefiles siblings, and discovery is
    # only needed here); confirm before hoisting to the top of the file.
    from mercurial import discovery
    common, _anyinc, _heads = discovery.findcommonincoming(repo,
        remote, force=force)
    return repo.changelog.findmissing(common)
58 58
59 59 # -- Private worker functions ------------------------------------------
60 60
def getminsize(ui, assumelfiles, opt, default=10):
    '''Return the largefile size threshold in megabytes as a float.

    The explicit *opt* value wins; otherwise, when *assumelfiles*, the
    ``largefiles.minsize`` config entry is consulted (falling back to
    *default*).  Aborts when the value is not numeric or when no value
    can be determined at all.'''
    size = opt
    if not size and assumelfiles:
        size = ui.config(longname, 'minsize', default=default)
    if size:
        try:
            size = float(size)
        except ValueError:
            raise util.Abort(_('largefiles: size must be number (not %s)\n')
                             % size)
    if size is None:
        raise util.Abort(_('minimum size for largefiles must be specified'))
    return size
74 74
def link(src, dest):
    '''Hardlink src to dest, falling back to an atomic chunked copy
    (preserving src's mode bits) when hardlinking fails.'''
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        dst = util.atomictempfile(dest)
        for chunk in util.filechunkiter(open(src, 'rb')):
            dst.write(chunk)
        dst.close()
        os.chmod(dest, os.stat(src).st_mode)
85 85
def usercachepath(ui, hash):
    '''Return the per-user cache path for largefile *hash*.

    An explicit ``largefiles.usercache`` config wins; otherwise a
    platform-appropriate cache directory is derived from the
    environment.  May return None when no suitable environment
    variable is set (callers must handle that).'''
    path = ui.configpath(longname, 'usercache', None)
    if path:
        path = os.path.join(path, hash)
    else:
        if os.name == 'nt':
            # Windows: prefer the local (non-roaming) application data dir
            appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
            if appdata:
                path = os.path.join(appdata, longname, hash)
        elif platform.system() == 'Darwin':
            home = os.getenv('HOME')
            if home:
                path = os.path.join(home, 'Library', 'Caches',
                                    longname, hash)
        elif os.name == 'posix':
            # follow the XDG base directory spec, with ~/.cache fallback
            path = os.getenv('XDG_CACHE_HOME')
            if path:
                path = os.path.join(path, longname, hash)
            else:
                home = os.getenv('HOME')
                if home:
                    path = os.path.join(home, '.cache', longname, hash)
        else:
            raise util.Abort(_('unknown operating system: %s\n') % os.name)
    return path
111 111
def inusercache(ui, hash):
    '''Report whether *hash* is present in the per-user largefile cache
    (falsy when no cache path could be determined).'''
    cached = usercachepath(ui, hash)
    return cached and os.path.exists(cached)
115 115
def findfile(repo, hash):
    '''Return a local path for largefile *hash*, hardlinking it into the
    repo-local store from the user cache when necessary; None if the
    file is in neither place.'''
    if instore(repo, hash):
        repo.ui.note(_('Found %s in store\n') % hash)
        return storepath(repo, hash)
    elif inusercache(repo.ui, hash):
        repo.ui.note(_('Found %s in system cache\n') % hash)
        # promote the cached copy into the repo-local store
        path = storepath(repo, hash)
        util.makedirs(os.path.dirname(path))
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
127 127
class largefilesdirstate(dirstate.dirstate):
    '''dirstate subclass that passes every path through unixpath()
    before delegating to the base class, so entries are keyed
    consistently.

    NOTE(review): unixpath() is a module-level helper defined elsewhere
    in this file -- presumably it normalizes to '/'-separated paths;
    confirm.'''
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
143 143
def openlfdirstate(ui, repo):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    admin = repo.join(longname)
    opener = scmutil.opener(admin)
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if not os.path.exists(os.path.join(admin, 'dirstate')):
        util.makedirs(admin)
        matcher = getstandinmatcher(repo)
        for standin in dirstatewalk(repo.dirstate, matcher):
            lfile = splitstandin(standin)
            hash = readstandin(repo, lfile)
            lfdirstate.normallookup(lfile)
            try:
                # mark clean only when the working copy matches the standin
                if hash == hashfile(repo.wjoin(lfile)):
                    lfdirstate.normal(lfile)
            except OSError, err:
                # a missing largefile simply stays in normallookup state
                if err.errno != errno.ENOENT:
                    raise
    return lfdirstate
171 171
172 def lfdirstate_status(lfdirstate, repo, rev):
172 def lfdirstatestatus(lfdirstate, repo, rev):
173 173 match = match_.always(repo.root, repo.getcwd())
174 174 s = lfdirstate.status(match, [], False, False, False)
175 175 unsure, modified, added, removed, missing, unknown, ignored, clean = s
176 176 for lfile in unsure:
177 177 if repo[rev][standin(lfile)].data().strip() != \
178 178 hashfile(repo.wjoin(lfile)):
179 179 modified.append(lfile)
180 180 else:
181 181 clean.append(lfile)
182 182 lfdirstate.normal(lfile)
183 183 return (modified, added, removed, missing, unknown, ignored, clean)
184 184
185 185 def listlfiles(repo, rev=None, matcher=None):
186 186 '''return a list of largefiles in the working copy or the
187 187 specified changeset'''
188 188
189 189 if matcher is None:
190 190 matcher = getstandinmatcher(repo)
191 191
192 192 # ignore unknown files in working directory
193 193 return [splitstandin(f)
194 194 for f in repo[rev].walk(matcher)
195 195 if rev is not None or repo.dirstate[f] != '?']
196 196
197 197 def instore(repo, hash):
198 198 return os.path.exists(storepath(repo, hash))
199 199
200 200 def storepath(repo, hash):
201 201 return repo.join(os.path.join(longname, hash))
202 202
203 203 def copyfromcache(repo, hash, filename):
204 204 '''Copy the specified largefile from the repo or system cache to
205 205 filename in the repository. Return true on success or false if the
206 206 file was not found in either cache (which should not happened:
207 207 this is meant to be called only after ensuring that the needed
208 208 largefile exists in the cache).'''
209 209 path = findfile(repo, hash)
210 210 if path is None:
211 211 return False
212 212 util.makedirs(os.path.dirname(repo.wjoin(filename)))
213 213 # The write may fail before the file is fully written, but we
214 214 # don't use atomic writes in the working copy.
215 215 shutil.copy(path, repo.wjoin(filename))
216 216 return True
217 217
218 218 def copytostore(repo, rev, file, uploaded=False):
219 219 hash = readstandin(repo, file)
220 220 if instore(repo, hash):
221 221 return
222 222 copytostoreabsolute(repo, repo.wjoin(file), hash)
223 223
224 224 def copyalltostore(repo, node):
225 225 '''Copy all largefiles in a given revision to the store'''
226 226
227 227 ctx = repo[node]
228 228 for filename in ctx.files():
229 229 if isstandin(filename) and filename in ctx.manifest():
230 230 realfile = splitstandin(filename)
231 231 copytostore(repo, ctx.node(), realfile)
232 232
233 233
234 234 def copytostoreabsolute(repo, file, hash):
235 235 util.makedirs(os.path.dirname(storepath(repo, hash)))
236 236 if inusercache(repo.ui, hash):
237 237 link(usercachepath(repo.ui, hash), storepath(repo, hash))
238 238 else:
239 239 dst = util.atomictempfile(storepath(repo, hash),
240 240 createmode=repo.store.createmode)
241 241 for chunk in util.filechunkiter(open(file, 'rb')):
242 242 dst.write(chunk)
243 243 dst.close()
244 244 linktousercache(repo, hash)
245 245
246 246 def linktousercache(repo, hash):
247 247 path = usercachepath(repo.ui, hash)
248 248 if path:
249 249 util.makedirs(os.path.dirname(path))
250 250 link(storepath(repo, hash), path)
251 251
252 252 def getstandinmatcher(repo, pats=[], opts={}):
253 253 '''Return a match object that applies pats to the standin directory'''
254 254 standindir = repo.pathto(shortname)
255 255 if pats:
256 256 # patterns supplied: search standin directory relative to current dir
257 257 cwd = repo.getcwd()
258 258 if os.path.isabs(cwd):
259 259 # cwd is an absolute path for hg -R <reponame>
260 260 # work relative to the repository root in this case
261 261 cwd = ''
262 262 pats = [os.path.join(standindir, cwd, pat) for pat in pats]
263 263 elif os.path.isdir(standindir):
264 264 # no patterns: relative to repo root
265 265 pats = [standindir]
266 266 else:
267 267 # no patterns and no standin dir: return matcher that matches nothing
268 268 match = match_.match(repo.root, None, [], exact=True)
269 269 match.matchfn = lambda f: False
270 270 return match
271 271 return getmatcher(repo, pats, opts, showbad=False)
272 272
273 273 def getmatcher(repo, pats=[], opts={}, showbad=True):
274 274 '''Wrapper around scmutil.match() that adds showbad: if false,
275 275 neuter the match object's bad() method so it does not print any
276 276 warnings about missing files or directories.'''
277 277 match = scmutil.match(repo[None], pats, opts)
278 278
279 279 if not showbad:
280 280 match.bad = lambda f, msg: None
281 281 return match
282 282
283 283 def composestandinmatcher(repo, rmatcher):
284 284 '''Return a matcher that accepts standins corresponding to the
285 285 files accepted by rmatcher. Pass the list of files in the matcher
286 286 as the paths specified by the user.'''
287 287 smatcher = getstandinmatcher(repo, rmatcher.files())
288 288 isstandin = smatcher.matchfn
289 def composed_matchfn(f):
289 def composedmatchfn(f):
290 290 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
291 smatcher.matchfn = composed_matchfn
291 smatcher.matchfn = composedmatchfn
292 292
293 293 return smatcher
294 294
295 295 def standin(filename):
296 296 '''Return the repo-relative path to the standin for the specified big
297 297 file.'''
298 298 # Notes:
299 # 1) Most callers want an absolute path, but _create_standin() needs
300 # it repo-relative so lfadd() can pass it to repo_add(). So leave
299 # 1) Most callers want an absolute path, but _createstandin() needs
300 # it repo-relative so lfadd() can pass it to repoadd(). So leave
301 301 # it up to the caller to use repo.wjoin() to get an absolute path.
302 302 # 2) Join with '/' because that's what dirstate always uses, even on
303 303 # Windows. Change existing separator to '/' first in case we are
304 304 # passed filenames from an external source (like the command line).
305 305 return shortname + '/' + util.pconvert(filename)
306 306
307 307 def isstandin(filename):
308 308 '''Return true if filename is a big file standin. filename must be
309 309 in Mercurial's internal form (slash-separated).'''
310 310 return filename.startswith(shortname + '/')
311 311
312 312 def splitstandin(filename):
313 313 # Split on / because that's what dirstate always uses, even on Windows.
314 314 # Change local separator to / first just in case we are passed filenames
315 315 # from an external source (like the command line).
316 316 bits = util.pconvert(filename).split('/', 1)
317 317 if len(bits) == 2 and bits[0] == shortname:
318 318 return bits[1]
319 319 else:
320 320 return None
321 321
322 322 def updatestandin(repo, standin):
323 323 file = repo.wjoin(splitstandin(standin))
324 324 if os.path.exists(file):
325 325 hash = hashfile(file)
326 326 executable = getexecutable(file)
327 327 writestandin(repo, standin, hash, executable)
328 328
329 329 def readstandin(repo, filename, node=None):
330 330 '''read hex hash from standin for filename at given node, or working
331 331 directory if no node is given'''
332 332 return repo[node][standin(filename)].data().strip()
333 333
334 334 def writestandin(repo, standin, hash, executable):
335 335 '''write hash to <repo.root>/<standin>'''
336 336 writehash(hash, repo.wjoin(standin), executable)
337 337
338 338 def copyandhash(instream, outfile):
339 339 '''Read bytes from instream (iterable) and write them to outfile,
340 340 computing the SHA-1 hash of the data along the way. Close outfile
341 341 when done and return the binary hash.'''
342 342 hasher = util.sha1('')
343 343 for data in instream:
344 344 hasher.update(data)
345 345 outfile.write(data)
346 346
347 347 # Blecch: closing a file that somebody else opened is rude and
348 348 # wrong. But it's so darn convenient and practical! After all,
349 349 # outfile was opened just to copy and hash.
350 350 outfile.close()
351 351
352 352 return hasher.digest()
353 353
354 354 def hashrepofile(repo, file):
355 355 return hashfile(repo.wjoin(file))
356 356
357 357 def hashfile(file):
358 358 if not os.path.exists(file):
359 359 return ''
360 360 hasher = util.sha1('')
361 361 fd = open(file, 'rb')
362 362 for data in blockstream(fd):
363 363 hasher.update(data)
364 364 fd.close()
365 365 return hasher.hexdigest()
366 366
367 367 class limitreader(object):
368 368 def __init__(self, f, limit):
369 369 self.f = f
370 370 self.limit = limit
371 371
372 372 def read(self, length):
373 373 if self.limit == 0:
374 374 return ''
375 375 length = length > self.limit and self.limit or length
376 376 self.limit -= length
377 377 return self.f.read(length)
378 378
379 379 def close(self):
380 380 pass
381 381
382 382 def blockstream(infile, blocksize=128 * 1024):
383 383 """Generator that yields blocks of data from infile and closes infile."""
384 384 while True:
385 385 data = infile.read(blocksize)
386 386 if not data:
387 387 break
388 388 yield data
389 389 # same blecch as copyandhash() above
390 390 infile.close()
391 391
392 392 def writehash(hash, filename, executable):
393 393 util.makedirs(os.path.dirname(filename))
394 394 util.writefile(filename, hash + '\n')
395 395 os.chmod(filename, getmode(executable))
396 396
397 397 def getexecutable(filename):
398 398 mode = os.stat(filename).st_mode
399 399 return ((mode & stat.S_IXUSR) and
400 400 (mode & stat.S_IXGRP) and
401 401 (mode & stat.S_IXOTH))
402 402
403 403 def getmode(executable):
404 404 if executable:
405 405 return 0755
406 406 else:
407 407 return 0644
408 408
409 409 def urljoin(first, second, *arg):
410 410 def join(left, right):
411 411 if not left.endswith('/'):
412 412 left += '/'
413 413 if right.startswith('/'):
414 414 right = right[1:]
415 415 return left + right
416 416
417 417 url = join(first, second)
418 418 for a in arg:
419 419 url = join(url, a)
420 420 return url
421 421
422 422 def hexsha1(data):
423 423 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
424 424 object data"""
425 425 h = util.sha1()
426 426 for chunk in util.filechunkiter(data):
427 427 h.update(chunk)
428 428 return h.hexdigest()
429 429
430 430 def httpsendfile(ui, filename):
431 431 return httpconnection.httpsendfile(ui, filename, 'rb')
432 432
433 433 def unixpath(path):
434 434 '''Return a version of path normalized for use with the lfdirstate.'''
435 435 return util.pconvert(os.path.normpath(path))
436 436
437 437 def islfilesrepo(repo):
438 438 return ('largefiles' in repo.requirements and
439 439 util.any(shortname + '/' in f[0] for f in repo.store.datafiles()))
440 440
441 441 class storeprotonotcapable(Exception):
442 442 def __init__(self, storetypes):
443 443 self.storetypes = storetypes
444 444
445 445 def getcurrentheads(repo):
446 446 branches = repo.branchmap()
447 447 heads = []
448 448 for branch in branches:
449 449 newheads = repo.branchheads(branch)
450 450 heads = heads + newheads
451 451 return heads
452 452
453 453 def getstandinsstate(repo):
454 454 standins = []
455 455 matcher = getstandinmatcher(repo)
456 for standin in dirstate_walk(repo.dirstate, matcher):
456 for standin in dirstatewalk(repo.dirstate, matcher):
457 457 lfile = splitstandin(standin)
458 458 standins.append((lfile, readstandin(repo, lfile)))
459 459 return standins
460 460
461 461 def getlfilestoupdate(oldstandins, newstandins):
462 462 changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
463 463 filelist = []
464 464 for f in changedstandins:
465 465 if f[0] not in filelist:
466 466 filelist.append(f[0])
467 467 return filelist
@@ -1,968 +1,968 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10
11 11 import os
12 12 import copy
13 13
14 14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
15 15 node, archival, error, merge
16 16 from mercurial.i18n import _
17 17 from mercurial.node import hex
18 18 from hgext import rebase
19 19
20 20 import lfutil
21 21 import lfcommands
22 22
23 23 # -- Utility functions: commonly/repeatedly needed functionality ---------------
24 24
25 25 def installnormalfilesmatchfn(manifest):
26 26 '''overrides scmutil.match so that the matcher it returns will ignore all
27 27 largefiles'''
28 28 oldmatch = None # for the closure
29 def override_match(ctx, pats=[], opts={}, globbed=False,
29 def overridematch(ctx, pats=[], opts={}, globbed=False,
30 30 default='relpath'):
31 31 match = oldmatch(ctx, pats, opts, globbed, default)
32 32 m = copy.copy(match)
33 33 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
34 34 manifest)
35 35 m._files = filter(notlfile, m._files)
36 36 m._fmap = set(m._files)
37 orig_matchfn = m.matchfn
38 m.matchfn = lambda f: notlfile(f) and orig_matchfn(f) or None
37 origmatchfn = m.matchfn
38 m.matchfn = lambda f: notlfile(f) and origmatchfn(f) or None
39 39 return m
40 oldmatch = installmatchfn(override_match)
40 oldmatch = installmatchfn(overridematch)
41 41
42 42 def installmatchfn(f):
43 43 oldmatch = scmutil.match
44 44 setattr(f, 'oldmatch', oldmatch)
45 45 scmutil.match = f
46 46 return oldmatch
47 47
48 48 def restorematchfn():
49 49 '''restores scmutil.match to what it was before installnormalfilesmatchfn
50 50 was called. no-op if scmutil.match is its original function.
51 51
52 52 Note that n calls to installnormalfilesmatchfn will require n calls to
53 53 restore matchfn to reverse'''
54 54 scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
55 55
56 def add_largefiles(ui, repo, *pats, **opts):
56 def addlargefiles(ui, repo, *pats, **opts):
57 57 large = opts.pop('large', None)
58 58 lfsize = lfutil.getminsize(
59 59 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
60 60
61 61 lfmatcher = None
62 62 if lfutil.islfilesrepo(repo):
63 63 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
64 64 if lfpats:
65 65 lfmatcher = match_.match(repo.root, '', list(lfpats))
66 66
67 67 lfnames = []
68 68 m = scmutil.match(repo[None], pats, opts)
69 69 m.bad = lambda x, y: None
70 70 wctx = repo[None]
71 71 for f in repo.walk(m):
72 72 exact = m.exact(f)
73 73 lfile = lfutil.standin(f) in wctx
74 74 nfile = f in wctx
75 75 exists = lfile or nfile
76 76
77 77 # Don't warn the user when they attempt to add a normal tracked file.
78 78 # The normal add code will do that for us.
79 79 if exact and exists:
80 80 if lfile:
81 81 ui.warn(_('%s already a largefile\n') % f)
82 82 continue
83 83
84 84 if exact or not exists:
85 85 abovemin = (lfsize and
86 86 os.lstat(repo.wjoin(f)).st_size >= lfsize * 1024 * 1024)
87 87 if large or abovemin or (lfmatcher and lfmatcher(f)):
88 88 lfnames.append(f)
89 89 if ui.verbose or not exact:
90 90 ui.status(_('adding %s as a largefile\n') % m.rel(f))
91 91
92 92 bad = []
93 93 standins = []
94 94
95 95 # Need to lock, otherwise there could be a race condition between
96 96 # when standins are created and added to the repo.
97 97 wlock = repo.wlock()
98 98 try:
99 99 if not opts.get('dry_run'):
100 100 lfdirstate = lfutil.openlfdirstate(ui, repo)
101 101 for f in lfnames:
102 102 standinname = lfutil.standin(f)
103 103 lfutil.writestandin(repo, standinname, hash='',
104 104 executable=lfutil.getexecutable(repo.wjoin(f)))
105 105 standins.append(standinname)
106 106 if lfdirstate[f] == 'r':
107 107 lfdirstate.normallookup(f)
108 108 else:
109 109 lfdirstate.add(f)
110 110 lfdirstate.write()
111 111 bad += [lfutil.splitstandin(f)
112 for f in lfutil.repo_add(repo, standins)
112 for f in lfutil.repoadd(repo, standins)
113 113 if f in m.files()]
114 114 finally:
115 115 wlock.release()
116 116 return bad
117 117
118 def remove_largefiles(ui, repo, *pats, **opts):
118 def removelargefiles(ui, repo, *pats, **opts):
119 119 after = opts.get('after')
120 120 if not pats and not after:
121 121 raise util.Abort(_('no files specified'))
122 122 m = scmutil.match(repo[None], pats, opts)
123 123 try:
124 124 repo.lfstatus = True
125 125 s = repo.status(match=m, clean=True)
126 126 finally:
127 127 repo.lfstatus = False
128 128 manifest = repo[None].manifest()
129 129 modified, added, deleted, clean = [[f for f in list
130 130 if lfutil.standin(f) in manifest]
131 131 for list in [s[0], s[1], s[3], s[6]]]
132 132
133 133 def warn(files, reason):
134 134 for f in files:
135 135 ui.warn(_('not removing %s: %s (use forget to undo)\n')
136 136 % (m.rel(f), reason))
137 137
138 138 if after:
139 139 remove, forget = deleted, []
140 140 warn(modified + added + clean, _('file still exists'))
141 141 else:
142 142 remove, forget = deleted + clean, []
143 143 warn(modified, _('file is modified'))
144 144 warn(added, _('file has been marked for add'))
145 145
146 146 for f in sorted(remove + forget):
147 147 if ui.verbose or not m.exact(f):
148 148 ui.status(_('removing %s\n') % m.rel(f))
149 149
150 150 # Need to lock because standin files are deleted then removed from the
151 151 # repository and we could race inbetween.
152 152 wlock = repo.wlock()
153 153 try:
154 154 lfdirstate = lfutil.openlfdirstate(ui, repo)
155 155 for f in remove:
156 156 if not after:
157 157 # If this is being called by addremove, notify the user that we
158 158 # are removing the file.
159 159 if getattr(repo, "_isaddremove", False):
160 160 ui.status(_('removing %s\n') % f)
161 161 if os.path.exists(repo.wjoin(f)):
162 162 util.unlinkpath(repo.wjoin(f))
163 163 lfdirstate.remove(f)
164 164 lfdirstate.write()
165 165 forget = [lfutil.standin(f) for f in forget]
166 166 remove = [lfutil.standin(f) for f in remove]
167 lfutil.repo_forget(repo, forget)
167 lfutil.repoforget(repo, forget)
168 168 # If this is being called by addremove, let the original addremove
169 169 # function handle this.
170 170 if not getattr(repo, "_isaddremove", False):
171 lfutil.repo_remove(repo, remove, unlink=True)
171 lfutil.reporemove(repo, remove, unlink=True)
172 172 finally:
173 173 wlock.release()
174 174
175 175 # -- Wrappers: modify existing commands --------------------------------
176 176
177 177 # Add works by going through the files that the user wanted to add and
178 178 # checking if they should be added as largefiles. Then it makes a new
179 179 # matcher which matches only the normal files and runs the original
180 180 # version of add.
181 def override_add(orig, ui, repo, *pats, **opts):
181 def overrideadd(orig, ui, repo, *pats, **opts):
182 182 normal = opts.pop('normal')
183 183 if normal:
184 184 if opts.get('large'):
185 185 raise util.Abort(_('--normal cannot be used with --large'))
186 186 return orig(ui, repo, *pats, **opts)
187 bad = add_largefiles(ui, repo, *pats, **opts)
187 bad = addlargefiles(ui, repo, *pats, **opts)
188 188 installnormalfilesmatchfn(repo[None].manifest())
189 189 result = orig(ui, repo, *pats, **opts)
190 190 restorematchfn()
191 191
192 192 return (result == 1 or bad) and 1 or 0
193 193
194 def override_remove(orig, ui, repo, *pats, **opts):
194 def overrideremove(orig, ui, repo, *pats, **opts):
195 195 installnormalfilesmatchfn(repo[None].manifest())
196 196 orig(ui, repo, *pats, **opts)
197 197 restorematchfn()
198 remove_largefiles(ui, repo, *pats, **opts)
198 removelargefiles(ui, repo, *pats, **opts)
199 199
200 def override_status(orig, ui, repo, *pats, **opts):
200 def overridestatus(orig, ui, repo, *pats, **opts):
201 201 try:
202 202 repo.lfstatus = True
203 203 return orig(ui, repo, *pats, **opts)
204 204 finally:
205 205 repo.lfstatus = False
206 206
207 def override_log(orig, ui, repo, *pats, **opts):
207 def overridelog(orig, ui, repo, *pats, **opts):
208 208 try:
209 209 repo.lfstatus = True
210 210 orig(ui, repo, *pats, **opts)
211 211 finally:
212 212 repo.lfstatus = False
213 213
214 def override_verify(orig, ui, repo, *pats, **opts):
214 def overrideverify(orig, ui, repo, *pats, **opts):
215 215 large = opts.pop('large', False)
216 216 all = opts.pop('lfa', False)
217 217 contents = opts.pop('lfc', False)
218 218
219 219 result = orig(ui, repo, *pats, **opts)
220 220 if large:
221 221 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
222 222 return result
223 223
224 224 # Override needs to refresh standins so that update's normal merge
225 225 # will go through properly. Then the other update hook (overriding repo.update)
226 226 # will get the new files. Filemerge is also overriden so that the merge
227 227 # will merge standins correctly.
228 def override_update(orig, ui, repo, *pats, **opts):
228 def overrideupdate(orig, ui, repo, *pats, **opts):
229 229 lfdirstate = lfutil.openlfdirstate(ui, repo)
230 230 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
231 231 False, False)
232 232 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
233 233
234 234 # Need to lock between the standins getting updated and their
235 235 # largefiles getting updated
236 236 wlock = repo.wlock()
237 237 try:
238 238 if opts['check']:
239 239 mod = len(modified) > 0
240 240 for lfile in unsure:
241 241 standin = lfutil.standin(lfile)
242 242 if repo['.'][standin].data().strip() != \
243 243 lfutil.hashfile(repo.wjoin(lfile)):
244 244 mod = True
245 245 else:
246 246 lfdirstate.normal(lfile)
247 247 lfdirstate.write()
248 248 if mod:
249 249 raise util.Abort(_('uncommitted local changes'))
250 250 # XXX handle removed differently
251 251 if not opts['clean']:
252 252 for lfile in unsure + modified + added:
253 253 lfutil.updatestandin(repo, lfutil.standin(lfile))
254 254 finally:
255 255 wlock.release()
256 256 return orig(ui, repo, *pats, **opts)
257 257
258 258 # Before starting the manifest merge, merge.updates will call
259 259 # _checkunknown to check if there are any files in the merged-in
260 260 # changeset that collide with unknown files in the working copy.
261 261 #
262 262 # The largefiles are seen as unknown, so this prevents us from merging
263 263 # in a file 'foo' if we already have a largefile with the same name.
264 264 #
265 265 # The overridden function filters the unknown files by removing any
266 266 # largefiles. This makes the merge proceed and we can then handle this
267 267 # case further in the overridden manifestmerge function below.
268 def override_checkunknownfile(origfn, repo, wctx, mctx, f):
268 def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
269 269 if lfutil.standin(f) in wctx:
270 270 return False
271 271 return origfn(repo, wctx, mctx, f)
272 272
273 273 # The manifest merge handles conflicts on the manifest level. We want
274 274 # to handle changes in largefile-ness of files at this level too.
275 275 #
276 276 # The strategy is to run the original manifestmerge and then process
277 277 # the action list it outputs. There are two cases we need to deal with:
278 278 #
279 279 # 1. Normal file in p1, largefile in p2. Here the largefile is
280 280 # detected via its standin file, which will enter the working copy
281 281 # with a "get" action. It is not "merge" since the standin is all
282 282 # Mercurial is concerned with at this level -- the link to the
283 283 # existing normal file is not relevant here.
284 284 #
285 285 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
286 286 # since the largefile will be present in the working copy and
287 287 # different from the normal file in p2. Mercurial therefore
288 288 # triggers a merge action.
289 289 #
290 290 # In both cases, we prompt the user and emit new actions to either
291 291 # remove the standin (if the normal file was kept) or to remove the
292 292 # normal file and get the standin (if the largefile was kept). The
293 293 # default prompt answer is to use the largefile version since it was
294 294 # presumably changed on purpose.
295 295 #
296 296 # Finally, the merge.applyupdates function will then take care of
297 297 # writing the files into the working copy and lfcommands.updatelfiles
298 298 # will update the largefiles.
299 def override_manifestmerge(origfn, repo, p1, p2, pa, overwrite, partial):
299 def overridemanifestmerge(origfn, repo, p1, p2, pa, overwrite, partial):
300 300 actions = origfn(repo, p1, p2, pa, overwrite, partial)
301 301 processed = []
302 302
303 303 for action in actions:
304 304 if overwrite:
305 305 processed.append(action)
306 306 continue
307 307 f, m = action[:2]
308 308
309 309 choices = (_('&Largefile'), _('&Normal file'))
310 310 if m == "g" and lfutil.splitstandin(f) in p1 and f in p2:
311 311 # Case 1: normal file in the working copy, largefile in
312 312 # the second parent
313 313 lfile = lfutil.splitstandin(f)
314 314 standin = f
315 315 msg = _('%s has been turned into a largefile\n'
316 316 'use (l)argefile or keep as (n)ormal file?') % lfile
317 317 if repo.ui.promptchoice(msg, choices, 0) == 0:
318 318 processed.append((lfile, "r"))
319 319 processed.append((standin, "g", p2.flags(standin)))
320 320 else:
321 321 processed.append((standin, "r"))
322 322 elif m == "g" and lfutil.standin(f) in p1 and f in p2:
323 323 # Case 2: largefile in the working copy, normal file in
324 324 # the second parent
325 325 standin = lfutil.standin(f)
326 326 lfile = f
327 327 msg = _('%s has been turned into a normal file\n'
328 328 'keep as (l)argefile or use (n)ormal file?') % lfile
329 329 if repo.ui.promptchoice(msg, choices, 0) == 0:
330 330 processed.append((lfile, "r"))
331 331 else:
332 332 processed.append((standin, "r"))
333 333 processed.append((lfile, "g", p2.flags(lfile)))
334 334 else:
335 335 processed.append(action)
336 336
337 337 return processed
338 338
339 339 # Override filemerge to prompt the user about how they wish to merge
340 340 # largefiles. This will handle identical edits, and copy/rename +
341 341 # edit without prompting the user.
342 def override_filemerge(origfn, repo, mynode, orig, fcd, fco, fca):
342 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca):
343 343 # Use better variable names here. Because this is a wrapper we cannot
344 344 # change the variable names in the function declaration.
345 345 fcdest, fcother, fcancestor = fcd, fco, fca
346 346 if not lfutil.isstandin(orig):
347 347 return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
348 348 else:
349 349 if not fcother.cmp(fcdest): # files identical?
350 350 return None
351 351
352 352 # backwards, use working dir parent as ancestor
353 353 if fcancestor == fcother:
354 354 fcancestor = fcdest.parents()[0]
355 355
356 356 if orig != fcother.path():
357 357 repo.ui.status(_('merging %s and %s to %s\n')
358 358 % (lfutil.splitstandin(orig),
359 359 lfutil.splitstandin(fcother.path()),
360 360 lfutil.splitstandin(fcdest.path())))
361 361 else:
362 362 repo.ui.status(_('merging %s\n')
363 363 % lfutil.splitstandin(fcdest.path()))
364 364
365 365 if fcancestor.path() != fcother.path() and fcother.data() == \
366 366 fcancestor.data():
367 367 return 0
368 368 if fcancestor.path() != fcdest.path() and fcdest.data() == \
369 369 fcancestor.data():
370 370 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
371 371 return 0
372 372
373 373 if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
374 374 'keep (l)ocal or take (o)ther?') %
375 375 lfutil.splitstandin(orig),
376 376 (_('&Local'), _('&Other')), 0) == 0:
377 377 return 0
378 378 else:
379 379 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
380 380 return 0
381 381
382 382 # Copy first changes the matchers to match standins instead of
383 383 # largefiles. Then it overrides util.copyfile in that function it
384 384 # checks if the destination largefile already exists. It also keeps a
385 385 # list of copied files so that the largefiles can be copied and the
386 386 # dirstate updated.
387 def override_copy(orig, ui, repo, pats, opts, rename=False):
387 def overridecopy(orig, ui, repo, pats, opts, rename=False):
388 388 # doesn't remove largefile on rename
389 389 if len(pats) < 2:
390 390 # this isn't legal, let the original function deal with it
391 391 return orig(ui, repo, pats, opts, rename)
392 392
393 393 def makestandin(relpath):
394 394 path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
395 395 return os.path.join(repo.wjoin(lfutil.standin(path)))
396 396
397 397 fullpats = scmutil.expandpats(pats)
398 398 dest = fullpats[-1]
399 399
400 400 if os.path.isdir(dest):
401 401 if not os.path.isdir(makestandin(dest)):
402 402 os.makedirs(makestandin(dest))
403 403 # This could copy both lfiles and normal files in one command,
404 404 # but we don't want to do that. First replace their matcher to
405 405 # only match normal files and run it, then replace it to just
406 406 # match largefiles and run it again.
407 407 nonormalfiles = False
408 408 nolfiles = False
409 409 try:
410 410 try:
411 411 installnormalfilesmatchfn(repo[None].manifest())
412 412 result = orig(ui, repo, pats, opts, rename)
413 413 except util.Abort, e:
414 414 if str(e) != 'no files to copy':
415 415 raise e
416 416 else:
417 417 nonormalfiles = True
418 418 result = 0
419 419 finally:
420 420 restorematchfn()
421 421
422 422 # The first rename can cause our current working directory to be removed.
423 423 # In that case there is nothing left to copy/rename so just quit.
424 424 try:
425 425 repo.getcwd()
426 426 except OSError:
427 427 return result
428 428
429 429 try:
430 430 try:
431 431 # When we call orig below it creates the standins but we don't add them
432 432 # to the dir state until later so lock during that time.
433 433 wlock = repo.wlock()
434 434
435 435 manifest = repo[None].manifest()
436 436 oldmatch = None # for the closure
437 def override_match(ctx, pats=[], opts={}, globbed=False,
437 def overridematch(ctx, pats=[], opts={}, globbed=False,
438 438 default='relpath'):
439 439 newpats = []
440 440 # The patterns were previously mangled to add the standin
441 441 # directory; we need to remove that now
442 442 for pat in pats:
443 443 if match_.patkind(pat) is None and lfutil.shortname in pat:
444 444 newpats.append(pat.replace(lfutil.shortname, ''))
445 445 else:
446 446 newpats.append(pat)
447 447 match = oldmatch(ctx, newpats, opts, globbed, default)
448 448 m = copy.copy(match)
449 449 lfile = lambda f: lfutil.standin(f) in manifest
450 450 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
451 451 m._fmap = set(m._files)
452 orig_matchfn = m.matchfn
452 origmatchfn = m.matchfn
453 453 m.matchfn = lambda f: (lfutil.isstandin(f) and
454 454 (f in manifest) and
455 orig_matchfn(lfutil.splitstandin(f)) or
455 origmatchfn(lfutil.splitstandin(f)) or
456 456 None)
457 457 return m
458 oldmatch = installmatchfn(override_match)
458 oldmatch = installmatchfn(overridematch)
459 459 listpats = []
460 460 for pat in pats:
461 461 if match_.patkind(pat) is not None:
462 462 listpats.append(pat)
463 463 else:
464 464 listpats.append(makestandin(pat))
465 465
466 466 try:
467 467 origcopyfile = util.copyfile
468 468 copiedfiles = []
469 def override_copyfile(src, dest):
469 def overridecopyfile(src, dest):
470 470 if (lfutil.shortname in src and
471 471 dest.startswith(repo.wjoin(lfutil.shortname))):
472 472 destlfile = dest.replace(lfutil.shortname, '')
473 473 if not opts['force'] and os.path.exists(destlfile):
474 474 raise IOError('',
475 475 _('destination largefile already exists'))
476 476 copiedfiles.append((src, dest))
477 477 origcopyfile(src, dest)
478 478
479 util.copyfile = override_copyfile
479 util.copyfile = overridecopyfile
480 480 result += orig(ui, repo, listpats, opts, rename)
481 481 finally:
482 482 util.copyfile = origcopyfile
483 483
484 484 lfdirstate = lfutil.openlfdirstate(ui, repo)
485 485 for (src, dest) in copiedfiles:
486 486 if (lfutil.shortname in src and
487 487 dest.startswith(repo.wjoin(lfutil.shortname))):
488 488 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
489 489 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
490 490 destlfiledir = os.path.dirname(destlfile) or '.'
491 491 if not os.path.isdir(destlfiledir):
492 492 os.makedirs(destlfiledir)
493 493 if rename:
494 494 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
495 495 lfdirstate.remove(srclfile)
496 496 else:
497 497 util.copyfile(srclfile, destlfile)
498 498 lfdirstate.add(destlfile)
499 499 lfdirstate.write()
500 500 except util.Abort, e:
501 501 if str(e) != 'no files to copy':
502 502 raise e
503 503 else:
504 504 nolfiles = True
505 505 finally:
506 506 restorematchfn()
507 507 wlock.release()
508 508
509 509 if nolfiles and nonormalfiles:
510 510 raise util.Abort(_('no files to copy'))
511 511
512 512 return result
513 513
514 514 # When the user calls revert, we have to be careful to not revert any
515 515 # changes to other largefiles accidentally. This means we have to keep
516 516 # track of the largefiles that are being reverted so we only pull down
517 517 # the necessary largefiles.
518 518 #
519 519 # Standins are only updated (to match the hash of largefiles) before
520 520 # commits. Update the standins then run the original revert, changing
521 521 # the matcher to hit standins instead of largefiles. Based on the
522 522 # resulting standins update the largefiles. Then return the standins
523 523 # to their proper state
524 def override_revert(orig, ui, repo, *pats, **opts):
524 def overriderevert(orig, ui, repo, *pats, **opts):
525 525 # Because we put the standins in a bad state (by updating them)
526 526 # and then return them to a correct state we need to lock to
527 527 # prevent others from changing them in their incorrect state.
528 528 wlock = repo.wlock()
529 529 try:
530 530 lfdirstate = lfutil.openlfdirstate(ui, repo)
531 531 (modified, added, removed, missing, unknown, ignored, clean) = \
532 lfutil.lfdirstate_status(lfdirstate, repo, repo['.'].rev())
532 lfutil.lfdirstatestatus(lfdirstate, repo, repo['.'].rev())
533 533 for lfile in modified:
534 534 lfutil.updatestandin(repo, lfutil.standin(lfile))
535 535 for lfile in missing:
536 536 os.unlink(repo.wjoin(lfutil.standin(lfile)))
537 537
538 538 try:
539 539 ctx = repo[opts.get('rev')]
540 540 oldmatch = None # for the closure
541 def override_match(ctx, pats=[], opts={}, globbed=False,
541 def overridematch(ctx, pats=[], opts={}, globbed=False,
542 542 default='relpath'):
543 543 match = oldmatch(ctx, pats, opts, globbed, default)
544 544 m = copy.copy(match)
545 545 def tostandin(f):
546 546 if lfutil.standin(f) in ctx:
547 547 return lfutil.standin(f)
548 548 elif lfutil.standin(f) in repo[None]:
549 549 return None
550 550 return f
551 551 m._files = [tostandin(f) for f in m._files]
552 552 m._files = [f for f in m._files if f is not None]
553 553 m._fmap = set(m._files)
554 orig_matchfn = m.matchfn
554 origmatchfn = m.matchfn
555 555 def matchfn(f):
556 556 if lfutil.isstandin(f):
557 557 # We need to keep track of what largefiles are being
558 558 # matched so we know which ones to update later --
559 559 # otherwise we accidentally revert changes to other
560 560 # largefiles. This is repo-specific, so duckpunch the
561 561 # repo object to keep the list of largefiles for us
562 562 # later.
563 if orig_matchfn(lfutil.splitstandin(f)) and \
563 if origmatchfn(lfutil.splitstandin(f)) and \
564 564 (f in repo[None] or f in ctx):
565 565 lfileslist = getattr(repo, '_lfilestoupdate', [])
566 566 lfileslist.append(lfutil.splitstandin(f))
567 567 repo._lfilestoupdate = lfileslist
568 568 return True
569 569 else:
570 570 return False
571 return orig_matchfn(f)
571 return origmatchfn(f)
572 572 m.matchfn = matchfn
573 573 return m
574 oldmatch = installmatchfn(override_match)
574 oldmatch = installmatchfn(overridematch)
575 575 scmutil.match
576 matches = override_match(repo[None], pats, opts)
576 matches = overridematch(repo[None], pats, opts)
577 577 orig(ui, repo, *pats, **opts)
578 578 finally:
579 579 restorematchfn()
580 580 lfileslist = getattr(repo, '_lfilestoupdate', [])
581 581 lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
582 582 printmessage=False)
583 583
584 584 # empty out the largefiles list so we start fresh next time
585 585 repo._lfilestoupdate = []
586 586 for lfile in modified:
587 587 if lfile in lfileslist:
588 588 if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
589 589 in repo['.']:
590 590 lfutil.writestandin(repo, lfutil.standin(lfile),
591 591 repo['.'][lfile].data().strip(),
592 592 'x' in repo['.'][lfile].flags())
593 593 lfdirstate = lfutil.openlfdirstate(ui, repo)
594 594 for lfile in added:
595 595 standin = lfutil.standin(lfile)
596 596 if standin not in ctx and (standin in matches or opts.get('all')):
597 597 if lfile in lfdirstate:
598 598 lfdirstate.drop(lfile)
599 599 util.unlinkpath(repo.wjoin(standin))
600 600 lfdirstate.write()
601 601 finally:
602 602 wlock.release()
603 603
604 def hg_update(orig, repo, node):
604 def hgupdate(orig, repo, node):
605 605 # Only call updatelfiles the standins that have changed to save time
606 606 oldstandins = lfutil.getstandinsstate(repo)
607 607 result = orig(repo, node)
608 608 newstandins = lfutil.getstandinsstate(repo)
609 609 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
610 610 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist, printmessage=True)
611 611 return result
612 612
613 def hg_clean(orig, repo, node, show_stats=True):
613 def hgclean(orig, repo, node, show_stats=True):
614 614 result = orig(repo, node, show_stats)
615 615 lfcommands.updatelfiles(repo.ui, repo)
616 616 return result
617 617
618 def hg_merge(orig, repo, node, force=None, remind=True):
618 def hgmerge(orig, repo, node, force=None, remind=True):
619 619 # Mark the repo as being in the middle of a merge, so that
620 620 # updatelfiles() will know that it needs to trust the standins in
621 621 # the working copy, not in the standins in the current node
622 622 repo._ismerging = True
623 623 try:
624 624 result = orig(repo, node, force, remind)
625 625 lfcommands.updatelfiles(repo.ui, repo)
626 626 finally:
627 627 repo._ismerging = False
628 628 return result
629 629
630 630 # When we rebase a repository with remotely changed largefiles, we need to
631 631 # take some extra care so that the largefiles are correctly updated in the
632 632 # working copy
633 def override_pull(orig, ui, repo, source=None, **opts):
633 def overridepull(orig, ui, repo, source=None, **opts):
634 634 if opts.get('rebase', False):
635 635 repo._isrebasing = True
636 636 try:
637 637 if opts.get('update'):
638 638 del opts['update']
639 639 ui.debug('--update and --rebase are not compatible, ignoring '
640 640 'the update flag\n')
641 641 del opts['rebase']
642 642 cmdutil.bailifchanged(repo)
643 643 revsprepull = len(repo)
644 644 origpostincoming = commands.postincoming
645 645 def _dummy(*args, **kwargs):
646 646 pass
647 647 commands.postincoming = _dummy
648 648 repo.lfpullsource = source
649 649 if not source:
650 650 source = 'default'
651 651 try:
652 652 result = commands.pull(ui, repo, source, **opts)
653 653 finally:
654 654 commands.postincoming = origpostincoming
655 655 revspostpull = len(repo)
656 656 if revspostpull > revsprepull:
657 657 result = result or rebase.rebase(ui, repo)
658 658 finally:
659 659 repo._isrebasing = False
660 660 else:
661 661 repo.lfpullsource = source
662 662 if not source:
663 663 source = 'default'
664 664 oldheads = lfutil.getcurrentheads(repo)
665 665 result = orig(ui, repo, source, **opts)
666 666 # If we do not have the new largefiles for any new heads we pulled, we
667 667 # will run into a problem later if we try to merge or rebase with one of
668 668 # these heads, so cache the largefiles now direclty into the system
669 669 # cache.
670 670 ui.status(_("caching new largefiles\n"))
671 671 numcached = 0
672 672 heads = lfutil.getcurrentheads(repo)
673 673 newheads = set(heads).difference(set(oldheads))
674 674 for head in newheads:
675 675 (cached, missing) = lfcommands.cachelfiles(ui, repo, head)
676 676 numcached += len(cached)
677 677 ui.status(_("%d largefiles cached\n") % numcached)
678 678 return result
679 679
680 def override_rebase(orig, ui, repo, **opts):
680 def overriderebase(orig, ui, repo, **opts):
681 681 repo._isrebasing = True
682 682 try:
683 683 orig(ui, repo, **opts)
684 684 finally:
685 685 repo._isrebasing = False
686 686
687 def override_archive(orig, repo, dest, node, kind, decode=True, matchfn=None,
687 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
688 688 prefix=None, mtime=None, subrepos=None):
689 689 # No need to lock because we are only reading history and
690 690 # largefile caches, neither of which are modified.
691 691 lfcommands.cachelfiles(repo.ui, repo, node)
692 692
693 693 if kind not in archival.archivers:
694 694 raise util.Abort(_("unknown archive type '%s'") % kind)
695 695
696 696 ctx = repo[node]
697 697
698 698 if kind == 'files':
699 699 if prefix:
700 700 raise util.Abort(
701 701 _('cannot give prefix when archiving to files'))
702 702 else:
703 703 prefix = archival.tidyprefix(dest, kind, prefix)
704 704
705 705 def write(name, mode, islink, getdata):
706 706 if matchfn and not matchfn(name):
707 707 return
708 708 data = getdata()
709 709 if decode:
710 710 data = repo.wwritedata(name, data)
711 711 archiver.addfile(prefix + name, mode, islink, data)
712 712
713 713 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
714 714
715 715 if repo.ui.configbool("ui", "archivemeta", True):
716 716 def metadata():
717 717 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
718 718 hex(repo.changelog.node(0)), hex(node), ctx.branch())
719 719
720 720 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
721 721 if repo.tagtype(t) == 'global')
722 722 if not tags:
723 723 repo.ui.pushbuffer()
724 724 opts = {'template': '{latesttag}\n{latesttagdistance}',
725 725 'style': '', 'patch': None, 'git': None}
726 726 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
727 727 ltags, dist = repo.ui.popbuffer().split('\n')
728 728 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
729 729 tags += 'latesttagdistance: %s\n' % dist
730 730
731 731 return base + tags
732 732
733 733 write('.hg_archival.txt', 0644, False, metadata)
734 734
735 735 for f in ctx:
736 736 ff = ctx.flags(f)
737 737 getdata = ctx[f].data
738 738 if lfutil.isstandin(f):
739 739 path = lfutil.findfile(repo, getdata().strip())
740 740 if path is None:
741 741 raise util.Abort(
742 742 _('largefile %s not found in repo store or system cache')
743 743 % lfutil.splitstandin(f))
744 744 f = lfutil.splitstandin(f)
745 745
746 746 def getdatafn():
747 747 fd = None
748 748 try:
749 749 fd = open(path, 'rb')
750 750 return fd.read()
751 751 finally:
752 752 if fd:
753 753 fd.close()
754 754
755 755 getdata = getdatafn
756 756 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
757 757
758 758 if subrepos:
759 759 for subpath in ctx.substate:
760 760 sub = ctx.sub(subpath)
761 761 sub.archive(repo.ui, archiver, prefix)
762 762
763 763 archiver.done()
764 764
765 765 # If a largefile is modified, the change is not reflected in its
766 766 # standin until a commit. cmdutil.bailifchanged() raises an exception
767 767 # if the repo has uncommitted changes. Wrap it to also check if
768 768 # largefiles were changed. This is used by bisect and backout.
769 def override_bailifchanged(orig, repo):
769 def overridebailifchanged(orig, repo):
770 770 orig(repo)
771 771 repo.lfstatus = True
772 772 modified, added, removed, deleted = repo.status()[:4]
773 773 repo.lfstatus = False
774 774 if modified or added or removed or deleted:
775 775 raise util.Abort(_('outstanding uncommitted changes'))
776 776
777 # Fetch doesn't use cmdutil.bail_if_changed so override it to add the check
778 def override_fetch(orig, ui, repo, *pats, **opts):
777 # Fetch doesn't use cmdutil.bailifchanged so override it to add the check
778 def overridefetch(orig, ui, repo, *pats, **opts):
779 779 repo.lfstatus = True
780 780 modified, added, removed, deleted = repo.status()[:4]
781 781 repo.lfstatus = False
782 782 if modified or added or removed or deleted:
783 783 raise util.Abort(_('outstanding uncommitted changes'))
784 784 return orig(ui, repo, *pats, **opts)
785 785
786 def override_forget(orig, ui, repo, *pats, **opts):
786 def overrideforget(orig, ui, repo, *pats, **opts):
787 787 installnormalfilesmatchfn(repo[None].manifest())
788 788 orig(ui, repo, *pats, **opts)
789 789 restorematchfn()
790 790 m = scmutil.match(repo[None], pats, opts)
791 791
792 792 try:
793 793 repo.lfstatus = True
794 794 s = repo.status(match=m, clean=True)
795 795 finally:
796 796 repo.lfstatus = False
797 797 forget = sorted(s[0] + s[1] + s[3] + s[6])
798 798 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
799 799
800 800 for f in forget:
801 801 if lfutil.standin(f) not in repo.dirstate and not \
802 802 os.path.isdir(m.rel(lfutil.standin(f))):
803 803 ui.warn(_('not removing %s: file is already untracked\n')
804 804 % m.rel(f))
805 805
806 806 for f in forget:
807 807 if ui.verbose or not m.exact(f):
808 808 ui.status(_('removing %s\n') % m.rel(f))
809 809
810 810 # Need to lock because standin files are deleted then removed from the
811 811 # repository and we could race inbetween.
812 812 wlock = repo.wlock()
813 813 try:
814 814 lfdirstate = lfutil.openlfdirstate(ui, repo)
815 815 for f in forget:
816 816 if lfdirstate[f] == 'a':
817 817 lfdirstate.drop(f)
818 818 else:
819 819 lfdirstate.remove(f)
820 820 lfdirstate.write()
821 lfutil.repo_remove(repo, [lfutil.standin(f) for f in forget],
821 lfutil.reporemove(repo, [lfutil.standin(f) for f in forget],
822 822 unlink=True)
823 823 finally:
824 824 wlock.release()
825 825
826 826 def getoutgoinglfiles(ui, repo, dest=None, **opts):
827 827 dest = ui.expandpath(dest or 'default-push', dest or 'default')
828 828 dest, branches = hg.parseurl(dest, opts.get('branch'))
829 829 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
830 830 if revs:
831 831 revs = [repo.lookup(rev) for rev in revs]
832 832
833 833 remoteui = hg.remoteui
834 834
835 835 try:
836 836 remote = hg.repository(remoteui(repo, opts), dest)
837 837 except error.RepoError:
838 838 return None
839 839 o = lfutil.findoutgoing(repo, remote, False)
840 840 if not o:
841 841 return None
842 842 o = repo.changelog.nodesbetween(o, revs)[0]
843 843 if opts.get('newest_first'):
844 844 o.reverse()
845 845
846 846 toupload = set()
847 847 for n in o:
848 848 parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
849 849 ctx = repo[n]
850 850 files = set(ctx.files())
851 851 if len(parents) == 2:
852 852 mc = ctx.manifest()
853 853 mp1 = ctx.parents()[0].manifest()
854 854 mp2 = ctx.parents()[1].manifest()
855 855 for f in mp1:
856 856 if f not in mc:
857 857 files.add(f)
858 858 for f in mp2:
859 859 if f not in mc:
860 860 files.add(f)
861 861 for f in mc:
862 862 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
863 863 files.add(f)
864 864 toupload = toupload.union(
865 865 set([f for f in files if lfutil.isstandin(f) and f in ctx]))
866 866 return toupload
867 867
868 def override_outgoing(orig, ui, repo, dest=None, **opts):
868 def overrideoutgoing(orig, ui, repo, dest=None, **opts):
869 869 orig(ui, repo, dest, **opts)
870 870
871 871 if opts.pop('large', None):
872 872 toupload = getoutgoinglfiles(ui, repo, dest, **opts)
873 873 if toupload is None:
874 874 ui.status(_('largefiles: No remote repo\n'))
875 875 else:
876 876 ui.status(_('largefiles to upload:\n'))
877 877 for file in toupload:
878 878 ui.status(lfutil.splitstandin(file) + '\n')
879 879 ui.status('\n')
880 880
881 def override_summary(orig, ui, repo, *pats, **opts):
881 def overridesummary(orig, ui, repo, *pats, **opts):
882 882 try:
883 883 repo.lfstatus = True
884 884 orig(ui, repo, *pats, **opts)
885 885 finally:
886 886 repo.lfstatus = False
887 887
888 888 if opts.pop('large', None):
889 889 toupload = getoutgoinglfiles(ui, repo, None, **opts)
890 890 if toupload is None:
891 891 ui.status(_('largefiles: No remote repo\n'))
892 892 else:
893 893 ui.status(_('largefiles: %d to upload\n') % len(toupload))
894 894
895 def override_addremove(orig, ui, repo, *pats, **opts):
895 def overrideaddremove(orig, ui, repo, *pats, **opts):
896 896 # Get the list of missing largefiles so we can remove them
897 897 lfdirstate = lfutil.openlfdirstate(ui, repo)
898 898 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
899 899 False, False)
900 900 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
901 901
902 902 # Call into the normal remove code, but the removing of the standin, we want
903 903 # to have handled by original addremove. Monkey patching here makes sure
904 904 # we don't remove the standin in the largefiles code, preventing a very
905 905 # confused state later.
906 906 if missing:
907 907 repo._isaddremove = True
908 remove_largefiles(ui, repo, *missing, **opts)
908 removelargefiles(ui, repo, *missing, **opts)
909 909 repo._isaddremove = False
910 910 # Call into the normal add code, and any files that *should* be added as
911 911 # largefiles will be
912 add_largefiles(ui, repo, *pats, **opts)
912 addlargefiles(ui, repo, *pats, **opts)
913 913 # Now that we've handled largefiles, hand off to the original addremove
914 914 # function to take care of the rest. Make sure it doesn't do anything with
915 915 # largefiles by installing a matcher that will ignore them.
916 916 installnormalfilesmatchfn(repo[None].manifest())
917 917 result = orig(ui, repo, *pats, **opts)
918 918 restorematchfn()
919 919 return result
920 920
921 921 # Calling purge with --all will cause the largefiles to be deleted.
922 922 # Override repo.status to prevent this from happening.
923 def override_purge(orig, ui, repo, *dirs, **opts):
923 def overridepurge(orig, ui, repo, *dirs, **opts):
924 924 oldstatus = repo.status
925 def override_status(node1='.', node2=None, match=None, ignored=False,
925 def overridestatus(node1='.', node2=None, match=None, ignored=False,
926 926 clean=False, unknown=False, listsubrepos=False):
927 927 r = oldstatus(node1, node2, match, ignored, clean, unknown,
928 928 listsubrepos)
929 929 lfdirstate = lfutil.openlfdirstate(ui, repo)
930 930 modified, added, removed, deleted, unknown, ignored, clean = r
931 931 unknown = [f for f in unknown if lfdirstate[f] == '?']
932 932 ignored = [f for f in ignored if lfdirstate[f] == '?']
933 933 return modified, added, removed, deleted, unknown, ignored, clean
934 repo.status = override_status
934 repo.status = overridestatus
935 935 orig(ui, repo, *dirs, **opts)
936 936 repo.status = oldstatus
937 937
938 def override_rollback(orig, ui, repo, **opts):
938 def overriderollback(orig, ui, repo, **opts):
939 939 result = orig(ui, repo, **opts)
940 940 merge.update(repo, node=None, branchmerge=False, force=True,
941 941 partial=lfutil.isstandin)
942 942 wlock = repo.wlock()
943 943 try:
944 944 lfdirstate = lfutil.openlfdirstate(ui, repo)
945 945 lfiles = lfutil.listlfiles(repo)
946 946 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
947 947 for file in lfiles:
948 948 if file in oldlfiles:
949 949 lfdirstate.normallookup(file)
950 950 else:
951 951 lfdirstate.add(file)
952 952 lfdirstate.write()
953 953 finally:
954 954 wlock.release()
955 955 return result
956 956
957 def override_transplant(orig, ui, repo, *revs, **opts):
957 def overridetransplant(orig, ui, repo, *revs, **opts):
958 958 try:
959 959 oldstandins = lfutil.getstandinsstate(repo)
960 960 repo._istransplanting = True
961 961 result = orig(ui, repo, *revs, **opts)
962 962 newstandins = lfutil.getstandinsstate(repo)
963 963 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
964 964 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
965 965 printmessage=True)
966 966 finally:
967 967 repo._istransplanting = False
968 968 return result
@@ -1,166 +1,166 b''
1 1 # Copyright 2011 Fog Creek Software
2 2 #
3 3 # This software may be used and distributed according to the terms of the
4 4 # GNU General Public License version 2 or any later version.
5 5
6 6 import os
7 7 import urllib2
8 8
9 9 from mercurial import error, httprepo, util, wireproto
10 10 from mercurial.i18n import _
11 11
12 12 import lfutil
13 13
14 14 LARGEFILES_REQUIRED_MSG = ('\nThis repository uses the largefiles extension.'
15 15 '\n\nPlease enable it in your Mercurial config '
16 16 'file.\n')
17 17
18 18 def putlfile(repo, proto, sha):
19 19 '''Put a largefile into a repository's local store and into the
20 20 user cache.'''
21 21 proto.redirect()
22 22
23 23 tmpfp = util.atomictempfile(lfutil.storepath(repo, sha),
24 24 createmode=repo.store.createmode)
25 25 try:
26 26 try:
27 27 proto.getfile(tmpfp)
28 28 tmpfp._fp.seek(0)
29 29 if sha != lfutil.hexsha1(tmpfp._fp):
30 30 raise IOError(0, _('largefile contents do not match hash'))
31 31 tmpfp.close()
32 32 lfutil.linktousercache(repo, sha)
33 33 except IOError, e:
34 34 repo.ui.warn(_('largefiles: failed to put %s into store: %s') %
35 35 (sha, e.strerror))
36 36 return wireproto.pushres(1)
37 37 finally:
38 38 tmpfp.discard()
39 39
40 40 return wireproto.pushres(0)
41 41
42 42 def getlfile(repo, proto, sha):
43 43 '''Retrieve a largefile from the repository-local cache or system
44 44 cache.'''
45 45 filename = lfutil.findfile(repo, sha)
46 46 if not filename:
47 47 raise util.Abort(_('requested largefile %s not present in cache') % sha)
48 48 f = open(filename, 'rb')
49 49 length = os.fstat(f.fileno())[6]
50 50
51 51 # Since we can't set an HTTP content-length header here, and
52 52 # Mercurial core provides no way to give the length of a streamres
53 53 # (and reading the entire file into RAM would be ill-advised), we
54 54 # just send the length on the first line of the response, like the
55 55 # ssh proto does for string responses.
56 56 def generator():
57 57 yield '%d\n' % length
58 58 for chunk in f:
59 59 yield chunk
60 60 return wireproto.streamres(generator())
61 61
62 62 def statlfile(repo, proto, sha):
63 63 '''Return '2\n' if the largefile is missing, '1\n' if it has a
64 64 mismatched checksum, or '0\n' if it is in good condition'''
65 65 filename = lfutil.findfile(repo, sha)
66 66 if not filename:
67 67 return '2\n'
68 68 fd = None
69 69 try:
70 70 fd = open(filename, 'rb')
71 71 return lfutil.hexsha1(fd) == sha and '0\n' or '1\n'
72 72 finally:
73 73 if fd:
74 74 fd.close()
75 75
76 76 def wirereposetup(ui, repo):
77 77 class lfileswirerepository(repo.__class__):
78 78 def putlfile(self, sha, fd):
79 79 # unfortunately, httprepository._callpush tries to convert its
80 80 # input file-like into a bundle before sending it, so we can't use
81 81 # it ...
82 82 if issubclass(self.__class__, httprepo.httprepository):
83 83 res = None
84 84 try:
85 85 res = self._call('putlfile', data=fd, sha=sha,
86 86 headers={'content-type':'application/mercurial-0.1'})
87 87 d, output = res.split('\n', 1)
88 88 for l in output.splitlines(True):
89 89 self.ui.warn(_('remote: '), l, '\n')
90 90 return int(d)
91 91 except (ValueError, urllib2.HTTPError):
92 92 self.ui.warn(_('unexpected putlfile response: %s') % res)
93 93 return 1
94 94 # ... but we can't use sshrepository._call because the data=
95 95 # argument won't get sent, and _callpush does exactly what we want
96 96 # in this case: send the data straight through
97 97 else:
98 98 try:
99 99 ret, output = self._callpush("putlfile", fd, sha=sha)
100 100 if ret == "":
101 101 raise error.ResponseError(_('putlfile failed:'),
102 102 output)
103 103 return int(ret)
104 104 except IOError:
105 105 return 1
106 106 except ValueError:
107 107 raise error.ResponseError(
108 108 _('putlfile failed (unexpected response):'), ret)
109 109
110 110 def getlfile(self, sha):
111 111 stream = self._callstream("getlfile", sha=sha)
112 112 length = stream.readline()
113 113 try:
114 114 length = int(length)
115 115 except ValueError:
116 116 self._abort(error.ResponseError(_("unexpected response:"),
117 117 length))
118 118 return (length, stream)
119 119
120 120 def statlfile(self, sha):
121 121 try:
122 122 return int(self._call("statlfile", sha=sha))
123 123 except (ValueError, urllib2.HTTPError):
124 124 # If the server returns anything but an integer followed by a
125 125 # newline, newline, it's not speaking our language; if we get
126 126 # an HTTP error, we can't be sure the largefile is present;
127 127 # either way, consider it missing.
128 128 return 2
129 129
130 130 repo.__class__ = lfileswirerepository
131 131
132 132 # advertise the largefiles=serve capability
133 133 def capabilities(repo, proto):
134 return capabilities_orig(repo, proto) + ' largefiles=serve'
134 return capabilitiesorig(repo, proto) + ' largefiles=serve'
135 135
136 136 # duplicate what Mercurial's new out-of-band errors mechanism does, because
137 137 # clients old and new alike both handle it well
138 def webproto_refuseclient(self, message):
138 def webprotorefuseclient(self, message):
139 139 self.req.header([('Content-Type', 'application/hg-error')])
140 140 return message
141 141
142 def sshproto_refuseclient(self, message):
142 def sshprotorefuseclient(self, message):
143 143 self.ui.write_err('%s\n-\n' % message)
144 144 self.fout.write('\n')
145 145 self.fout.flush()
146 146
147 147 return ''
148 148
149 149 def heads(repo, proto):
150 150 if lfutil.islfilesrepo(repo):
151 151 return wireproto.ooberror(LARGEFILES_REQUIRED_MSG)
152 152 return wireproto.heads(repo, proto)
153 153
154 def sshrepo_callstream(self, cmd, **args):
154 def sshrepocallstream(self, cmd, **args):
155 155 if cmd == 'heads' and self.capable('largefiles'):
156 156 cmd = 'lheads'
157 157 if cmd == 'batch' and self.capable('largefiles'):
158 158 args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
159 return ssh_oldcallstream(self, cmd, **args)
159 return ssholdcallstream(self, cmd, **args)
160 160
161 def httprepo_callstream(self, cmd, **args):
161 def httprepocallstream(self, cmd, **args):
162 162 if cmd == 'heads' and self.capable('largefiles'):
163 163 cmd = 'lheads'
164 164 if cmd == 'batch' and self.capable('largefiles'):
165 165 args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
166 return http_oldcallstream(self, cmd, **args)
166 return httpoldcallstream(self, cmd, **args)
@@ -1,458 +1,458 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''setup for largefiles repositories: reposetup'''
10 10 import copy
11 11 import types
12 12 import os
13 13
14 14 from mercurial import context, error, manifest, match as match_, util
15 15 from mercurial import node as node_
16 16 from mercurial.i18n import _
17 17
18 18 import lfcommands
19 19 import proto
20 20 import lfutil
21 21
22 22 def reposetup(ui, repo):
23 23 # wire repositories should be given new wireproto functions but not the
24 24 # other largefiles modifications
25 25 if not repo.local():
26 26 return proto.wirereposetup(ui, repo)
27 27
28 28 for name in ('status', 'commitctx', 'commit', 'push'):
29 29 method = getattr(repo, name)
30 30 if (isinstance(method, types.FunctionType) and
31 31 method.func_name == 'wrap'):
32 32 ui.warn(_('largefiles: repo method %r appears to have already been'
33 33 ' wrapped by another extension: '
34 34 'largefiles may behave incorrectly\n')
35 35 % name)
36 36
37 class lfiles_repo(repo.__class__):
37 class lfilesrepo(repo.__class__):
38 38 lfstatus = False
39 39 def status_nolfiles(self, *args, **kwargs):
40 return super(lfiles_repo, self).status(*args, **kwargs)
40 return super(lfilesrepo, self).status(*args, **kwargs)
41 41
42 42 # When lfstatus is set, return a context that gives the names
43 43 # of largefiles instead of their corresponding standins and
44 44 # identifies the largefiles as always binary, regardless of
45 45 # their actual contents.
46 46 def __getitem__(self, changeid):
47 ctx = super(lfiles_repo, self).__getitem__(changeid)
47 ctx = super(lfilesrepo, self).__getitem__(changeid)
48 48 if self.lfstatus:
49 class lfiles_manifestdict(manifest.manifestdict):
49 class lfilesmanifestdict(manifest.manifestdict):
50 50 def __contains__(self, filename):
51 if super(lfiles_manifestdict,
51 if super(lfilesmanifestdict,
52 52 self).__contains__(filename):
53 53 return True
54 return super(lfiles_manifestdict,
54 return super(lfilesmanifestdict,
55 55 self).__contains__(lfutil.standin(filename))
56 class lfiles_ctx(ctx.__class__):
56 class lfilesctx(ctx.__class__):
57 57 def files(self):
58 filenames = super(lfiles_ctx, self).files()
58 filenames = super(lfilesctx, self).files()
59 59 return [lfutil.splitstandin(f) or f for f in filenames]
60 60 def manifest(self):
61 man1 = super(lfiles_ctx, self).manifest()
62 man1.__class__ = lfiles_manifestdict
61 man1 = super(lfilesctx, self).manifest()
62 man1.__class__ = lfilesmanifestdict
63 63 return man1
64 64 def filectx(self, path, fileid=None, filelog=None):
65 65 try:
66 66 if filelog is not None:
67 result = super(lfiles_ctx, self).filectx(
67 result = super(lfilesctx, self).filectx(
68 68 path, fileid, filelog)
69 69 else:
70 result = super(lfiles_ctx, self).filectx(
70 result = super(lfilesctx, self).filectx(
71 71 path, fileid)
72 72 except error.LookupError:
73 73 # Adding a null character will cause Mercurial to
74 74 # identify this as a binary file.
75 75 if filelog is not None:
76 result = super(lfiles_ctx, self).filectx(
76 result = super(lfilesctx, self).filectx(
77 77 lfutil.standin(path), fileid, filelog)
78 78 else:
79 result = super(lfiles_ctx, self).filectx(
79 result = super(lfilesctx, self).filectx(
80 80 lfutil.standin(path), fileid)
81 81 olddata = result.data
82 82 result.data = lambda: olddata() + '\0'
83 83 return result
84 ctx.__class__ = lfiles_ctx
84 ctx.__class__ = lfilesctx
85 85 return ctx
86 86
87 87 # Figure out the status of big files and insert them into the
88 88 # appropriate list in the result. Also removes standin files
89 89 # from the listing. Revert to the original status if
90 90 # self.lfstatus is False.
91 91 def status(self, node1='.', node2=None, match=None, ignored=False,
92 92 clean=False, unknown=False, listsubrepos=False):
93 93 listignored, listclean, listunknown = ignored, clean, unknown
94 94 if not self.lfstatus:
95 return super(lfiles_repo, self).status(node1, node2, match,
95 return super(lfilesrepo, self).status(node1, node2, match,
96 96 listignored, listclean, listunknown, listsubrepos)
97 97 else:
98 98 # some calls in this function rely on the old version of status
99 99 self.lfstatus = False
100 100 if isinstance(node1, context.changectx):
101 101 ctx1 = node1
102 102 else:
103 103 ctx1 = repo[node1]
104 104 if isinstance(node2, context.changectx):
105 105 ctx2 = node2
106 106 else:
107 107 ctx2 = repo[node2]
108 108 working = ctx2.rev() is None
109 109 parentworking = working and ctx1 == self['.']
110 110
111 111 def inctx(file, ctx):
112 112 try:
113 113 if ctx.rev() is None:
114 114 return file in ctx.manifest()
115 115 ctx[file]
116 116 return True
117 117 except KeyError:
118 118 return False
119 119
120 120 if match is None:
121 121 match = match_.always(self.root, self.getcwd())
122 122
123 123 # First check if there were files specified on the
124 124 # command line. If there were, and none of them were
125 125 # largefiles, we should just bail here and let super
126 126 # handle it -- thus gaining a big performance boost.
127 127 lfdirstate = lfutil.openlfdirstate(ui, self)
128 128 if match.files() and not match.anypats():
129 129 for f in lfdirstate:
130 130 if match(f):
131 131 break
132 132 else:
133 return super(lfiles_repo, self).status(node1, node2,
133 return super(lfilesrepo, self).status(node1, node2,
134 134 match, listignored, listclean,
135 135 listunknown, listsubrepos)
136 136
137 137 # Create a copy of match that matches standins instead
138 138 # of largefiles.
139 139 def tostandin(file):
140 140 if inctx(lfutil.standin(file), ctx2):
141 141 return lfutil.standin(file)
142 142 return file
143 143
144 144 # Create a function that we can use to override what is
145 145 # normally the ignore matcher. We've already checked
146 146 # for ignored files on the first dirstate walk, and
147 147 # unnecessarily re-checking here causes a huge performance
148 148 # hit because lfdirstate only knows about largefiles
149 149 def _ignoreoverride(self):
150 150 return False
151 151
152 152 m = copy.copy(match)
153 153 m._files = [tostandin(f) for f in m._files]
154 154
155 155 # Get ignored files here even if we weren't asked for them; we
156 156 # must use the result here for filtering later
157 result = super(lfiles_repo, self).status(node1, node2, m,
157 result = super(lfilesrepo, self).status(node1, node2, m,
158 158 True, clean, unknown, listsubrepos)
159 159 if working:
160 160 try:
161 161 # Any non-largefiles that were explicitly listed must be
162 162 # taken out or lfdirstate.status will report an error.
163 163 # The status of these files was already computed using
164 164 # super's status.
165 165 # Override lfdirstate's ignore matcher to not do
166 166 # anything
167 orig_ignore = lfdirstate._ignore
167 origignore = lfdirstate._ignore
168 168 lfdirstate._ignore = _ignoreoverride
169 169
170 170 match._files = [f for f in match._files if f in
171 171 lfdirstate]
172 172 # Don't waste time getting the ignored and unknown
173 173 # files again; we already have them
174 174 s = lfdirstate.status(match, [], False,
175 175 listclean, False)
176 176 (unsure, modified, added, removed, missing, unknown,
177 177 ignored, clean) = s
178 178 # Replace the list of ignored and unknown files with
179 179 # the previously calculated lists, and strip out the
180 180 # largefiles
181 181 lfiles = set(lfdirstate._map)
182 182 ignored = set(result[5]).difference(lfiles)
183 183 unknown = set(result[4]).difference(lfiles)
184 184 if parentworking:
185 185 for lfile in unsure:
186 186 standin = lfutil.standin(lfile)
187 187 if standin not in ctx1:
188 188 # from second parent
189 189 modified.append(lfile)
190 190 elif ctx1[standin].data().strip() \
191 191 != lfutil.hashfile(self.wjoin(lfile)):
192 192 modified.append(lfile)
193 193 else:
194 194 clean.append(lfile)
195 195 lfdirstate.normal(lfile)
196 196 else:
197 197 tocheck = unsure + modified + added + clean
198 198 modified, added, clean = [], [], []
199 199
200 200 for lfile in tocheck:
201 201 standin = lfutil.standin(lfile)
202 202 if inctx(standin, ctx1):
203 203 if ctx1[standin].data().strip() != \
204 204 lfutil.hashfile(self.wjoin(lfile)):
205 205 modified.append(lfile)
206 206 else:
207 207 clean.append(lfile)
208 208 else:
209 209 added.append(lfile)
210 210 finally:
211 211 # Replace the original ignore function
212 lfdirstate._ignore = orig_ignore
212 lfdirstate._ignore = origignore
213 213
214 214 for standin in ctx1.manifest():
215 215 if not lfutil.isstandin(standin):
216 216 continue
217 217 lfile = lfutil.splitstandin(standin)
218 218 if not match(lfile):
219 219 continue
220 220 if lfile not in lfdirstate:
221 221 removed.append(lfile)
222 222
223 223 # Filter result lists
224 224 result = list(result)
225 225
226 226 # Largefiles are not really removed when they're
227 227 # still in the normal dirstate. Likewise, normal
228 228 # files are not really removed if it's still in
229 229 # lfdirstate. This happens in merges where files
230 230 # change type.
231 231 removed = [f for f in removed if f not in repo.dirstate]
232 232 result[2] = [f for f in result[2] if f not in lfdirstate]
233 233
234 234 # Unknown files
235 235 unknown = set(unknown).difference(ignored)
236 236 result[4] = [f for f in unknown
237 237 if (repo.dirstate[f] == '?' and
238 238 not lfutil.isstandin(f))]
239 239 # Ignored files were calculated earlier by the dirstate,
240 240 # and we already stripped out the largefiles from the list
241 241 result[5] = ignored
242 242 # combine normal files and largefiles
243 243 normals = [[fn for fn in filelist
244 244 if not lfutil.isstandin(fn)]
245 245 for filelist in result]
246 246 lfiles = (modified, added, removed, missing, [], [], clean)
247 247 result = [sorted(list1 + list2)
248 248 for (list1, list2) in zip(normals, lfiles)]
249 249 else:
250 250 def toname(f):
251 251 if lfutil.isstandin(f):
252 252 return lfutil.splitstandin(f)
253 253 return f
254 254 result = [[toname(f) for f in items] for items in result]
255 255
256 256 if not listunknown:
257 257 result[4] = []
258 258 if not listignored:
259 259 result[5] = []
260 260 if not listclean:
261 261 result[6] = []
262 262 self.lfstatus = True
263 263 return result
264 264
265 265 # As part of committing, copy all of the largefiles into the
266 266 # cache.
267 267 def commitctx(self, *args, **kwargs):
268 node = super(lfiles_repo, self).commitctx(*args, **kwargs)
268 node = super(lfilesrepo, self).commitctx(*args, **kwargs)
269 269 lfutil.copyalltostore(self, node)
270 270 return node
271 271
272 272 # Before commit, largefile standins have not had their
273 273 # contents updated to reflect the hash of their largefile.
274 274 # Do that here.
275 275 def commit(self, text="", user=None, date=None, match=None,
276 276 force=False, editor=False, extra={}):
277 orig = super(lfiles_repo, self).commit
277 orig = super(lfilesrepo, self).commit
278 278
279 279 wlock = repo.wlock()
280 280 try:
281 281 # Case 0: Rebase or Transplant
282 282 # We have to take the time to pull down the new largefiles now.
283 283 # Otherwise, any largefiles that were modified in the
284 284 # destination changesets get overwritten, either by the rebase
285 285 # or in the first commit after the rebase or transplant.
286 286 # updatelfiles will update the dirstate to mark any pulled
287 287 # largefiles as modified
288 288 if getattr(repo, "_isrebasing", False) or \
289 289 getattr(repo, "_istransplanting", False):
290 290 lfcommands.updatelfiles(repo.ui, repo, filelist=None,
291 291 printmessage=False)
292 292 result = orig(text=text, user=user, date=date, match=match,
293 293 force=force, editor=editor, extra=extra)
294 294 return result
295 295 # Case 1: user calls commit with no specific files or
296 296 # include/exclude patterns: refresh and commit all files that
297 297 # are "dirty".
298 298 if ((match is None) or
299 299 (not match.anypats() and not match.files())):
300 300 # Spend a bit of time here to get a list of files we know
301 301 # are modified so we can compare only against those.
302 302 # It can cost a lot of time (several seconds)
303 303 # otherwise to update all standins if the largefiles are
304 304 # large.
305 305 lfdirstate = lfutil.openlfdirstate(ui, self)
306 306 dirtymatch = match_.always(repo.root, repo.getcwd())
307 307 s = lfdirstate.status(dirtymatch, [], False, False, False)
308 308 modifiedfiles = []
309 309 for i in s:
310 310 modifiedfiles.extend(i)
311 311 lfiles = lfutil.listlfiles(self)
312 312 # this only loops through largefiles that exist (not
313 313 # removed/renamed)
314 314 for lfile in lfiles:
315 315 if lfile in modifiedfiles:
316 316 if os.path.exists(self.wjoin(lfutil.standin(lfile))):
317 317 # this handles the case where a rebase is being
318 318 # performed and the working copy is not updated
319 319 # yet.
320 320 if os.path.exists(self.wjoin(lfile)):
321 321 lfutil.updatestandin(self,
322 322 lfutil.standin(lfile))
323 323 lfdirstate.normal(lfile)
324 324 for lfile in lfdirstate:
325 325 if lfile in modifiedfiles:
326 326 if not os.path.exists(
327 327 repo.wjoin(lfutil.standin(lfile))):
328 328 lfdirstate.drop(lfile)
329 329
330 330 result = orig(text=text, user=user, date=date, match=match,
331 331 force=force, editor=editor, extra=extra)
332 332 # This needs to be after commit; otherwise precommit hooks
333 333 # get the wrong status
334 334 lfdirstate.write()
335 335 return result
336 336
337 337 for f in match.files():
338 338 if lfutil.isstandin(f):
339 339 raise util.Abort(
340 340 _('file "%s" is a largefile standin') % f,
341 341 hint=('commit the largefile itself instead'))
342 342
343 343 # Case 2: user calls commit with specified patterns: refresh
344 344 # any matching big files.
345 345 smatcher = lfutil.composestandinmatcher(self, match)
346 standins = lfutil.dirstate_walk(self.dirstate, smatcher)
346 standins = lfutil.dirstatewalk(self.dirstate, smatcher)
347 347
348 348 # No matching big files: get out of the way and pass control to
349 349 # the usual commit() method.
350 350 if not standins:
351 351 return orig(text=text, user=user, date=date, match=match,
352 352 force=force, editor=editor, extra=extra)
353 353
354 354 # Refresh all matching big files. It's possible that the
355 355 # commit will end up failing, in which case the big files will
356 356 # stay refreshed. No harm done: the user modified them and
357 357 # asked to commit them, so sooner or later we're going to
358 358 # refresh the standins. Might as well leave them refreshed.
359 359 lfdirstate = lfutil.openlfdirstate(ui, self)
360 360 for standin in standins:
361 361 lfile = lfutil.splitstandin(standin)
362 362 if lfdirstate[lfile] <> 'r':
363 363 lfutil.updatestandin(self, standin)
364 364 lfdirstate.normal(lfile)
365 365 else:
366 366 lfdirstate.drop(lfile)
367 367
368 368 # Cook up a new matcher that only matches regular files or
369 369 # standins corresponding to the big files requested by the
370 370 # user. Have to modify _files to prevent commit() from
371 371 # complaining "not tracked" for big files.
372 372 lfiles = lfutil.listlfiles(repo)
373 373 match = copy.copy(match)
374 orig_matchfn = match.matchfn
374 origmatchfn = match.matchfn
375 375
376 376 # Check both the list of largefiles and the list of
377 377 # standins because if a largefile was removed, it
378 378 # won't be in the list of largefiles at this point
379 379 match._files += sorted(standins)
380 380
381 381 actualfiles = []
382 382 for f in match._files:
383 383 fstandin = lfutil.standin(f)
384 384
385 385 # ignore known largefiles and standins
386 386 if f in lfiles or fstandin in standins:
387 387 continue
388 388
389 389 # append directory separator to avoid collisions
390 390 if not fstandin.endswith(os.sep):
391 391 fstandin += os.sep
392 392
393 393 # prevalidate matching standin directories
394 394 if util.any(st for st in match._files
395 395 if st.startswith(fstandin)):
396 396 continue
397 397 actualfiles.append(f)
398 398 match._files = actualfiles
399 399
400 400 def matchfn(f):
401 if orig_matchfn(f):
401 if origmatchfn(f):
402 402 return f not in lfiles
403 403 else:
404 404 return f in standins
405 405
406 406 match.matchfn = matchfn
407 407 result = orig(text=text, user=user, date=date, match=match,
408 408 force=force, editor=editor, extra=extra)
409 409 # This needs to be after commit; otherwise precommit hooks
410 410 # get the wrong status
411 411 lfdirstate.write()
412 412 return result
413 413 finally:
414 414 wlock.release()
415 415
416 416 def push(self, remote, force=False, revs=None, newbranch=False):
417 417 o = lfutil.findoutgoing(repo, remote, force)
418 418 if o:
419 419 toupload = set()
420 420 o = repo.changelog.nodesbetween(o, revs)[0]
421 421 for n in o:
422 422 parents = [p for p in repo.changelog.parents(n)
423 423 if p != node_.nullid]
424 424 ctx = repo[n]
425 425 files = set(ctx.files())
426 426 if len(parents) == 2:
427 427 mc = ctx.manifest()
428 428 mp1 = ctx.parents()[0].manifest()
429 429 mp2 = ctx.parents()[1].manifest()
430 430 for f in mp1:
431 431 if f not in mc:
432 432 files.add(f)
433 433 for f in mp2:
434 434 if f not in mc:
435 435 files.add(f)
436 436 for f in mc:
437 437 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f,
438 438 None):
439 439 files.add(f)
440 440
441 441 toupload = toupload.union(
442 442 set([ctx[f].data().strip()
443 443 for f in files
444 444 if lfutil.isstandin(f) and f in ctx]))
445 445 lfcommands.uploadlfiles(ui, self, remote, toupload)
446 return super(lfiles_repo, self).push(remote, force, revs,
446 return super(lfilesrepo, self).push(remote, force, revs,
447 447 newbranch)
448 448
449 repo.__class__ = lfiles_repo
449 repo.__class__ = lfilesrepo
450 450
451 451 def checkrequireslfiles(ui, repo, **kwargs):
452 452 if 'largefiles' not in repo.requirements and util.any(
453 453 lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
454 454 repo.requirements.add('largefiles')
455 455 repo._writerequirements()
456 456
457 457 ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles)
458 458 ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles)
@@ -1,143 +1,143 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''setup for largefiles extension: uisetup'''
10 10
11 11 from mercurial import archival, cmdutil, commands, extensions, filemerge, hg, \
12 12 httprepo, localrepo, merge, sshrepo, sshserver, wireproto
13 13 from mercurial.i18n import _
14 14 from mercurial.hgweb import hgweb_mod, protocol
15 15
16 16 import overrides
17 17 import proto
18 18
19 19 def uisetup(ui):
20 20 # Disable auto-status for some commands which assume that all
21 21 # files in the result are under Mercurial's control
22 22
23 23 entry = extensions.wrapcommand(commands.table, 'add',
24 overrides.override_add)
24 overrides.overrideadd)
25 25 addopt = [('', 'large', None, _('add as largefile')),
26 26 ('', 'normal', None, _('add as normal file')),
27 27 ('', 'lfsize', '', _('add all files above this size '
28 28 '(in megabytes) as largefiles '
29 29 '(default: 10)'))]
30 30 entry[1].extend(addopt)
31 31
32 32 entry = extensions.wrapcommand(commands.table, 'addremove',
33 overrides.override_addremove)
33 overrides.overrideaddremove)
34 34 entry = extensions.wrapcommand(commands.table, 'remove',
35 overrides.override_remove)
35 overrides.overrideremove)
36 36 entry = extensions.wrapcommand(commands.table, 'forget',
37 overrides.override_forget)
37 overrides.overrideforget)
38 38 entry = extensions.wrapcommand(commands.table, 'status',
39 overrides.override_status)
39 overrides.overridestatus)
40 40 entry = extensions.wrapcommand(commands.table, 'log',
41 overrides.override_log)
41 overrides.overridelog)
42 42 entry = extensions.wrapcommand(commands.table, 'rollback',
43 overrides.override_rollback)
43 overrides.overriderollback)
44 44 entry = extensions.wrapcommand(commands.table, 'verify',
45 overrides.override_verify)
45 overrides.overrideverify)
46 46
47 47 verifyopt = [('', 'large', None, _('verify largefiles')),
48 48 ('', 'lfa', None,
49 49 _('verify all revisions of largefiles not just current')),
50 50 ('', 'lfc', None,
51 51 _('verify largefile contents not just existence'))]
52 52 entry[1].extend(verifyopt)
53 53
54 54 entry = extensions.wrapcommand(commands.table, 'outgoing',
55 overrides.override_outgoing)
55 overrides.overrideoutgoing)
56 56 outgoingopt = [('', 'large', None, _('display outgoing largefiles'))]
57 57 entry[1].extend(outgoingopt)
58 58 entry = extensions.wrapcommand(commands.table, 'summary',
59 overrides.override_summary)
59 overrides.overridesummary)
60 60 summaryopt = [('', 'large', None, _('display outgoing largefiles'))]
61 61 entry[1].extend(summaryopt)
62 62
63 63 entry = extensions.wrapcommand(commands.table, 'update',
64 overrides.override_update)
64 overrides.overrideupdate)
65 65 entry = extensions.wrapcommand(commands.table, 'pull',
66 overrides.override_pull)
66 overrides.overridepull)
67 67 entry = extensions.wrapfunction(merge, '_checkunknownfile',
68 overrides.override_checkunknownfile)
68 overrides.overridecheckunknownfile)
69 69 entry = extensions.wrapfunction(merge, 'manifestmerge',
70 overrides.override_manifestmerge)
70 overrides.overridemanifestmerge)
71 71 entry = extensions.wrapfunction(filemerge, 'filemerge',
72 overrides.override_filemerge)
72 overrides.overridefilemerge)
73 73 entry = extensions.wrapfunction(cmdutil, 'copy',
74 overrides.override_copy)
74 overrides.overridecopy)
75 75
76 76 # Backout calls revert so we need to override both the command and the
77 77 # function
78 78 entry = extensions.wrapcommand(commands.table, 'revert',
79 overrides.override_revert)
79 overrides.overriderevert)
80 80 entry = extensions.wrapfunction(commands, 'revert',
81 overrides.override_revert)
81 overrides.overriderevert)
82 82
83 83 # clone uses hg._update instead of hg.update even though they are the
84 84 # same function... so wrap both of them)
85 extensions.wrapfunction(hg, 'update', overrides.hg_update)
86 extensions.wrapfunction(hg, '_update', overrides.hg_update)
87 extensions.wrapfunction(hg, 'clean', overrides.hg_clean)
88 extensions.wrapfunction(hg, 'merge', overrides.hg_merge)
85 extensions.wrapfunction(hg, 'update', overrides.hgupdate)
86 extensions.wrapfunction(hg, '_update', overrides.hgupdate)
87 extensions.wrapfunction(hg, 'clean', overrides.hgclean)
88 extensions.wrapfunction(hg, 'merge', overrides.hgmerge)
89 89
90 extensions.wrapfunction(archival, 'archive', overrides.override_archive)
90 extensions.wrapfunction(archival, 'archive', overrides.overridearchive)
91 91 extensions.wrapfunction(cmdutil, 'bailifchanged',
92 overrides.override_bailifchanged)
92 overrides.overridebailifchanged)
93 93
94 94 # create the new wireproto commands ...
95 95 wireproto.commands['putlfile'] = (proto.putlfile, 'sha')
96 96 wireproto.commands['getlfile'] = (proto.getlfile, 'sha')
97 97 wireproto.commands['statlfile'] = (proto.statlfile, 'sha')
98 98
99 99 # ... and wrap some existing ones
100 100 wireproto.commands['capabilities'] = (proto.capabilities, '')
101 101 wireproto.commands['heads'] = (proto.heads, '')
102 102 wireproto.commands['lheads'] = (wireproto.heads, '')
103 103
104 104 # make putlfile behave the same as push and {get,stat}lfile behave
105 105 # the same as pull w.r.t. permissions checks
106 106 hgweb_mod.perms['putlfile'] = 'push'
107 107 hgweb_mod.perms['getlfile'] = 'pull'
108 108 hgweb_mod.perms['statlfile'] = 'pull'
109 109
110 110 # the hello wireproto command uses wireproto.capabilities, so it won't see
111 111 # our largefiles capability unless we replace the actual function as well.
112 proto.capabilities_orig = wireproto.capabilities
112 proto.capabilitiesorig = wireproto.capabilities
113 113 wireproto.capabilities = proto.capabilities
114 114
115 115 # these let us reject non-largefiles clients and make them display
116 116 # our error messages
117 protocol.webproto.refuseclient = proto.webproto_refuseclient
118 sshserver.sshserver.refuseclient = proto.sshproto_refuseclient
117 protocol.webproto.refuseclient = proto.webprotorefuseclient
118 sshserver.sshserver.refuseclient = proto.sshprotorefuseclient
119 119
120 120 # can't do this in reposetup because it needs to have happened before
121 121 # wirerepo.__init__ is called
122 proto.ssh_oldcallstream = sshrepo.sshrepository._callstream
123 proto.http_oldcallstream = httprepo.httprepository._callstream
124 sshrepo.sshrepository._callstream = proto.sshrepo_callstream
125 httprepo.httprepository._callstream = proto.httprepo_callstream
122 proto.ssholdcallstream = sshrepo.sshrepository._callstream
123 proto.httpoldcallstream = httprepo.httprepository._callstream
124 sshrepo.sshrepository._callstream = proto.sshrepocallstream
125 httprepo.httprepository._callstream = proto.httprepocallstream
126 126
127 127 # don't die on seeing a repo with the largefiles requirement
128 128 localrepo.localrepository.supported |= set(['largefiles'])
129 129
130 130 # override some extensions' stuff as well
131 131 for name, module in extensions.extensions():
132 132 if name == 'fetch':
133 133 extensions.wrapcommand(getattr(module, 'cmdtable'), 'fetch',
134 overrides.override_fetch)
134 overrides.overridefetch)
135 135 if name == 'purge':
136 136 extensions.wrapcommand(getattr(module, 'cmdtable'), 'purge',
137 overrides.override_purge)
137 overrides.overridepurge)
138 138 if name == 'rebase':
139 139 extensions.wrapcommand(getattr(module, 'cmdtable'), 'rebase',
140 overrides.override_rebase)
140 overrides.overriderebase)
141 141 if name == 'transplant':
142 142 extensions.wrapcommand(getattr(module, 'cmdtable'), 'transplant',
143 overrides.override_transplant)
143 overrides.overridetransplant)
General Comments 0
You need to be logged in to leave comments. Login now