##// END OF EJS Templates
largefiles: cosmetics, whitespace, code style...
Greg Ward -
r15255:7ab05d75 default
parent child Browse files
Show More
@@ -1,202 +1,202 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''base class for store implementations and store-related utility code'''
10 10
11 11 import os
12 12 import tempfile
13 13 import binascii
14 14 import re
15 15
16 16 from mercurial import util, node, hg
17 17 from mercurial.i18n import _
18 18
19 19 import lfutil
20 20
class StoreError(Exception):
    '''Raised when there is a problem getting files from or putting
    files to a central store.'''

    def __init__(self, filename, hash, url, detail):
        self.filename = filename
        self.hash = hash
        self.url = url
        self.detail = detail

    def longmessage(self):
        # Mention the failing URL when one is known; otherwise point the
        # user at the missing hgrc path configuration.
        if self.url:
            return ('%s: %s\n'
                    '(failed URL: %s)\n'
                    % (self.filename, self.detail, self.url))
        return ('%s: %s\n'
                '(no default or default-push path set in hgrc)\n'
                % (self.filename, self.detail))

    def __str__(self):
        return "%s: %s" % (self.url, self.detail)
42 42
class basestore(object):
    '''Abstract base class for largefile stores.

    A store maps largefile hashes to file contents kept in some central
    location (local directory, HTTP or SSH server).  Concrete subclasses
    implement put()/exists()/_getfile()/_verifyfile().
    '''

    def __init__(self, ui, repo, url):
        self.ui = ui
        self.repo = repo
        self.url = url

    def put(self, source, hash):
        '''Put source file into the store under <filename>/<hash>.'''
        raise NotImplementedError('abstract method')

    def exists(self, hash):
        '''Check to see if the store contains the given hash.'''
        raise NotImplementedError('abstract method')

    def get(self, files):
        '''Get the specified largefiles from the store and write to local
        files under repo.root.  files is a list of (filename, hash)
        tuples.  Return (success, missing), lists of files successfully
        downloaded and those not found in the store.  success is a list
        of (filename, hash) tuples; missing is a list of filenames that
        we could not get.  (The detailed error message will already have
        been presented to the user, so missing is just supplied as a
        summary.)'''
        success = []
        missing = []
        ui = self.ui

        at = 0
        for filename, hash in files:
            ui.progress(_('getting largefiles'), at, unit='lfile',
                        total=len(files))
            at += 1
            ui.note(_('getting %s:%s\n') % (filename, hash))

            cachefilename = lfutil.cachepath(self.repo, hash)
            cachedir = os.path.dirname(cachefilename)

            # No need to pass mode='wb' to fdopen(), since mkstemp() already
            # opened the file in binary mode.
            (tmpfd, tmpfilename) = tempfile.mkstemp(
                dir=cachedir, prefix=os.path.basename(filename))
            tmpfile = os.fdopen(tmpfd, 'w')

            try:
                hhash = binascii.hexlify(
                    self._getfile(tmpfile, filename, hash))
            except StoreError as err:
                # _getfile already explained the failure in detail.
                ui.warn(err.longmessage())
                hhash = ""

            if hhash != hash:
                if hhash != "":
                    ui.warn(_('%s: data corruption (expected %s, got %s)\n')
                            % (filename, hash, hhash))
                tmpfile.close() # no-op if it's already closed
                os.remove(tmpfilename)
                missing.append(filename)
                continue

            if os.path.exists(cachefilename): # Windows
                os.remove(cachefilename)
            os.rename(tmpfilename, cachefilename)
            lfutil.linktosystemcache(self.repo, hash)
            success.append((filename, hhash))

        ui.progress(_('getting largefiles'), None)
        return (success, missing)

    def verify(self, revs, contents=False):
        '''Verify the existence (and, optionally, contents) of every big
        file revision referenced by every changeset in revs.
        Return 0 if all is well, non-zero on any errors.'''
        write = self.ui.write
        failed = False

        write(_('searching %d changesets for largefiles\n') % len(revs))
        verified = set()                # set of (filename, filenode) tuples

        for rev in revs:
            cctx = self.repo[rev]
            cset = "%d:%s" % (cctx.rev(), node.short(cctx.node()))

            # Bug fix: accumulate failures instead of reassigning the
            # flag each changeset, which silently forgot failures from
            # earlier revisions.
            if lfutil.any_(self._verifyfile(
                    cctx, cset, contents, standin, verified)
                           for standin in cctx):
                failed = True

        num_revs = len(verified)
        num_lfiles = len(set([fname for (fname, fnode) in verified]))
        if contents:
            write(_('verified contents of %d revisions of %d largefiles\n')
                  % (num_revs, num_lfiles))
        else:
            write(_('verified existence of %d revisions of %d largefiles\n')
                  % (num_revs, num_lfiles))

        return int(failed)

    def _getfile(self, tmpfile, filename, hash):
        '''Fetch one revision of one file from the store and write it
        to tmpfile.  Compute the hash of the file on-the-fly as it
        downloads and return the binary hash.  Close tmpfile.  Raise
        StoreError if unable to download the file (e.g. it does not
        exist in the store).'''
        raise NotImplementedError('abstract method')

    def _verifyfile(self, cctx, cset, contents, standin, verified):
        '''Perform the actual verification of a file in the store.
        '''
        raise NotImplementedError('abstract method')
150 150
import localstore, wirestore

# Map each supported URL scheme to the store implementations that can
# serve it, in order of preference.
_storeprovider = {
    'file': [localstore.localstore],
    'http': [wirestore.wirestore],
    'https': [wirestore.wirestore],
    'ssh': [wirestore.wirestore],
    }

_scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://')

# During clone this function is passed the src's ui object
# but it needs the dest's ui object so it can read out of
# the config file. Use repo.ui instead.
def _openstore(repo, remote=None, put=False):
    '''Return a store object through which repo's largefiles can be
    fetched (or, with put=True, uploaded).  remote may be a peer or a
    path; when omitted it is derived from lfpullsource or the
    default-push/default paths.  Raises util.Abort for unsupported
    schemes or paths that are not largefile stores.'''
    ui = repo.ui

    if not remote:
        path = (getattr(repo, 'lfpullsource', None) or
                ui.expandpath('default-push', 'default'))

        # ui.expandpath() leaves 'default-push' and 'default' alone if
        # they cannot be expanded: fallback to the empty string,
        # meaning the current directory.
        if path == 'default-push' or path == 'default':
            path = ''
            remote = repo
        else:
            remote = hg.peer(repo, {}, path)

    # The path could be a scheme so use Mercurial's normal functionality
    # to resolve the scheme to a repository and use its path
    path = util.safehasattr(remote, 'url') and remote.url() or remote.path

    match = _scheme_re.match(path)
    if not match:                   # regular filesystem path
        scheme = 'file'
    else:
        scheme = match.group(1)

    try:
        storeproviders = _storeprovider[scheme]
    except KeyError:
        raise util.Abort(_('unsupported URL scheme %r') % scheme)

    for class_obj in storeproviders:
        try:
            return class_obj(ui, repo, remote)
        except lfutil.storeprotonotcapable:
            pass

    # Bug fix: the message must be %-formatted with path; the old code
    # passed path as a second argument to Abort, so it was never
    # interpolated into the error message.
    raise util.Abort(_('%s does not appear to be a largefile store') % path)
@@ -1,482 +1,481 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''High-level command function for lfconvert, plus the cmdtable.'''
10 10
11 11 import os
12 12 import shutil
13 13
14 14 from mercurial import util, match as match_, hg, node, context, error
15 15 from mercurial.i18n import _
16 16
17 17 import lfutil
18 18 import basestore
19 19
20 20 # -- Commands ----------------------------------------------------------
21 21
def lfconvert(ui, src, dest, *pats, **opts):
    '''convert a normal repository to a largefiles repository

    Convert repository SOURCE to a new repository DEST, identical to
    SOURCE except that certain files will be converted as largefiles:
    specifically, any file that matches any PATTERN *or* whose size is
    above the minimum size threshold is converted as a largefile. The
    size used to determine whether or not to track a file as a
    largefile is the size of the first version of the file. The
    minimum size can be specified either with --size or in
    configuration as ``largefiles.size``.

    After running this command you will need to make sure that
    largefiles is enabled anywhere you intend to push the new
    repository.

    Use --tonormal to convert largefiles back to normal files; after
    this, the DEST repository can be used without largefiles at all.'''

    if opts['tonormal']:
        tolfile = False
    else:
        tolfile = True
        size = lfutil.getminsize(ui, True, opts.get('size'), default=None)
    try:
        rsrc = hg.repository(ui, src)
        if not rsrc.local():
            raise util.Abort(_('%s is not a local Mercurial repo') % src)
    except error.RepoError as err:
        ui.traceback()
        raise util.Abort(err.args[0])
    if os.path.exists(dest):
        if not os.path.isdir(dest):
            raise util.Abort(_('destination %s already exists') % dest)
        elif os.listdir(dest):
            raise util.Abort(_('destination %s is not empty') % dest)
    try:
        ui.status(_('initializing destination %s\n') % dest)
        rdst = hg.repository(ui, dest, create=True)
        if not rdst.local():
            raise util.Abort(_('%s is not a local Mercurial repo') % dest)
    except error.RepoError:
        ui.traceback()
        raise util.Abort(_('%s is not a repo') % dest)

    success = False
    # Bug fix: dst_lock must exist before entering the try block;
    # otherwise, if rdst.lock() itself raises, the finally clause hits
    # an unbound name and masks the original error.
    dst_lock = None
    try:
        # Lock destination to prevent modification while it is converted to.
        # Don't need to lock src because we are just reading from its history
        # which can't change.
        dst_lock = rdst.lock()

        # Get a list of all changesets in the source.  The easy way to do this
        # is to simply walk the changelog, using changelog.nodesbetween().
        # Take a look at mercurial/revlog.py:639 for more details.
        # Use a generator instead of a list to decrease memory usage
        ctxs = (rsrc[ctx] for ctx in rsrc.changelog.nodesbetween(None,
            rsrc.heads())[0])
        revmap = {node.nullid: node.nullid}
        if tolfile:
            lfiles = set()
            normalfiles = set()
            if not pats:
                pats = ui.config(lfutil.longname, 'patterns', default=())
                if pats:
                    pats = pats.split(' ')
            if pats:
                matcher = match_.match(rsrc.root, '', list(pats))
            else:
                matcher = None

            lfiletohash = {}
            for ctx in ctxs:
                ui.progress(_('converting revisions'), ctx.rev(),
                    unit=_('revision'), total=rsrc['tip'].rev())
                _lfconvert_addchangeset(rsrc, rdst, ctx, revmap,
                    lfiles, normalfiles, matcher, size, lfiletohash)
            ui.progress(_('converting revisions'), None)

            if os.path.exists(rdst.wjoin(lfutil.shortname)):
                shutil.rmtree(rdst.wjoin(lfutil.shortname))

            for f in lfiletohash.keys():
                if os.path.isfile(rdst.wjoin(f)):
                    os.unlink(rdst.wjoin(f))
                try:
                    os.removedirs(os.path.dirname(rdst.wjoin(f)))
                except OSError:
                    pass

        else:
            for ctx in ctxs:
                ui.progress(_('converting revisions'), ctx.rev(),
                    unit=_('revision'), total=rsrc['tip'].rev())
                _addchangeset(ui, rsrc, rdst, ctx, revmap)

            ui.progress(_('converting revisions'), None)
        success = True
    finally:
        if not success:
            # we failed, remove the new directory
            shutil.rmtree(rdst.root)
        if dst_lock:
            dst_lock.release()
125 125
def _addchangeset(ui, rsrc, rdst, ctx, revmap):
    '''Replay changeset ctx from rsrc into rdst for the --tonormal
    direction, materializing standins back into real file contents,
    and record the converted node in revmap.'''
    # Convert src parents to dst parents
    parents = [revmap[p.node()] for p in ctx.parents()]
    while len(parents) < 2:
        parents.append(node.nullid)

    # Generate list of changed files
    files = set(ctx.files())
    if node.nullid not in parents:
        mc = ctx.manifest()
        mp1 = ctx.parents()[0].manifest()
        mp2 = ctx.parents()[1].manifest()
        files |= (set(mp1) | set(mp2)) - set(mc)
        for f in mc:
            if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                files.add(f)

    def getfilectx(repo, memctx, f):
        if lfutil.standin(f) in files:
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            try:
                fctx = ctx.filectx(lfutil.standin(f))
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = lfutil.splitstandin(renamed[0])

            # the standin's content is the hash of the real largefile
            hash = fctx.data().strip()
            path = lfutil.findfile(rsrc, hash)
            ### TODO: What if the file is not cached?
            data = ''
            fd = None
            try:
                fd = open(path, 'rb')
                data = fd.read()
            finally:
                if fd:
                    fd.close()
            return context.memfilectx(f, data, 'l' in fctx.flags(),
                                      'x' in fctx.flags(), renamed)
        else:
            try:
                fctx = ctx.filectx(f)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = renamed[0]
            data = fctx.data()
            if f == '.hgtags':
                # remap tag target nodes to their converted counterparts
                newdata = []
                for line in data.splitlines():
                    id, name = line.split(' ', 1)
                    newdata.append('%s %s\n'
                                   % (node.hex(revmap[node.bin(id)]), name))
                data = ''.join(newdata)
            return context.memfilectx(f, data, 'l' in fctx.flags(),
                                      'x' in fctx.flags(), renamed)

    dstfiles = []
    for filename in files:
        if lfutil.isstandin(filename):
            dstfiles.append(lfutil.splitstandin(filename))
        else:
            dstfiles.append(filename)
    # Commit
    mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
                          getfilectx, ctx.user(), ctx.date(), ctx.extra())
    ret = rdst.commitctx(mctx)
    rdst.dirstate.setparents(ret)
    revmap[ctx.node()] = rdst.changelog.tip()
201 201
def _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles,
        matcher, size, lfiletohash):
    '''Replay changeset ctx from rsrc into rdst, converting files that
    qualify (per matcher/size) into largefiles with standins.

    Mutates lfiles, normalfiles and lfiletohash in place and records
    the converted node in revmap.'''
    # Convert src parents to dst parents
    parents = []
    for p in ctx.parents():
        parents.append(revmap[p.node()])
    while len(parents) < 2:
        parents.append(node.nullid)

    # Generate list of changed files
    files = set(ctx.files())
    if node.nullid not in parents:
        mc = ctx.manifest()
        mp1 = ctx.parents()[0].manifest()
        mp2 = ctx.parents()[1].manifest()
        files |= (set(mp1) | set(mp2)) - set(mc)
        for f in mc:
            if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                files.add(f)

    dstfiles = []
    for f in files:
        if f not in lfiles and f not in normalfiles:
            islfile = _islfile(f, ctx, matcher, size)
            # If this file was renamed or copied then copy
            # the lfileness of its predecessor
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                renamed = fctx.renamed()
                renamedlfile = renamed and renamed[0] in lfiles
                islfile |= renamedlfile
                if 'l' in fctx.flags():
                    if renamedlfile:
                        raise util.Abort(
                            _('Renamed/copied largefile %s becomes symlink')
                            % f)
                    islfile = False
            if islfile:
                lfiles.add(f)
            else:
                normalfiles.add(f)

        if f in lfiles:
            dstfiles.append(lfutil.standin(f))
            # largefile in manifest if it has not been removed/renamed
            if f in ctx.manifest():
                fctx = ctx.filectx(f)
                if 'l' in fctx.flags():
                    # Bug fix: recompute renamed here.  The old code read
                    # a 'renamed' that was only set in the classification
                    # branch above, so it could be unbound (f classified
                    # in an earlier call) or stale from a previous loop
                    # iteration.
                    renamed = fctx.renamed()
                    if renamed and renamed[0] in lfiles:
                        raise util.Abort(_('largefile %s becomes symlink')
                                         % f)

                # largefile was modified, update standins
                fullpath = rdst.wjoin(f)
                lfutil.createdir(os.path.dirname(fullpath))
                m = util.sha1('')
                m.update(ctx[f].data())
                hash = m.hexdigest()
                if f not in lfiletohash or lfiletohash[f] != hash:
                    # Bug fix: initialize fd before the try block so the
                    # finally clause cannot hit an unbound name when
                    # open() itself fails.
                    fd = None
                    try:
                        fd = open(fullpath, 'wb')
                        fd.write(ctx[f].data())
                    finally:
                        if fd:
                            fd.close()
                    executable = 'x' in ctx[f].flags()
                    os.chmod(fullpath, lfutil.getmode(executable))
                    lfutil.writestandin(rdst, lfutil.standin(f), hash,
                        executable)
                    lfiletohash[f] = hash
        else:
            # normal file
            dstfiles.append(f)

    def getfilectx(repo, memctx, f):
        if lfutil.isstandin(f):
            # if the file isn't in the manifest then it was removed
            # or renamed, raise IOError to indicate this
            srcfname = lfutil.splitstandin(f)
            try:
                fctx = ctx.filectx(srcfname)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                # standin is always a largefile because largefile-ness
                # doesn't change after rename or copy
                renamed = lfutil.standin(renamed[0])

            return context.memfilectx(f, lfiletohash[srcfname], 'l' in
                fctx.flags(), 'x' in fctx.flags(), renamed)
        else:
            try:
                fctx = ctx.filectx(f)
            except error.LookupError:
                raise IOError()
            renamed = fctx.renamed()
            if renamed:
                renamed = renamed[0]

            data = fctx.data()
            if f == '.hgtags':
                newdata = []
                for line in data.splitlines():
                    id, name = line.split(' ', 1)
                    newdata.append('%s %s\n' % (node.hex(revmap[node.bin(id)]),
                        name))
                data = ''.join(newdata)
            return context.memfilectx(f, data, 'l' in fctx.flags(),
                                      'x' in fctx.flags(), renamed)

    # Commit
    mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
                          getfilectx, ctx.user(), ctx.date(), ctx.extra())
    ret = rdst.commitctx(mctx)
    rdst.dirstate.setparents(ret)
    revmap[ctx.node()] = rdst.changelog.tip()
317 317
318 318 def _islfile(file, ctx, matcher, size):
319 319 '''Return true if file should be considered a largefile, i.e.
320 320 matcher matches it or it is larger than size.'''
321 321 # never store special .hg* files as largefiles
322 322 if file == '.hgtags' or file == '.hgignore' or file == '.hgsigs':
323 323 return False
324 324 if matcher and matcher(file):
325 325 return True
326 326 try:
327 327 return ctx.filectx(file).size() >= size * 1024 * 1024
328 328 except error.LookupError:
329 329 return False
330 330
def uploadlfiles(ui, rsrc, rdst, files):
    '''upload largefiles to the central store'''

    # Don't upload locally. All largefiles are in the system wide cache
    # so the other repo can just get them from there.
    if not files or rdst.local():
        return

    store = basestore._openstore(rsrc, rdst, put=True)

    # skip hashes the store already holds
    files = [h for h in files if not store.exists(h)]
    total = len(files)
    for at, hash in enumerate(files):
        ui.progress(_('uploading largefiles'), at, unit='largefile',
                    total=total)
        source = lfutil.findfile(rsrc, hash)
        if not source:
            raise util.Abort(_('largefile %s missing from store'
                               ' (needs to be uploaded)') % hash)
        # XXX check for errors here
        store.put(source, hash)
    ui.progress(_('uploading largefiles'), None)
354 354
def verifylfiles(ui, repo, all=False, contents=False):
    '''Verify that every big file revision in the current changeset
    exists in the central store.  With --contents, also verify that
    the contents of each big file revision are correct (SHA-1 hash
    matches the revision ID).  With --all, check every changeset in
    this repository.'''
    if all:
        # store.verify() wants something indexable, so hand it a real
        # list rather than an iterator.
        revs = range(len(repo))
    else:
        revs = ['.']

    return basestore._openstore(repo).verify(revs, contents=contents)
370 370
def cachelfiles(ui, repo, node):
    '''cachelfiles ensures that all largefiles needed by the specified revision
    are present in the repository's largefile cache.

    returns a tuple (cached, missing). cached is the list of files downloaded
    by this operation; missing is the list of files that were needed but could
    not be found.'''
    ctx = repo[node]
    toget = []
    for lfile in lfutil.listlfiles(repo, node):
        expectedhash = ctx[lfutil.standin(lfile)].data().strip()
        # if it exists and its hash matches, it might have been locally
        # modified before updating and the user chose 'local'. in this case,
        # it will not be in any store, so don't look for it.
        wfile = repo.wjoin(lfile)
        if ((not os.path.exists(wfile) or
             expectedhash != lfutil.hashfile(wfile)) and
            not lfutil.findfile(repo, expectedhash)):
            toget.append((lfile, expectedhash))

    if toget:
        return basestore._openstore(repo).get(toget)

    return ([], [])
397 397
def updatelfiles(ui, repo, filelist=None, printmessage=True):
    '''Bring the working copy's largefiles in sync with their standins,
    optionally restricted to filelist, reporting progress on ui.'''
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)

        if filelist is not None:
            lfiles = [f for f in lfiles if f in filelist]

        printed = False
        if printmessage and lfiles:
            ui.status(_('getting changed largefiles\n'))
            printed = True
        cachelfiles(ui, repo, '.')

        updated, removed = 0, 0
        # update every file first (list comprehension is eager, matching
        # the original py2 map()), then tally the results
        results = [_updatelfile(repo, lfdirstate, f) for f in lfiles]
        for res in results:
            # _updatelfile returns 1 (updated), -1 (removed), 0
            # (unchanged) or None (missing from cache)
            if res:
                if res > 0:
                    updated += res
                else:
                    removed -= res
            if printmessage and (removed or updated) and not printed:
                ui.status(_('getting changed largefiles\n'))
                printed = True

        lfdirstate.write()
        if printed and printmessage:
            ui.status(_('%d largefiles updated, %d removed\n') % (updated,
                                                                  removed))
    finally:
        wlock.release()
429 429
def _updatelfile(repo, lfdirstate, lfile):
    '''updates a single largefile and copies the state of its standin from
    the repository's dirstate to its state in the lfdirstate.

    returns 1 if the file was modified, -1 if the file was removed, 0 if the
    file was unchanged, and None if the needed largefile was missing from the
    cache.'''
    ret = 0
    abslfile = repo.wjoin(lfile)
    absstandin = repo.wjoin(lfutil.standin(lfile))
    if os.path.exists(absstandin):
        if os.path.exists(absstandin + '.orig'):
            # keep a backup of the largefile matching the standin's backup
            shutil.copyfile(abslfile, abslfile + '.orig')
        expecthash = lfutil.readstandin(repo, lfile)
        if (expecthash != '' and
            (not os.path.exists(abslfile) or
             expecthash != lfutil.hashfile(abslfile))):
            if not lfutil.copyfromcache(repo, expecthash, lfile):
                return None # don't try to set the mode or update the dirstate
            ret = 1
        mode = os.stat(absstandin).st_mode
        if mode != os.stat(abslfile).st_mode:
            os.chmod(abslfile, mode)
            ret = 1
    elif os.path.exists(abslfile):
        # standin is gone: the largefile was removed
        os.unlink(abslfile)
        ret = -1
    standinstate = repo.dirstate[lfutil.standin(lfile)]
    if standinstate == 'n':
        lfdirstate.normal(lfile)
    elif standinstate == 'r':
        lfdirstate.remove(lfile)
    elif standinstate == 'a':
        lfdirstate.add(lfile)
    elif standinstate == '?':
        lfdirstate.drop(lfile)
    return ret
468 468
# -- hg commands declarations ------------------------------------------------

cmdtable = {
    'lfconvert': (
        lfconvert,
        [('s', 'size', '',
          _('minimum size (MB) for files to be converted '
            'as largefiles'),
          'SIZE'),
         ('', 'tonormal', False,
          _('convert from a largefiles repo to a normal repo')),
         ],
        _('hg lfconvert SOURCE DEST [FILE ...]')),
    }
@@ -1,446 +1,448 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10
11 11 import os
12 12 import errno
13 13 import shutil
14 14 import stat
15 15 import hashlib
16 16
17 17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 18 from mercurial.i18n import _
19 19
20 20 shortname = '.hglf'
21 21 longname = 'largefiles'
22 22
23 23
24 24 # -- Portability wrappers ----------------------------------------------
25 25
def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
    '''Walk dirstate with matcher (portability wrapper); the empty list
    is the subrepos argument, which largefiles never uses.'''
    return dirstate.walk(matcher, [], unknown, ignored)
28 28
def repo_add(repo, list):
    '''Schedule the given files for addition in repo's working context
    (portability wrapper).'''
    return repo[None].add(list)
32 32
def repo_remove(repo, list, unlink=False):
    '''Forget the given files in repo's working context, optionally
    unlinking them from the working directory first (portability
    wrapper).  Already-missing files are tolerated when unlinking.
    Returns None.'''
    # The original wrapped this body in a nested function that was
    # defined and then immediately called once — pointless indirection,
    # now inlined.
    wlock = repo.wlock()
    try:
        if unlink:
            for f in list:
                try:
                    util.unlinkpath(repo.wjoin(f))
                except OSError as inst:
                    # a file that is already gone is fine
                    if inst.errno != errno.ENOENT:
                        raise
        repo[None].forget(list)
    finally:
        wlock.release()
48 48
def repo_forget(repo, list):
    '''Forget (stop tracking) the given files in repo's working context
    (portability wrapper).'''
    return repo[None].forget(list)
52 52
def findoutgoing(repo, remote, force):
    '''Return the changelog nodes present in repo but missing from
    remote (portability wrapper around discovery).'''
    from mercurial import discovery
    common, _anyinc, _heads = discovery.findcommonincoming(
        repo, remote, force=force)
    return repo.changelog.findmissing(common)
58 58
59 59 # -- Private worker functions ------------------------------------------
60 60
def getminsize(ui, assumelfiles, opt, default=10):
    '''Return the minimum largefile size threshold in megabytes as a
    float.  opt wins when given; otherwise, when largefiles are assumed,
    fall back to the largefiles.size config value (itself defaulting to
    default).  Raises util.Abort on a malformed or missing size.'''
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, 'size', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise util.Abort(_('largefiles: size must be number (not %s)\n')
                             % lfsize)
    if lfsize is None:
        raise util.Abort(_('minimum size for largefiles must be specified'))
    return lfsize
74 74
def link(src, dest):
    '''Hardlink src to dest, falling back to a mode-preserving copy
    when the platform or filesystem refuses the hardlink.'''
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on copy
        shutil.copyfile(src, dest)
        os.chmod(dest, os.stat(src).st_mode)
82 82
def systemcachepath(ui, hash):
    '''Return the path of hash inside the per-user system-wide largefile
    cache, honouring a largefiles.systemcache config override.'''
    path = ui.config(longname, 'systemcache', None)
    if path:
        return os.path.join(path, hash)
    if os.name == 'nt':
        appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
        return os.path.join(appdata, longname, hash)
    if os.name == 'posix':
        return os.path.join(os.getenv('HOME'), '.' + longname, hash)
    raise util.Abort(_('unknown operating system: %s\n') % os.name)
96 96
def insystemcache(ui, hash):
    '''True if hash is already present in the system-wide cache.'''
    return os.path.exists(systemcachepath(ui, hash))
99 99
def findfile(repo, hash):
    '''Return the cached path of the largefile with the given hash,
    preferring the repo-local cache over the system cache; None when it
    is in neither.'''
    if incache(repo, hash):
        repo.ui.note(_('Found %s in cache\n') % hash)
        return cachepath(repo, hash)
    if insystemcache(repo.ui, hash):
        repo.ui.note(_('Found %s in system cache\n') % hash)
        return systemcachepath(repo.ui, hash)
    return None
108 108
class largefiles_dirstate(dirstate.dirstate):
    '''dirstate subclass that routes every path through unixpath()
    before delegating to the base class.  NOTE(review): unixpath is
    defined elsewhere in this module — presumably it normalizes to
    '/'-separated paths; confirm against its definition.'''
    def __getitem__(self, key):
        return super(largefiles_dirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefiles_dirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefiles_dirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefiles_dirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefiles_dirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefiles_dirstate, self).forget(unixpath(f))
122 122
def openlfdirstate(ui, repo):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    admin = repo.join(longname)
    opener = scmutil.opener(admin)
    args = [opener, ui, repo.root]
    # older Mercurial dirstates take no validate callback
    if util.safehasattr(repo.dirstate, '_validate'):
        args.append(repo.dirstate._validate)
    lfdirstate = largefiles_dirstate(*args)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone. It also gives us an easy
    # way to forcibly rebuild largefiles state:
    #   rm .hg/largefiles/dirstate && hg status
    # Or even, if things are really messed up:
    #   rm -rf .hg/largefiles && hg status
    if not os.path.exists(os.path.join(admin, 'dirstate')):
        util.makedirs(admin)
        matcher = getstandinmatcher(repo)
        for standin in dirstate_walk(repo.dirstate, matcher):
            lfile = splitstandin(standin)
            hash = readstandin(repo, lfile)
            lfdirstate.normallookup(lfile)
            try:
                if hash == hashfile(lfile):
                    lfdirstate.normal(lfile)
            except IOError as err:
                # an unhashable (missing) largefile stays 'lookup'
                if err.errno != errno.ENOENT:
                    raise
        lfdirstate.write()

    return lfdirstate
160 160
def lfdirstate_status(lfdirstate, repo, rev):
    '''Compute the status of tracked largefiles against rev, resolving
    'unsure' entries by re-hashing the working copy under the wlock.'''
    wlock = repo.wlock()
    try:
        match = match_.always(repo.root, repo.getcwd())
        s = lfdirstate.status(match, [], False, False, False)
        unsure, modified, added, removed, missing, unknown, ignored, clean = s
        for lfile in unsure:
            committedhash = repo[rev][standin(lfile)].data().strip()
            if committedhash != hashfile(repo.wjoin(lfile)):
                modified.append(lfile)
            else:
                clean.append(lfile)
                lfdirstate.normal(lfile)
        lfdirstate.write()
    finally:
        wlock.release()
    return (modified, added, removed, missing, unknown, ignored, clean)
178 178
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    standins = repo[rev].walk(matcher)
    return [splitstandin(f) for f in standins
            if rev is not None or repo.dirstate[f] != '?']
189 190
190 191 def incache(repo, hash):
191 192 return os.path.exists(cachepath(repo, hash))
192 193
193 194 def createdir(dir):
194 195 if not os.path.exists(dir):
195 196 os.makedirs(dir)
196 197
197 198 def cachepath(repo, hash):
198 199 return repo.join(os.path.join(longname, hash))
199 200
200 201 def copyfromcache(repo, hash, filename):
201 202 '''Copy the specified largefile from the repo or system cache to
202 203 filename in the repository. Return true on success or false if the
203 204 file was not found in either cache (which should not happened:
204 205 this is meant to be called only after ensuring that the needed
205 206 largefile exists in the cache).'''
206 207 path = findfile(repo, hash)
207 208 if path is None:
208 209 return False
209 210 util.makedirs(os.path.dirname(repo.wjoin(filename)))
210 211 shutil.copy(path, repo.wjoin(filename))
211 212 return True
212 213
213 214 def copytocache(repo, rev, file, uploaded=False):
214 215 hash = readstandin(repo, file)
215 216 if incache(repo, hash):
216 217 return
217 218 copytocacheabsolute(repo, repo.wjoin(file), hash)
218 219
219 220 def copytocacheabsolute(repo, file, hash):
220 221 createdir(os.path.dirname(cachepath(repo, hash)))
221 222 if insystemcache(repo.ui, hash):
222 223 link(systemcachepath(repo.ui, hash), cachepath(repo, hash))
223 224 else:
224 225 shutil.copyfile(file, cachepath(repo, hash))
225 226 os.chmod(cachepath(repo, hash), os.stat(file).st_mode)
226 227 linktosystemcache(repo, hash)
227 228
228 229 def linktosystemcache(repo, hash):
229 230 createdir(os.path.dirname(systemcachepath(repo.ui, hash)))
230 231 link(cachepath(repo, hash), systemcachepath(repo.ui, hash))
231 232
232 233 def getstandinmatcher(repo, pats=[], opts={}):
233 234 '''Return a match object that applies pats to the standin directory'''
234 235 standindir = repo.pathto(shortname)
235 236 if pats:
236 237 # patterns supplied: search standin directory relative to current dir
237 238 cwd = repo.getcwd()
238 239 if os.path.isabs(cwd):
239 240 # cwd is an absolute path for hg -R <reponame>
240 241 # work relative to the repository root in this case
241 242 cwd = ''
242 243 pats = [os.path.join(standindir, cwd, pat) for pat in pats]
243 244 elif os.path.isdir(standindir):
244 245 # no patterns: relative to repo root
245 246 pats = [standindir]
246 247 else:
247 248 # no patterns and no standin dir: return matcher that matches nothing
248 249 match = match_.match(repo.root, None, [], exact=True)
249 250 match.matchfn = lambda f: False
250 251 return match
251 252 return getmatcher(repo, pats, opts, showbad=False)
252 253
253 254 def getmatcher(repo, pats=[], opts={}, showbad=True):
254 255 '''Wrapper around scmutil.match() that adds showbad: if false,
255 256 neuter the match object's bad() method so it does not print any
256 257 warnings about missing files or directories.'''
257 258 match = scmutil.match(repo[None], pats, opts)
258 259
259 260 if not showbad:
260 261 match.bad = lambda f, msg: None
261 262 return match
262 263
263 264 def composestandinmatcher(repo, rmatcher):
264 265 '''Return a matcher that accepts standins corresponding to the
265 266 files accepted by rmatcher. Pass the list of files in the matcher
266 267 as the paths specified by the user.'''
267 268 smatcher = getstandinmatcher(repo, rmatcher.files())
268 269 isstandin = smatcher.matchfn
269 270 def composed_matchfn(f):
270 271 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
271 272 smatcher.matchfn = composed_matchfn
272 273
273 274 return smatcher
274 275
275 276 def standin(filename):
276 277 '''Return the repo-relative path to the standin for the specified big
277 278 file.'''
278 279 # Notes:
279 280 # 1) Most callers want an absolute path, but _create_standin() needs
280 281 # it repo-relative so lfadd() can pass it to repo_add(). So leave
281 282 # it up to the caller to use repo.wjoin() to get an absolute path.
282 283 # 2) Join with '/' because that's what dirstate always uses, even on
283 284 # Windows. Change existing separator to '/' first in case we are
284 285 # passed filenames from an external source (like the command line).
285 286 return shortname + '/' + filename.replace(os.sep, '/')
286 287
287 288 def isstandin(filename):
288 289 '''Return true if filename is a big file standin. filename must be
289 290 in Mercurial's internal form (slash-separated).'''
290 291 return filename.startswith(shortname + '/')
291 292
292 293 def splitstandin(filename):
293 294 # Split on / because that's what dirstate always uses, even on Windows.
294 295 # Change local separator to / first just in case we are passed filenames
295 296 # from an external source (like the command line).
296 297 bits = filename.replace(os.sep, '/').split('/', 1)
297 298 if len(bits) == 2 and bits[0] == shortname:
298 299 return bits[1]
299 300 else:
300 301 return None
301 302
302 303 def updatestandin(repo, standin):
303 304 file = repo.wjoin(splitstandin(standin))
304 305 if os.path.exists(file):
305 306 hash = hashfile(file)
306 307 executable = getexecutable(file)
307 308 writestandin(repo, standin, hash, executable)
308 309
309 310 def readstandin(repo, filename, node=None):
310 311 '''read hex hash from standin for filename at given node, or working
311 312 directory if no node is given'''
312 313 return repo[node][standin(filename)].data().strip()
313 314
314 315 def writestandin(repo, standin, hash, executable):
315 316 '''write hash to <repo.root>/<standin>'''
316 317 writehash(hash, repo.wjoin(standin), executable)
317 318
318 319 def copyandhash(instream, outfile):
319 320 '''Read bytes from instream (iterable) and write them to outfile,
320 321 computing the SHA-1 hash of the data along the way. Close outfile
321 322 when done and return the binary hash.'''
322 323 hasher = util.sha1('')
323 324 for data in instream:
324 325 hasher.update(data)
325 326 outfile.write(data)
326 327
327 328 # Blecch: closing a file that somebody else opened is rude and
328 329 # wrong. But it's so darn convenient and practical! After all,
329 330 # outfile was opened just to copy and hash.
330 331 outfile.close()
331 332
332 333 return hasher.digest()
333 334
334 335 def hashrepofile(repo, file):
335 336 return hashfile(repo.wjoin(file))
336 337
337 338 def hashfile(file):
338 339 if not os.path.exists(file):
339 340 return ''
340 341 hasher = util.sha1('')
341 342 fd = open(file, 'rb')
342 343 for data in blockstream(fd):
343 344 hasher.update(data)
344 345 fd.close()
345 346 return hasher.hexdigest()
346 347
347 348 class limitreader(object):
348 349 def __init__(self, f, limit):
349 350 self.f = f
350 351 self.limit = limit
351 352
352 353 def read(self, length):
353 354 if self.limit == 0:
354 355 return ''
355 356 length = length > self.limit and self.limit or length
356 357 self.limit -= length
357 358 return self.f.read(length)
358 359
359 360 def close(self):
360 361 pass
361 362
362 363 def blockstream(infile, blocksize=128 * 1024):
363 364 """Generator that yields blocks of data from infile and closes infile."""
364 365 while True:
365 366 data = infile.read(blocksize)
366 367 if not data:
367 368 break
368 369 yield data
369 370 # same blecch as copyandhash() above
370 371 infile.close()
371 372
372 373 def readhash(filename):
373 374 rfile = open(filename, 'rb')
374 375 hash = rfile.read(40)
375 376 rfile.close()
376 377 if len(hash) < 40:
377 378 raise util.Abort(_('bad hash in \'%s\' (only %d bytes long)')
378 379 % (filename, len(hash)))
379 380 return hash
380 381
381 382 def writehash(hash, filename, executable):
382 383 util.makedirs(os.path.dirname(filename))
383 384 if os.path.exists(filename):
384 385 os.unlink(filename)
385 386 wfile = open(filename, 'wb')
386 387
387 388 try:
388 389 wfile.write(hash)
389 390 wfile.write('\n')
390 391 finally:
391 392 wfile.close()
392 393 if os.path.exists(filename):
393 394 os.chmod(filename, getmode(executable))
394 395
395 396 def getexecutable(filename):
396 397 mode = os.stat(filename).st_mode
397 return (mode & stat.S_IXUSR) and (mode & stat.S_IXGRP) and (mode & \
398 stat.S_IXOTH)
398 return ((mode & stat.S_IXUSR) and
399 (mode & stat.S_IXGRP) and
400 (mode & stat.S_IXOTH))
399 401
400 402 def getmode(executable):
401 403 if executable:
402 404 return 0755
403 405 else:
404 406 return 0644
405 407
406 408 def urljoin(first, second, *arg):
407 409 def join(left, right):
408 410 if not left.endswith('/'):
409 411 left += '/'
410 412 if right.startswith('/'):
411 413 right = right[1:]
412 414 return left + right
413 415
414 416 url = join(first, second)
415 417 for a in arg:
416 418 url = join(url, a)
417 419 return url
418 420
419 421 def hexsha1(data):
420 422 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
421 423 object data"""
422 424 h = hashlib.sha1()
423 425 for chunk in util.filechunkiter(data):
424 426 h.update(chunk)
425 427 return h.hexdigest()
426 428
427 429 def httpsendfile(ui, filename):
428 430 return httpconnection.httpsendfile(ui, filename, 'rb')
429 431
430 432 def unixpath(path):
431 433 '''Return a version of path normalized for use with the lfdirstate.'''
432 434 return os.path.normpath(path).replace(os.sep, '/')
433 435
434 436 def islfilesrepo(repo):
435 437 return ('largefiles' in repo.requirements and
436 438 any_(shortname + '/' in f[0] for f in repo.store.datafiles()))
437 439
438 440 def any_(gen):
439 441 for x in gen:
440 442 if x:
441 443 return True
442 444 return False
443 445
444 446 class storeprotonotcapable(BaseException):
445 447 def __init__(self, storetypes):
446 448 self.storetypes = storetypes
@@ -1,826 +1,830 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10
11 11 import os
12 12 import copy
13 13
14 14 from mercurial import hg, commands, util, cmdutil, match as match_, node, \
15 15 archival, error, merge
16 16 from mercurial.i18n import _
17 17 from mercurial.node import hex
18 18 from hgext import rebase
19 19 import lfutil
20 20
21 21 try:
22 22 from mercurial import scmutil
23 23 except ImportError:
24 24 pass
25 25
26 26 import lfutil
27 27 import lfcommands
28 28
29 29 def installnormalfilesmatchfn(manifest):
30 30 '''overrides scmutil.match so that the matcher it returns will ignore all
31 31 largefiles'''
32 32 oldmatch = None # for the closure
33 33 def override_match(repo, pats=[], opts={}, globbed=False,
34 34 default='relpath'):
35 35 match = oldmatch(repo, pats, opts, globbed, default)
36 36 m = copy.copy(match)
37 37 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
38 38 manifest)
39 39 m._files = filter(notlfile, m._files)
40 40 m._fmap = set(m._files)
41 41 orig_matchfn = m.matchfn
42 42 m.matchfn = lambda f: notlfile(f) and orig_matchfn(f) or None
43 43 return m
44 44 oldmatch = installmatchfn(override_match)
45 45
46 46 def installmatchfn(f):
47 47 oldmatch = scmutil.match
48 48 setattr(f, 'oldmatch', oldmatch)
49 49 scmutil.match = f
50 50 return oldmatch
51 51
52 52 def restorematchfn():
53 53 '''restores scmutil.match to what it was before installnormalfilesmatchfn
54 54 was called. no-op if scmutil.match is its original function.
55 55
56 56 Note that n calls to installnormalfilesmatchfn will require n calls to
57 57 restore matchfn to reverse'''
58 58 scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
59 59
60 60 # -- Wrappers: modify existing commands --------------------------------
61 61
62 62 # Add works by going through the files that the user wanted to add and
63 63 # checking if they should be added as largefiles. Then it makes a new
64 64 # matcher which matches only the normal files and runs the original
65 65 # version of add.
66 66 def override_add(orig, ui, repo, *pats, **opts):
67 67 large = opts.pop('large', None)
68 68 lfsize = lfutil.getminsize(
69 69 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
70 70
71 71 lfmatcher = None
72 72 if os.path.exists(repo.wjoin(lfutil.shortname)):
73 73 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
74 74 if lfpats:
75 75 lfmatcher = match_.match(repo.root, '', list(lfpats))
76 76
77 77 lfnames = []
78 78 m = scmutil.match(repo[None], pats, opts)
79 79 m.bad = lambda x, y: None
80 80 wctx = repo[None]
81 81 for f in repo.walk(m):
82 82 exact = m.exact(f)
83 83 lfile = lfutil.standin(f) in wctx
84 84 nfile = f in wctx
85 85 exists = lfile or nfile
86 86
87 87 # Don't warn the user when they attempt to add a normal tracked file.
88 88 # The normal add code will do that for us.
89 89 if exact and exists:
90 90 if lfile:
91 91 ui.warn(_('%s already a largefile\n') % f)
92 92 continue
93 93
94 94 if exact or not exists:
95 if large or (lfsize and os.path.getsize(repo.wjoin(f)) >= \
96 lfsize * 1024 * 1024) or (lfmatcher and lfmatcher(f)):
95 abovemin = (lfsize and
96 os.path.getsize(repo.wjoin(f)) >= lfsize * 1024 * 1024)
97 if large or abovemin or (lfmatcher and lfmatcher(f)):
97 98 lfnames.append(f)
98 99 if ui.verbose or not exact:
99 100 ui.status(_('adding %s as a largefile\n') % m.rel(f))
100 101
101 102 bad = []
102 103 standins = []
103 104
104 105 # Need to lock, otherwise there could be a race condition between
105 106 # when standins are created and added to the repo.
106 107 wlock = repo.wlock()
107 108 try:
108 109 if not opts.get('dry_run'):
109 110 lfdirstate = lfutil.openlfdirstate(ui, repo)
110 111 for f in lfnames:
111 112 standinname = lfutil.standin(f)
112 113 lfutil.writestandin(repo, standinname, hash='',
113 114 executable=lfutil.getexecutable(repo.wjoin(f)))
114 115 standins.append(standinname)
115 116 if lfdirstate[f] == 'r':
116 117 lfdirstate.normallookup(f)
117 118 else:
118 119 lfdirstate.add(f)
119 120 lfdirstate.write()
120 bad += [lfutil.splitstandin(f) for f in lfutil.repo_add(repo,
121 standins) if f in m.files()]
121 bad += [lfutil.splitstandin(f)
122 for f in lfutil.repo_add(repo, standins)
123 if f in m.files()]
122 124 finally:
123 125 wlock.release()
124 126
125 127 installnormalfilesmatchfn(repo[None].manifest())
126 128 result = orig(ui, repo, *pats, **opts)
127 129 restorematchfn()
128 130
129 131 return (result == 1 or bad) and 1 or 0
130 132
131 133 def override_remove(orig, ui, repo, *pats, **opts):
132 134 manifest = repo[None].manifest()
133 135 installnormalfilesmatchfn(manifest)
134 136 orig(ui, repo, *pats, **opts)
135 137 restorematchfn()
136 138
137 139 after, force = opts.get('after'), opts.get('force')
138 140 if not pats and not after:
139 141 raise util.Abort(_('no files specified'))
140 142 m = scmutil.match(repo[None], pats, opts)
141 143 try:
142 144 repo.lfstatus = True
143 145 s = repo.status(match=m, clean=True)
144 146 finally:
145 147 repo.lfstatus = False
146 modified, added, deleted, clean = [[f for f in list if lfutil.standin(f) \
147 in manifest] for list in [s[0], s[1], s[3], s[6]]]
148 modified, added, deleted, clean = [[f for f in list
149 if lfutil.standin(f) in manifest]
150 for list in [s[0], s[1], s[3], s[6]]]
148 151
149 152 def warn(files, reason):
150 153 for f in files:
151 154 ui.warn(_('not removing %s: file %s (use -f to force removal)\n')
152 155 % (m.rel(f), reason))
153 156
154 157 if force:
155 158 remove, forget = modified + deleted + clean, added
156 159 elif after:
157 160 remove, forget = deleted, []
158 161 warn(modified + added + clean, _('still exists'))
159 162 else:
160 163 remove, forget = deleted + clean, []
161 164 warn(modified, _('is modified'))
162 165 warn(added, _('has been marked for add'))
163 166
164 167 for f in sorted(remove + forget):
165 168 if ui.verbose or not m.exact(f):
166 169 ui.status(_('removing %s\n') % m.rel(f))
167 170
168 171 # Need to lock because standin files are deleted then removed from the
169 172 # repository and we could race inbetween.
170 173 wlock = repo.wlock()
171 174 try:
172 175 lfdirstate = lfutil.openlfdirstate(ui, repo)
173 176 for f in remove:
174 177 if not after:
175 178 os.unlink(repo.wjoin(f))
176 179 currentdir = os.path.split(f)[0]
177 180 while currentdir and not os.listdir(repo.wjoin(currentdir)):
178 181 os.rmdir(repo.wjoin(currentdir))
179 182 currentdir = os.path.split(currentdir)[0]
180 183 lfdirstate.remove(f)
181 184 lfdirstate.write()
182 185
183 186 forget = [lfutil.standin(f) for f in forget]
184 187 remove = [lfutil.standin(f) for f in remove]
185 188 lfutil.repo_forget(repo, forget)
186 189 lfutil.repo_remove(repo, remove, unlink=True)
187 190 finally:
188 191 wlock.release()
189 192
190 193 def override_status(orig, ui, repo, *pats, **opts):
191 194 try:
192 195 repo.lfstatus = True
193 196 return orig(ui, repo, *pats, **opts)
194 197 finally:
195 198 repo.lfstatus = False
196 199
197 200 def override_log(orig, ui, repo, *pats, **opts):
198 201 try:
199 202 repo.lfstatus = True
200 203 orig(ui, repo, *pats, **opts)
201 204 finally:
202 205 repo.lfstatus = False
203 206
204 207 def override_verify(orig, ui, repo, *pats, **opts):
205 208 large = opts.pop('large', False)
206 209 all = opts.pop('lfa', False)
207 210 contents = opts.pop('lfc', False)
208 211
209 212 result = orig(ui, repo, *pats, **opts)
210 213 if large:
211 214 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
212 215 return result
213 216
214 217 # Override needs to refresh standins so that update's normal merge
215 218 # will go through properly. Then the other update hook (overriding repo.update)
216 219 # will get the new files. Filemerge is also overriden so that the merge
217 220 # will merge standins correctly.
218 221 def override_update(orig, ui, repo, *pats, **opts):
219 222 lfdirstate = lfutil.openlfdirstate(ui, repo)
220 223 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
221 224 False, False)
222 225 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
223 226
224 227 # Need to lock between the standins getting updated and their
225 228 # largefiles getting updated
226 229 wlock = repo.wlock()
227 230 try:
228 231 if opts['check']:
229 232 mod = len(modified) > 0
230 233 for lfile in unsure:
231 234 standin = lfutil.standin(lfile)
232 235 if repo['.'][standin].data().strip() != \
233 236 lfutil.hashfile(repo.wjoin(lfile)):
234 237 mod = True
235 238 else:
236 239 lfdirstate.normal(lfile)
237 240 lfdirstate.write()
238 241 if mod:
239 242 raise util.Abort(_('uncommitted local changes'))
240 243 # XXX handle removed differently
241 244 if not opts['clean']:
242 245 for lfile in unsure + modified + added:
243 246 lfutil.updatestandin(repo, lfutil.standin(lfile))
244 247 finally:
245 248 wlock.release()
246 249 return orig(ui, repo, *pats, **opts)
247 250
248 251 # Override filemerge to prompt the user about how they wish to merge
249 252 # largefiles. This will handle identical edits, and copy/rename +
250 253 # edit without prompting the user.
251 254 def override_filemerge(origfn, repo, mynode, orig, fcd, fco, fca):
252 255 # Use better variable names here. Because this is a wrapper we cannot
253 256 # change the variable names in the function declaration.
254 257 fcdest, fcother, fcancestor = fcd, fco, fca
255 258 if not lfutil.isstandin(orig):
256 259 return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
257 260 else:
258 261 if not fcother.cmp(fcdest): # files identical?
259 262 return None
260 263
261 264 # backwards, use working dir parent as ancestor
262 265 if fcancestor == fcother:
263 266 fcancestor = fcdest.parents()[0]
264 267
265 268 if orig != fcother.path():
266 269 repo.ui.status(_('merging %s and %s to %s\n')
267 270 % (lfutil.splitstandin(orig),
268 271 lfutil.splitstandin(fcother.path()),
269 272 lfutil.splitstandin(fcdest.path())))
270 273 else:
271 274 repo.ui.status(_('merging %s\n')
272 275 % lfutil.splitstandin(fcdest.path()))
273 276
274 277 if fcancestor.path() != fcother.path() and fcother.data() == \
275 278 fcancestor.data():
276 279 return 0
277 280 if fcancestor.path() != fcdest.path() and fcdest.data() == \
278 281 fcancestor.data():
279 282 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
280 283 return 0
281 284
282 285 if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
283 286 'keep (l)ocal or take (o)ther?') %
284 287 lfutil.splitstandin(orig),
285 288 (_('&Local'), _('&Other')), 0) == 0:
286 289 return 0
287 290 else:
288 291 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
289 292 return 0
290 293
291 294 # Copy first changes the matchers to match standins instead of
292 295 # largefiles. Then it overrides util.copyfile in that function it
293 296 # checks if the destination largefile already exists. It also keeps a
294 297 # list of copied files so that the largefiles can be copied and the
295 298 # dirstate updated.
296 299 def override_copy(orig, ui, repo, pats, opts, rename=False):
297 300 # doesn't remove largefile on rename
298 301 if len(pats) < 2:
299 302 # this isn't legal, let the original function deal with it
300 303 return orig(ui, repo, pats, opts, rename)
301 304
302 305 def makestandin(relpath):
303 306 path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
304 307 return os.path.join(os.path.relpath('.', repo.getcwd()),
305 308 lfutil.standin(path))
306 309
307 310 fullpats = scmutil.expandpats(pats)
308 311 dest = fullpats[-1]
309 312
310 313 if os.path.isdir(dest):
311 314 if not os.path.isdir(makestandin(dest)):
312 315 os.makedirs(makestandin(dest))
313 316 # This could copy both lfiles and normal files in one command,
314 317 # but we don't want to do that. First replace their matcher to
315 318 # only match normal files and run it, then replace it to just
316 319 # match largefiles and run it again.
317 320 nonormalfiles = False
318 321 nolfiles = False
319 322 try:
320 323 installnormalfilesmatchfn(repo[None].manifest())
321 324 result = orig(ui, repo, pats, opts, rename)
322 325 except util.Abort, e:
323 326 if str(e) != 'no files to copy':
324 327 raise e
325 328 else:
326 329 nonormalfiles = True
327 330 result = 0
328 331 finally:
329 332 restorematchfn()
330 333
331 334 # The first rename can cause our current working directory to be removed.
332 335 # In that case there is nothing left to copy/rename so just quit.
333 336 try:
334 337 repo.getcwd()
335 338 except OSError:
336 339 return result
337 340
338 341 try:
339 342 # When we call orig below it creates the standins but we don't add them
340 343 # to the dir state until later so lock during that time.
341 344 wlock = repo.wlock()
342 345
343 346 manifest = repo[None].manifest()
344 347 oldmatch = None # for the closure
345 348 def override_match(repo, pats=[], opts={}, globbed=False,
346 349 default='relpath'):
347 350 newpats = []
348 351 # The patterns were previously mangled to add the standin
349 352 # directory; we need to remove that now
350 353 for pat in pats:
351 354 if match_.patkind(pat) is None and lfutil.shortname in pat:
352 355 newpats.append(pat.replace(lfutil.shortname, ''))
353 356 else:
354 357 newpats.append(pat)
355 358 match = oldmatch(repo, newpats, opts, globbed, default)
356 359 m = copy.copy(match)
357 360 lfile = lambda f: lfutil.standin(f) in manifest
358 361 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
359 362 m._fmap = set(m._files)
360 363 orig_matchfn = m.matchfn
361 m.matchfn = lambda f: lfutil.isstandin(f) and \
362 lfile(lfutil.splitstandin(f)) and \
363 orig_matchfn(lfutil.splitstandin(f)) or None
364 m.matchfn = lambda f: (lfutil.isstandin(f) and
365 lfile(lfutil.splitstandin(f)) and
366 orig_matchfn(lfutil.splitstandin(f)) or
367 None)
364 368 return m
365 369 oldmatch = installmatchfn(override_match)
366 370 listpats = []
367 371 for pat in pats:
368 372 if match_.patkind(pat) is not None:
369 373 listpats.append(pat)
370 374 else:
371 375 listpats.append(makestandin(pat))
372 376
373 377 try:
374 378 origcopyfile = util.copyfile
375 379 copiedfiles = []
376 380 def override_copyfile(src, dest):
377 381 if lfutil.shortname in src and lfutil.shortname in dest:
378 382 destlfile = dest.replace(lfutil.shortname, '')
379 383 if not opts['force'] and os.path.exists(destlfile):
380 384 raise IOError('',
381 385 _('destination largefile already exists'))
382 386 copiedfiles.append((src, dest))
383 387 origcopyfile(src, dest)
384 388
385 389 util.copyfile = override_copyfile
386 390 result += orig(ui, repo, listpats, opts, rename)
387 391 finally:
388 392 util.copyfile = origcopyfile
389 393
390 394 lfdirstate = lfutil.openlfdirstate(ui, repo)
391 395 for (src, dest) in copiedfiles:
392 396 if lfutil.shortname in src and lfutil.shortname in dest:
393 397 srclfile = src.replace(lfutil.shortname, '')
394 398 destlfile = dest.replace(lfutil.shortname, '')
395 399 destlfiledir = os.path.dirname(destlfile) or '.'
396 400 if not os.path.isdir(destlfiledir):
397 401 os.makedirs(destlfiledir)
398 402 if rename:
399 403 os.rename(srclfile, destlfile)
400 404 lfdirstate.remove(os.path.relpath(srclfile,
401 405 repo.root))
402 406 else:
403 407 util.copyfile(srclfile, destlfile)
404 408 lfdirstate.add(os.path.relpath(destlfile,
405 409 repo.root))
406 410 lfdirstate.write()
407 411 except util.Abort, e:
408 412 if str(e) != 'no files to copy':
409 413 raise e
410 414 else:
411 415 nolfiles = True
412 416 finally:
413 417 restorematchfn()
414 418 wlock.release()
415 419
416 420 if nolfiles and nonormalfiles:
417 421 raise util.Abort(_('no files to copy'))
418 422
419 423 return result
420 424
421 425 # When the user calls revert, we have to be careful to not revert any
422 426 # changes to other largefiles accidentally. This means we have to keep
423 427 # track of the largefiles that are being reverted so we only pull down
424 428 # the necessary largefiles.
425 429 #
426 430 # Standins are only updated (to match the hash of largefiles) before
427 431 # commits. Update the standins then run the original revert, changing
428 432 # the matcher to hit standins instead of largefiles. Based on the
429 433 # resulting standins update the largefiles. Then return the standins
430 434 # to their proper state
431 435 def override_revert(orig, ui, repo, *pats, **opts):
432 436 # Because we put the standins in a bad state (by updating them)
433 437 # and then return them to a correct state we need to lock to
434 438 # prevent others from changing them in their incorrect state.
435 439 wlock = repo.wlock()
436 440 try:
437 441 lfdirstate = lfutil.openlfdirstate(ui, repo)
438 442 (modified, added, removed, missing, unknown, ignored, clean) = \
439 443 lfutil.lfdirstate_status(lfdirstate, repo, repo['.'].rev())
440 444 for lfile in modified:
441 445 lfutil.updatestandin(repo, lfutil.standin(lfile))
442 446
443 447 try:
444 448 ctx = repo[opts.get('rev')]
445 449 oldmatch = None # for the closure
446 450 def override_match(ctxorrepo, pats=[], opts={}, globbed=False,
447 451 default='relpath'):
448 452 if util.safehasattr(ctxorrepo, 'match'):
449 453 ctx0 = ctxorrepo
450 454 else:
451 455 ctx0 = ctxorrepo[None]
452 456 match = oldmatch(ctxorrepo, pats, opts, globbed, default)
453 457 m = copy.copy(match)
454 458 def tostandin(f):
455 459 if lfutil.standin(f) in ctx0 or lfutil.standin(f) in ctx:
456 460 return lfutil.standin(f)
457 461 elif lfutil.standin(f) in repo[None]:
458 462 return None
459 463 return f
460 464 m._files = [tostandin(f) for f in m._files]
461 465 m._files = [f for f in m._files if f is not None]
462 466 m._fmap = set(m._files)
463 467 orig_matchfn = m.matchfn
464 468 def matchfn(f):
465 469 if lfutil.isstandin(f):
466 470 # We need to keep track of what largefiles are being
467 471 # matched so we know which ones to update later --
468 472 # otherwise we accidentally revert changes to other
469 473 # largefiles. This is repo-specific, so duckpunch the
470 474 # repo object to keep the list of largefiles for us
471 475 # later.
472 476 if orig_matchfn(lfutil.splitstandin(f)) and \
473 477 (f in repo[None] or f in ctx):
474 478 lfileslist = getattr(repo, '_lfilestoupdate', [])
475 479 lfileslist.append(lfutil.splitstandin(f))
476 480 repo._lfilestoupdate = lfileslist
477 481 return True
478 482 else:
479 483 return False
480 484 return orig_matchfn(f)
481 485 m.matchfn = matchfn
482 486 return m
483 487 oldmatch = installmatchfn(override_match)
484 488 scmutil.match
485 489 matches = override_match(repo[None], pats, opts)
486 490 orig(ui, repo, *pats, **opts)
487 491 finally:
488 492 restorematchfn()
489 493 lfileslist = getattr(repo, '_lfilestoupdate', [])
490 494 lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
491 495 printmessage=False)
492 496
493 497 # empty out the largefiles list so we start fresh next time
494 498 repo._lfilestoupdate = []
495 499 for lfile in modified:
496 500 if lfile in lfileslist:
497 501 if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
498 502 in repo['.']:
499 503 lfutil.writestandin(repo, lfutil.standin(lfile),
500 504 repo['.'][lfile].data().strip(),
501 505 'x' in repo['.'][lfile].flags())
502 506 lfdirstate = lfutil.openlfdirstate(ui, repo)
503 507 for lfile in added:
504 508 standin = lfutil.standin(lfile)
505 509 if standin not in ctx and (standin in matches or opts.get('all')):
506 510 if lfile in lfdirstate:
507 511 lfdirstate.drop(lfile)
508 512 util.unlinkpath(repo.wjoin(standin))
509 513 lfdirstate.write()
510 514 finally:
511 515 wlock.release()
512 516
513 517 def hg_update(orig, repo, node):
514 518 result = orig(repo, node)
515 519 # XXX check if it worked first
516 520 lfcommands.updatelfiles(repo.ui, repo)
517 521 return result
518 522
519 523 def hg_clean(orig, repo, node, show_stats=True):
520 524 result = orig(repo, node, show_stats)
521 525 lfcommands.updatelfiles(repo.ui, repo)
522 526 return result
523 527
524 528 def hg_merge(orig, repo, node, force=None, remind=True):
525 529 result = orig(repo, node, force, remind)
526 530 lfcommands.updatelfiles(repo.ui, repo)
527 531 return result
528 532
529 533 # When we rebase a repository with remotely changed largefiles, we need to
530 534 # take some extra care so that the largefiles are correctly updated in the
531 535 # working copy
532 536 def override_pull(orig, ui, repo, source=None, **opts):
533 537 if opts.get('rebase', False):
534 538 repo._isrebasing = True
535 539 try:
536 540 if opts.get('update'):
537 541 del opts['update']
538 542 ui.debug('--update and --rebase are not compatible, ignoring '
539 543 'the update flag\n')
540 544 del opts['rebase']
541 545 cmdutil.bailifchanged(repo)
542 546 revsprepull = len(repo)
543 547 origpostincoming = commands.postincoming
544 548 def _dummy(*args, **kwargs):
545 549 pass
546 550 commands.postincoming = _dummy
547 551 repo.lfpullsource = source
548 552 if not source:
549 553 source = 'default'
550 554 try:
551 555 result = commands.pull(ui, repo, source, **opts)
552 556 finally:
553 557 commands.postincoming = origpostincoming
554 558 revspostpull = len(repo)
555 559 if revspostpull > revsprepull:
556 560 result = result or rebase.rebase(ui, repo)
557 561 finally:
558 562 repo._isrebasing = False
559 563 else:
560 564 repo.lfpullsource = source
561 565 if not source:
562 566 source = 'default'
563 567 result = orig(ui, repo, source, **opts)
564 568 return result
565 569
566 570 def override_rebase(orig, ui, repo, **opts):
567 571 repo._isrebasing = True
568 572 try:
569 573 orig(ui, repo, **opts)
570 574 finally:
571 575 repo._isrebasing = False
572 576
573 577 def override_archive(orig, repo, dest, node, kind, decode=True, matchfn=None,
574 578 prefix=None, mtime=None, subrepos=None):
575 579 # No need to lock because we are only reading history and
576 580 # largefile caches, neither of which are modified.
577 581 lfcommands.cachelfiles(repo.ui, repo, node)
578 582
579 583 if kind not in archival.archivers:
580 584 raise util.Abort(_("unknown archive type '%s'") % kind)
581 585
582 586 ctx = repo[node]
583 587
584 588 if kind == 'files':
585 589 if prefix:
586 590 raise util.Abort(
587 591 _('cannot give prefix when archiving to files'))
588 592 else:
589 593 prefix = archival.tidyprefix(dest, kind, prefix)
590 594
591 595 def write(name, mode, islink, getdata):
592 596 if matchfn and not matchfn(name):
593 597 return
594 598 data = getdata()
595 599 if decode:
596 600 data = repo.wwritedata(name, data)
597 601 archiver.addfile(prefix + name, mode, islink, data)
598 602
599 603 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
600 604
601 605 if repo.ui.configbool("ui", "archivemeta", True):
602 606 def metadata():
603 607 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
604 608 hex(repo.changelog.node(0)), hex(node), ctx.branch())
605 609
606 610 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
607 611 if repo.tagtype(t) == 'global')
608 612 if not tags:
609 613 repo.ui.pushbuffer()
610 614 opts = {'template': '{latesttag}\n{latesttagdistance}',
611 615 'style': '', 'patch': None, 'git': None}
612 616 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
613 617 ltags, dist = repo.ui.popbuffer().split('\n')
614 618 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
615 619 tags += 'latesttagdistance: %s\n' % dist
616 620
617 621 return base + tags
618 622
619 623 write('.hg_archival.txt', 0644, False, metadata)
620 624
621 625 for f in ctx:
622 626 ff = ctx.flags(f)
623 627 getdata = ctx[f].data
624 628 if lfutil.isstandin(f):
625 629 path = lfutil.findfile(repo, getdata().strip())
626 630 f = lfutil.splitstandin(f)
627 631
628 632 def getdatafn():
629 633 try:
630 634 fd = open(path, 'rb')
631 635 return fd.read()
632 636 finally:
633 637 fd.close()
634 638
635 639 getdata = getdatafn
636 640 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
637 641
638 642 if subrepos:
639 643 for subpath in ctx.substate:
640 644 sub = ctx.sub(subpath)
641 645 try:
642 646 sub.archive(repo.ui, archiver, prefix)
643 647 except TypeError:
644 648 sub.archive(archiver, prefix)
645 649
646 650 archiver.done()
647 651
648 652 # If a largefile is modified, the change is not reflected in its
649 653 # standin until a commit. cmdutil.bailifchanged() raises an exception
650 654 # if the repo has uncommitted changes. Wrap it to also check if
651 655 # largefiles were changed. This is used by bisect and backout.
652 656 def override_bailifchanged(orig, repo):
653 657 orig(repo)
654 658 repo.lfstatus = True
655 659 modified, added, removed, deleted = repo.status()[:4]
656 660 repo.lfstatus = False
657 661 if modified or added or removed or deleted:
658 662 raise util.Abort(_('outstanding uncommitted changes'))
659 663
660 664 # Fetch doesn't use cmdutil.bail_if_changed so override it to add the check
661 665 def override_fetch(orig, ui, repo, *pats, **opts):
662 666 repo.lfstatus = True
663 667 modified, added, removed, deleted = repo.status()[:4]
664 668 repo.lfstatus = False
665 669 if modified or added or removed or deleted:
666 670 raise util.Abort(_('outstanding uncommitted changes'))
667 671 return orig(ui, repo, *pats, **opts)
668 672
669 673 def override_forget(orig, ui, repo, *pats, **opts):
670 674 installnormalfilesmatchfn(repo[None].manifest())
671 675 orig(ui, repo, *pats, **opts)
672 676 restorematchfn()
673 677 m = scmutil.match(repo[None], pats, opts)
674 678
675 679 try:
676 680 repo.lfstatus = True
677 681 s = repo.status(match=m, clean=True)
678 682 finally:
679 683 repo.lfstatus = False
680 684 forget = sorted(s[0] + s[1] + s[3] + s[6])
681 685 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
682 686
683 687 for f in forget:
684 688 if lfutil.standin(f) not in repo.dirstate and not \
685 689 os.path.isdir(m.rel(lfutil.standin(f))):
686 690 ui.warn(_('not removing %s: file is already untracked\n')
687 691 % m.rel(f))
688 692
689 693 for f in forget:
690 694 if ui.verbose or not m.exact(f):
691 695 ui.status(_('removing %s\n') % m.rel(f))
692 696
693 697 # Need to lock because standin files are deleted then removed from the
694 698 # repository and we could race inbetween.
695 699 wlock = repo.wlock()
696 700 try:
697 701 lfdirstate = lfutil.openlfdirstate(ui, repo)
698 702 for f in forget:
699 703 if lfdirstate[f] == 'a':
700 704 lfdirstate.drop(f)
701 705 else:
702 706 lfdirstate.remove(f)
703 707 lfdirstate.write()
704 708 lfutil.repo_remove(repo, [lfutil.standin(f) for f in forget],
705 709 unlink=True)
706 710 finally:
707 711 wlock.release()
708 712
709 713 def getoutgoinglfiles(ui, repo, dest=None, **opts):
710 714 dest = ui.expandpath(dest or 'default-push', dest or 'default')
711 715 dest, branches = hg.parseurl(dest, opts.get('branch'))
712 716 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
713 717 if revs:
714 718 revs = [repo.lookup(rev) for rev in revs]
715 719
716 720 remoteui = hg.remoteui
717 721
718 722 try:
719 723 remote = hg.repository(remoteui(repo, opts), dest)
720 724 except error.RepoError:
721 725 return None
722 726 o = lfutil.findoutgoing(repo, remote, False)
723 727 if not o:
724 728 return None
725 729 o = repo.changelog.nodesbetween(o, revs)[0]
726 730 if opts.get('newest_first'):
727 731 o.reverse()
728 732
729 733 toupload = set()
730 734 for n in o:
731 735 parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
732 736 ctx = repo[n]
733 737 files = set(ctx.files())
734 738 if len(parents) == 2:
735 739 mc = ctx.manifest()
736 740 mp1 = ctx.parents()[0].manifest()
737 741 mp2 = ctx.parents()[1].manifest()
738 742 for f in mp1:
739 743 if f not in mc:
740 744 files.add(f)
741 745 for f in mp2:
742 746 if f not in mc:
743 747 files.add(f)
744 748 for f in mc:
745 749 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
746 750 files.add(f)
747 toupload = toupload.union(set([f for f in files if lfutil.isstandin(f)\
748 and f in ctx]))
751 toupload = toupload.union(
752 set([f for f in files if lfutil.isstandin(f) and f in ctx]))
749 753 return toupload
750 754
751 755 def override_outgoing(orig, ui, repo, dest=None, **opts):
752 756 orig(ui, repo, dest, **opts)
753 757
754 758 if opts.pop('large', None):
755 759 toupload = getoutgoinglfiles(ui, repo, dest, **opts)
756 760 if toupload is None:
757 761 ui.status(_('largefiles: No remote repo\n'))
758 762 else:
759 763 ui.status(_('largefiles to upload:\n'))
760 764 for file in toupload:
761 765 ui.status(lfutil.splitstandin(file) + '\n')
762 766 ui.status('\n')
763 767
764 768 def override_summary(orig, ui, repo, *pats, **opts):
765 769 orig(ui, repo, *pats, **opts)
766 770
767 771 if opts.pop('large', None):
768 772 toupload = getoutgoinglfiles(ui, repo, None, **opts)
769 773 if toupload is None:
770 774 ui.status(_('largefiles: No remote repo\n'))
771 775 else:
772 776 ui.status(_('largefiles: %d to upload\n') % len(toupload))
773 777
774 778 def override_addremove(orig, ui, repo, *pats, **opts):
775 779 # Check if the parent or child has largefiles; if so, disallow
776 780 # addremove. If there is a symlink in the manifest then getting
777 781 # the manifest throws an exception: catch it and let addremove
778 782 # deal with it.
779 783 try:
780 784 manifesttip = set(repo['tip'].manifest())
781 785 except util.Abort:
782 786 manifesttip = set()
783 787 try:
784 788 manifestworking = set(repo[None].manifest())
785 789 except util.Abort:
786 790 manifestworking = set()
787 791
788 792 # Manifests are only iterable so turn them into sets then union
789 793 for file in manifesttip.union(manifestworking):
790 794 if file.startswith(lfutil.shortname):
791 795 raise util.Abort(
792 796 _('addremove cannot be run on a repo with largefiles'))
793 797
794 798 return orig(ui, repo, *pats, **opts)
795 799
796 800 # Calling purge with --all will cause the largefiles to be deleted.
797 801 # Override repo.status to prevent this from happening.
798 802 def override_purge(orig, ui, repo, *dirs, **opts):
799 803 oldstatus = repo.status
800 804 def override_status(node1='.', node2=None, match=None, ignored=False,
801 805 clean=False, unknown=False, listsubrepos=False):
802 806 r = oldstatus(node1, node2, match, ignored, clean, unknown,
803 807 listsubrepos)
804 808 lfdirstate = lfutil.openlfdirstate(ui, repo)
805 809 modified, added, removed, deleted, unknown, ignored, clean = r
806 810 unknown = [f for f in unknown if lfdirstate[f] == '?']
807 811 ignored = [f for f in ignored if lfdirstate[f] == '?']
808 812 return modified, added, removed, deleted, unknown, ignored, clean
809 813 repo.status = override_status
810 814 orig(ui, repo, *dirs, **opts)
811 815 repo.status = oldstatus
812 816
813 817 def override_rollback(orig, ui, repo, **opts):
814 818 result = orig(ui, repo, **opts)
815 819 merge.update(repo, node=None, branchmerge=False, force=True,
816 820 partial=lfutil.isstandin)
817 821 lfdirstate = lfutil.openlfdirstate(ui, repo)
818 822 lfiles = lfutil.listlfiles(repo)
819 823 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
820 824 for file in lfiles:
821 825 if file in oldlfiles:
822 826 lfdirstate.normallookup(file)
823 827 else:
824 828 lfdirstate.add(file)
825 829 lfdirstate.write()
826 830 return result
@@ -1,160 +1,160 b''
1 1 # Copyright 2011 Fog Creek Software
2 2 #
3 3 # This software may be used and distributed according to the terms of the
4 4 # GNU General Public License version 2 or any later version.
5 5
6 6 import os
7 7 import tempfile
8 8 import urllib2
9 9
10 10 from mercurial import error, httprepo, util, wireproto
11 11 from mercurial.i18n import _
12 12
13 13 import lfutil
14 14
15 LARGEFILES_REQUIRED_MSG = '\nThis repository uses the largefiles extension.' \
16 '\n\nPlease enable it in your Mercurial config ' \
17 'file.\n'
15 LARGEFILES_REQUIRED_MSG = ('\nThis repository uses the largefiles extension.'
16 '\n\nPlease enable it in your Mercurial config '
17 'file.\n')
18 18
19 19 def putlfile(repo, proto, sha):
20 20 '''Put a largefile into a repository's local cache and into the
21 21 system cache.'''
22 22 f = None
23 23 proto.redirect()
24 24 try:
25 25 try:
26 26 f = tempfile.NamedTemporaryFile(mode='wb+', prefix='hg-putlfile-')
27 27 proto.getfile(f)
28 28 f.seek(0)
29 29 if sha != lfutil.hexsha1(f):
30 30 return wireproto.pushres(1)
31 31 lfutil.copytocacheabsolute(repo, f.name, sha)
32 32 except IOError:
33 33 repo.ui.warn(
34 34 _('error: could not put received data into largefile store'))
35 35 return wireproto.pushres(1)
36 36 finally:
37 37 if f:
38 38 f.close()
39 39
40 40 return wireproto.pushres(0)
41 41
42 42 def getlfile(repo, proto, sha):
43 43 '''Retrieve a largefile from the repository-local cache or system
44 44 cache.'''
45 45 filename = lfutil.findfile(repo, sha)
46 46 if not filename:
47 47 raise util.Abort(_('requested largefile %s not present in cache') % sha)
48 48 f = open(filename, 'rb')
49 49 length = os.fstat(f.fileno())[6]
50 50
51 51 # Since we can't set an HTTP content-length header here, and
52 52 # Mercurial core provides no way to give the length of a streamres
53 53 # (and reading the entire file into RAM would be ill-advised), we
54 54 # just send the length on the first line of the response, like the
55 55 # ssh proto does for string responses.
56 56 def generator():
57 57 yield '%d\n' % length
58 58 for chunk in f:
59 59 yield chunk
60 60 return wireproto.streamres(generator())
61 61
62 62 def statlfile(repo, proto, sha):
63 63 '''Return '2\n' if the largefile is missing, '1\n' if it has a
64 64 mismatched checksum, or '0\n' if it is in good condition'''
65 65 filename = lfutil.findfile(repo, sha)
66 66 if not filename:
67 67 return '2\n'
68 68 fd = None
69 69 try:
70 70 fd = open(filename, 'rb')
71 71 return lfutil.hexsha1(fd) == sha and '0\n' or '1\n'
72 72 finally:
73 73 if fd:
74 74 fd.close()
75 75
76 76 def wirereposetup(ui, repo):
77 77 class lfileswirerepository(repo.__class__):
78 78 def putlfile(self, sha, fd):
79 79 # unfortunately, httprepository._callpush tries to convert its
80 80 # input file-like into a bundle before sending it, so we can't use
81 81 # it ...
82 82 if issubclass(self.__class__, httprepo.httprepository):
83 83 try:
84 84 return int(self._call('putlfile', data=fd, sha=sha,
85 85 headers={'content-type':'application/mercurial-0.1'}))
86 86 except (ValueError, urllib2.HTTPError):
87 87 return 1
88 88 # ... but we can't use sshrepository._call because the data=
89 89 # argument won't get sent, and _callpush does exactly what we want
90 90 # in this case: send the data straight through
91 91 else:
92 92 try:
93 93 ret, output = self._callpush("putlfile", fd, sha=sha)
94 94 if ret == "":
95 95 raise error.ResponseError(_('putlfile failed:'),
96 96 output)
97 97 return int(ret)
98 98 except IOError:
99 99 return 1
100 100 except ValueError:
101 101 raise error.ResponseError(
102 102 _('putlfile failed (unexpected response):'), ret)
103 103
104 104 def getlfile(self, sha):
105 105 stream = self._callstream("getlfile", sha=sha)
106 106 length = stream.readline()
107 107 try:
108 108 length = int(length)
109 109 except ValueError:
110 110 self._abort(error.ResponseError(_("unexpected response:"),
111 111 length))
112 112 return (length, stream)
113 113
114 114 def statlfile(self, sha):
115 115 try:
116 116 return int(self._call("statlfile", sha=sha))
117 117 except (ValueError, urllib2.HTTPError):
118 118 # If the server returns anything but an integer followed by a
119 119 # newline, newline, it's not speaking our language; if we get
120 120 # an HTTP error, we can't be sure the largefile is present;
121 121 # either way, consider it missing.
122 122 return 2
123 123
124 124 repo.__class__ = lfileswirerepository
125 125
126 126 # advertise the largefiles=serve capability
127 127 def capabilities(repo, proto):
128 128 return capabilities_orig(repo, proto) + ' largefiles=serve'
129 129
130 130 # duplicate what Mercurial's new out-of-band errors mechanism does, because
131 131 # clients old and new alike both handle it well
132 132 def webproto_refuseclient(self, message):
133 133 self.req.header([('Content-Type', 'application/hg-error')])
134 134 return message
135 135
136 136 def sshproto_refuseclient(self, message):
137 137 self.ui.write_err('%s\n-\n' % message)
138 138 self.fout.write('\n')
139 139 self.fout.flush()
140 140
141 141 return ''
142 142
143 143 def heads(repo, proto):
144 144 if lfutil.islfilesrepo(repo):
145 145 return wireproto.ooberror(LARGEFILES_REQUIRED_MSG)
146 146 return wireproto.heads(repo, proto)
147 147
148 148 def sshrepo_callstream(self, cmd, **args):
149 149 if cmd == 'heads' and self.capable('largefiles'):
150 150 cmd = 'lheads'
151 151 if cmd == 'batch' and self.capable('largefiles'):
152 152 args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
153 153 return ssh_oldcallstream(self, cmd, **args)
154 154
155 155 def httprepo_callstream(self, cmd, **args):
156 156 if cmd == 'heads' and self.capable('largefiles'):
157 157 cmd = 'lheads'
158 158 if cmd == 'batch' and self.capable('largefiles'):
159 159 args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
160 160 return http_oldcallstream(self, cmd, **args)
@@ -1,412 +1,416 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''setup for largefiles repositories: reposetup'''
10 10 import copy
11 11 import types
12 12 import os
13 13 import re
14 14
15 15 from mercurial import context, error, manifest, match as match_, \
16 16 node, util
17 17 from mercurial.i18n import _
18 18
19 19 import lfcommands
20 20 import proto
21 21 import lfutil
22 22
23 23 def reposetup(ui, repo):
24 24 # wire repositories should be given new wireproto functions but not the
25 25 # other largefiles modifications
26 26 if not repo.local():
27 27 return proto.wirereposetup(ui, repo)
28 28
29 29 for name in ('status', 'commitctx', 'commit', 'push'):
30 30 method = getattr(repo, name)
31 31 #if not (isinstance(method, types.MethodType) and
32 32 # method.im_func is repo.__class__.commitctx.im_func):
33 if isinstance(method, types.FunctionType) and method.func_name == \
34 'wrap':
33 if (isinstance(method, types.FunctionType) and
34 method.func_name == 'wrap'):
35 35 ui.warn(_('largefiles: repo method %r appears to have already been'
36 36 ' wrapped by another extension: '
37 37 'largefiles may behave incorrectly\n')
38 38 % name)
39 39
40 40 class lfiles_repo(repo.__class__):
41 41 lfstatus = False
42 42 def status_nolfiles(self, *args, **kwargs):
43 43 return super(lfiles_repo, self).status(*args, **kwargs)
44 44
45 45 # When lfstatus is set, return a context that gives the names
46 46 # of largefiles instead of their corresponding standins and
47 47 # identifies the largefiles as always binary, regardless of
48 48 # their actual contents.
49 49 def __getitem__(self, changeid):
50 50 ctx = super(lfiles_repo, self).__getitem__(changeid)
51 51 if self.lfstatus:
52 52 class lfiles_manifestdict(manifest.manifestdict):
53 53 def __contains__(self, filename):
54 54 if super(lfiles_manifestdict,
55 55 self).__contains__(filename):
56 56 return True
57 57 return super(lfiles_manifestdict,
58 58 self).__contains__(lfutil.shortname+'/' + filename)
59 59 class lfiles_ctx(ctx.__class__):
60 60 def files(self):
61 61 filenames = super(lfiles_ctx, self).files()
62 62 return [re.sub('^\\'+lfutil.shortname+'/', '',
63 63 filename) for filename in filenames]
64 64 def manifest(self):
65 65 man1 = super(lfiles_ctx, self).manifest()
66 66 man1.__class__ = lfiles_manifestdict
67 67 return man1
68 68 def filectx(self, path, fileid=None, filelog=None):
69 69 try:
70 70 result = super(lfiles_ctx, self).filectx(path,
71 71 fileid, filelog)
72 72 except error.LookupError:
73 73 # Adding a null character will cause Mercurial to
74 74 # identify this as a binary file.
75 75 result = super(lfiles_ctx, self).filectx(
76 76 lfutil.shortname + '/' + path, fileid,
77 77 filelog)
78 78 olddata = result.data
79 79 result.data = lambda: olddata() + '\0'
80 80 return result
81 81 ctx.__class__ = lfiles_ctx
82 82 return ctx
83 83
84 84 # Figure out the status of big files and insert them into the
85 85 # appropriate list in the result. Also removes standin files
86 86 # from the listing. Revert to the original status if
87 87 # self.lfstatus is False.
88 88 def status(self, node1='.', node2=None, match=None, ignored=False,
89 89 clean=False, unknown=False, listsubrepos=False):
90 90 listignored, listclean, listunknown = ignored, clean, unknown
91 91 if not self.lfstatus:
92 92 try:
93 93 return super(lfiles_repo, self).status(node1, node2, match,
94 94 listignored, listclean, listunknown, listsubrepos)
95 95 except TypeError:
96 96 return super(lfiles_repo, self).status(node1, node2, match,
97 97 listignored, listclean, listunknown)
98 98 else:
99 99 # some calls in this function rely on the old version of status
100 100 self.lfstatus = False
101 101 if isinstance(node1, context.changectx):
102 102 ctx1 = node1
103 103 else:
104 104 ctx1 = repo[node1]
105 105 if isinstance(node2, context.changectx):
106 106 ctx2 = node2
107 107 else:
108 108 ctx2 = repo[node2]
109 109 working = ctx2.rev() is None
110 110 parentworking = working and ctx1 == self['.']
111 111
112 112 def inctx(file, ctx):
113 113 try:
114 114 if ctx.rev() is None:
115 115 return file in ctx.manifest()
116 116 ctx[file]
117 117 return True
118 118 except KeyError:
119 119 return False
120 120
121 121 if match is None:
122 122 match = match_.always(self.root, self.getcwd())
123 123
124 124 # Create a copy of match that matches standins instead
125 125 # of largefiles.
126 126 def tostandin(file):
127 127 if inctx(lfutil.standin(file), ctx2):
128 128 return lfutil.standin(file)
129 129 return file
130 130
131 131 m = copy.copy(match)
132 132 m._files = [tostandin(f) for f in m._files]
133 133
134 134 # get ignored, clean, and unknown but remove them
135 135 # later if they were not asked for
136 136 try:
137 137 result = super(lfiles_repo, self).status(node1, node2, m,
138 138 True, True, True, listsubrepos)
139 139 except TypeError:
140 140 result = super(lfiles_repo, self).status(node1, node2, m,
141 141 True, True, True)
142 142 if working:
143 143 # hold the wlock while we read largefiles and
144 144 # update the lfdirstate
145 145 wlock = repo.wlock()
146 146 try:
147 147 # Any non-largefiles that were explicitly listed must be
148 148 # taken out or lfdirstate.status will report an error.
149 149 # The status of these files was already computed using
150 150 # super's status.
151 151 lfdirstate = lfutil.openlfdirstate(ui, self)
152 152 match._files = [f for f in match._files if f in
153 153 lfdirstate]
154 154 s = lfdirstate.status(match, [], listignored,
155 155 listclean, listunknown)
156 156 (unsure, modified, added, removed, missing, unknown,
157 157 ignored, clean) = s
158 158 if parentworking:
159 159 for lfile in unsure:
160 160 if ctx1[lfutil.standin(lfile)].data().strip() \
161 161 != lfutil.hashfile(self.wjoin(lfile)):
162 162 modified.append(lfile)
163 163 else:
164 164 clean.append(lfile)
165 165 lfdirstate.normal(lfile)
166 166 lfdirstate.write()
167 167 else:
168 168 tocheck = unsure + modified + added + clean
169 169 modified, added, clean = [], [], []
170 170
171 171 for lfile in tocheck:
172 172 standin = lfutil.standin(lfile)
173 173 if inctx(standin, ctx1):
174 174 if ctx1[standin].data().strip() != \
175 175 lfutil.hashfile(self.wjoin(lfile)):
176 176 modified.append(lfile)
177 177 else:
178 178 clean.append(lfile)
179 179 else:
180 180 added.append(lfile)
181 181 finally:
182 182 wlock.release()
183 183
184 184 for standin in ctx1.manifest():
185 185 if not lfutil.isstandin(standin):
186 186 continue
187 187 lfile = lfutil.splitstandin(standin)
188 188 if not match(lfile):
189 189 continue
190 190 if lfile not in lfdirstate:
191 191 removed.append(lfile)
192 192 # Handle unknown and ignored differently
193 193 lfiles = (modified, added, removed, missing, [], [], clean)
194 194 result = list(result)
195 195 # Unknown files
196 result[4] = [f for f in unknown if repo.dirstate[f] == '?'\
197 and not lfutil.isstandin(f)]
196 result[4] = [f for f in unknown
197 if (repo.dirstate[f] == '?' and
198 not lfutil.isstandin(f))]
198 199 # Ignored files must be ignored by both the dirstate and
199 200 # lfdirstate
200 201 result[5] = set(ignored).intersection(set(result[5]))
201 202 # combine normal files and largefiles
202 normals = [[fn for fn in filelist if not \
203 lfutil.isstandin(fn)] for filelist in result]
204 result = [sorted(list1 + list2) for (list1, list2) in \
205 zip(normals, lfiles)]
203 normals = [[fn for fn in filelist
204 if not lfutil.isstandin(fn)]
205 for filelist in result]
206 result = [sorted(list1 + list2)
207 for (list1, list2) in zip(normals, lfiles)]
206 208 else:
207 209 def toname(f):
208 210 if lfutil.isstandin(f):
209 211 return lfutil.splitstandin(f)
210 212 return f
211 213 result = [[toname(f) for f in items] for items in result]
212 214
213 215 if not listunknown:
214 216 result[4] = []
215 217 if not listignored:
216 218 result[5] = []
217 219 if not listclean:
218 220 result[6] = []
219 221 self.lfstatus = True
220 222 return result
221 223
222 224 # As part of committing, copy all of the largefiles into the
223 225 # cache.
224 226 def commitctx(self, *args, **kwargs):
225 227 node = super(lfiles_repo, self).commitctx(*args, **kwargs)
226 228 ctx = self[node]
227 229 for filename in ctx.files():
228 230 if lfutil.isstandin(filename) and filename in ctx.manifest():
229 231 realfile = lfutil.splitstandin(filename)
230 232 lfutil.copytocache(self, ctx.node(), realfile)
231 233
232 234 return node
233 235
234 236 # Before commit, largefile standins have not had their
235 237 # contents updated to reflect the hash of their largefile.
236 238 # Do that here.
237 239 def commit(self, text="", user=None, date=None, match=None,
238 240 force=False, editor=False, extra={}):
239 241 orig = super(lfiles_repo, self).commit
240 242
241 243 wlock = repo.wlock()
242 244 try:
243 245 if getattr(repo, "_isrebasing", False):
244 246 # We have to take the time to pull down the new
245 247 # largefiles now. Otherwise if we are rebasing,
246 248 # any largefiles that were modified in the
247 249 # destination changesets get overwritten, either
248 250 # by the rebase or in the first commit after the
249 251 # rebase.
250 252 lfcommands.updatelfiles(repo.ui, repo)
251 253 # Case 1: user calls commit with no specific files or
252 254 # include/exclude patterns: refresh and commit all files that
253 255 # are "dirty".
254 if (match is None) or (not match.anypats() and not \
255 match.files()):
256 if ((match is None) or
257 (not match.anypats() and not match.files())):
256 258 # Spend a bit of time here to get a list of files we know
257 259 # are modified so we can compare only against those.
258 260 # It can cost a lot of time (several seconds)
259 261 # otherwise to update all standins if the largefiles are
260 262 # large.
261 263 lfdirstate = lfutil.openlfdirstate(ui, self)
262 264 dirtymatch = match_.always(repo.root, repo.getcwd())
263 265 s = lfdirstate.status(dirtymatch, [], False, False, False)
264 266 modifiedfiles = []
265 267 for i in s:
266 268 modifiedfiles.extend(i)
267 269 lfiles = lfutil.listlfiles(self)
268 270 # this only loops through largefiles that exist (not
269 271 # removed/renamed)
270 272 for lfile in lfiles:
271 273 if lfile in modifiedfiles:
272 274 if os.path.exists(self.wjoin(lfutil.standin(lfile))):
273 275 # this handles the case where a rebase is being
274 276 # performed and the working copy is not updated
275 277 # yet.
276 278 if os.path.exists(self.wjoin(lfile)):
277 279 lfutil.updatestandin(self,
278 280 lfutil.standin(lfile))
279 281 lfdirstate.normal(lfile)
280 282 for lfile in lfdirstate:
281 283 if lfile in modifiedfiles:
282 284 if not os.path.exists(
283 285 repo.wjoin(lfutil.standin(lfile))):
284 286 lfdirstate.drop(lfile)
285 287 lfdirstate.write()
286 288
287 289 return orig(text=text, user=user, date=date, match=match,
288 290 force=force, editor=editor, extra=extra)
289 291
290 292 for f in match.files():
291 293 if lfutil.isstandin(f):
292 294 raise util.Abort(
293 295 _('file "%s" is a largefile standin') % f,
294 296 hint=('commit the largefile itself instead'))
295 297
296 298 # Case 2: user calls commit with specified patterns: refresh
297 299 # any matching big files.
298 300 smatcher = lfutil.composestandinmatcher(self, match)
299 301 standins = lfutil.dirstate_walk(self.dirstate, smatcher)
300 302
301 303 # No matching big files: get out of the way and pass control to
302 304 # the usual commit() method.
303 305 if not standins:
304 306 return orig(text=text, user=user, date=date, match=match,
305 307 force=force, editor=editor, extra=extra)
306 308
307 309 # Refresh all matching big files. It's possible that the
308 310 # commit will end up failing, in which case the big files will
309 311 # stay refreshed. No harm done: the user modified them and
310 312 # asked to commit them, so sooner or later we're going to
311 313 # refresh the standins. Might as well leave them refreshed.
312 314 lfdirstate = lfutil.openlfdirstate(ui, self)
313 315 for standin in standins:
314 316 lfile = lfutil.splitstandin(standin)
315 317 if lfdirstate[lfile] <> 'r':
316 318 lfutil.updatestandin(self, standin)
317 319 lfdirstate.normal(lfile)
318 320 else:
319 321 lfdirstate.drop(lfile)
320 322 lfdirstate.write()
321 323
322 324 # Cook up a new matcher that only matches regular files or
323 325 # standins corresponding to the big files requested by the
324 326 # user. Have to modify _files to prevent commit() from
325 327 # complaining "not tracked" for big files.
326 328 lfiles = lfutil.listlfiles(repo)
327 329 match = copy.copy(match)
328 330 orig_matchfn = match.matchfn
329 331
330 332 # Check both the list of largefiles and the list of
331 333 # standins because if a largefile was removed, it
332 334 # won't be in the list of largefiles at this point
333 335 match._files += sorted(standins)
334 336
335 337 actualfiles = []
336 338 for f in match._files:
337 339 fstandin = lfutil.standin(f)
338 340
339 341 # ignore known largefiles and standins
340 342 if f in lfiles or fstandin in standins:
341 343 continue
342 344
343 345 # append directory separator to avoid collisions
344 346 if not fstandin.endswith(os.sep):
345 347 fstandin += os.sep
346 348
347 349 # prevalidate matching standin directories
348 if lfutil.any_(st for st in match._files if \
349 st.startswith(fstandin)):
350 if lfutil.any_(st for st in match._files
351 if st.startswith(fstandin)):
350 352 continue
351 353 actualfiles.append(f)
352 354 match._files = actualfiles
353 355
354 356 def matchfn(f):
355 357 if orig_matchfn(f):
356 358 return f not in lfiles
357 359 else:
358 360 return f in standins
359 361
360 362 match.matchfn = matchfn
361 363 return orig(text=text, user=user, date=date, match=match,
362 364 force=force, editor=editor, extra=extra)
363 365 finally:
364 366 wlock.release()
365 367
366 368 def push(self, remote, force=False, revs=None, newbranch=False):
367 369 o = lfutil.findoutgoing(repo, remote, force)
368 370 if o:
369 371 toupload = set()
370 372 o = repo.changelog.nodesbetween(o, revs)[0]
371 373 for n in o:
372 parents = [p for p in repo.changelog.parents(n) if p != \
373 node.nullid]
374 parents = [p for p in repo.changelog.parents(n)
375 if p != node.nullid]
374 376 ctx = repo[n]
375 377 files = set(ctx.files())
376 378 if len(parents) == 2:
377 379 mc = ctx.manifest()
378 380 mp1 = ctx.parents()[0].manifest()
379 381 mp2 = ctx.parents()[1].manifest()
380 382 for f in mp1:
381 383 if f not in mc:
382 384 files.add(f)
383 385 for f in mp2:
384 386 if f not in mc:
385 387 files.add(f)
386 388 for f in mc:
387 389 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f,
388 390 None):
389 391 files.add(f)
390 392
391 toupload = toupload.union(set([ctx[f].data().strip() for f\
392 in files if lfutil.isstandin(f) and f in ctx]))
393 toupload = toupload.union(
394 set([ctx[f].data().strip()
395 for f in files
396 if lfutil.isstandin(f) and f in ctx]))
393 397 lfcommands.uploadlfiles(ui, self, remote, toupload)
394 398 return super(lfiles_repo, self).push(remote, force, revs,
395 399 newbranch)
396 400
397 401 repo.__class__ = lfiles_repo
398 402
399 403 def checkrequireslfiles(ui, repo, **kwargs):
400 404 if 'largefiles' not in repo.requirements and lfutil.any_(
401 405 lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
402 406 # workaround bug in Mercurial 1.9 whereby requirements is
403 407 # a list on newly-cloned repos
404 408 repo.requirements = set(repo.requirements)
405 409
406 410 repo.requirements |= set(['largefiles'])
407 411 repo._writerequirements()
408 412
409 413 checkrequireslfiles(ui, repo)
410 414
411 415 ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles)
412 416 ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles)
General Comments 0
You need to be logged in to leave comments. Login now