##// END OF EJS Templates
largefiles: rename functions and methods to match desired behavior...
Benjamin Pollack -
r15316:c65f5b6e stable
parent child Browse files
Show More
@@ -1,202 +1,202
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''base class for store implementations and store-related utility code'''
10 10
11 11 import os
12 12 import tempfile
13 13 import binascii
14 14 import re
15 15
16 16 from mercurial import util, node, hg
17 17 from mercurial.i18n import _
18 18
19 19 import lfutil
20 20
class StoreError(Exception):
    '''Raised when there is a problem getting files from or putting
    files to a central store.'''

    def __init__(self, filename, hash, url, detail):
        # Keep every piece of context so callers can build a full report.
        self.filename = filename
        self.hash = hash
        self.url = url
        self.detail = detail

    def longmessage(self):
        # With a URL we point at the remote; without one, at the missing
        # hgrc configuration.
        if not self.url:
            return ('%s: %s\n'
                    '(no default or default-push path set in hgrc)\n'
                    % (self.filename, self.detail))
        return ('%s: %s\n'
                '(failed URL: %s)\n'
                % (self.filename, self.detail, self.url))

    def __str__(self):
        return "%s: %s" % (self.url, self.detail)
42 42
class basestore(object):
    '''Base class for largefile store implementations.

    Subclasses bind put()/exists()/_getfile()/_verifyfile() to a concrete
    transport (local filesystem, HTTP(S), SSH).
    '''

    def __init__(self, ui, repo, url):
        self.ui = ui
        self.repo = repo
        self.url = url

    def put(self, source, hash):
        '''Put source file into the store under <filename>/<hash>.'''
        raise NotImplementedError('abstract method')

    def exists(self, hash):
        '''Check to see if the store contains the given hash.'''
        raise NotImplementedError('abstract method')

    def get(self, files):
        '''Get the specified largefiles from the store and write to local
        files under repo.root. files is a list of (filename, hash)
        tuples. Return (success, missing), lists of files successfully
        downloaded and those not found in the store. success is a list
        of (filename, hash) tuples; missing is a list of filenames that
        we could not get. (The detailed error message will already have
        been presented to the user, so missing is just supplied as a
        summary.)'''
        success = []
        missing = []
        ui = self.ui

        at = 0
        for filename, hash in files:
            ui.progress(_('getting largefiles'), at, unit='lfile',
                total=len(files))
            at += 1
            ui.note(_('getting %s:%s\n') % (filename, hash))

            storefilename = lfutil.storepath(self.repo, hash)
            storedir = os.path.dirname(storefilename)

            # No need to pass mode='wb' to fdopen(), since mkstemp() already
            # opened the file in binary mode.
            (tmpfd, tmpfilename) = tempfile.mkstemp(
                dir=storedir, prefix=os.path.basename(filename))
            tmpfile = os.fdopen(tmpfd, 'w')

            try:
                hhash = binascii.hexlify(self._getfile(tmpfile, filename,
                                                       hash))
            except StoreError as err:
                ui.warn(err.longmessage())
                hhash = ""

            if hhash != hash:
                if hhash != "":
                    ui.warn(_('%s: data corruption (expected %s, got %s)\n')
                            % (filename, hash, hhash))
                tmpfile.close() # no-op if it's already closed
                os.remove(tmpfilename)
                missing.append(filename)
                continue

            if os.path.exists(storefilename): # Windows
                os.remove(storefilename)
            os.rename(tmpfilename, storefilename)
            lfutil.linktousercache(self.repo, hash)
            success.append((filename, hhash))

        ui.progress(_('getting largefiles'), None)
        return (success, missing)

    def verify(self, revs, contents=False):
        '''Verify the existence (and, optionally, contents) of every big
        file revision referenced by every changeset in revs.
        Return 0 if all is well, non-zero on any errors.'''
        write = self.ui.write
        failed = False

        write(_('searching %d changesets for largefiles\n') % len(revs))
        verified = set()        # set of (filename, filenode) tuples

        for rev in revs:
            cctx = self.repo[rev]
            cset = "%d:%s" % (cctx.rev(), node.short(cctx.node()))

            # BUG FIX: accumulate failures across changesets.  Previously
            # 'failed' was overwritten on every iteration, so a failure in
            # an earlier rev was forgotten when a later rev verified clean
            # and verify() wrongly returned 0.
            failed = lfutil.any_(self._verifyfile(
                cctx, cset, contents, standin, verified)
                for standin in cctx) or failed

        num_revs = len(verified)
        num_lfiles = len(set([fname for (fname, fnode) in verified]))
        if contents:
            write(_('verified contents of %d revisions of %d largefiles\n')
                  % (num_revs, num_lfiles))
        else:
            write(_('verified existence of %d revisions of %d largefiles\n')
                  % (num_revs, num_lfiles))

        return int(failed)

    def _getfile(self, tmpfile, filename, hash):
        '''Fetch one revision of one file from the store and write it
        to tmpfile.  Compute the hash of the file on-the-fly as it
        downloads and return the binary hash.  Close tmpfile.  Raise
        StoreError if unable to download the file (e.g. it does not
        exist in the store).'''
        raise NotImplementedError('abstract method')

    def _verifyfile(self, cctx, cset, contents, standin, verified):
        '''Perform the actual verification of a file in the store.
        '''
        raise NotImplementedError('abstract method')
150 150
import localstore, wirestore

# Maps a URL scheme to the store classes to try, in order.
_storeprovider = {
    'file': [localstore.localstore],
    'http': [wirestore.wirestore],
    'https': [wirestore.wirestore],
    'ssh': [wirestore.wirestore],
    }

_scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://')

# During clone this function is passed the src's ui object
# but it needs the dest's ui object so it can read out of
# the config file. Use repo.ui instead.
def _openstore(repo, remote=None, put=False):
    '''Return the first store provider willing to serve remote for repo.'''
    ui = repo.ui

    if not remote:
        path = (getattr(repo, 'lfpullsource', None) or
                ui.expandpath('default-push', 'default'))

        # ui.expandpath() leaves 'default-push' and 'default' alone if
        # they cannot be expanded: fallback to the empty string,
        # meaning the current directory.
        if path in ('default-push', 'default'):
            path = ''
            remote = repo
        else:
            remote = hg.peer(repo, {}, path)

    # The path could be a scheme so use Mercurial's normal functionality
    # to resolve the scheme to a repository and use its path
    path = util.safehasattr(remote, 'url') and remote.url() or remote.path

    match = _scheme_re.match(path)
    if match:
        scheme = match.group(1)
    else:
        # regular filesystem path
        scheme = 'file'

    providers = _storeprovider.get(scheme)
    if providers is None:
        raise util.Abort(_('unsupported URL scheme %r') % scheme)

    for provider in providers:
        try:
            return provider(ui, repo, remote)
        except lfutil.storeprotonotcapable:
            pass

    raise util.Abort(_('%s does not appear to be a largefile store') % path)
@@ -1,448 +1,448
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10
11 11 import os
12 12 import errno
13 13 import shutil
14 14 import stat
15 15 import hashlib
16 16
17 17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 18 from mercurial.i18n import _
19 19
shortname = '.hglf'
longname = 'largefiles'


# -- Portability wrappers ----------------------------------------------

def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
    '''Walk dirstate with matcher; no explicit subrepo list is passed.'''
    return dirstate.walk(matcher, [], unknown, ignored)

def repo_add(repo, list):
    '''Schedule the named files for addition in the working context.'''
    return repo[None].add(list)

def repo_remove(repo, list, unlink=False):
    '''Forget the named files, optionally unlinking them from disk.'''
    def remove(list, unlink):
        wlock = repo.wlock()
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlinkpath(repo.wjoin(f))
                    except OSError as inst:
                        # A file that is already gone is fine here.
                        if inst.errno != errno.ENOENT:
                            raise
            repo[None].forget(list)
        finally:
            wlock.release()
    return remove(list, unlink=unlink)

def repo_forget(repo, list):
    '''Stop tracking the named files without touching their contents.'''
    return repo[None].forget(list)

def findoutgoing(repo, remote, force):
    '''Return the changesets present locally but missing from remote.'''
    from mercurial import discovery
    common, _anyinc, _heads = discovery.findcommonincoming(repo,
        remote, force=force)
    return repo.changelog.findmissing(common)
58 58
59 59 # -- Private worker functions ------------------------------------------
60 60
def getminsize(ui, assumelfiles, opt, default=10):
    '''Resolve the minimum largefile size from an explicit option value,
    falling back to the [largefiles] minsize config when assumelfiles is
    set.  Returns a float; aborts on a non-numeric or missing value.'''
    size = opt
    if not size and assumelfiles:
        size = ui.config(longname, 'minsize', default=default)
    if size:
        try:
            size = float(size)
        except ValueError:
            raise util.Abort(_('largefiles: size must be number (not %s)\n')
                             % size)
    if size is None:
        raise util.Abort(_('minimum size for largefiles must be specified'))
    return size

def link(src, dest):
    '''Hardlink src to dest, copying (and preserving permission bits)
    when hardlinks are unavailable.'''
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on copy
        shutil.copyfile(src, dest)
        os.chmod(dest, os.stat(src).st_mode)
82 82
def usercachepath(ui, hash):
    '''Return the path of hash inside the per-user largefile cache,
    honoring an explicit [largefiles] usercache setting when present.'''
    path = ui.config(longname, 'usercache', None)
    if path:
        return os.path.join(path, hash)
    if os.name == 'nt':
        # Prefer the local (non-roaming) application-data directory.
        appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
        return os.path.join(appdata, longname, hash)
    if os.name == 'posix':
        return os.path.join(os.getenv('HOME'), '.' + longname, hash)
    raise util.Abort(_('unknown operating system: %s\n') % os.name)

def inusercache(ui, hash):
    '''True when hash is already present in the per-user cache.'''
    return os.path.exists(usercachepath(ui, hash))

def findfile(repo, hash):
    '''Locate hash in the repo-local store or the per-user cache and
    return its path, or None when it is in neither place.'''
    if instore(repo, hash):
        repo.ui.note(_('Found %s in store\n') % hash)
        return storepath(repo, hash)
    if inusercache(repo.ui, hash):
        repo.ui.note(_('Found %s in system cache\n') % hash)
        return usercachepath(repo.ui, hash)
    return None
108 108
class largefiles_dirstate(dirstate.dirstate):
    '''dirstate subclass that funnels every path through unixpath() so
    entries are keyed with forward slashes on all platforms.'''
    def __getitem__(self, key):
        return super(largefiles_dirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefiles_dirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefiles_dirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefiles_dirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefiles_dirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefiles_dirstate, self).forget(unixpath(f))
122 122
def openlfdirstate(ui, repo):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    admin = repo.join(longname)
    opener = scmutil.opener(admin)
    # Older Mercurial dirstate constructors lack the _validate argument.
    if util.safehasattr(repo.dirstate, '_validate'):
        lfdirstate = largefiles_dirstate(opener, ui, repo.root,
                                         repo.dirstate._validate)
    else:
        lfdirstate = largefiles_dirstate(opener, ui, repo.root)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone. It also gives us an easy
    # way to forcibly rebuild largefiles state:
    #   rm .hg/largefiles/dirstate && hg status
    # Or even, if things are really messed up:
    #   rm -rf .hg/largefiles && hg status
    if not os.path.exists(os.path.join(admin, 'dirstate')):
        util.makedirs(admin)
        matcher = getstandinmatcher(repo)
        for standin in dirstate_walk(repo.dirstate, matcher):
            lfile = splitstandin(standin)
            hash = readstandin(repo, lfile)
            lfdirstate.normallookup(lfile)
            try:
                if hash == hashfile(lfile):
                    lfdirstate.normal(lfile)
            except IOError as err:
                # A missing working-copy largefile just stays "lookup".
                if err.errno != errno.ENOENT:
                    raise
        lfdirstate.write()

    return lfdirstate
160 160
def lfdirstate_status(lfdirstate, repo, rev):
    '''Return dirstate-style status lists for largefiles, re-hashing any
    "unsure" entries against the standin hashes recorded at rev.'''
    wlock = repo.wlock()
    try:
        match = match_.always(repo.root, repo.getcwd())
        s = lfdirstate.status(match, [], False, False, False)
        unsure, modified, added, removed, missing, unknown, ignored, clean = s
        for lfile in unsure:
            # Settle the verdict by comparing the recorded standin hash
            # with the working copy's actual content hash.
            if repo[rev][standin(lfile)].data().strip() != \
                    hashfile(repo.wjoin(lfile)):
                modified.append(lfile)
            else:
                clean.append(lfile)
                lfdirstate.normal(lfile)
        lfdirstate.write()
    finally:
        wlock.release()
    return (modified, added, removed, missing, unknown, ignored, clean)
178 178
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''
    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [splitstandin(f)
            for f in repo[rev].walk(matcher)
            if rev is not None or repo.dirstate[f] != '?']

def instore(repo, hash):
    '''True when hash is already present in the repo-local store.'''
    return os.path.exists(storepath(repo, hash))

def createdir(dir):
    '''Create dir (and missing parents), tolerating prior existence.

    The old check-then-create pattern raced with concurrent processes:
    another writer could create the directory between os.path.exists()
    and os.makedirs(), turning a benign overlap into an uncaught
    OSError.  Attempt the creation unconditionally and swallow only the
    "already exists" failure.'''
    try:
        os.makedirs(dir)
    except OSError as err:
        if err.errno != errno.EEXIST:
            raise
197 197
def storepath(repo, hash):
    '''Return the path of hash inside the repo-local largefile store.'''
    return repo.join(os.path.join(longname, hash))

def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository.  Return true on success or false if the
    file was not found in either cache (which should not happen: this is
    meant to be called only after ensuring that the needed largefile
    exists in the cache).'''
    path = findfile(repo, hash)
    if path is None:
        return False
    util.makedirs(os.path.dirname(repo.wjoin(filename)))
    shutil.copy(path, repo.wjoin(filename))
    return True

def copytostore(repo, rev, file, uploaded=False):
    '''Copy file's largefile content into the store unless it is
    already there.'''
    hash = readstandin(repo, file)
    if instore(repo, hash):
        return
    copytostoreabsolute(repo, repo.wjoin(file), hash)

def copytostoreabsolute(repo, file, hash):
    '''Place the content for hash into the store: hardlink from the user
    cache when possible, otherwise copy file and then seed the user
    cache from the freshly stored copy.'''
    dst = storepath(repo, hash)
    createdir(os.path.dirname(dst))
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), dst)
    else:
        shutil.copyfile(file, dst)
        os.chmod(dst, os.stat(file).st_mode)
        linktousercache(repo, hash)

def linktousercache(repo, hash):
    '''Hardlink the stored content for hash into the per-user cache.'''
    createdir(os.path.dirname(usercachepath(repo.ui, hash)))
    link(storepath(repo, hash), usercachepath(repo.ui, hash))
232 232
def getstandinmatcher(repo, pats=[], opts={}):
    '''Return a match object that applies pats to the standin directory'''
    standindir = repo.pathto(shortname)
    if pats:
        # patterns supplied: search standin directory relative to current dir
        cwd = repo.getcwd()
        if os.path.isabs(cwd):
            # cwd is an absolute path for hg -R <reponame>
            # work relative to the repository root in this case
            cwd = ''
        pats = [os.path.join(standindir, cwd, pat) for pat in pats]
    elif os.path.isdir(standindir):
        # no patterns: relative to repo root
        pats = [standindir]
    else:
        # no patterns and no standin dir: return matcher that matches nothing
        match = match_.match(repo.root, None, [], exact=True)
        match.matchfn = lambda f: False
        return match
    return getmatcher(repo, pats, opts, showbad=False)

def getmatcher(repo, pats=[], opts={}, showbad=True):
    '''Wrapper around scmutil.match() that adds showbad: if false,
    neuter the match object's bad() method so it does not print any
    warnings about missing files or directories.'''
    match = scmutil.match(repo[None], pats, opts)
    if not showbad:
        match.bad = lambda f, msg: None
    return match

def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher.files())
    isstandin = smatcher.matchfn
    def composed_matchfn(f):
        # A path matches only if it is a standin AND its largefile
        # counterpart matches the user-supplied matcher.
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composed_matchfn
    return smatcher
275 275
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Most callers want an absolute path, but _create_standin() needs
    #    it repo-relative so lfadd() can pass it to repo_add().  So leave
    #    it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortname + '/' + filename.replace(os.sep, '/')

def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    return filename.startswith(shortname + '/')

def splitstandin(filename):
    '''Return the largefile path for a standin path, or None when the
    path is not a standin.'''
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = filename.replace(os.sep, '/').split('/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    return None

def updatestandin(repo, standin):
    '''Refresh standin from the current content of its largefile, if the
    largefile exists in the working directory.'''
    file = repo.wjoin(splitstandin(standin))
    if os.path.exists(file):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)

def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    return repo[node][standin(filename)].data().strip()

def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    writehash(hash, repo.wjoin(standin), executable)
318 318
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Close outfile
    when done and return the binary hash.'''
    hasher = util.sha1('')
    for data in instream:
        hasher.update(data)
        outfile.write(data)

    # Blecch: closing a file that somebody else opened is rude and
    # wrong. But it's so darn convenient and practical! After all,
    # outfile was opened just to copy and hash.
    outfile.close()
    return hasher.digest()

def hashrepofile(repo, file):
    '''Return the hex hash of file as found in the working directory.'''
    return hashfile(repo.wjoin(file))

def hashfile(file):
    '''Return the hex SHA-1 of file's content, or '' when it does not
    exist.'''
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    for data in blockstream(fd):
        hasher.update(data)
    fd.close()
    return hasher.hexdigest()
347 347
class limitreader(object):
    '''File-like wrapper exposing at most `limit` bytes of f.'''
    def __init__(self, f, limit):
        self.f = f
        self.limit = limit

    def read(self, length):
        if self.limit == 0:
            return ''
        length = min(length, self.limit)
        self.limit -= length
        return self.f.read(length)

    def close(self):
        # The caller owns the underlying file; never close it here.
        pass

def blockstream(infile, blocksize=128 * 1024):
    """Generator that yields blocks of data from infile and closes infile."""
    data = infile.read(blocksize)
    while data:
        yield data
        data = infile.read(blocksize)
    # same blecch as copyandhash() above
    infile.close()

def readhash(filename):
    '''Return the 40-byte hex hash stored in filename; abort when the
    file is too short to hold one.'''
    rfile = open(filename, 'rb')
    hash = rfile.read(40)
    rfile.close()
    if len(hash) < 40:
        raise util.Abort(_('bad hash in \'%s\' (only %d bytes long)')
                         % (filename, len(hash)))
    return hash
381 381
def writehash(hash, filename, executable):
    '''Write hash plus a trailing newline to filename, replacing any
    existing file and applying the mode implied by executable.'''
    util.makedirs(os.path.dirname(filename))
    if os.path.exists(filename):
        os.unlink(filename)
    wfile = open(filename, 'wb')
    try:
        wfile.write(hash)
        wfile.write('\n')
    finally:
        wfile.close()
    if os.path.exists(filename):
        os.chmod(filename, getmode(executable))

def getexecutable(filename):
    '''Truthy when filename is executable by user, group and other.'''
    mode = os.stat(filename).st_mode
    return ((mode & stat.S_IXUSR) and
            (mode & stat.S_IXGRP) and
            (mode & stat.S_IXOTH))

def getmode(executable):
    '''Return the permission bits to apply to a stored largefile.'''
    if executable:
        return 0o755
    return 0o644
407 407
def urljoin(first, second, *arg):
    '''Join URL components, guaranteeing exactly one slash between
    adjacent pieces.'''
    def join(left, right):
        if not left.endswith('/'):
            left += '/'
        if right.startswith('/'):
            right = right[1:]
        return left + right

    url = first
    for piece in (second,) + arg:
        url = join(url, piece)
    return url

def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    hasher = hashlib.sha1()
    for chunk in util.filechunkiter(data):
        hasher.update(chunk)
    return hasher.hexdigest()

def httpsendfile(ui, filename):
    '''Return an upload wrapper for filename opened in binary mode.'''
    return httpconnection.httpsendfile(ui, filename, 'rb')

def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    return os.path.normpath(path).replace(os.sep, '/')
435 435
def islfilesrepo(repo):
    '''True when repo carries the largefiles requirement and actually
    stores at least one standin.'''
    return ('largefiles' in repo.requirements and
            any_(shortname + '/' in f[0] for f in repo.store.datafiles()))

def any_(gen):
    '''Portable any(): True if any element of gen is truthy.'''
    for item in gen:
        if item:
            return True
    return False

class storeprotonotcapable(BaseException):
    '''Raised by a store class to reject a remote it cannot speak to.
    NOTE(review): inherits BaseException rather than Exception —
    presumably so blanket Exception handlers cannot swallow it while
    _openstore probes each provider; confirm before changing.'''
    def __init__(self, storetypes):
        self.storetypes = storetypes
@@ -1,71 +1,71
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''store class for local filesystem'''
10 10
11 11 import os
12 12
13 13 from mercurial import util
14 14 from mercurial.i18n import _
15 15
16 16 import lfutil
17 17 import basestore
18 18
class localstore(basestore.basestore):
    '''Because there is a system-wide cache, the local store always
    uses that cache. Since the cache is updated elsewhere, we can
    just read from it here as if it were the store.'''

    def __init__(self, ui, repo, remote):
        url = os.path.join(remote.path, '.hg', lfutil.longname)
        super(localstore, self).__init__(ui, repo, util.expandpath(url))

    def put(self, source, filename, hash):
        '''Any file that is put must already be in the system-wide
        cache so do nothing.'''
        return

    def exists(self, hash):
        return lfutil.inusercache(self.repo.ui, hash)

    def _getfile(self, tmpfile, filename, hash):
        # Serve straight out of the user cache; there is no remote
        # transport for a local store.
        if lfutil.inusercache(self.ui, hash):
            return lfutil.usercachepath(self.ui, hash)
        raise basestore.StoreError(filename, hash, '',
            _("Can't get file locally"))

    def _verifyfile(self, cctx, cset, contents, standin, verified):
        filename = lfutil.splitstandin(standin)
        if not filename:
            return False
        fctx = cctx[standin]
        key = (filename, fctx.filenode())
        if key in verified:
            return False

        expecthash = fctx.data()[0:40]
        verified.add(key)
        if not lfutil.inusercache(self.ui, expecthash):
            self.ui.warn(
                _('changeset %s: %s missing\n'
                  ' (looked for hash %s)\n')
                % (cset, filename, expecthash))
            return True # failed

        if contents:
            storepath = lfutil.usercachepath(self.ui, expecthash)
            actualhash = lfutil.hashfile(storepath)
            if actualhash != expecthash:
                self.ui.warn(
                    _('changeset %s: %s: contents differ\n'
                      ' (%s:\n'
                      ' expected hash %s,\n'
                      ' but got %s)\n')
                    % (cset, filename, storepath, expecthash, actualhash))
                return True # failed
        return False
@@ -1,160 +1,160
1 1 # Copyright 2011 Fog Creek Software
2 2 #
3 3 # This software may be used and distributed according to the terms of the
4 4 # GNU General Public License version 2 or any later version.
5 5
6 6 import os
7 7 import tempfile
8 8 import urllib2
9 9
10 10 from mercurial import error, httprepo, util, wireproto
11 11 from mercurial.i18n import _
12 12
13 13 import lfutil
14 14
LARGEFILES_REQUIRED_MSG = ('\nThis repository uses the largefiles extension.'
                           '\n\nPlease enable it in your Mercurial config '
                           'file.\n')

def putlfile(repo, proto, sha):
    '''Put a largefile into a repository's local cache and into the
    system cache.'''
    f = None
    proto.redirect()
    try:
        try:
            f = tempfile.NamedTemporaryFile(mode='wb+', prefix='hg-putlfile-')
            proto.getfile(f)
            f.seek(0)
            # Reject uploads whose content does not match the claimed sha.
            if sha != lfutil.hexsha1(f):
                return wireproto.pushres(1)
            lfutil.copytostoreabsolute(repo, f.name, sha)
        except IOError:
            repo.ui.warn(
                _('error: could not put received data into largefile store'))
            return wireproto.pushres(1)
    finally:
        if f:
            f.close()

    return wireproto.pushres(0)

def getlfile(repo, proto, sha):
    '''Retrieve a largefile from the repository-local cache or system
    cache.'''
    filename = lfutil.findfile(repo, sha)
    if not filename:
        raise util.Abort(_('requested largefile %s not present in cache') % sha)
    f = open(filename, 'rb')
    length = os.fstat(f.fileno())[6]

    # Since we can't set an HTTP content-length header here, and
    # Mercurial core provides no way to give the length of a streamres
    # (and reading the entire file into RAM would be ill-advised), we
    # just send the length on the first line of the response, like the
    # ssh proto does for string responses.
    def generator():
        yield '%d\n' % length
        for chunk in f:
            yield chunk
    return wireproto.streamres(generator())

def statlfile(repo, proto, sha):
    '''Return '2\n' if the largefile is missing, '1\n' if it has a
    mismatched checksum, or '0\n' if it is in good condition'''
    filename = lfutil.findfile(repo, sha)
    if not filename:
        return '2\n'
    fd = None
    try:
        fd = open(filename, 'rb')
        return lfutil.hexsha1(fd) == sha and '0\n' or '1\n'
    finally:
        if fd:
            fd.close()
75 75
def wirereposetup(ui, repo):
    '''Graft largefile wire-protocol client methods onto a remote
    repository object by swapping in a subclass of its class.'''
    class lfileswirerepository(repo.__class__):
        def putlfile(self, sha, fd):
            # unfortunately, httprepository._callpush tries to convert its
            # input file-like into a bundle before sending it, so we can't use
            # it ...
            if issubclass(self.__class__, httprepo.httprepository):
                try:
                    return int(self._call('putlfile', data=fd, sha=sha,
                        headers={'content-type':'application/mercurial-0.1'}))
                except (ValueError, urllib2.HTTPError):
                    return 1
            # ... but we can't use sshrepository._call because the data=
            # argument won't get sent, and _callpush does exactly what we want
            # in this case: send the data straight through
            else:
                try:
                    ret, output = self._callpush("putlfile", fd, sha=sha)
                    if ret == "":
                        raise error.ResponseError(_('putlfile failed:'),
                                                  output)
                    return int(ret)
                except IOError:
                    return 1
                except ValueError:
                    raise error.ResponseError(
                        _('putlfile failed (unexpected response):'), ret)

        def getlfile(self, sha):
            stream = self._callstream("getlfile", sha=sha)
            # The server sends the payload length on the first line; see
            # proto.getlfile for the rationale.
            length = stream.readline()
            try:
                length = int(length)
            except ValueError:
                self._abort(error.ResponseError(_("unexpected response:"),
                                                length))
            return (length, stream)

        def statlfile(self, sha):
            try:
                return int(self._call("statlfile", sha=sha))
            except (ValueError, urllib2.HTTPError):
                # If the server returns anything but an integer followed by a
                # newline, newline, it's not speaking our language; if we get
                # an HTTP error, we can't be sure the largefile is present;
                # either way, consider it missing.
                return 2

    repo.__class__ = lfileswirerepository
125 125
# advertise the largefiles=serve capability
def capabilities(repo, proto):
    '''Append the largefiles serving capability to the wrapped command.'''
    return capabilities_orig(repo, proto) + ' largefiles=serve'

# duplicate what Mercurial's new out-of-band errors mechanism does, because
# clients old and new alike both handle it well
def webproto_refuseclient(self, message):
    '''Reject an HTTP client with an hg-error payload.'''
    self.req.header([('Content-Type', 'application/hg-error')])
    return message

def sshproto_refuseclient(self, message):
    '''Reject an SSH client: message goes to stderr, an empty response
    to the protocol stream.'''
    self.ui.write_err('%s\n-\n' % message)
    self.fout.write('\n')
    self.fout.flush()

    return ''

def heads(repo, proto):
    '''Refuse the plain heads command on largefile repos so clients
    without the extension fail with a helpful message.'''
    if lfutil.islfilesrepo(repo):
        return wireproto.ooberror(LARGEFILES_REQUIRED_MSG)
    return wireproto.heads(repo, proto)

def sshrepo_callstream(self, cmd, **args):
    '''Rewrite heads to lheads for largefiles-capable ssh peers.'''
    if cmd == 'heads' and self.capable('largefiles'):
        cmd = 'lheads'
    if cmd == 'batch' and self.capable('largefiles'):
        args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
    return ssh_oldcallstream(self, cmd, **args)

def httprepo_callstream(self, cmd, **args):
    '''Rewrite heads to lheads for largefiles-capable http peers.'''
    if cmd == 'heads' and self.capable('largefiles'):
        cmd = 'lheads'
    if cmd == 'batch' and self.capable('largefiles'):
        args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
    return http_oldcallstream(self, cmd, **args)
@@ -1,411 +1,411
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''setup for largefiles repositories: reposetup'''
10 10 import copy
11 11 import types
12 12 import os
13 13 import re
14 14
15 15 from mercurial import context, error, manifest, match as match_, node, util
16 16 from mercurial.i18n import _
17 17
18 18 import lfcommands
19 19 import proto
20 20 import lfutil
21 21
def reposetup(ui, repo):
    '''Install largefiles behavior on *repo*.

    Wire (remote) repository proxies only receive the largefiles
    wireproto additions; local repositories get the full set of class
    overrides defined below.
    '''
    # wire repositories should be given new wireproto functions but not the
    # other largefiles modifications
    if not repo.local():
        return proto.wirereposetup(ui, repo)

    # Warn if another extension appears to have already wrapped the
    # methods we are about to override -- double wrapping is known to
    # misbehave.  (The 'wrap' function-name check is a heuristic.)
    for name in ('status', 'commitctx', 'commit', 'push'):
        method = getattr(repo, name)
        #if not (isinstance(method, types.MethodType) and
        #        method.im_func is repo.__class__.commitctx.im_func):
        if (isinstance(method, types.FunctionType) and
            method.func_name == 'wrap'):
            ui.warn(_('largefiles: repo method %r appears to have already been'
                      ' wrapped by another extension: '
                      'largefiles may behave incorrectly\n')
                    % name)
    class lfiles_repo(repo.__class__):
        # when True, status() (and the contexts it hands out) reports
        # largefiles themselves instead of their standin files
        lfstatus = False
        def status_nolfiles(self, *args, **kwargs):
            # escape hatch: the unmodified status() implementation
            return super(lfiles_repo, self).status(*args, **kwargs)
43 43
        # When lfstatus is set, return a context that gives the names
        # of largefiles instead of their corresponding standins and
        # identifies the largefiles as always binary, regardless of
        # their actual contents.
        def __getitem__(self, changeid):
            '''Return the changectx for *changeid*, patched (only while
            self.lfstatus is True) to translate standin names back to
            largefile names.'''
            ctx = super(lfiles_repo, self).__getitem__(changeid)
            if self.lfstatus:
                # manifest membership also succeeds for the standin of a
                # requested filename
                class lfiles_manifestdict(manifest.manifestdict):
                    def __contains__(self, filename):
                        if super(lfiles_manifestdict,
                                self).__contains__(filename):
                            return True
                        return super(lfiles_manifestdict,
                            self).__contains__(lfutil.shortname+'/' + filename)
                class lfiles_ctx(ctx.__class__):
                    def files(self):
                        # strip the standin directory prefix from reported
                        # filenames
                        filenames = super(lfiles_ctx, self).files()
                        return [re.sub('^\\'+lfutil.shortname+'/', '',
                            filename) for filename in filenames]
                    def manifest(self):
                        man1 = super(lfiles_ctx, self).manifest()
                        man1.__class__ = lfiles_manifestdict
                        return man1
                    def filectx(self, path, fileid=None, filelog=None):
                        # fall back to the standin when the largefile name
                        # itself is not in the changeset
                        try:
                            result = super(lfiles_ctx, self).filectx(path,
                                fileid, filelog)
                        except error.LookupError:
                            # Adding a null character will cause Mercurial to
                            # identify this as a binary file.
                            result = super(lfiles_ctx, self).filectx(
                                lfutil.shortname + '/' + path, fileid,
                                filelog)
                            olddata = result.data
                            result.data = lambda: olddata() + '\0'
                        return result
                ctx.__class__ = lfiles_ctx
            return ctx
82 82
        # Figure out the status of big files and insert them into the
        # appropriate list in the result. Also removes standin files
        # from the listing. Revert to the original status if
        # self.lfstatus is False.
        def status(self, node1='.', node2=None, match=None, ignored=False,
                clean=False, unknown=False, listsubrepos=False):
            '''Like the stock status(), but (when self.lfstatus is True)
            folds the state of largefiles into the seven result lists and
            removes their standins from the listing.

            The TypeError fallbacks exist because older Mercurial
            versions' status() does not accept listsubrepos.
            '''
            listignored, listclean, listunknown = ignored, clean, unknown
            if not self.lfstatus:
                try:
                    return super(lfiles_repo, self).status(node1, node2, match,
                        listignored, listclean, listunknown, listsubrepos)
                except TypeError:
                    return super(lfiles_repo, self).status(node1, node2, match,
                        listignored, listclean, listunknown)
            else:
                # some calls in this function rely on the old version of status
                self.lfstatus = False
                if isinstance(node1, context.changectx):
                    ctx1 = node1
                else:
                    ctx1 = repo[node1]
                if isinstance(node2, context.changectx):
                    ctx2 = node2
                else:
                    ctx2 = repo[node2]
                # a None revision on ctx2 means the working directory
                working = ctx2.rev() is None
                parentworking = working and ctx1 == self['.']

                def inctx(file, ctx):
                    # membership test that works for both the working
                    # directory (manifest lookup) and committed revisions
                    try:
                        if ctx.rev() is None:
                            return file in ctx.manifest()
                        ctx[file]
                        return True
                    except KeyError:
                        return False

                if match is None:
                    match = match_.always(self.root, self.getcwd())

                # Create a copy of match that matches standins instead
                # of largefiles.
                def tostandin(file):
                    if inctx(lfutil.standin(file), ctx2):
                        return lfutil.standin(file)
                    return file

                m = copy.copy(match)
                m._files = [tostandin(f) for f in m._files]

                # get ignored, clean, and unknown but remove them
                # later if they were not asked for
                try:
                    result = super(lfiles_repo, self).status(node1, node2, m,
                        True, True, True, listsubrepos)
                except TypeError:
                    result = super(lfiles_repo, self).status(node1, node2, m,
                        True, True, True)
                if working:
                    # hold the wlock while we read largefiles and
                    # update the lfdirstate
                    wlock = repo.wlock()
                    try:
                        # Any non-largefiles that were explicitly listed must be
                        # taken out or lfdirstate.status will report an error.
                        # The status of these files was already computed using
                        # super's status.
                        lfdirstate = lfutil.openlfdirstate(ui, self)
                        match._files = [f for f in match._files if f in
                            lfdirstate]
                        s = lfdirstate.status(match, [], listignored,
                                listclean, listunknown)
                        (unsure, modified, added, removed, missing, unknown,
                                ignored, clean) = s
                        if parentworking:
                            # compare uncertain files against the parent's
                            # recorded hash to decide modified vs. clean
                            for lfile in unsure:
                                if ctx1[lfutil.standin(lfile)].data().strip() \
                                        != lfutil.hashfile(self.wjoin(lfile)):
                                    modified.append(lfile)
                                else:
                                    clean.append(lfile)
                                    lfdirstate.normal(lfile)
                            lfdirstate.write()
                        else:
                            # comparing against a non-parent revision: the
                            # lfdirstate categories are not trustworthy, so
                            # re-derive them by hashing against ctx1
                            tocheck = unsure + modified + added + clean
                            modified, added, clean = [], [], []

                            for lfile in tocheck:
                                standin = lfutil.standin(lfile)
                                if inctx(standin, ctx1):
                                    if ctx1[standin].data().strip() != \
                                            lfutil.hashfile(self.wjoin(lfile)):
                                        modified.append(lfile)
                                    else:
                                        clean.append(lfile)
                                else:
                                    added.append(lfile)
                    finally:
                        wlock.release()

                    # largefiles known to ctx1 but gone from the
                    # lfdirstate have been removed
                    for standin in ctx1.manifest():
                        if not lfutil.isstandin(standin):
                            continue
                        lfile = lfutil.splitstandin(standin)
                        if not match(lfile):
                            continue
                        if lfile not in lfdirstate:
                            removed.append(lfile)
                    # Handle unknown and ignored differently
                    lfiles = (modified, added, removed, missing, [], [], clean)
                    result = list(result)
                    # Unknown files
                    result[4] = [f for f in unknown
                                 if (repo.dirstate[f] == '?' and
                                     not lfutil.isstandin(f))]
                    # Ignored files must be ignored by both the dirstate and
                    # lfdirstate
                    result[5] = set(ignored).intersection(set(result[5]))
                    # combine normal files and largefiles
                    normals = [[fn for fn in filelist
                                if not lfutil.isstandin(fn)]
                               for filelist in result]
                    result = [sorted(list1 + list2)
                              for (list1, list2) in zip(normals, lfiles)]
                else:
                    # not the working directory: just map standin names
                    # back to largefile names in every category
                    def toname(f):
                        if lfutil.isstandin(f):
                            return lfutil.splitstandin(f)
                        return f
                    result = [[toname(f) for f in items] for items in result]

                if not listunknown:
                    result[4] = []
                if not listignored:
                    result[5] = []
                if not listclean:
                    result[6] = []
                self.lfstatus = True
                return result
222 222
223 223 # As part of committing, copy all of the largefiles into the
224 224 # cache.
225 225 def commitctx(self, *args, **kwargs):
226 226 node = super(lfiles_repo, self).commitctx(*args, **kwargs)
227 227 ctx = self[node]
228 228 for filename in ctx.files():
229 229 if lfutil.isstandin(filename) and filename in ctx.manifest():
230 230 realfile = lfutil.splitstandin(filename)
231 lfutil.copytocache(self, ctx.node(), realfile)
231 lfutil.copytostore(self, ctx.node(), realfile)
232 232
233 233 return node
234 234
235 235 # Before commit, largefile standins have not had their
236 236 # contents updated to reflect the hash of their largefile.
237 237 # Do that here.
238 238 def commit(self, text="", user=None, date=None, match=None,
239 239 force=False, editor=False, extra={}):
240 240 orig = super(lfiles_repo, self).commit
241 241
242 242 wlock = repo.wlock()
243 243 try:
244 244 if getattr(repo, "_isrebasing", False):
245 245 # We have to take the time to pull down the new
246 246 # largefiles now. Otherwise if we are rebasing,
247 247 # any largefiles that were modified in the
248 248 # destination changesets get overwritten, either
249 249 # by the rebase or in the first commit after the
250 250 # rebase.
251 251 lfcommands.updatelfiles(repo.ui, repo)
252 252 # Case 1: user calls commit with no specific files or
253 253 # include/exclude patterns: refresh and commit all files that
254 254 # are "dirty".
255 255 if ((match is None) or
256 256 (not match.anypats() and not match.files())):
257 257 # Spend a bit of time here to get a list of files we know
258 258 # are modified so we can compare only against those.
259 259 # It can cost a lot of time (several seconds)
260 260 # otherwise to update all standins if the largefiles are
261 261 # large.
262 262 lfdirstate = lfutil.openlfdirstate(ui, self)
263 263 dirtymatch = match_.always(repo.root, repo.getcwd())
264 264 s = lfdirstate.status(dirtymatch, [], False, False, False)
265 265 modifiedfiles = []
266 266 for i in s:
267 267 modifiedfiles.extend(i)
268 268 lfiles = lfutil.listlfiles(self)
269 269 # this only loops through largefiles that exist (not
270 270 # removed/renamed)
271 271 for lfile in lfiles:
272 272 if lfile in modifiedfiles:
273 273 if os.path.exists(self.wjoin(lfutil.standin(lfile))):
274 274 # this handles the case where a rebase is being
275 275 # performed and the working copy is not updated
276 276 # yet.
277 277 if os.path.exists(self.wjoin(lfile)):
278 278 lfutil.updatestandin(self,
279 279 lfutil.standin(lfile))
280 280 lfdirstate.normal(lfile)
281 281 for lfile in lfdirstate:
282 282 if lfile in modifiedfiles:
283 283 if not os.path.exists(
284 284 repo.wjoin(lfutil.standin(lfile))):
285 285 lfdirstate.drop(lfile)
286 286 lfdirstate.write()
287 287
288 288 return orig(text=text, user=user, date=date, match=match,
289 289 force=force, editor=editor, extra=extra)
290 290
291 291 for f in match.files():
292 292 if lfutil.isstandin(f):
293 293 raise util.Abort(
294 294 _('file "%s" is a largefile standin') % f,
295 295 hint=('commit the largefile itself instead'))
296 296
297 297 # Case 2: user calls commit with specified patterns: refresh
298 298 # any matching big files.
299 299 smatcher = lfutil.composestandinmatcher(self, match)
300 300 standins = lfutil.dirstate_walk(self.dirstate, smatcher)
301 301
302 302 # No matching big files: get out of the way and pass control to
303 303 # the usual commit() method.
304 304 if not standins:
305 305 return orig(text=text, user=user, date=date, match=match,
306 306 force=force, editor=editor, extra=extra)
307 307
308 308 # Refresh all matching big files. It's possible that the
309 309 # commit will end up failing, in which case the big files will
310 310 # stay refreshed. No harm done: the user modified them and
311 311 # asked to commit them, so sooner or later we're going to
312 312 # refresh the standins. Might as well leave them refreshed.
313 313 lfdirstate = lfutil.openlfdirstate(ui, self)
314 314 for standin in standins:
315 315 lfile = lfutil.splitstandin(standin)
316 316 if lfdirstate[lfile] <> 'r':
317 317 lfutil.updatestandin(self, standin)
318 318 lfdirstate.normal(lfile)
319 319 else:
320 320 lfdirstate.drop(lfile)
321 321 lfdirstate.write()
322 322
323 323 # Cook up a new matcher that only matches regular files or
324 324 # standins corresponding to the big files requested by the
325 325 # user. Have to modify _files to prevent commit() from
326 326 # complaining "not tracked" for big files.
327 327 lfiles = lfutil.listlfiles(repo)
328 328 match = copy.copy(match)
329 329 orig_matchfn = match.matchfn
330 330
331 331 # Check both the list of largefiles and the list of
332 332 # standins because if a largefile was removed, it
333 333 # won't be in the list of largefiles at this point
334 334 match._files += sorted(standins)
335 335
336 336 actualfiles = []
337 337 for f in match._files:
338 338 fstandin = lfutil.standin(f)
339 339
340 340 # ignore known largefiles and standins
341 341 if f in lfiles or fstandin in standins:
342 342 continue
343 343
344 344 # append directory separator to avoid collisions
345 345 if not fstandin.endswith(os.sep):
346 346 fstandin += os.sep
347 347
348 348 # prevalidate matching standin directories
349 349 if lfutil.any_(st for st in match._files
350 350 if st.startswith(fstandin)):
351 351 continue
352 352 actualfiles.append(f)
353 353 match._files = actualfiles
354 354
355 355 def matchfn(f):
356 356 if orig_matchfn(f):
357 357 return f not in lfiles
358 358 else:
359 359 return f in standins
360 360
361 361 match.matchfn = matchfn
362 362 return orig(text=text, user=user, date=date, match=match,
363 363 force=force, editor=editor, extra=extra)
364 364 finally:
365 365 wlock.release()
366 366
367 367 def push(self, remote, force=False, revs=None, newbranch=False):
368 368 o = lfutil.findoutgoing(repo, remote, force)
369 369 if o:
370 370 toupload = set()
371 371 o = repo.changelog.nodesbetween(o, revs)[0]
372 372 for n in o:
373 373 parents = [p for p in repo.changelog.parents(n)
374 374 if p != node.nullid]
375 375 ctx = repo[n]
376 376 files = set(ctx.files())
377 377 if len(parents) == 2:
378 378 mc = ctx.manifest()
379 379 mp1 = ctx.parents()[0].manifest()
380 380 mp2 = ctx.parents()[1].manifest()
381 381 for f in mp1:
382 382 if f not in mc:
383 383 files.add(f)
384 384 for f in mp2:
385 385 if f not in mc:
386 386 files.add(f)
387 387 for f in mc:
388 388 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f,
389 389 None):
390 390 files.add(f)
391 391
392 392 toupload = toupload.union(
393 393 set([ctx[f].data().strip()
394 394 for f in files
395 395 if lfutil.isstandin(f) and f in ctx]))
396 396 lfcommands.uploadlfiles(ui, self, remote, toupload)
397 397 return super(lfiles_repo, self).push(remote, force, revs,
398 398 newbranch)
399 399
    # activate all of the above overrides on this repository object
    repo.__class__ = lfiles_repo

    def checkrequireslfiles(ui, repo, **kwargs):
        # Add the 'largefiles' requirement as soon as any standin file
        # shows up in the store, so largefiles-unaware clients refuse to
        # operate on the repository.
        if 'largefiles' not in repo.requirements and lfutil.any_(
                lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
            repo.requirements.add('largefiles')
            repo._writerequirements()

    checkrequireslfiles(ui, repo)

    # re-check after incoming changegroups and local commits, either of
    # which may introduce the first standin
    ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles)
    ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles)
General Comments 0
You need to be logged in to leave comments. Login now