merge with stable
Matt Mackall
r15660:c7b0bedb merge default
@@ -1,452 +1,460 @@
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''largefiles utility code: must not import other modules in this package.'''

import os
import errno
import platform
import shutil
import stat
import tempfile

from mercurial import dirstate, httpconnection, match as match_, util, scmutil
from mercurial.i18n import _

shortname = '.hglf'
longname = 'largefiles'


# -- Portability wrappers ----------------------------------------------

def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
    return dirstate.walk(matcher, [], unknown, ignored)

def repo_add(repo, list):
    add = repo[None].add
    return add(list)

def repo_remove(repo, list, unlink=False):
    def remove(list, unlink):
        wlock = repo.wlock()
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlinkpath(repo.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            repo[None].forget(list)
        finally:
            wlock.release()
    return remove(list, unlink=unlink)

def repo_forget(repo, list):
    forget = repo[None].forget
    return forget(list)

def findoutgoing(repo, remote, force):
    from mercurial import discovery
    common, _anyinc, _heads = discovery.findcommonincoming(repo,
                                                           remote, force=force)
    return repo.changelog.findmissing(common)

# -- Private worker functions ------------------------------------------

def getminsize(ui, assumelfiles, opt, default=10):
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, 'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise util.Abort(_('largefiles: size must be number (not %s)\n')
                             % lfsize)
    if lfsize is None:
        raise util.Abort(_('minimum size for largefiles must be specified'))
    return lfsize

def link(src, dest):
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fall back on atomic copy
        dst = util.atomictempfile(dest)
        for chunk in util.filechunkiter(open(src)):
            dst.write(chunk)
        dst.close()
        os.chmod(dest, os.stat(src).st_mode)

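util.oslink, util.atomictempfile and util.filechunkiter are Mercurial internals; a rough stdlib sketch of the same hardlink-then-copy idea follows (the copylink name is ours, and os.link assumes a POSIX platform):

    import os
    import shutil
    import tempfile

    def copylink(src, dest):
        # Hardlink when possible; otherwise stage a copy in a temp file
        # and rename it into place so readers never observe a partially
        # written dest (rename is atomic on POSIX).
        try:
            os.link(src, dest)
        except OSError:
            fd, tmp = tempfile.mkstemp(dir=os.path.dirname(dest) or '.')
            os.close(fd)
            shutil.copyfile(src, tmp)
            shutil.copymode(src, tmp)
            os.rename(tmp, dest)
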
def usercachepath(ui, hash):
    path = ui.configpath(longname, 'usercache', None)
    if path:
        path = os.path.join(path, hash)
    else:
        if os.name == 'nt':
            appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
-            path = os.path.join(appdata, longname, hash)
+            if appdata:
+                path = os.path.join(appdata, longname, hash)
        elif platform.system() == 'Darwin':
-            path = os.path.join(os.getenv('HOME'), 'Library', 'Caches',
-                                longname, hash)
+            home = os.getenv('HOME')
+            if home:
+                path = os.path.join(home, 'Library', 'Caches',
+                                    longname, hash)
        elif os.name == 'posix':
            path = os.getenv('XDG_CACHE_HOME')
            if path:
                path = os.path.join(path, longname, hash)
            else:
-                path = os.path.join(os.getenv('HOME'), '.cache', longname, hash)
+                home = os.getenv('HOME')
+                if home:
+                    path = os.path.join(home, '.cache', longname, hash)
        else:
            raise util.Abort(_('unknown operating system: %s\n') % os.name)
    return path

def inusercache(ui, hash):
-    return os.path.exists(usercachepath(ui, hash))
+    path = usercachepath(ui, hash)
+    return path and os.path.exists(path)

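This is the substance of the merge: usercachepath() may now return None when the relevant environment variable is unset, and inusercache() tolerates a None path. A minimal standalone sketch of the same lookup order, assuming a made-up cachedir() helper and application name:

    import os
    import platform

    def cachedir(appname):
        # Mirror the lookup order above; return None when the needed
        # environment variable is missing instead of crashing.
        if os.name == 'nt':
            appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
            return os.path.join(appdata, appname) if appdata else None
        if platform.system() == 'Darwin':
            home = os.getenv('HOME')
            return os.path.join(home, 'Library', 'Caches',
                                appname) if home else None
        xdg = os.getenv('XDG_CACHE_HOME')
        if xdg:
            return os.path.join(xdg, appname)
        home = os.getenv('HOME')
        return os.path.join(home, '.cache', appname) if home else None
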
def findfile(repo, hash):
    if instore(repo, hash):
        repo.ui.note(_('Found %s in store\n') % hash)
    elif inusercache(repo.ui, hash):
        repo.ui.note(_('Found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        util.makedirs(os.path.dirname(path))
        link(usercachepath(repo.ui, hash), path)
    else:
        return None
    return storepath(repo, hash)

class largefiles_dirstate(dirstate.dirstate):
    def __getitem__(self, key):
        return super(largefiles_dirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefiles_dirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefiles_dirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefiles_dirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefiles_dirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefiles_dirstate, self).forget(unixpath(f))

def openlfdirstate(ui, repo):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    admin = repo.join(longname)
    opener = scmutil.opener(admin)
    lfdirstate = largefiles_dirstate(opener, ui, repo.root,
                                     repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone. It also gives us an easy
    # way to forcibly rebuild largefiles state:
    #   rm .hg/largefiles/dirstate && hg status
    # Or even, if things are really messed up:
    #   rm -rf .hg/largefiles && hg status
    if not os.path.exists(os.path.join(admin, 'dirstate')):
        util.makedirs(admin)
        matcher = getstandinmatcher(repo)
        for standin in dirstate_walk(repo.dirstate, matcher):
            lfile = splitstandin(standin)
            hash = readstandin(repo, lfile)
            lfdirstate.normallookup(lfile)
            try:
                if hash == hashfile(repo.wjoin(lfile)):
                    lfdirstate.normal(lfile)
            except OSError, err:
                if err.errno != errno.ENOENT:
                    raise

        lfdirstate.write()

    return lfdirstate

def lfdirstate_status(lfdirstate, repo, rev):
    wlock = repo.wlock()
    try:
        match = match_.always(repo.root, repo.getcwd())
        s = lfdirstate.status(match, [], False, False, False)
        unsure, modified, added, removed, missing, unknown, ignored, clean = s
        for lfile in unsure:
            if repo[rev][standin(lfile)].data().strip() != \
                    hashfile(repo.wjoin(lfile)):
                modified.append(lfile)
            else:
                clean.append(lfile)
                lfdirstate.normal(lfile)
        lfdirstate.write()
    finally:
        wlock.release()
    return (modified, added, removed, missing, unknown, ignored, clean)

def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [splitstandin(f)
            for f in repo[rev].walk(matcher)
            if rev is not None or repo.dirstate[f] != '?']

def instore(repo, hash):
    return os.path.exists(storepath(repo, hash))

def storepath(repo, hash):
    return repo.join(os.path.join(longname, hash))

def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    path = findfile(repo, hash)
    if path is None:
        return False
    util.makedirs(os.path.dirname(repo.wjoin(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    shutil.copy(path, repo.wjoin(filename))
    return True

def copytostore(repo, rev, file, uploaded=False):
    hash = readstandin(repo, file)
    if instore(repo, hash):
        return
    copytostoreabsolute(repo, repo.wjoin(file), hash)

def copytostoreabsolute(repo, file, hash):
    util.makedirs(os.path.dirname(storepath(repo, hash)))
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        dst = util.atomictempfile(storepath(repo, hash))
        for chunk in util.filechunkiter(open(file)):
            dst.write(chunk)
        dst.close()
        util.copymode(file, storepath(repo, hash))
    linktousercache(repo, hash)

def linktousercache(repo, hash):
-    util.makedirs(os.path.dirname(usercachepath(repo.ui, hash)))
-    link(storepath(repo, hash), usercachepath(repo.ui, hash))
+    path = usercachepath(repo.ui, hash)
+    if path:
+        util.makedirs(os.path.dirname(path))
+        link(storepath(repo, hash), path)

def getstandinmatcher(repo, pats=[], opts={}):
    '''Return a match object that applies pats to the standin directory'''
    standindir = repo.pathto(shortname)
    if pats:
        # patterns supplied: search standin directory relative to current dir
        cwd = repo.getcwd()
        if os.path.isabs(cwd):
            # cwd is an absolute path for hg -R <reponame>
            # work relative to the repository root in this case
            cwd = ''
        pats = [os.path.join(standindir, cwd, pat) for pat in pats]
    elif os.path.isdir(standindir):
        # no patterns: relative to repo root
        pats = [standindir]
    else:
        # no patterns and no standin dir: return matcher that matches nothing
        match = match_.match(repo.root, None, [], exact=True)
        match.matchfn = lambda f: False
        return match
    return getmatcher(repo, pats, opts, showbad=False)

def getmatcher(repo, pats=[], opts={}, showbad=True):
    '''Wrapper around scmutil.match() that adds showbad: if false,
    neuter the match object's bad() method so it does not print any
    warnings about missing files or directories.'''
    match = scmutil.match(repo[None], pats, opts)

    if not showbad:
        match.bad = lambda f, msg: None
    return match

def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher.files())
    isstandin = smatcher.matchfn
    def composed_matchfn(f):
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composed_matchfn

    return smatcher

def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Most callers want an absolute path, but _create_standin() needs
    #    it repo-relative so lfadd() can pass it to repo_add(). So leave
    #    it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortname + '/' + filename.replace(os.sep, '/')

def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    return filename.startswith(shortname + '/')

def splitstandin(filename):
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = filename.replace(os.sep, '/').split('/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    else:
        return None

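standin() and splitstandin() are inverses over slash-normalized names, so a quick self-check needs nothing beyond the two functions and shortname:

    import os

    shortname = '.hglf'

    def standin(filename):
        return shortname + '/' + filename.replace(os.sep, '/')

    def splitstandin(filename):
        bits = filename.replace(os.sep, '/').split('/', 1)
        if len(bits) == 2 and bits[0] == shortname:
            return bits[1]
        return None

    assert standin('data/big.bin') == '.hglf/data/big.bin'
    assert splitstandin(standin('data/big.bin')) == 'data/big.bin'
    assert splitstandin('normal/file.txt') is None  # not a standin
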
def updatestandin(repo, standin):
    file = repo.wjoin(splitstandin(standin))
    if os.path.exists(file):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)

def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    return repo[node][standin(filename)].data().strip()

def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    writehash(hash, repo.wjoin(standin), executable)

def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Close outfile
    when done and return the binary hash.'''
    hasher = util.sha1('')
    for data in instream:
        hasher.update(data)
        outfile.write(data)

    # Blecch: closing a file that somebody else opened is rude and
    # wrong. But it's so darn convenient and practical! After all,
    # outfile was opened just to copy and hash.
    outfile.close()

    return hasher.digest()

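With plain hashlib the same read-once copy-and-hash loop looks like the sketch below; the real helper above additionally closes outfile and returns the binary rather than hex digest:

    import hashlib
    from io import BytesIO

    def copyandhash_sketch(instream, outfile):
        # Hash each chunk as it is written, so the data is read only once.
        hasher = hashlib.sha1()
        for data in instream:
            hasher.update(data)
            outfile.write(data)
        return hasher.hexdigest()

    assert (copyandhash_sketch([b'a', b'bc'], BytesIO())
            == 'a9993e364706816aba3e25717850c26c9cd0d89d')  # sha1('abc')
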
def hashrepofile(repo, file):
    return hashfile(repo.wjoin(file))

def hashfile(file):
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    for data in blockstream(fd):
        hasher.update(data)
    fd.close()
    return hasher.hexdigest()

class limitreader(object):
    def __init__(self, f, limit):
        self.f = f
        self.limit = limit

    def read(self, length):
        if self.limit == 0:
            return ''
        length = length > self.limit and self.limit or length
        self.limit -= length
        return self.f.read(length)

    def close(self):
        pass

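A short usage note for limitreader, run in the same module as the class above: reads are clamped to the remaining byte budget and then dry up (Python 2 semantics, where '' and b'' compare equal):

    from io import BytesIO

    reader = limitreader(BytesIO(b'0123456789' * 8), 40)
    assert reader.read(64) == b'0123456789' * 4  # request clamped to 40 bytes
    assert reader.read(64) == b''                # budget exhausted
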
def blockstream(infile, blocksize=128 * 1024):
    """Generator that yields blocks of data from infile and closes infile."""
    while True:
        data = infile.read(blocksize)
        if not data:
            break
        yield data
    # same blecch as copyandhash() above
    infile.close()

def readhash(filename):
    rfile = open(filename, 'rb')
    hash = rfile.read(40)
    rfile.close()
    if len(hash) < 40:
        raise util.Abort(_('bad hash in \'%s\' (only %d bytes long)')
                         % (filename, len(hash)))
    return hash

def writehash(hash, filename, executable):
    util.makedirs(os.path.dirname(filename))
    util.writefile(filename, hash + '\n')
    os.chmod(filename, getmode(executable))

def getexecutable(filename):
    mode = os.stat(filename).st_mode
    return ((mode & stat.S_IXUSR) and
            (mode & stat.S_IXGRP) and
            (mode & stat.S_IXOTH))

def getmode(executable):
    if executable:
        return 0755
    else:
        return 0644

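getmode() hard-codes Python 2 octal literals (0755/0644; Python 3 spells them 0o755/0o644). The same mapping spelled with stat constants, as an illustrative sketch:

    import stat

    def getmode_sketch(executable):
        # rwxr-xr-x for executables, rw-r--r-- otherwise.
        if executable:
            return (stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |
                    stat.S_IROTH | stat.S_IXOTH)      # 0o755
        return (stat.S_IRUSR | stat.S_IWUSR |
                stat.S_IRGRP | stat.S_IROTH)          # 0o644

    assert getmode_sketch(True) == 0o755
    assert getmode_sketch(False) == 0o644
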
def urljoin(first, second, *arg):
    def join(left, right):
        if not left.endswith('/'):
            left += '/'
        if right.startswith('/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for a in arg:
        url = join(url, a)
    return url

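Note that this urljoin() is a simple slash-normalizing join, not urlparse.urljoin; doubled or missing slashes at the seams are collapsed, as these checks (run alongside the function above) show:

    assert urljoin('http://example.com/', '/largefiles', 'abc123') == \
        'http://example.com/largefiles/abc123'
    assert urljoin('http://example.com', 'largefiles/', '/abc123') == \
        'http://example.com/largefiles/abc123'
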
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    h = util.sha1()
    for chunk in util.filechunkiter(data):
        h.update(chunk)
    return h.hexdigest()

def httpsendfile(ui, filename):
    return httpconnection.httpsendfile(ui, filename, 'rb')

def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    return os.path.normpath(path).replace(os.sep, '/')

def islfilesrepo(repo):
    return ('largefiles' in repo.requirements and
            util.any(shortname + '/' in f[0] for f in repo.store.datafiles()))

def mkstemp(repo, prefix):
    '''Returns a file descriptor and a filename corresponding to a temporary
    file in the repo's largefiles store.'''
    path = repo.join(longname)
    util.makedirs(path)
    return tempfile.mkstemp(prefix=prefix, dir=path)

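tempfile.mkstemp returns an OS-level descriptor plus a path, and the caller owns both; an illustrative use with a made-up prefix and directory:

    import os
    import tempfile

    fd, tmpname = tempfile.mkstemp(prefix='hg-largefile-', dir='.')
    try:
        os.write(fd, b'0123456789abcdef')  # stage the content
    finally:
        os.close(fd)
    os.remove(tmpname)  # or os.rename() into its final location
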
class storeprotonotcapable(Exception):
    def __init__(self, storetypes):
        self.storetypes = storetypes