largefiles: py2.4 doesn't have BaseException...
Matt Mackall
r15333:f37b71fe stable
@@ -1,451 +1,451 @@
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''largefiles utility code: must not import other modules in this package.'''

import os
import errno
import platform
import shutil
import stat
import hashlib

from mercurial import dirstate, httpconnection, match as match_, util, scmutil
from mercurial.i18n import _

shortname = '.hglf'
longname = 'largefiles'


# -- Portability wrappers ----------------------------------------------

def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
    return dirstate.walk(matcher, [], unknown, ignored)

def repo_add(repo, list):
    add = repo[None].add
    return add(list)

def repo_remove(repo, list, unlink=False):
    def remove(list, unlink):
        wlock = repo.wlock()
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlinkpath(repo.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            repo[None].forget(list)
        finally:
            wlock.release()
    return remove(list, unlink=unlink)

def repo_forget(repo, list):
    forget = repo[None].forget
    return forget(list)

def findoutgoing(repo, remote, force):
    from mercurial import discovery
    common, _anyinc, _heads = discovery.findcommonincoming(repo,
        remote, force=force)
    return repo.changelog.findmissing(common)

# -- Private worker functions ------------------------------------------

def getminsize(ui, assumelfiles, opt, default=10):
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, 'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise util.Abort(_('largefiles: size must be number (not %s)\n')
                             % lfsize)
    if lfsize is None:
        raise util.Abort(_('minimum size for largefiles must be specified'))
    return lfsize

def link(src, dest):
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on copy
        shutil.copyfile(src, dest)
        os.chmod(dest, os.stat(src).st_mode)

def usercachepath(ui, hash):
    path = ui.config(longname, 'usercache', None)
    if path:
        path = os.path.join(path, hash)
    else:
        if os.name == 'nt':
            appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
            path = os.path.join(appdata, longname, hash)
        elif platform.system() == 'Darwin':
            path = os.path.join(os.getenv('HOME'), 'Library', 'Caches',
                                longname, hash)
        elif os.name == 'posix':
            path = os.getenv('XDG_CACHE_HOME')
            if path:
                path = os.path.join(path, longname, hash)
            else:
                path = os.path.join(os.getenv('HOME'), '.cache', longname, hash)
        else:
            raise util.Abort(_('unknown operating system: %s\n') % os.name)
    return path

def inusercache(ui, hash):
    return os.path.exists(usercachepath(ui, hash))

def findfile(repo, hash):
    if instore(repo, hash):
        repo.ui.note(_('Found %s in store\n') % hash)
    elif inusercache(repo.ui, hash):
        repo.ui.note(_('Found %s in system cache\n') % hash)
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        return None
    return storepath(repo, hash)

class largefiles_dirstate(dirstate.dirstate):
    def __getitem__(self, key):
        return super(largefiles_dirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefiles_dirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefiles_dirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefiles_dirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefiles_dirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefiles_dirstate, self).forget(unixpath(f))

def openlfdirstate(ui, repo):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    admin = repo.join(longname)
    opener = scmutil.opener(admin)
    if util.safehasattr(repo.dirstate, '_validate'):
        lfdirstate = largefiles_dirstate(opener, ui, repo.root,
                                         repo.dirstate._validate)
    else:
        lfdirstate = largefiles_dirstate(opener, ui, repo.root)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone. It also gives us an easy
    # way to forcibly rebuild largefiles state:
    #   rm .hg/largefiles/dirstate && hg status
    # Or even, if things are really messed up:
    #   rm -rf .hg/largefiles && hg status
    if not os.path.exists(os.path.join(admin, 'dirstate')):
        util.makedirs(admin)
        matcher = getstandinmatcher(repo)
        for standin in dirstate_walk(repo.dirstate, matcher):
            lfile = splitstandin(standin)
            hash = readstandin(repo, lfile)
            lfdirstate.normallookup(lfile)
            try:
                if hash == hashfile(lfile):
                    lfdirstate.normal(lfile)
            except IOError, err:
                if err.errno != errno.ENOENT:
                    raise

        lfdirstate.write()

    return lfdirstate

def lfdirstate_status(lfdirstate, repo, rev):
    wlock = repo.wlock()
    try:
        match = match_.always(repo.root, repo.getcwd())
        s = lfdirstate.status(match, [], False, False, False)
        unsure, modified, added, removed, missing, unknown, ignored, clean = s
        for lfile in unsure:
            if repo[rev][standin(lfile)].data().strip() != \
                    hashfile(repo.wjoin(lfile)):
                modified.append(lfile)
            else:
                clean.append(lfile)
                lfdirstate.normal(lfile)
        lfdirstate.write()
    finally:
        wlock.release()
    return (modified, added, removed, missing, unknown, ignored, clean)

def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [splitstandin(f)
            for f in repo[rev].walk(matcher)
            if rev is not None or repo.dirstate[f] != '?']

def instore(repo, hash):
    return os.path.exists(storepath(repo, hash))

def createdir(dir):
    if not os.path.exists(dir):
        os.makedirs(dir)

def storepath(repo, hash):
    return repo.join(os.path.join(longname, hash))

def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happened:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    path = findfile(repo, hash)
    if path is None:
        return False
    util.makedirs(os.path.dirname(repo.wjoin(filename)))
    shutil.copy(path, repo.wjoin(filename))
    return True

def copytostore(repo, rev, file, uploaded=False):
    hash = readstandin(repo, file)
    if instore(repo, hash):
        return
    copytostoreabsolute(repo, repo.wjoin(file), hash)

def copytostoreabsolute(repo, file, hash):
    createdir(os.path.dirname(storepath(repo, hash)))
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        shutil.copyfile(file, storepath(repo, hash))
        os.chmod(storepath(repo, hash), os.stat(file).st_mode)
        linktousercache(repo, hash)

def linktousercache(repo, hash):
    createdir(os.path.dirname(usercachepath(repo.ui, hash)))
    link(storepath(repo, hash), usercachepath(repo.ui, hash))

def getstandinmatcher(repo, pats=[], opts={}):
    '''Return a match object that applies pats to the standin directory'''
    standindir = repo.pathto(shortname)
    if pats:
        # patterns supplied: search standin directory relative to current dir
        cwd = repo.getcwd()
        if os.path.isabs(cwd):
            # cwd is an absolute path for hg -R <reponame>
            # work relative to the repository root in this case
            cwd = ''
        pats = [os.path.join(standindir, cwd, pat) for pat in pats]
    elif os.path.isdir(standindir):
        # no patterns: relative to repo root
        pats = [standindir]
    else:
        # no patterns and no standin dir: return matcher that matches nothing
        match = match_.match(repo.root, None, [], exact=True)
        match.matchfn = lambda f: False
        return match
    return getmatcher(repo, pats, opts, showbad=False)

def getmatcher(repo, pats=[], opts={}, showbad=True):
    '''Wrapper around scmutil.match() that adds showbad: if false,
    neuter the match object's bad() method so it does not print any
    warnings about missing files or directories.'''
    match = scmutil.match(repo[None], pats, opts)

    if not showbad:
        match.bad = lambda f, msg: None
    return match

def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher.files())
    isstandin = smatcher.matchfn
    def composed_matchfn(f):
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composed_matchfn

    return smatcher

def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Most callers want an absolute path, but _create_standin() needs
    #    it repo-relative so lfadd() can pass it to repo_add(). So leave
    #    it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortname + '/' + filename.replace(os.sep, '/')

def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    return filename.startswith(shortname + '/')

def splitstandin(filename):
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = filename.replace(os.sep, '/').split('/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    else:
        return None

def updatestandin(repo, standin):
    file = repo.wjoin(splitstandin(standin))
    if os.path.exists(file):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)

def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    return repo[node][standin(filename)].data().strip()

def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    writehash(hash, repo.wjoin(standin), executable)

def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Close outfile
    when done and return the binary hash.'''
    hasher = util.sha1('')
    for data in instream:
        hasher.update(data)
        outfile.write(data)

    # Blecch: closing a file that somebody else opened is rude and
    # wrong. But it's so darn convenient and practical! After all,
    # outfile was opened just to copy and hash.
    outfile.close()

    return hasher.digest()

def hashrepofile(repo, file):
    return hashfile(repo.wjoin(file))

def hashfile(file):
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    for data in blockstream(fd):
        hasher.update(data)
    fd.close()
    return hasher.hexdigest()

class limitreader(object):
    def __init__(self, f, limit):
        self.f = f
        self.limit = limit

    def read(self, length):
        if self.limit == 0:
            return ''
        length = length > self.limit and self.limit or length
        self.limit -= length
        return self.f.read(length)

    def close(self):
        pass

def blockstream(infile, blocksize=128 * 1024):
    """Generator that yields blocks of data from infile and closes infile."""
    while True:
        data = infile.read(blocksize)
        if not data:
            break
        yield data
    # same blecch as copyandhash() above
    infile.close()

def readhash(filename):
    rfile = open(filename, 'rb')
    hash = rfile.read(40)
    rfile.close()
    if len(hash) < 40:
        raise util.Abort(_('bad hash in \'%s\' (only %d bytes long)')
                         % (filename, len(hash)))
    return hash

def writehash(hash, filename, executable):
    util.makedirs(os.path.dirname(filename))
    if os.path.exists(filename):
        os.unlink(filename)
    wfile = open(filename, 'wb')

    try:
        wfile.write(hash)
        wfile.write('\n')
    finally:
        wfile.close()
    if os.path.exists(filename):
        os.chmod(filename, getmode(executable))

def getexecutable(filename):
    mode = os.stat(filename).st_mode
    return ((mode & stat.S_IXUSR) and
            (mode & stat.S_IXGRP) and
            (mode & stat.S_IXOTH))

def getmode(executable):
    if executable:
        return 0755
    else:
        return 0644

def urljoin(first, second, *arg):
    def join(left, right):
        if not left.endswith('/'):
            left += '/'
        if right.startswith('/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for a in arg:
        url = join(url, a)
    return url

def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    h = hashlib.sha1()
    for chunk in util.filechunkiter(data):
        h.update(chunk)
    return h.hexdigest()

def httpsendfile(ui, filename):
    return httpconnection.httpsendfile(ui, filename, 'rb')

def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    return os.path.normpath(path).replace(os.sep, '/')

def islfilesrepo(repo):
    return ('largefiles' in repo.requirements and
            util.any(shortname + '/' in f[0] for f in repo.store.datafiles()))

-class storeprotonotcapable(BaseException):
+class storeprotonotcapable(Exception):
    def __init__(self, storetypes):
        self.storetypes = storetypes
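
The entire change is the base class of storeprotonotcapable. BaseException was only introduced in Python 2.5, so on Python 2.4 the name is undefined and the old class statement aborts the module with a NameError at import time; subclassing Exception works on 2.4 as well, and callers that catch this exception by name are unaffected. A minimal sketch of the failure mode (the _probe class and the message are illustrative, not part of the commit):

import sys

# On Python 2.4 the class statement itself raises NameError, because
# the BaseException builtin does not exist yet (it arrived in 2.5).
try:
    class _probe(BaseException):
        pass
except NameError:
    print >> sys.stderr, 'no BaseException on this interpreter'

# Portable spelling, valid on Python 2.4 and later -- what the commit uses:
class storeprotonotcapable(Exception):
    def __init__(self, storetypes):
        self.storetypes = storetypes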