##// END OF EJS Templates
largefiles: use XDG and OS X-specific cache locations by default (issue3067)
Benjamin Pollack -
r15320:681267a5 stable
parent child Browse files
Show More
@@ -1,443 +1,451 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import os
11 import os
12 import errno
12 import errno
13 import platform
13 import shutil
14 import shutil
14 import stat
15 import stat
15 import hashlib
16 import hashlib
16
17
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 from mercurial.i18n import _
19 from mercurial.i18n import _
19
20
# directory name (relative to the repo root) under which standin files live
shortname = '.hglf'
# name used for the repo-local store directory, the config section and
# the per-user cache subdirectory
longname = 'largefiles'


# -- Portability wrappers ----------------------------------------------
25
26
def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
    '''Portability wrapper: walk dirstate with matcher, forwarding the
    unknown and ignored flags (both off by default).'''
    return dirstate.walk(matcher, [], unknown, ignored)
28
29
def repo_add(repo, list):
    '''Schedule the named files for addition in the working context.'''
    return repo[None].add(list)
32
33
def repo_remove(repo, list, unlink=False):
    '''Forget the named files, also deleting them from the working
    directory when unlink is true; files already missing on disk are
    tolerated.'''
    def remove(list, unlink):
        wlock = repo.wlock()
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlinkpath(repo.wjoin(f))
                    except OSError, inst:
                        # already gone is fine; anything else is a real error
                        if inst.errno != errno.ENOENT:
                            raise
            repo[None].forget(list)
        finally:
            wlock.release()
    return remove(list, unlink=unlink)
48
49
def repo_forget(repo, list):
    '''Forget the named files in the working context.'''
    return repo[None].forget(list)
52
53
def findoutgoing(repo, remote, force):
    '''Return the local changesets not known to be present on remote.'''
    # imported locally, presumably to avoid an import cycle -- TODO confirm
    from mercurial import discovery
    common, _anyinc, _heads = discovery.findcommonincoming(repo,
        remote, force=force)
    return repo.changelog.findmissing(common)
58
59
59 # -- Private worker functions ------------------------------------------
60 # -- Private worker functions ------------------------------------------
60
61
def getminsize(ui, assumelfiles, opt, default=10):
    '''Return the minimum largefile size as a float: the opt value when
    given, otherwise the largefiles.minsize config value (only consulted
    when assumelfiles is true). Abort on a non-numeric or missing value.'''
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, 'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise util.Abort(_('largefiles: size must be number (not %s)\n')
                             % lfsize)
    if lfsize is None:
        raise util.Abort(_('minimum size for largefiles must be specified'))
    return lfsize
74 return lfsize
74
75
def link(src, dest):
    '''Hardlink src to dest, falling back to a plain copy (preserving
    the source's permission bits) when hardlinking fails.'''
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on copy
        shutil.copyfile(src, dest)
        os.chmod(dest, os.stat(src).st_mode)
82
83
def usercachepath(ui, hash):
    '''Return the per-user cache path for the largefile with the given
    hash: the configured largefiles.usercache if set, otherwise a
    platform-specific default location (issue3067).'''
    path = ui.config(longname, 'usercache', None)
    if path:
        path = os.path.join(path, hash)
    else:
        if os.name == 'nt':
            # prefer the per-user local appdata dir, fall back to roaming
            appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
            path = os.path.join(appdata, longname, hash)
        elif platform.system() == 'Darwin':
            # OS X convention: ~/Library/Caches
            path = os.path.join(os.getenv('HOME'), 'Library', 'Caches',
                                longname, hash)
        elif os.name == 'posix':
            # honor the XDG base directory spec, defaulting to ~/.cache
            path = os.getenv('XDG_CACHE_HOME')
            if path:
                path = os.path.join(path, longname, hash)
            else:
                # NOTE(review): os.getenv('HOME') may be None in stripped
                # environments, which would make join raise -- confirm
                path = os.path.join(os.getenv('HOME'), '.cache', longname,
                                    hash)
        else:
            raise util.Abort(_('unknown operating system: %s\n') % os.name)
    return path
96
104
def inusercache(ui, hash):
    '''Report whether the largefile with the given hash is present in
    the per-user cache.'''
    return os.path.exists(usercachepath(ui, hash))
99
107
def findfile(repo, hash):
    '''Return the store path of the largefile with the given hash,
    populating the repo-local store from the user cache (hardlink when
    possible) if necessary; return None when the file is in neither
    cache.'''
    if instore(repo, hash):
        repo.ui.note(_('Found %s in store\n') % hash)
    elif inusercache(repo.ui, hash):
        repo.ui.note(_('Found %s in system cache\n') % hash)
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        return None
    return storepath(repo, hash)
109
117
class largefiles_dirstate(dirstate.dirstate):
    '''dirstate subclass that normalizes every incoming path to the
    '/'-separated form used internally (see unixpath()) before
    delegating to the base class.'''
    def __getitem__(self, key):
        return super(largefiles_dirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefiles_dirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefiles_dirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefiles_dirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefiles_dirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefiles_dirstate, self).forget(unixpath(f))
123
131
def openlfdirstate(ui, repo):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    admin = repo.join(longname)
    opener = scmutil.opener(admin)
    # some dirstate implementations take a validate callback; feature-test
    # so we work with either signature
    if util.safehasattr(repo.dirstate, '_validate'):
        lfdirstate = largefiles_dirstate(opener, ui, repo.root,
                repo.dirstate._validate)
    else:
        lfdirstate = largefiles_dirstate(opener, ui, repo.root)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone. It also gives us an easy
    # way to forcibly rebuild largefiles state:
    #   rm .hg/largefiles/dirstate && hg status
    # Or even, if things are really messed up:
    #   rm -rf .hg/largefiles && hg status
    if not os.path.exists(os.path.join(admin, 'dirstate')):
        util.makedirs(admin)
        matcher = getstandinmatcher(repo)
        for standin in dirstate_walk(repo.dirstate, matcher):
            lfile = splitstandin(standin)
            hash = readstandin(repo, lfile)
            lfdirstate.normallookup(lfile)
            try:
                # NOTE(review): hashfile() gets a repo-relative path here,
                # while other callers pass repo.wjoin()-ed absolute paths;
                # confirm this is correct when cwd != repo root
                if hash == hashfile(lfile):
                    lfdirstate.normal(lfile)
            except IOError, err:
                if err.errno != errno.ENOENT:
                    raise
        lfdirstate.write()

    return lfdirstate
161
169
def lfdirstate_status(lfdirstate, repo, rev):
    '''Compute largefile status against revision rev, resolving "unsure"
    entries by re-hashing the working copy; return the usual 7-tuple of
    status lists (without the unsure list).'''
    wlock = repo.wlock()
    try:
        match = match_.always(repo.root, repo.getcwd())
        s = lfdirstate.status(match, [], False, False, False)
        unsure, modified, added, removed, missing, unknown, ignored, clean = s
        for lfile in unsure:
            # compare the hash recorded in the standin with the actual
            # working-copy content
            if repo[rev][standin(lfile)].data().strip() != \
                    hashfile(repo.wjoin(lfile)):
                modified.append(lfile)
            else:
                clean.append(lfile)
                lfdirstate.normal(lfile)
        lfdirstate.write()
    finally:
        wlock.release()
    return (modified, added, removed, missing, unknown, ignored, clean)
179
187
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [splitstandin(f)
            for f in repo[rev].walk(matcher)
            if rev is not None or repo.dirstate[f] != '?']
191
199
def instore(repo, hash):
    '''Report whether the largefile with the given hash exists in the
    repo-local store.'''
    return os.path.exists(storepath(repo, hash))
194
202
195 def createdir(dir):
203 def createdir(dir):
196 if not os.path.exists(dir):
204 if not os.path.exists(dir):
197 os.makedirs(dir)
205 os.makedirs(dir)
198
206
def storepath(repo, hash):
    '''Return the path of the largefile with the given hash inside the
    repo-local store (.hg/largefiles).'''
    return repo.join(os.path.join(longname, hash))
201
209
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    path = findfile(repo, hash)
    if path is None:
        return False
    util.makedirs(os.path.dirname(repo.wjoin(filename)))
    shutil.copy(path, repo.wjoin(filename))
    return True
214
222
def copytostore(repo, rev, file, uploaded=False):
    '''Copy the working-copy largefile named by its standin into the
    store unless its hash is already present. The rev and uploaded
    parameters are currently unused here.'''
    hash = readstandin(repo, file)
    if instore(repo, hash):
        return
    copytostoreabsolute(repo, repo.wjoin(file), hash)
220
228
def copytostoreabsolute(repo, file, hash):
    '''Place the largefile at absolute path file into the repo store
    under its hash, hardlinking from the user cache when the hash is
    already cached, and finally mirror the store entry back into the
    user cache.'''
    createdir(os.path.dirname(storepath(repo, hash)))
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        shutil.copyfile(file, storepath(repo, hash))
        os.chmod(storepath(repo, hash), os.stat(file).st_mode)
    linktousercache(repo, hash)
229
237
def linktousercache(repo, hash):
    '''Hardlink (or copy) the stored largefile into the per-user cache.'''
    createdir(os.path.dirname(usercachepath(repo.ui, hash)))
    link(storepath(repo, hash), usercachepath(repo.ui, hash))
233
241
def getstandinmatcher(repo, pats=None, opts=None):
    '''Return a match object that applies pats to the standin directory.

    pats and opts default to None instead of the mutable literals
    [] / {} to avoid the shared-mutable-default pitfall; behavior is
    otherwise unchanged.'''
    pats = pats or []
    opts = opts or {}
    standindir = repo.pathto(shortname)
    if pats:
        # patterns supplied: search standin directory relative to current dir
        cwd = repo.getcwd()
        if os.path.isabs(cwd):
            # cwd is an absolute path for hg -R <reponame>
            # work relative to the repository root in this case
            cwd = ''
        pats = [os.path.join(standindir, cwd, pat) for pat in pats]
    elif os.path.isdir(standindir):
        # no patterns: relative to repo root
        pats = [standindir]
    else:
        # no patterns and no standin dir: return matcher that matches nothing
        match = match_.match(repo.root, None, [], exact=True)
        match.matchfn = lambda f: False
        return match
    return getmatcher(repo, pats, opts, showbad=False)
254
262
def getmatcher(repo, pats=None, opts=None, showbad=True):
    '''Wrapper around scmutil.match() that adds showbad: if false,
    neuter the match object's bad() method so it does not print any
    warnings about missing files or directories.

    pats and opts default to None instead of the mutable literals
    [] / {} to avoid the shared-mutable-default pitfall; behavior is
    otherwise unchanged.'''
    match = scmutil.match(repo[None], pats or [], opts or {})

    if not showbad:
        match.bad = lambda f, msg: None
    return match
264
272
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher.files())
    isstandin = smatcher.matchfn
    def composed_matchfn(f):
        # accept a standin only if its largefile name matches rmatcher
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composed_matchfn

    return smatcher
276
284
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Most callers want an absolute path, but _create_standin() needs
    #    it repo-relative so lfadd() can pass it to repo_add().  So leave
    #    it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortname + '/' + filename.replace(os.sep, '/')
288
296
def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    return filename.startswith(shortname + '/')
293
301
def splitstandin(filename):
    '''Return the largefile name for the given standin path, or None if
    the path is not a standin.'''
    # dirstate always uses '/', even on Windows; normalize external input
    # (e.g. command-line paths) before inspecting it
    normalized = filename.replace(os.sep, '/')
    prefix, slash, rest = normalized.partition('/')
    if slash and prefix == shortname:
        return rest
    return None
303
311
def updatestandin(repo, standin):
    '''Rewrite standin with the current hash and executable bit of its
    largefile; do nothing if the largefile is absent from the working
    copy.'''
    file = repo.wjoin(splitstandin(standin))
    if os.path.exists(file):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
310
318
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    return repo[node][standin(filename)].data().strip()
315
323
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    writehash(hash, repo.wjoin(standin), executable)
319
327
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Close outfile
    when done and return the binary hash.'''
    hasher = util.sha1('')
    for data in instream:
        hasher.update(data)
        outfile.write(data)

    # Blecch: closing a file that somebody else opened is rude and
    # wrong. But it's so darn convenient and practical! After all,
    # outfile was opened just to copy and hash.
    outfile.close()

    return hasher.digest()
335
343
def hashrepofile(repo, file):
    '''Return the hex SHA-1 hash of the working copy of file.'''
    return hashfile(repo.wjoin(file))
338
346
def hashfile(file):
    '''Return the hex SHA-1 digest of file's contents, or the empty
    string when the file does not exist.'''
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    for data in blockstream(fd):
        hasher.update(data)
    # blockstream() already closes fd on exhaustion; this extra close is
    # harmless and keeps the ownership obvious
    fd.close()
    return hasher.hexdigest()
348
356
class limitreader(object):
    '''File-like wrapper that caps the total number of bytes read from
    the underlying file object f at limit.'''
    def __init__(self, f, limit):
        self.f = f
        self.limit = limit

    def read(self, length):
        '''Read up to length bytes, never exceeding the remaining limit;
        return the empty string once the limit is exhausted.'''
        if self.limit == 0:
            return ''
        if length > self.limit:
            length = self.limit
        self.limit -= length
        return self.f.read(length)

    def close(self):
        '''Deliberately a no-op: the caller owns the underlying file.'''
        pass
363
371
def blockstream(infile, blocksize=128 * 1024):
    """Generator that yields blocks of data from infile and closes infile."""
    block = infile.read(blocksize)
    while block:
        yield block
        block = infile.read(blocksize)
    # same blecch as copyandhash() above: we close a file we did not
    # open, because callers open it purely to stream it through here
    infile.close()
373
381
def readhash(filename):
    '''Return the 40-byte hex hash stored in filename; abort when the
    file holds fewer than 40 bytes.'''
    fobj = open(filename, 'rb')
    content = fobj.read(40)
    fobj.close()
    if len(content) < 40:
        raise util.Abort(_('bad hash in \'%s\' (only %d bytes long)')
                         % (filename, len(content)))
    return content
382
390
def writehash(hash, filename, executable):
    '''Write hash plus a trailing newline to filename, creating parent
    directories as needed and setting the permission bits according to
    executable (see getmode()).'''
    util.makedirs(os.path.dirname(filename))
    # replace any existing file so stale modes/content never survive
    if os.path.exists(filename):
        os.unlink(filename)
    wfile = open(filename, 'wb')

    try:
        wfile.write(hash)
        wfile.write('\n')
    finally:
        wfile.close()
    if os.path.exists(filename):
        os.chmod(filename, getmode(executable))
396
404
def getexecutable(filename):
    '''Return a truthy value iff filename is executable by user, group
    and other (all three x bits set).'''
    st_mode = os.stat(filename).st_mode
    return ((st_mode & stat.S_IXUSR) and
            (st_mode & stat.S_IXGRP) and
            (st_mode & stat.S_IXOTH))
402
410
def getmode(executable):
    '''Return the permission bits to give a largefile/standin: rwxr-xr-x
    when executable, rw-r--r-- otherwise.'''
    return 0o755 if executable else 0o644
408
416
def urljoin(first, second, *arg):
    '''Join two or more URL components, ensuring exactly one slash
    between consecutive components (only a single leading slash is
    stripped from each right-hand component).'''
    def _join(lhs, rhs):
        if not lhs.endswith('/'):
            lhs += '/'
        if rhs.startswith('/'):
            rhs = rhs[1:]
        return lhs + rhs

    url = first
    for piece in (second,) + arg:
        url = _join(url, piece)
    return url
421
429
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    h = hashlib.sha1()
    for chunk in util.filechunkiter(data):
        h.update(chunk)
    return h.hexdigest()
429
437
def httpsendfile(ui, filename):
    '''Return an httpconnection.httpsendfile for filename opened in
    binary-read mode.'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
432
440
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    # normpath collapses '..'/'.' segments; then force '/' separators
    return os.path.normpath(path).replace(os.sep, '/')
436
444
def islfilesrepo(repo):
    '''Report whether repo actually uses largefiles: the requirement is
    declared and at least one store data file lives under the standin
    prefix.'''
    return ('largefiles' in repo.requirements and
            util.any(shortname + '/' in f[0] for f in repo.store.datafiles()))
440
448
class storeprotonotcapable(BaseException):
    '''Raised when a remote store supports none of the requested store
    types.

    NOTE(review): derives from BaseException rather than Exception,
    presumably so broad "except Exception" handlers do not swallow it
    -- confirm before changing.'''
    def __init__(self, storetypes):
        # the store types that could not be satisfied
        self.storetypes = storetypes
General Comments 0
You need to be logged in to leave comments. Login now