##// END OF EJS Templates
largefiles: use ui.configpath() where appropriate
Greg Ward -
r15350:8b8dd132 stable
parent child Browse files
Show More
@@ -1,447 +1,447 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import os
11 import os
12 import errno
12 import errno
13 import platform
13 import platform
14 import shutil
14 import shutil
15 import stat
15 import stat
16
16
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 from mercurial.i18n import _
18 from mercurial.i18n import _
19
19
20 shortname = '.hglf'
20 shortname = '.hglf'
21 longname = 'largefiles'
21 longname = 'largefiles'
22
22
23
23
24 # -- Portability wrappers ----------------------------------------------
24 # -- Portability wrappers ----------------------------------------------
25
25
def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
    '''Portability wrapper: walk dirstate with the given matcher,
    optionally including unknown and ignored files.'''
    walk = dirstate.walk
    return walk(matcher, [], unknown, ignored)
28
28
def repo_add(repo, list):
    '''Schedule the named files for addition in the working context.'''
    wctx = repo[None]
    return wctx.add(list)
32
32
33 def repo_remove(repo, list, unlink=False):
33 def repo_remove(repo, list, unlink=False):
34 def remove(list, unlink):
34 def remove(list, unlink):
35 wlock = repo.wlock()
35 wlock = repo.wlock()
36 try:
36 try:
37 if unlink:
37 if unlink:
38 for f in list:
38 for f in list:
39 try:
39 try:
40 util.unlinkpath(repo.wjoin(f))
40 util.unlinkpath(repo.wjoin(f))
41 except OSError, inst:
41 except OSError, inst:
42 if inst.errno != errno.ENOENT:
42 if inst.errno != errno.ENOENT:
43 raise
43 raise
44 repo[None].forget(list)
44 repo[None].forget(list)
45 finally:
45 finally:
46 wlock.release()
46 wlock.release()
47 return remove(list, unlink=unlink)
47 return remove(list, unlink=unlink)
48
48
def repo_forget(repo, list):
    '''Stop tracking the named files without removing them.'''
    wctx = repo[None]
    return wctx.forget(list)
52
52
def findoutgoing(repo, remote, force):
    '''Return the changelog nodes present locally but missing from
    remote.'''
    from mercurial import discovery
    common, _anyinc, _heads = discovery.findcommonincoming(
        repo, remote, force=force)
    return repo.changelog.findmissing(common)
58
58
59 # -- Private worker functions ------------------------------------------
59 # -- Private worker functions ------------------------------------------
60
60
def getminsize(ui, assumelfiles, opt, default=10):
    '''Return the minimum size (in MB) above which a file is treated
    as a largefile, from the command-line option, the [largefiles]
    minsize config, or the default. Abort on a non-numeric or missing
    value.'''
    lfsize = opt
    if not lfsize and assumelfiles:
        # fall back to the configured (or default) minimum size
        lfsize = ui.config(longname, 'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise util.Abort(_('largefiles: size must be number (not %s)\n')
                             % lfsize)
    if lfsize is None:
        raise util.Abort(_('minimum size for largefiles must be specified'))
    return lfsize
74
74
def link(src, dest):
    '''Hard-link src to dest, falling back to a plain copy (preserving
    the permission bits) when hard-linking fails.'''
    try:
        util.oslink(src, dest)
    except OSError:
        # hardlink failed (cross-device, unsupported fs, ...): copy and
        # carry the source's mode over to the destination
        shutil.copyfile(src, dest)
        os.chmod(dest, os.stat(src).st_mode)
82
82
def usercachepath(ui, hash):
    '''Return the per-user cache path for the largefile with the given
    hash, honouring the [largefiles] usercache setting and otherwise
    using the platform's conventional cache location.'''
    path = ui.configpath(longname, 'usercache', None)
    if path:
        return os.path.join(path, hash)
    if os.name == 'nt':
        appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
        return os.path.join(appdata, longname, hash)
    if platform.system() == 'Darwin':
        return os.path.join(os.getenv('HOME'), 'Library', 'Caches',
                            longname, hash)
    if os.name == 'posix':
        base = os.getenv('XDG_CACHE_HOME')
        if base:
            return os.path.join(base, longname, hash)
        return os.path.join(os.getenv('HOME'), '.cache', longname, hash)
    raise util.Abort(_('unknown operating system: %s\n') % os.name)
103
103
def inusercache(ui, hash):
    '''Report whether hash is already present in the user-level cache.'''
    cached = usercachepath(ui, hash)
    return os.path.exists(cached)
106
106
def findfile(repo, hash):
    '''Return the store path of the largefile with the given hash,
    linking it into the store from the user cache when necessary, or
    None when it is in neither cache.'''
    if instore(repo, hash):
        repo.ui.note(_('Found %s in store\n') % hash)
        return storepath(repo, hash)
    if inusercache(repo.ui, hash):
        repo.ui.note(_('Found %s in system cache\n') % hash)
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
        return storepath(repo, hash)
    return None
116
116
class largefiles_dirstate(dirstate.dirstate):
    '''dirstate subclass whose entries are keyed by slash-separated
    (unix-style) paths: every incoming path is normalized with
    unixpath() before being passed to the base class.'''
    def __getitem__(self, key):
        return super(largefiles_dirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefiles_dirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefiles_dirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefiles_dirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefiles_dirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefiles_dirstate, self).forget(unixpath(f))
130
130
def openlfdirstate(ui, repo):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    admin = repo.join(longname)
    opener = scmutil.opener(admin)
    lfdirstate = largefiles_dirstate(opener, ui, repo.root,
                                     repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone. It also gives us an easy
    # way to forcibly rebuild largefiles state:
    #   rm .hg/largefiles/dirstate && hg status
    # Or even, if things are really messed up:
    #   rm -rf .hg/largefiles && hg status
    if not os.path.exists(os.path.join(admin, 'dirstate')):
        util.makedirs(admin)
        matcher = getstandinmatcher(repo)
        # seed the largefiles dirstate from the standins tracked by the
        # main dirstate
        for standin in dirstate_walk(repo.dirstate, matcher):
            lfile = splitstandin(standin)
            hash = readstandin(repo, lfile)
            lfdirstate.normallookup(lfile)
            try:
                # mark clean only when the working copy still matches
                # the hash recorded in the standin
                # NOTE(review): lfile is repo-relative; hashfile may
                # need repo.wjoin(lfile) unless cwd is the repo root --
                # verify against callers
                if hash == hashfile(lfile):
                    lfdirstate.normal(lfile)
            except IOError, err:
                if err.errno != errno.ENOENT:
                    raise

        lfdirstate.write()

    return lfdirstate
165
165
def lfdirstate_status(lfdirstate, repo, rev):
    '''Return the status of tracked largefiles against revision rev,
    resolving "unsure" entries by re-hashing the working copy and
    updating the largefiles dirstate accordingly.'''
    wlock = repo.wlock()
    try:
        match = match_.always(repo.root, repo.getcwd())
        (unsure, modified, added, removed, missing, unknown,
         ignored, clean) = lfdirstate.status(match, [], False, False, False)
        for lfile in unsure:
            standinhash = repo[rev][standin(lfile)].data().strip()
            if standinhash != hashfile(repo.wjoin(lfile)):
                modified.append(lfile)
            else:
                clean.append(lfile)
                lfdirstate.normal(lfile)
        lfdirstate.write()
    finally:
        wlock.release()
    return (modified, added, removed, missing, unknown, ignored, clean)
183
183
def listlfiles(repo, rev=None, matcher=None):
    '''Return a list of largefiles in the working copy or the
    specified changeset.'''
    if matcher is None:
        matcher = getstandinmatcher(repo)

    lfiles = []
    for f in repo[rev].walk(matcher):
        # in the working directory (rev is None), skip unknown files
        if rev is not None or repo.dirstate[f] != '?':
            lfiles.append(splitstandin(f))
    return lfiles
195
195
def instore(repo, hash):
    '''True when the repo-local largefile store already contains hash.'''
    stored = storepath(repo, hash)
    return os.path.exists(stored)
198
198
def createdir(dir):
    '''Ensure that directory dir exists, creating parents as needed.'''
    if os.path.exists(dir):
        return
    os.makedirs(dir)
202
202
def storepath(repo, hash):
    '''Absolute path of hash inside the repo-local largefile store.'''
    relpath = os.path.join(longname, hash)
    return repo.join(relpath)
205
205
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo store or user cache
    to filename in the working directory. Return True on success,
    False when the file is in neither cache (which should not happen:
    callers ensure the largefile exists beforehand).'''
    cached = findfile(repo, hash)
    if cached is None:
        return False
    absdest = repo.wjoin(filename)
    util.makedirs(os.path.dirname(absdest))
    shutil.copy(cached, absdest)
    return True
218
218
def copytostore(repo, rev, file, uploaded=False):
    '''Copy the largefile standing in for file into the repo store,
    unless it is already present. (rev and uploaded are unused here;
    presumably kept for interface compatibility -- verify callers.)'''
    hash = readstandin(repo, file)
    if not instore(repo, hash):
        copytostoreabsolute(repo, repo.wjoin(file), hash)
224
224
def copytostoreabsolute(repo, file, hash):
    '''Place the largefile at absolute path file into the repo store
    under hash: hardlink it from the user cache when possible,
    otherwise copy it (preserving mode) and link the store copy back
    into the user cache.'''
    dest = storepath(repo, hash)
    createdir(os.path.dirname(dest))
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), dest)
    else:
        shutil.copyfile(file, dest)
        os.chmod(dest, os.stat(file).st_mode)
        linktousercache(repo, hash)
233
233
def linktousercache(repo, hash):
    '''Hardlink the store copy of hash into the per-user cache.'''
    dest = usercachepath(repo.ui, hash)
    createdir(os.path.dirname(dest))
    link(storepath(repo, hash), dest)
237
237
def getstandinmatcher(repo, pats=None, opts=None):
    '''Return a match object that applies pats to the standin directory.

    Fix: the defaults were the mutable literals [] and {}, which Python
    shares across all calls; use None sentinels instead.'''
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    standindir = repo.pathto(shortname)
    if pats:
        # patterns supplied: search standin directory relative to current dir
        cwd = repo.getcwd()
        if os.path.isabs(cwd):
            # cwd is an absolute path for hg -R <reponame>
            # work relative to the repository root in this case
            cwd = ''
        pats = [os.path.join(standindir, cwd, pat) for pat in pats]
    elif os.path.isdir(standindir):
        # no patterns: relative to repo root
        pats = [standindir]
    else:
        # no patterns and no standin dir: return matcher that matches nothing
        match = match_.match(repo.root, None, [], exact=True)
        match.matchfn = lambda f: False
        return match
    return getmatcher(repo, pats, opts, showbad=False)
258
258
def getmatcher(repo, pats=None, opts=None, showbad=True):
    '''Wrapper around scmutil.match() that adds showbad: if false,
    neuter the match object's bad() method so it does not print any
    warnings about missing files or directories.

    Fix: the defaults were the mutable literals [] and {}, which Python
    shares across all calls; use None sentinels instead.'''
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    match = scmutil.match(repo[None], pats, opts)
    if not showbad:
        match.bad = lambda f, msg: None
    return match
268
268
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts exactly the standins whose
    underlying files are accepted by rmatcher; rmatcher's file list is
    passed on as the user-specified paths.'''
    smatcher = getstandinmatcher(repo, rmatcher.files())
    isstandin = smatcher.matchfn
    smatcher.matchfn = lambda f: (isstandin(f) and
                                  rmatcher.matchfn(splitstandin(f)))
    return smatcher
280
280
def standin(filename):
    '''Return the repo-relative path to the standin for the specified
    big file.

    Notes:
    1) Most callers want an absolute path, but _create_standin() needs
       it repo-relative so lfadd() can pass it to repo_add(); callers
       apply repo.wjoin() themselves when they need an absolute path.
    2) Joined with '/' because that is what dirstate always uses, even
       on Windows; any existing separator is converted first in case
       the name came from an external source (like the command line).
    '''
    return '/'.join((shortname, filename.replace(os.sep, '/')))
292
292
def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    prefix = shortname + '/'
    return filename.startswith(prefix)
297
297
def splitstandin(filename):
    '''Return the big-file name for the given standin path, or None if
    filename is not a standin. Split on '/' because that is what
    dirstate always uses, even on Windows; local separators are
    converted first in case the name came from the command line.'''
    bits = filename.replace(os.sep, '/').split('/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    return None
307
307
def updatestandin(repo, standin):
    '''Rewrite standin from the current contents (hash and executable
    bit) of its big file, if that file exists in the working dir.'''
    file = repo.wjoin(splitstandin(standin))
    if not os.path.exists(file):
        return
    writestandin(repo, standin, hashfile(file), getexecutable(file))
314
314
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    fctx = repo[node][standin(filename)]
    return fctx.data().strip()
319
319
def writestandin(repo, standin, hash, executable):
    '''Write hash to <repo.root>/<standin> with mode derived from
    executable.'''
    absstandin = repo.wjoin(standin)
    writehash(hash, absstandin, executable)
323
323
def copyandhash(instream, outfile):
    '''Read bytes from instream (an iterable) and write them to
    outfile, computing the SHA-1 hash of the data along the way.
    Close outfile when done and return the binary digest.'''
    hasher = util.sha1('')
    write = outfile.write
    for data in instream:
        hasher.update(data)
        write(data)

    # Blecch: closing a file that somebody else opened is rude and
    # wrong. But it's so darn convenient and practical! After all,
    # outfile was opened just to copy and hash.
    outfile.close()

    return hasher.digest()
339
339
def hashrepofile(repo, file):
    '''Hex SHA-1 of the working-directory copy of file.'''
    abspath = repo.wjoin(file)
    return hashfile(abspath)
342
342
def hashfile(file):
    '''Return the hex SHA-1 of file's contents, or '' if it is absent.'''
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    for data in blockstream(fd):
        hasher.update(data)
    # blockstream() already closed fd on exhaustion; this extra close
    # is harmless
    fd.close()
    return hasher.hexdigest()
352
352
class limitreader(object):
    '''File-like wrapper that caps the total number of bytes readable
    from the underlying file f at limit.'''
    def __init__(self, f, limit):
        self.f = f
        self.limit = limit

    def read(self, length):
        '''Read up to length bytes, never exceeding the remaining
        limit; return '' once the limit is exhausted.'''
        if self.limit == 0:
            return ''
        if length > self.limit:
            length = self.limit
        self.limit -= length
        return self.f.read(length)

    def close(self):
        # deliberately does NOT close the underlying file
        pass
367
367
def blockstream(infile, blocksize=128 * 1024):
    """Generator that yields blocks of data from infile and closes
    infile once it is exhausted."""
    while True:
        block = infile.read(blocksize)
        if not block:
            break
        yield block
    # same blecch as copyandhash() above: closing a file we didn't open
    infile.close()
377
377
def readhash(filename):
    '''Return the 40-byte hex hash stored at the start of filename;
    abort if the file is too short to contain one.

    Fix: the file descriptor leaked if read() raised; close it in a
    finally block.'''
    rfile = open(filename, 'rb')
    try:
        hash = rfile.read(40)
    finally:
        rfile.close()
    if len(hash) < 40:
        raise util.Abort(_('bad hash in \'%s\' (only %d bytes long)')
                         % (filename, len(hash)))
    return hash
386
386
def writehash(hash, filename, executable):
    '''Write hash plus a trailing newline to filename, creating parent
    directories as needed and setting the mode from executable.'''
    util.makedirs(os.path.dirname(filename))
    if os.path.exists(filename):
        os.unlink(filename)
    wfile = open(filename, 'wb')
    try:
        wfile.write(hash)
        wfile.write('\n')
    finally:
        wfile.close()
    if os.path.exists(filename):
        os.chmod(filename, getmode(executable))
400
400
def getexecutable(filename):
    '''Return True when filename is executable by user, group AND
    other.

    Fix: the original returned the raw int of the last bitmask test
    (e.g. 0 or stat.S_IXOTH) rather than a bool; callers only use
    truthiness, so returning an explicit bool is compatible and
    clearer.'''
    mode = os.stat(filename).st_mode
    return bool((mode & stat.S_IXUSR) and
                (mode & stat.S_IXGRP) and
                (mode & stat.S_IXOTH))
406
406
def getmode(executable):
    '''Return the permission bits to apply to a written file: 0o755
    when executable, 0o644 otherwise.

    Fix: use the explicit 0o octal prefix (valid since Python 2.6,
    required by Python 3) instead of the legacy 0755 literal.'''
    if executable:
        return 0o755
    else:
        return 0o644
412
412
def urljoin(first, second, *arg):
    '''Join two or more URL components, ensuring exactly one '/'
    between adjacent components.'''
    def join(left, right):
        sep = '' if left.endswith('/') else '/'
        if right.startswith('/'):
            right = right[1:]
        return left + sep + right

    url = first
    for piece in (second,) + arg:
        url = join(url, piece)
    return url
425
425
def hexsha1(data):
    """Return the hex-encoded SHA-1 digest of the contents of the
    file-like object data."""
    hasher = util.sha1()
    for chunk in util.filechunkiter(data):
        hasher.update(chunk)
    return hasher.hexdigest()
433
433
def httpsendfile(ui, filename):
    '''Wrap filename (opened in binary mode) in an httpsendfile object
    suitable for upload over HTTP.'''
    mode = 'rb'
    return httpconnection.httpsendfile(ui, filename, mode)
436
436
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    normalized = os.path.normpath(path)
    return normalized.replace(os.sep, '/')
440
440
def islfilesrepo(repo):
    '''True when repo has the largefiles requirement and its store
    actually contains at least one standin file.'''
    if 'largefiles' not in repo.requirements:
        return False
    return util.any(shortname + '/' in f[0] for f in repo.store.datafiles())
444
444
class storeprotonotcapable(Exception):
    '''Raised when no largefile store supports the required protocol;
    storetypes records the store types that were considered.'''
    def __init__(self, storetypes):
        self.storetypes = storetypes
General Comments 0
You need to be logged in to leave comments. Login now