##// END OF EJS Templates
largefiles: ensure destination directory exists before findfile links to there...
Hao Lian -
r15408:db8b0ee7 stable
parent child Browse files
Show More
@@ -1,451 +1,453 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import os
11 import os
12 import errno
12 import errno
13 import platform
13 import platform
14 import shutil
14 import shutil
15 import stat
15 import stat
16 import tempfile
16 import tempfile
17
17
18 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
19 from mercurial.i18n import _
19 from mercurial.i18n import _
20
20
21 shortname = '.hglf'
21 shortname = '.hglf'
22 longname = 'largefiles'
22 longname = 'largefiles'
23
23
24
24
25 # -- Portability wrappers ----------------------------------------------
25 # -- Portability wrappers ----------------------------------------------
26
26
def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
    '''Portability shim: walk the given dirstate with matcher, optionally
    including unknown and ignored files.'''
    walk = dirstate.walk
    return walk(matcher, [], unknown, ignored)
29
29
def repo_add(repo, list):
    '''Portability wrapper: add the given files to the working context.'''
    return repo[None].add(list)
33
33
34 def repo_remove(repo, list, unlink=False):
34 def repo_remove(repo, list, unlink=False):
35 def remove(list, unlink):
35 def remove(list, unlink):
36 wlock = repo.wlock()
36 wlock = repo.wlock()
37 try:
37 try:
38 if unlink:
38 if unlink:
39 for f in list:
39 for f in list:
40 try:
40 try:
41 util.unlinkpath(repo.wjoin(f))
41 util.unlinkpath(repo.wjoin(f))
42 except OSError, inst:
42 except OSError, inst:
43 if inst.errno != errno.ENOENT:
43 if inst.errno != errno.ENOENT:
44 raise
44 raise
45 repo[None].forget(list)
45 repo[None].forget(list)
46 finally:
46 finally:
47 wlock.release()
47 wlock.release()
48 return remove(list, unlink=unlink)
48 return remove(list, unlink=unlink)
49
49
def repo_forget(repo, list):
    '''Portability wrapper: forget the given files in the working context.'''
    return repo[None].forget(list)
53
53
def findoutgoing(repo, remote, force):
    '''Return the changesets present locally but missing from remote.'''
    from mercurial import discovery
    common = discovery.findcommonincoming(repo, remote, force=force)[0]
    return repo.changelog.findmissing(common)
59
59
60 # -- Private worker functions ------------------------------------------
60 # -- Private worker functions ------------------------------------------
61
61
def getminsize(ui, assumelfiles, opt, default=10):
    '''Determine the minimum size (in megabytes) for a file to be
    tracked as a largefile.

    The explicit command-line value (opt) wins; otherwise, when
    assumelfiles is set, fall back to the [largefiles] minsize config
    entry. Raises util.Abort for a malformed or missing value.'''
    lfsize = opt
    if assumelfiles and not lfsize:
        lfsize = ui.config(longname, 'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise util.Abort(_('largefiles: size must be number (not %s)\n')
                             % lfsize)
    if lfsize is None:
        raise util.Abort(_('minimum size for largefiles must be specified'))
    return lfsize
75
75
def link(src, dest):
    '''Hardlink src to dest, falling back to a plain copy (preserving
    the source's permission bits) when hardlinking fails.'''
    try:
        util.oslink(src, dest)
    except OSError:
        # hardlink failed (cross-device, unsupported fs, ...): copy instead
        shutil.copyfile(src, dest)
        os.chmod(dest, os.stat(src).st_mode)
83
83
def usercachepath(ui, hash):
    '''Return the per-user cache path for the largefile with the given
    hash, honoring the [largefiles] usercache setting and otherwise
    using the platform's conventional cache location.'''
    configured = ui.configpath(longname, 'usercache', None)
    if configured:
        return os.path.join(configured, hash)
    if os.name == 'nt':
        # prefer the local (non-roaming) application data directory
        appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
        return os.path.join(appdata, longname, hash)
    if platform.system() == 'Darwin':
        return os.path.join(os.getenv('HOME'), 'Library', 'Caches',
                            longname, hash)
    if os.name == 'posix':
        cachehome = os.getenv('XDG_CACHE_HOME')
        if cachehome:
            return os.path.join(cachehome, longname, hash)
        return os.path.join(os.getenv('HOME'), '.cache', longname, hash)
    raise util.Abort(_('unknown operating system: %s\n') % os.name)
104
104
def inusercache(ui, hash):
    '''True when the largefile with the given hash is in the user cache.'''
    cached = usercachepath(ui, hash)
    return os.path.exists(cached)
107
107
def findfile(repo, hash):
    '''Return the store path of the largefile with the given hash,
    hardlinking it into the store from the user cache if necessary;
    return None when the file is in neither place.'''
    if instore(repo, hash):
        repo.ui.note(_('Found %s in store\n') % hash)
    elif inusercache(repo.ui, hash):
        repo.ui.note(_('Found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        # the store directory may not exist yet (e.g. a fresh clone):
        # create it before linking into it, or link() would fail
        util.makedirs(os.path.dirname(path))
        link(usercachepath(repo.ui, hash), path)
    else:
        return None
    return storepath(repo, hash)
117
119
class largefiles_dirstate(dirstate.dirstate):
    '''dirstate subclass that normalizes every incoming filename to the
    unix (slash-separated) form used by the largefiles dirstate.'''
    def __getitem__(self, key):
        return super(largefiles_dirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefiles_dirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefiles_dirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefiles_dirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefiles_dirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefiles_dirstate, self).forget(unixpath(f))
131
133
132 def openlfdirstate(ui, repo):
134 def openlfdirstate(ui, repo):
133 '''
135 '''
134 Return a dirstate object that tracks largefiles: i.e. its root is
136 Return a dirstate object that tracks largefiles: i.e. its root is
135 the repo root, but it is saved in .hg/largefiles/dirstate.
137 the repo root, but it is saved in .hg/largefiles/dirstate.
136 '''
138 '''
137 admin = repo.join(longname)
139 admin = repo.join(longname)
138 opener = scmutil.opener(admin)
140 opener = scmutil.opener(admin)
139 lfdirstate = largefiles_dirstate(opener, ui, repo.root,
141 lfdirstate = largefiles_dirstate(opener, ui, repo.root,
140 repo.dirstate._validate)
142 repo.dirstate._validate)
141
143
142 # If the largefiles dirstate does not exist, populate and create
144 # If the largefiles dirstate does not exist, populate and create
143 # it. This ensures that we create it on the first meaningful
145 # it. This ensures that we create it on the first meaningful
144 # largefiles operation in a new clone. It also gives us an easy
146 # largefiles operation in a new clone. It also gives us an easy
145 # way to forcibly rebuild largefiles state:
147 # way to forcibly rebuild largefiles state:
146 # rm .hg/largefiles/dirstate && hg status
148 # rm .hg/largefiles/dirstate && hg status
147 # Or even, if things are really messed up:
149 # Or even, if things are really messed up:
148 # rm -rf .hg/largefiles && hg status
150 # rm -rf .hg/largefiles && hg status
149 if not os.path.exists(os.path.join(admin, 'dirstate')):
151 if not os.path.exists(os.path.join(admin, 'dirstate')):
150 util.makedirs(admin)
152 util.makedirs(admin)
151 matcher = getstandinmatcher(repo)
153 matcher = getstandinmatcher(repo)
152 for standin in dirstate_walk(repo.dirstate, matcher):
154 for standin in dirstate_walk(repo.dirstate, matcher):
153 lfile = splitstandin(standin)
155 lfile = splitstandin(standin)
154 hash = readstandin(repo, lfile)
156 hash = readstandin(repo, lfile)
155 lfdirstate.normallookup(lfile)
157 lfdirstate.normallookup(lfile)
156 try:
158 try:
157 if hash == hashfile(lfile):
159 if hash == hashfile(lfile):
158 lfdirstate.normal(lfile)
160 lfdirstate.normal(lfile)
159 except IOError, err:
161 except IOError, err:
160 if err.errno != errno.ENOENT:
162 if err.errno != errno.ENOENT:
161 raise
163 raise
162
164
163 lfdirstate.write()
165 lfdirstate.write()
164
166
165 return lfdirstate
167 return lfdirstate
166
168
def lfdirstate_status(lfdirstate, repo, rev):
    '''Reconcile the largefiles dirstate against revision rev, resolving
    the "unsure" entries into modified or clean, and return the usual
    seven-tuple of status lists.'''
    wlock = repo.wlock()
    try:
        match = match_.always(repo.root, repo.getcwd())
        (unsure, modified, added, removed, missing, unknown, ignored,
         clean) = lfdirstate.status(match, [], False, False, False)
        for lfile in unsure:
            expected = repo[rev][standin(lfile)].data().strip()
            if expected != hashfile(repo.wjoin(lfile)):
                modified.append(lfile)
            else:
                clean.append(lfile)
                lfdirstate.normal(lfile)
        lfdirstate.write()
    finally:
        wlock.release()
    return (modified, added, removed, missing, unknown, ignored, clean)
184
186
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''
    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    lfiles = []
    for f in repo[rev].walk(matcher):
        if rev is not None or repo.dirstate[f] != '?':
            lfiles.append(splitstandin(f))
    return lfiles
196
198
def instore(repo, hash):
    '''True when the largefile with the given hash is in the repo store.'''
    stored = storepath(repo, hash)
    return os.path.exists(stored)
199
201
def storepath(repo, hash):
    '''Return the repo-store path for the largefile with the given hash.'''
    return repo.join(os.path.join(longname, hash))
202
204
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happened:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    path = findfile(repo, hash)
    if path is None:
        return False
    dest = repo.wjoin(filename)
    util.makedirs(os.path.dirname(dest))
    shutil.copy(path, dest)
    return True
215
217
def copytostore(repo, rev, file, uploaded=False):
    '''Copy the largefile backing *file* into the repo store, unless the
    store already has it.'''
    hash = readstandin(repo, file)
    if not instore(repo, hash):
        copytostoreabsolute(repo, repo.wjoin(file), hash)
221
223
def copytostoreabsolute(repo, file, hash):
    '''Copy the largefile at absolute path *file* into the repo store
    (hardlinking from the user cache when possible), then link the
    stored copy back into the user cache.'''
    stored = storepath(repo, hash)
    util.makedirs(os.path.dirname(stored))
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), stored)
    else:
        shutil.copyfile(file, stored)
        # keep the original file's permission bits on the stored copy
        os.chmod(stored, os.stat(file).st_mode)
    linktousercache(repo, hash)
230
232
def linktousercache(repo, hash):
    '''Hardlink the stored largefile into the per-user cache.'''
    target = usercachepath(repo.ui, hash)
    util.makedirs(os.path.dirname(target))
    link(storepath(repo, hash), target)
234
236
def getstandinmatcher(repo, pats=[], opts={}):
    '''Return a match object that applies pats to the standin directory'''
    standindir = repo.pathto(shortname)
    if pats:
        # patterns supplied: search standin directory relative to current dir
        cwd = repo.getcwd()
        if os.path.isabs(cwd):
            # cwd is an absolute path for hg -R <reponame>
            # work relative to the repository root in this case
            cwd = ''
        pats = [os.path.join(standindir, cwd, p) for p in pats]
    elif os.path.isdir(standindir):
        # no patterns: relative to repo root
        pats = [standindir]
    else:
        # no patterns and no standin dir: return matcher that matches nothing
        nomatch = match_.match(repo.root, None, [], exact=True)
        nomatch.matchfn = lambda f: False
        return nomatch
    return getmatcher(repo, pats, opts, showbad=False)
255
257
def getmatcher(repo, pats=[], opts={}, showbad=True):
    '''Wrapper around scmutil.match() that adds showbad: if false,
    neuter the match object's bad() method so it does not print any
    warnings about missing files or directories.'''
    match = scmutil.match(repo[None], pats, opts)
    if not showbad:
        # silence complaints about nonexistent files/directories
        match.bad = lambda f, msg: None
    return match
265
267
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher.files())
    isstandin = smatcher.matchfn
    # accept a standin only when its underlying file matches rmatcher
    smatcher.matchfn = lambda f: (isstandin(f) and
                                  rmatcher.matchfn(splitstandin(f)))
    return smatcher
277
279
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Most callers want an absolute path, but _create_standin() needs
    #    it repo-relative so lfadd() can pass it to repo_add().  So leave
    #    it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return '/'.join((shortname, filename.replace(os.sep, '/')))
289
291
def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    prefix = shortname + '/'
    return filename.startswith(prefix)
294
296
def splitstandin(filename):
    '''Return the big-file path for the given standin, or None when the
    path is not a standin.

    Split on / because that's what dirstate always uses, even on Windows.
    Change local separator to / first just in case we are passed filenames
    from an external source (like the command line).'''
    parts = filename.replace(os.sep, '/').split('/', 1)
    if len(parts) == 2 and parts[0] == shortname:
        return parts[1]
    return None
304
306
def updatestandin(repo, standin):
    '''Refresh the given standin from the current contents of its big
    file, if that file exists in the working directory.'''
    file = repo.wjoin(splitstandin(standin))
    if os.path.exists(file):
        writestandin(repo, standin, hashfile(file), getexecutable(file))
311
313
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    ctx = repo[node]
    return ctx[standin(filename)].data().strip()
316
318
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    writehash(hash, repo.wjoin(standin), executable)
320
322
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Close outfile
    when done and return the binary hash.'''
    hasher = util.sha1('')
    write = outfile.write
    for data in instream:
        hasher.update(data)
        write(data)

    # Blecch: closing a file that somebody else opened is rude and
    # wrong. But it's so darn convenient and practical! After all,
    # outfile was opened just to copy and hash.
    outfile.close()

    return hasher.digest()
336
338
def hashrepofile(repo, file):
    '''Return the hex SHA-1 of the given working-copy file.'''
    return hashfile(repo.wjoin(file))
339
341
def hashfile(file):
    '''Return the hex SHA-1 hash of the named file, or the empty string
    when the file does not exist.'''
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    for chunk in blockstream(fd):
        hasher.update(chunk)
    fd.close()
    return hasher.hexdigest()
349
351
class limitreader(object):
    '''File-like wrapper that serves at most *limit* bytes from f.'''
    def __init__(self, f, limit):
        self.f = f
        self.limit = limit

    def read(self, length):
        '''Read up to length bytes, never exceeding the remaining limit.'''
        if self.limit == 0:
            return ''
        if length > self.limit:
            length = self.limit
        self.limit -= length
        return self.f.read(length)

    def close(self):
        # deliberately a no-op: the underlying file belongs to the caller
        pass
364
366
def blockstream(infile, blocksize=128 * 1024):
    """Generator that yields blocks of data from infile and closes infile."""
    while True:
        block = infile.read(blocksize)
        if not block:
            break
        yield block
    # same blecch as copyandhash() above
    infile.close()
374
376
def readhash(filename):
    '''Return the 40-character hex hash stored in filename, aborting if
    the file holds fewer than 40 bytes.'''
    rfile = open(filename, 'rb')
    hash = rfile.read(40)
    rfile.close()
    if len(hash) < 40:
        raise util.Abort(_('bad hash in \'%s\' (only %d bytes long)')
                         % (filename, len(hash)))
    return hash
383
385
def writehash(hash, filename, executable):
    '''Write hash plus a trailing newline to filename, creating parent
    directories as needed, and set the mode from executable.'''
    util.makedirs(os.path.dirname(filename))
    if os.path.exists(filename):
        os.unlink(filename)
    wfile = open(filename, 'wb')
    try:
        wfile.write(hash)
        wfile.write('\n')
    finally:
        wfile.close()
    if os.path.exists(filename):
        os.chmod(filename, getmode(executable))
397
399
def getexecutable(filename):
    '''Truthy when filename is executable by user, group AND other.'''
    mode = os.stat(filename).st_mode
    return (mode & stat.S_IXUSR
            and mode & stat.S_IXGRP
            and mode & stat.S_IXOTH)
403
405
def getmode(executable):
    '''Return the permission bits for a file: rwxr-xr-x when executable,
    rw-r--r-- otherwise.

    Uses 0o-prefixed octal literals: the bare "0755" form is a syntax
    error under Python 3, while 0o755 is value-identical and accepted
    by Python 2.6+ as well.'''
    if executable:
        return 0o755
    else:
        return 0o644
409
411
def urljoin(first, second, *arg):
    '''Join two or more URL pieces so that exactly one slash separates
    each pair (at most one leading slash of the right piece is dropped).'''
    def join(left, right):
        if not left.endswith('/'):
            left = left + '/'
        if right.startswith('/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for piece in arg:
        url = join(url, piece)
    return url
422
424
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    hasher = util.sha1()
    for chunk in util.filechunkiter(data):
        hasher.update(chunk)
    return hasher.hexdigest()
430
432
def httpsendfile(ui, filename):
    '''Wrap filename for binary upload over HTTP (portability shim).'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
433
435
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    normalized = os.path.normpath(path)
    return normalized.replace(os.sep, '/')
437
439
def islfilesrepo(repo):
    '''True when the repo has the largefiles requirement and actually
    stores at least one standin.'''
    if 'largefiles' not in repo.requirements:
        return False
    return util.any(shortname + '/' in f[0] for f in repo.store.datafiles())
441
443
def mkstemp(repo, prefix):
    '''Returns a file descriptor and a filename corresponding to a temporary
    file in the repo's largefiles store.'''
    storedir = repo.join(longname)
    util.makedirs(storedir)
    return tempfile.mkstemp(prefix=prefix, dir=storedir)
448
450
class storeprotonotcapable(Exception):
    '''Raised when a largefile store supports none of the requested
    store types.'''
    def __init__(self, storetypes):
        # remember which store types were requested but unsupported
        self.storetypes = storetypes
General Comments 0
You need to be logged in to leave comments. Login now