##// END OF EJS Templates
i18n: use util.pconvert() instead of 'str.replace()' for problematic encoding...
FUJIWARA Katsunori -
r16066:6a42846c stable
parent child Browse files
Show More
@@ -1,451 +1,451 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import os
11 import os
12 import errno
12 import errno
13 import platform
13 import platform
14 import shutil
14 import shutil
15 import stat
15 import stat
16 import tempfile
16 import tempfile
17
17
18 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
19 from mercurial.i18n import _
19 from mercurial.i18n import _
20
20
21 shortname = '.hglf'
21 shortname = '.hglf'
22 longname = 'largefiles'
22 longname = 'largefiles'
23
23
24
24
25 # -- Portability wrappers ----------------------------------------------
25 # -- Portability wrappers ----------------------------------------------
26
26
def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
    '''Portability wrapper: walk the given dirstate with matcher, with no
    extra subrepo list, optionally including unknown/ignored files.'''
    walk = dirstate.walk
    return walk(matcher, [], unknown, ignored)
29
29
def repo_add(repo, list):
    '''Schedule the given files for addition in the working context.'''
    return repo[None].add(list)
33
33
34 def repo_remove(repo, list, unlink=False):
34 def repo_remove(repo, list, unlink=False):
35 def remove(list, unlink):
35 def remove(list, unlink):
36 wlock = repo.wlock()
36 wlock = repo.wlock()
37 try:
37 try:
38 if unlink:
38 if unlink:
39 for f in list:
39 for f in list:
40 try:
40 try:
41 util.unlinkpath(repo.wjoin(f))
41 util.unlinkpath(repo.wjoin(f))
42 except OSError, inst:
42 except OSError, inst:
43 if inst.errno != errno.ENOENT:
43 if inst.errno != errno.ENOENT:
44 raise
44 raise
45 repo[None].forget(list)
45 repo[None].forget(list)
46 finally:
46 finally:
47 wlock.release()
47 wlock.release()
48 return remove(list, unlink=unlink)
48 return remove(list, unlink=unlink)
49
49
def repo_forget(repo, list):
    '''Forget the given files in the working context without unlinking.'''
    return repo[None].forget(list)
53
53
def findoutgoing(repo, remote, force):
    '''Return the changelog nodes present locally but missing from remote.'''
    # imported here to avoid a cycle at module load time
    from mercurial import discovery
    common, _anyinc, _heads = discovery.findcommonincoming(
        repo, remote, force=force)
    return repo.changelog.findmissing(common)
59
59
60 # -- Private worker functions ------------------------------------------
60 # -- Private worker functions ------------------------------------------
61
61
def getminsize(ui, assumelfiles, opt, default=10):
    '''Return the minimum largefile size (in MB) as a float: the explicit
    opt if given, else the configured [largefiles] minsize when
    assumelfiles is set. Aborts on a non-numeric or missing value.'''
    lfsize = opt
    if assumelfiles and not lfsize:
        lfsize = ui.config(longname, 'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise util.Abort(_('largefiles: size must be number (not %s)\n')
                             % lfsize)
    if lfsize is None:
        raise util.Abort(_('minimum size for largefiles must be specified'))
    return lfsize
75
75
def link(src, dest):
    '''Hardlink src to dest, falling back to an atomic copy (preserving
    the source's mode bits) when hardlinking fails.'''
    try:
        util.oslink(src, dest)
        return
    except OSError:
        pass
    # hardlinks unavailable or crossing devices: copy atomically instead
    dst = util.atomictempfile(dest)
    for chunk in util.filechunkiter(open(src, 'rb')):
        dst.write(chunk)
    dst.close()
    os.chmod(dest, os.stat(src).st_mode)
86
86
def usercachepath(ui, hash):
    '''Return the per-user cache path for the given largefile hash,
    honoring the [largefiles] usercache setting, else the platform's
    conventional cache directory. Returns a falsy value when no cache
    location can be determined from the environment.'''
    path = ui.configpath(longname, 'usercache', None)
    if path:
        return os.path.join(path, hash)
    if os.name == 'nt':
        appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
        if appdata:
            return os.path.join(appdata, longname, hash)
    elif platform.system() == 'Darwin':
        home = os.getenv('HOME')
        if home:
            return os.path.join(home, 'Library', 'Caches', longname, hash)
    elif os.name == 'posix':
        xdgcache = os.getenv('XDG_CACHE_HOME')
        if xdgcache:
            return os.path.join(xdgcache, longname, hash)
        home = os.getenv('HOME')
        if home:
            return os.path.join(home, '.cache', longname, hash)
    else:
        raise util.Abort(_('unknown operating system: %s\n') % os.name)
    # known OS but the relevant environment variables were unset
    return path
112
112
def inusercache(ui, hash):
    '''Truthy when the given hash is present in the per-user cache.'''
    path = usercachepath(ui, hash)
    if not path:
        return path
    return os.path.exists(path)
116
116
def findfile(repo, hash):
    '''Return a store path for the largefile with the given hash,
    hardlinking it in from the user cache when necessary; None when the
    file is in neither cache.'''
    if instore(repo, hash):
        repo.ui.note(_('Found %s in store\n') % hash)
        return storepath(repo, hash)
    if inusercache(repo.ui, hash):
        repo.ui.note(_('Found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        util.makedirs(os.path.dirname(path))
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
128
128
class largefiles_dirstate(dirstate.dirstate):
    '''dirstate subclass used for the largefiles dirstate: every incoming
    path is normalized with unixpath() so keys are always stored
    slash-separated, even when callers pass OS-native paths.'''
    def __getitem__(self, key):
        return super(largefiles_dirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefiles_dirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefiles_dirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefiles_dirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefiles_dirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefiles_dirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefiles_dirstate, self).normallookup(unixpath(f))
144
144
def openlfdirstate(ui, repo):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    # the largefiles dirstate lives under .hg/largefiles; it validates
    # entries with the same function as the main dirstate
    admin = repo.join(longname)
    opener = scmutil.opener(admin)
    lfdirstate = largefiles_dirstate(opener, ui, repo.root,
                                     repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if not os.path.exists(os.path.join(admin, 'dirstate')):
        util.makedirs(admin)
        matcher = getstandinmatcher(repo)
        for standin in dirstate_walk(repo.dirstate, matcher):
            lfile = splitstandin(standin)
            hash = readstandin(repo, lfile)
            lfdirstate.normallookup(lfile)
            try:
                # mark clean only when the working copy matches the standin
                if hash == hashfile(repo.wjoin(lfile)):
                    lfdirstate.normal(lfile)
            except OSError, err:
                # a missing working-copy largefile is expected here
                if err.errno != errno.ENOENT:
                    raise
    return lfdirstate
172
172
def lfdirstate_status(lfdirstate, repo, rev):
    '''Run lfdirstate.status() and resolve each "unsure" file by rehashing
    its working copy against the standin at rev; return the standard
    7-tuple (modified, added, removed, missing, unknown, ignored, clean).'''
    match = match_.always(repo.root, repo.getcwd())
    (unsure, modified, added, removed, missing, unknown, ignored,
     clean) = lfdirstate.status(match, [], False, False, False)
    for lfile in unsure:
        expected = repo[rev][standin(lfile)].data().strip()
        if expected != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return (modified, added, removed, missing, unknown, ignored, clean)
185
185
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''
    if matcher is None:
        matcher = getstandinmatcher(repo)

    lfiles = []
    for f in repo[rev].walk(matcher):
        # ignore unknown files in working directory
        if rev is not None or repo.dirstate[f] != '?':
            lfiles.append(splitstandin(f))
    return lfiles
197
197
def instore(repo, hash):
    '''True when the largefile with the given hash is in the repo store.'''
    path = storepath(repo, hash)
    return os.path.exists(path)
200
200
def storepath(repo, hash):
    '''Return the repo-store path for the given largefile hash.'''
    relpath = os.path.join(longname, hash)
    return repo.join(relpath)
203
203
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happened:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    path = findfile(repo, hash)
    if path is None:
        return False
    wdest = repo.wjoin(filename)
    util.makedirs(os.path.dirname(wdest))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    shutil.copy(path, wdest)
    return True
218
218
def copytostore(repo, rev, file, uploaded=False):
    '''Copy the largefile behind the given standin-tracked file into the
    repo store, unless it is already present there.'''
    hash = readstandin(repo, file)
    if not instore(repo, hash):
        copytostoreabsolute(repo, repo.wjoin(file), hash)
224
224
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    manifest = ctx.manifest()
    for filename in ctx.files():
        if isstandin(filename) and filename in manifest:
            copytostore(repo, ctx.node(), splitstandin(filename))
233
233
234
234
def copytostoreabsolute(repo, file, hash):
    '''Copy the largefile at absolute path 'file' into the store under
    'hash': hardlink from the user cache when possible, else copy the
    bytes atomically (preserving mode) and mirror the result back into
    the user cache.'''
    storedest = storepath(repo, hash)
    util.makedirs(os.path.dirname(storedest))
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storedest)
    else:
        dst = util.atomictempfile(storedest)
        for chunk in util.filechunkiter(open(file, 'rb')):
            dst.write(chunk)
        dst.close()
        util.copymode(file, storedest)
        linktousercache(repo, hash)
246
246
def linktousercache(repo, hash):
    '''Hardlink the stored largefile into the per-user cache, when a
    user cache location is available.'''
    path = usercachepath(repo.ui, hash)
    if not path:
        return
    util.makedirs(os.path.dirname(path))
    link(storepath(repo, hash), path)
252
252
def getstandinmatcher(repo, pats=None, opts=None):
    '''Return a match object that applies pats to the standin directory.

    pats: optional list of patterns (default: match the whole standin dir)
    opts: optional options dict passed through to the matcher

    Defaults were previously the mutable literals [] and {} — a shared-state
    hazard; None sentinels are backward compatible for all callers.'''
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    standindir = repo.pathto(shortname)
    if pats:
        # patterns supplied: search standin directory relative to current dir
        cwd = repo.getcwd()
        if os.path.isabs(cwd):
            # cwd is an absolute path for hg -R <reponame>
            # work relative to the repository root in this case
            cwd = ''
        pats = [os.path.join(standindir, cwd, pat) for pat in pats]
    elif os.path.isdir(standindir):
        # no patterns: relative to repo root
        pats = [standindir]
    else:
        # no patterns and no standin dir: return matcher that matches nothing
        match = match_.match(repo.root, None, [], exact=True)
        match.matchfn = lambda f: False
        return match
    return getmatcher(repo, pats, opts, showbad=False)
273
273
def getmatcher(repo, pats=None, opts=None, showbad=True):
    '''Wrapper around scmutil.match() that adds showbad: if false,
    neuter the match object's bad() method so it does not print any
    warnings about missing files or directories.

    Defaults were previously the mutable literals [] and {} — a shared-state
    hazard; None sentinels are backward compatible for all callers.'''
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    match = scmutil.match(repo[None], pats, opts)
    if not showbad:
        match.bad = lambda f, msg: None
    return match
283
283
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher.files())
    isstandin = smatcher.matchfn
    # a path matches when it is a standin AND its largefile counterpart
    # is accepted by rmatcher
    smatcher.matchfn = lambda f: (isstandin(f) and
                                  rmatcher.matchfn(splitstandin(f)))
    return smatcher
295
295
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Most callers want an absolute path, but _create_standin() needs
    #    it repo-relative so lfadd() can pass it to repo_add(). So leave
    #    it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows; util.pconvert() normalizes any platform separators in
    #    names from external sources (like the command line) first.
    return '%s/%s' % (shortname, util.pconvert(filename))
307
307
def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    prefix = shortname + '/'
    return filename.startswith(prefix)
312
312
def splitstandin(filename):
    '''Return the largefile name for the given standin path, or None when
    filename is not a standin. Split on '/' because that is what dirstate
    always uses, even on Windows; util.pconvert() first normalizes any
    platform separators in externally-supplied names.'''
    bits = util.pconvert(filename).split('/', 1)
    if len(bits) != 2 or bits[0] != shortname:
        return None
    return bits[1]
322
322
def updatestandin(repo, standin):
    '''Rewrite the given standin from the current working-copy largefile
    (hash and executable bit), when that file exists.'''
    file = repo.wjoin(splitstandin(standin))
    if not os.path.exists(file):
        return
    writestandin(repo, standin, hashfile(file), getexecutable(file))
329
329
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    ctx = repo[node]
    return ctx[standin(filename)].data().strip()
334
334
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    abspath = repo.wjoin(standin)
    writehash(hash, abspath, executable)
338
338
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Close outfile
    when done and return the binary hash.'''
    hasher = util.sha1('')
    write = outfile.write
    for data in instream:
        hasher.update(data)
        write(data)

    # Blecch: closing a file that somebody else opened is rude and
    # wrong. But it's so darn convenient and practical! After all,
    # outfile was opened just to copy and hash.
    outfile.close()

    return hasher.digest()
354
354
def hashrepofile(repo, file):
    '''Return the hex hash of the working copy of 'file' in repo.'''
    abspath = repo.wjoin(file)
    return hashfile(abspath)
357
357
def hashfile(file):
    '''Return the hex SHA-1 of the file at path 'file', or the empty
    string when the file does not exist.'''
    if not os.path.exists(file):
        return ''
    fd = open(file, 'rb')
    hasher = util.sha1('')
    for chunk in blockstream(fd):
        hasher.update(chunk)
    fd.close()
    return hasher.hexdigest()
367
367
class limitreader(object):
    '''File-like wrapper exposing at most 'limit' bytes of f.'''
    def __init__(self, f, limit):
        self.f = f
        self.limit = limit

    def read(self, length):
        '''Read up to length bytes, never exceeding the remaining limit;
        returns '' once the limit is exhausted.'''
        if self.limit == 0:
            return ''
        if length > self.limit:
            length = self.limit
        self.limit -= length
        return self.f.read(length)

    def close(self):
        # deliberately do NOT close the wrapped file; caller owns it
        pass
382
382
def blockstream(infile, blocksize=128 * 1024):
    """Generator that yields blocks of data from infile and closes infile."""
    data = infile.read(blocksize)
    while data:
        yield data
        data = infile.read(blocksize)
    # same blecch as copyandhash() above
    infile.close()
392
392
def writehash(hash, filename, executable):
    '''Write hash (newline-terminated) to filename, creating parent
    directories as needed and setting mode bits per 'executable'.'''
    util.makedirs(os.path.dirname(filename))
    util.writefile(filename, hash + '\n')
    mode = getmode(executable)
    os.chmod(filename, mode)
397
397
def getexecutable(filename):
    '''Return a truthy value when filename has all three execute bits
    (user, group, other) set.'''
    mode = os.stat(filename).st_mode
    return ((mode & stat.S_IXUSR) and (mode & stat.S_IXGRP) and
            (mode & stat.S_IXOTH))
403
403
404 def getmode(executable):
404 def getmode(executable):
405 if executable:
405 if executable:
406 return 0755
406 return 0755
407 else:
407 else:
408 return 0644
408 return 0644
409
409
def urljoin(first, second, *arg):
    '''Join two or more URL components, collapsing exactly one
    trailing/leading slash pair at each junction.'''
    def join(left, right):
        if not left.endswith('/'):
            left = left + '/'
        if right.startswith('/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for part in arg:
        url = join(url, part)
    return url
422
422
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    hasher = util.sha1()
    for chunk in util.filechunkiter(data):
        hasher.update(chunk)
    return hasher.hexdigest()
430
430
def httpsendfile(ui, filename):
    '''Return an httpsendfile wrapper opened for binary reading.'''
    mode = 'rb'
    return httpconnection.httpsendfile(ui, filename, mode)
433
433
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    normalized = os.path.normpath(path)
    return util.pconvert(normalized)
437
437
def islfilesrepo(repo):
    '''True when repo carries the largefiles requirement and its store
    contains at least one standin.'''
    if 'largefiles' not in repo.requirements:
        return False
    return util.any(shortname + '/' in f[0] for f in repo.store.datafiles())
441
441
def mkstemp(repo, prefix):
    '''Returns a file descriptor and a filename corresponding to a temporary
    file in the repo's largefiles store.'''
    storedir = repo.join(longname)
    util.makedirs(storedir)
    return tempfile.mkstemp(prefix=prefix, dir=storedir)
448
448
class storeprotonotcapable(Exception):
    '''Raised when a remote store supports none of the requested store
    types; carries the list of types that were attempted.'''
    def __init__(self, storetypes):
        self.storetypes = storetypes
General Comments 0
You need to be logged in to leave comments. Login now