##// END OF EJS Templates
largefiles: lowercase messages
Martin Geisler -
r16928:73b9286e default
parent child Browse files
Show More
@@ -1,467 +1,467
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import os
11 import os
12 import errno
12 import errno
13 import platform
13 import platform
14 import shutil
14 import shutil
15 import stat
15 import stat
16
16
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 from mercurial.i18n import _
18 from mercurial.i18n import _
19
19
# directory (relative to the repo root) where standin files live
shortname = '.hglf'
# name of the store directory under .hg, and of the config section
longname = 'largefiles'
22
22
23
23
24 # -- Portability wrappers ----------------------------------------------
24 # -- Portability wrappers ----------------------------------------------
25
25
def dirstatewalk(dirstate, matcher, unknown=False, ignored=False):
    '''Walk the dirstate with the given matcher and return the matched
    paths; portability wrapper around dirstate.walk() with an empty
    explicit file list.'''
    return dirstate.walk(matcher, [], unknown, ignored)
28
28
def repoadd(repo, list):
    '''Schedule the given files for addition in the working context.'''
    return repo[None].add(list)
32
32
33 def reporemove(repo, list, unlink=False):
33 def reporemove(repo, list, unlink=False):
34 def remove(list, unlink):
34 def remove(list, unlink):
35 wlock = repo.wlock()
35 wlock = repo.wlock()
36 try:
36 try:
37 if unlink:
37 if unlink:
38 for f in list:
38 for f in list:
39 try:
39 try:
40 util.unlinkpath(repo.wjoin(f))
40 util.unlinkpath(repo.wjoin(f))
41 except OSError, inst:
41 except OSError, inst:
42 if inst.errno != errno.ENOENT:
42 if inst.errno != errno.ENOENT:
43 raise
43 raise
44 repo[None].forget(list)
44 repo[None].forget(list)
45 finally:
45 finally:
46 wlock.release()
46 wlock.release()
47 return remove(list, unlink=unlink)
47 return remove(list, unlink=unlink)
48
48
def repoforget(repo, list):
    '''Forget the given files in the working context.'''
    return repo[None].forget(list)
52
52
def findoutgoing(repo, remote, force):
    '''Return the changesets present in repo that the remote does not
    have (i.e. what a push would send).'''
    # local import; presumably avoids a module-level import cycle with
    # mercurial.discovery -- confirm before moving it to the top
    from mercurial import discovery
    common, _anyinc, _heads = discovery.findcommonincoming(repo,
        remote, force=force)
    return repo.changelog.findmissing(common)
58
58
59 # -- Private worker functions ------------------------------------------
59 # -- Private worker functions ------------------------------------------
60
60
def getminsize(ui, assumelfiles, opt, default=10):
    '''Return the minimum largefile size threshold, in megabytes.

    The command-line value 'opt' wins; otherwise, when assumelfiles is
    set, fall back to the [largefiles] minsize config value (defaulting
    to 'default'). Raises util.Abort on a non-numeric value or when no
    size can be determined at all.'''
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, 'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            # no trailing newline: util.Abort messages are formatted by
            # the caller (matches the other Abort below)
            raise util.Abort(_('largefiles: size must be number (not %s)')
                             % lfsize)
    if lfsize is None:
        raise util.Abort(_('minimum size for largefiles must be specified'))
    return lfsize
74
74
def link(src, dest):
    '''Hardlink src to dest; if hardlinking fails, fall back to an
    atomic copy that preserves the source file's mode.'''
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        dst = util.atomictempfile(dest)
        fp = open(src, 'rb')
        try:
            for chunk in util.filechunkiter(fp):
                dst.write(chunk)
        finally:
            # close the source explicitly instead of relying on the
            # garbage collector to reclaim the descriptor
            fp.close()
        dst.close()
        os.chmod(dest, os.stat(src).st_mode)
85
85
def usercachepath(ui, hash):
    '''Return the path in the per-user cache for the largefile with the
    given hash, or None when no cache location can be determined.

    Uses the [largefiles] usercache config if set, otherwise a
    platform-specific default cache directory.'''
    path = ui.configpath(longname, 'usercache', None)
    if path:
        path = os.path.join(path, hash)
    else:
        if os.name == 'nt':
            appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
            if appdata:
                path = os.path.join(appdata, longname, hash)
        elif platform.system() == 'Darwin':
            home = os.getenv('HOME')
            if home:
                path = os.path.join(home, 'Library', 'Caches',
                                    longname, hash)
        elif os.name == 'posix':
            path = os.getenv('XDG_CACHE_HOME')
            if path:
                path = os.path.join(path, longname, hash)
            else:
                home = os.getenv('HOME')
                if home:
                    path = os.path.join(home, '.cache', longname, hash)
        else:
            # no trailing newline: util.Abort output is formatted by the
            # caller
            raise util.Abort(_('unknown operating system: %s') % os.name)
    return path
111
111
def inusercache(ui, hash):
    '''Report whether the largefile with the given hash is present in
    the user cache.

    NOTE: returns the falsy path value itself (None) rather than False
    when no cache location exists; callers rely on truthiness only.'''
    path = usercachepath(ui, hash)
    return path and os.path.exists(path)
115
115
def findfile(repo, hash):
    '''Return the store path of the largefile with the given hash,
    copying (hardlinking) it into the store from the user cache when
    necessary. Return None if the file is in neither cache.'''
    if instore(repo, hash):
        repo.ui.note(_('found %s in store\n') % hash)
        return storepath(repo, hash)
    elif inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        util.makedirs(os.path.dirname(path))
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
127
127
class largefilesdirstate(dirstate.dirstate):
    '''dirstate subclass used for the largefiles dirstate: every path is
    normalized with unixpath() (slash-separated, normpath'd) before
    being handed to the base dirstate implementation.'''
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
143
143
def openlfdirstate(ui, repo):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    admin = repo.join(longname)
    opener = scmutil.opener(admin)
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                     repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if not os.path.exists(os.path.join(admin, 'dirstate')):
        util.makedirs(admin)
        matcher = getstandinmatcher(repo)
        for standin in dirstatewalk(repo.dirstate, matcher):
            lfile = splitstandin(standin)
            hash = readstandin(repo, lfile)
            lfdirstate.normallookup(lfile)
            try:
                # only mark the largefile clean when its content matches
                # the hash recorded in the standin
                if hash == hashfile(repo.wjoin(lfile)):
                    lfdirstate.normal(lfile)
            except OSError, err:
                # a missing largefile simply stays in normallookup state
                if err.errno != errno.ENOENT:
                    raise
    return lfdirstate
171
171
def lfdirstatestatus(lfdirstate, repo, rev):
    '''Compute the status of the tracked largefiles against revision
    'rev', rehashing files the lfdirstate is unsure about and marking
    the clean ones normal as a side effect.

    Returns (modified, added, removed, missing, unknown, ignored,
    clean).'''
    match = match_.always(repo.root, repo.getcwd())
    s = lfdirstate.status(match, [], False, False, False)
    unsure, modified, added, removed, missing, unknown, ignored, clean = s
    for lfile in unsure:
        # compare the hash recorded in the standin at 'rev' against the
        # hash of the current working copy of the largefile
        if repo[rev][standin(lfile)].data().strip() != \
                hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return (modified, added, removed, missing, unknown, ignored, clean)
184
184
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory: with rev=None the walk
    # is over the working context, so skip files the dirstate marks '?'
    return [splitstandin(f)
            for f in repo[rev].walk(matcher)
            if rev is not None or repo.dirstate[f] != '?']
196
196
def instore(repo, hash):
    '''Report whether the largefile with the given hash exists in the
    repo-local store.'''
    return os.path.exists(storepath(repo, hash))
199
199
def storepath(repo, hash):
    '''Return the path in the repo-local store (.hg/largefiles) for the
    largefile with the given hash.'''
    return repo.join(os.path.join(longname, hash))
202
202
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    path = findfile(repo, hash)
    if path is None:
        return False
    util.makedirs(os.path.dirname(repo.wjoin(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    shutil.copy(path, repo.wjoin(filename))
    return True
217
217
def copytostore(repo, rev, file, uploaded=False):
    '''Copy the largefile standing in for 'file' into the repo store,
    unless it is already there.

    NOTE(review): 'rev' and 'uploaded' are accepted but never used in
    this body; the standin hash is always read from the working
    directory -- confirm against callers before relying on rev.'''
    hash = readstandin(repo, file)
    if instore(repo, hash):
        return
    copytostoreabsolute(repo, repo.wjoin(file), hash)
223
223
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''

    ctx = repo[node]
    for filename in ctx.files():
        # only consider standins that are actually present in this
        # revision's manifest (ctx.files() also lists removed files)
        if isstandin(filename) and filename in ctx.manifest():
            realfile = splitstandin(filename)
            copytostore(repo, ctx.node(), realfile)
232
232
233
233
def copytostoreabsolute(repo, file, hash):
    '''Copy the largefile at absolute path 'file' into the repo store,
    hardlinking from the user cache when the file is already there;
    otherwise copy it atomically and link the result back into the
    user cache.'''
    util.makedirs(os.path.dirname(storepath(repo, hash)))
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        dst = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        fp = open(file, 'rb')
        try:
            for chunk in util.filechunkiter(fp):
                dst.write(chunk)
        finally:
            # close the source explicitly instead of leaking the
            # descriptor until garbage collection
            fp.close()
        dst.close()
        linktousercache(repo, hash)
245
245
def linktousercache(repo, hash):
    '''Hardlink (or copy) the store copy of the given largefile into
    the user cache, when a cache location is available.'''
    path = usercachepath(repo.ui, hash)
    if path:
        util.makedirs(os.path.dirname(path))
        link(storepath(repo, hash), path)
251
251
def getstandinmatcher(repo, pats=None, opts=None):
    '''Return a match object that applies pats to the standin directory'''
    # use None defaults instead of mutable [] / {} literals, which are
    # shared between calls and a classic Python pitfall
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    standindir = repo.pathto(shortname)
    if pats:
        # patterns supplied: search standin directory relative to current dir
        cwd = repo.getcwd()
        if os.path.isabs(cwd):
            # cwd is an absolute path for hg -R <reponame>
            # work relative to the repository root in this case
            cwd = ''
        pats = [os.path.join(standindir, cwd, pat) for pat in pats]
    elif os.path.isdir(standindir):
        # no patterns: relative to repo root
        pats = [standindir]
    else:
        # no patterns and no standin dir: return matcher that matches nothing
        match = match_.match(repo.root, None, [], exact=True)
        match.matchfn = lambda f: False
        return match
    return getmatcher(repo, pats, opts, showbad=False)
272
272
def getmatcher(repo, pats=None, opts=None, showbad=True):
    '''Wrapper around scmutil.match() that adds showbad: if false,
    neuter the match object's bad() method so it does not print any
    warnings about missing files or directories.'''
    # None defaults avoid the shared-mutable-default-argument pitfall;
    # scmutil.match still receives the same [] / {} values as before
    match = scmutil.match(repo[None], pats or [], opts or {})

    if not showbad:
        match.bad = lambda f, msg: None
    return match
282
282
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher.files())
    isstandin = smatcher.matchfn
    def composedmatchfn(f):
        # f is a standin path: accept it only when the corresponding
        # largefile path is accepted by rmatcher
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composedmatchfn

    return smatcher
294
294
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Most callers want an absolute path, but _createstandin() needs
    #    it repo-relative so lfadd() can pass it to repoadd().  So leave
    #    it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    # shortname is the '.hglf' standin directory at the repo root
    return shortname + '/' + util.pconvert(filename)
306
306
def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    # a pure prefix check: no filesystem access, no normalization
    return filename.startswith(shortname + '/')
311
311
def splitstandin(filename):
    '''Return the largefile path for the given standin path, or None
    when filename is not a standin.

    Split on '/' because that is what dirstate always uses, even on
    Windows; convert the local separator first in case the name came
    from an external source (like the command line).'''
    parts = util.pconvert(filename).split('/', 1)
    if len(parts) == 2 and parts[0] == shortname:
        return parts[1]
    return None
321
321
def updatestandin(repo, standin):
    '''Re-hash the working copy of the largefile corresponding to
    'standin' and rewrite the standin file, if the largefile exists.'''
    file = repo.wjoin(splitstandin(standin))
    if os.path.exists(file):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
328
328
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    # repo[None] is the working directory context
    return repo[node][standin(filename)].data().strip()
333
333
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    # the standin ends up containing the hex hash plus a trailing
    # newline (see writehash)
    writehash(hash, repo.wjoin(standin), executable)
337
337
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Close outfile
    when done and return the binary hash.'''
    hasher = util.sha1('')
    for data in instream:
        hasher.update(data)
        outfile.write(data)

    # Blecch: closing a file that somebody else opened is rude and
    # wrong. But it's so darn convenient and practical! After all,
    # outfile was opened just to copy and hash.
    outfile.close()

    # note: binary digest(), not hexdigest() -- callers expect bytes
    return hasher.digest()
353
353
def hashrepofile(repo, file):
    '''Return the hex SHA-1 hash of the working copy of 'file'
    (empty string if it does not exist).'''
    return hashfile(repo.wjoin(file))
356
356
def hashfile(file):
    '''Return the hex SHA-1 hash of the given file's contents, or the
    empty string when the file does not exist.'''
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    for data in blockstream(fd):
        hasher.update(data)
    # blockstream() already closes fd on exhaustion; this second close
    # is harmless and covers the (unreachable here) early-exit case
    fd.close()
    return hasher.hexdigest()
366
366
class limitreader(object):
    '''File-like wrapper that exposes at most 'limit' characters/bytes
    of the underlying file object f, then behaves as exhausted.'''
    def __init__(self, f, limit):
        self.f = f
        self.limit = limit

    def read(self, length):
        if self.limit == 0:
            return ''
        # never hand out more than the remaining budget
        length = min(length, self.limit)
        self.limit -= length
        return self.f.read(length)

    def close(self):
        # deliberately does NOT close the wrapped file
        pass
381
381
def blockstream(infile, blocksize=128 * 1024):
    """Generator that yields blocks of data from infile and closes infile."""
    while True:
        chunk = infile.read(blocksize)
        if not chunk:
            # same blecch as copyandhash() above: we close a file that
            # somebody else opened, purely for convenience
            infile.close()
            return
        yield chunk
391
391
def writehash(hash, filename, executable):
    '''Write hash plus a trailing newline to filename, creating parent
    directories as needed and setting the file mode according to
    'executable' (see getmode()).'''
    util.makedirs(os.path.dirname(filename))
    util.writefile(filename, hash + '\n')
    os.chmod(filename, getmode(executable))
396
396
def getexecutable(filename):
    '''Report whether the file has the execute bit set for user, group
    AND other.

    NOTE: returns a truthy/falsy int (a mode-bit value or 0), not a
    bool; callers rely on truthiness only.'''
    mode = os.stat(filename).st_mode
    return ((mode & stat.S_IXUSR) and
            (mode & stat.S_IXGRP) and
            (mode & stat.S_IXOTH))
402
402
def getmode(executable):
    '''Return the permission mode for a standin/largefile: rwxr-xr-x
    when executable, rw-r--r-- otherwise (Python 2 octal literals).'''
    if executable:
        return 0755
    else:
        return 0644
408
408
def urljoin(first, second, *arg):
    '''Join two or more URL path pieces with exactly one slash at each
    boundary (one trailing slash of the left piece is reused; one
    leading slash of the right piece is dropped).'''
    url = first
    for piece in (second,) + arg:
        if not url.endswith('/'):
            url += '/'
        if piece.startswith('/'):
            piece = piece[1:]
        url += piece
    return url
421
421
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    h = util.sha1()
    # stream in chunks so arbitrarily large files don't load into memory
    for chunk in util.filechunkiter(data):
        h.update(chunk)
    return h.hexdigest()
429
429
def httpsendfile(ui, filename):
    '''Return an httpsendfile object for streaming 'filename' (opened
    binary) in an HTTP request body.'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
432
432
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    # normpath first (collapses '..', '.', doubled separators), then
    # convert to forward slashes
    return util.pconvert(os.path.normpath(path))
436
436
def islfilesrepo(repo):
    '''Report whether repo has the largefiles requirement AND actually
    contains at least one standin in its store.'''
    return ('largefiles' in repo.requirements and
            util.any(shortname + '/' in f[0] for f in repo.store.datafiles()))
440
440
class storeprotonotcapable(Exception):
    '''Raised when a remote cannot provide any of the requested
    largefile store types; storetypes lists what was requested.'''
    def __init__(self, storetypes):
        self.storetypes = storetypes
444
444
def getcurrentheads(repo):
    '''Return the head changesets of every branch in the repo, as one
    flat list.'''
    heads = []
    for branch in repo.branchmap():
        heads.extend(repo.branchheads(branch))
    return heads
452
452
def getstandinsstate(repo):
    '''Return a list of (largefile name, standin hash) pairs for every
    standin tracked in the working directory.'''
    standins = []
    matcher = getstandinmatcher(repo)
    for standin in dirstatewalk(repo.dirstate, matcher):
        lfile = splitstandin(standin)
        standins.append((lfile, readstandin(repo, lfile)))
    return standins
460
460
def getlfilestoupdate(oldstandins, newstandins):
    '''Return the names of the largefiles whose (name, hash) standin
    entries differ between the two lists, without duplicates.

    Both arguments are lists of (largefile name, hash) pairs as
    returned by getstandinsstate(). The order of the result is
    unspecified (it comes from set iteration).'''
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    # track names already emitted in a set: 'f[0] not in filelist' was
    # an O(n) list scan per entry, i.e. accidentally quadratic
    seen = set()
    for f in changedstandins:
        if f[0] not in seen:
            seen.add(f[0])
            filelist.append(f[0])
    return filelist
@@ -1,82 +1,82
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''store class for local filesystem'''
9 '''store class for local filesystem'''
10
10
11 import os
11 import os
12
12
13 from mercurial import util
13 from mercurial import util
14 from mercurial.i18n import _
14 from mercurial.i18n import _
15
15
16 import lfutil
16 import lfutil
17 import basestore
17 import basestore
18
18
19 class localstore(basestore.basestore):
19 class localstore(basestore.basestore):
20 '''localstore first attempts to grab files out of the store in the remote
20 '''localstore first attempts to grab files out of the store in the remote
21 Mercurial repository. Failling that, it attempts to grab the files from
21 Mercurial repository. Failling that, it attempts to grab the files from
22 the user cache.'''
22 the user cache.'''
23
23
24 def __init__(self, ui, repo, remote):
24 def __init__(self, ui, repo, remote):
25 url = os.path.join(remote.path, '.hg', lfutil.longname)
25 url = os.path.join(remote.path, '.hg', lfutil.longname)
26 super(localstore, self).__init__(ui, repo, util.expandpath(url))
26 super(localstore, self).__init__(ui, repo, util.expandpath(url))
27 self.remote = remote
27 self.remote = remote
28
28
29 def put(self, source, hash):
29 def put(self, source, hash):
30 util.makedirs(os.path.dirname(lfutil.storepath(self.remote, hash)))
30 util.makedirs(os.path.dirname(lfutil.storepath(self.remote, hash)))
31 if lfutil.instore(self.remote, hash):
31 if lfutil.instore(self.remote, hash):
32 return
32 return
33 lfutil.link(lfutil.storepath(self.repo, hash),
33 lfutil.link(lfutil.storepath(self.repo, hash),
34 lfutil.storepath(self.remote, hash))
34 lfutil.storepath(self.remote, hash))
35
35
36 def exists(self, hash):
36 def exists(self, hash):
37 return lfutil.instore(self.remote, hash)
37 return lfutil.instore(self.remote, hash)
38
38
39 def _getfile(self, tmpfile, filename, hash):
39 def _getfile(self, tmpfile, filename, hash):
40 if lfutil.instore(self.remote, hash):
40 if lfutil.instore(self.remote, hash):
41 path = lfutil.storepath(self.remote, hash)
41 path = lfutil.storepath(self.remote, hash)
42 elif lfutil.inusercache(self.ui, hash):
42 elif lfutil.inusercache(self.ui, hash):
43 path = lfutil.usercachepath(self.ui, hash)
43 path = lfutil.usercachepath(self.ui, hash)
44 else:
44 else:
45 raise basestore.StoreError(filename, hash, '',
45 raise basestore.StoreError(filename, hash, '',
46 _("Can't get file locally"))
46 _("can't get file locally"))
47 fd = open(path, 'rb')
47 fd = open(path, 'rb')
48 try:
48 try:
49 return lfutil.copyandhash(fd, tmpfile)
49 return lfutil.copyandhash(fd, tmpfile)
50 finally:
50 finally:
51 fd.close()
51 fd.close()
52
52
53 def _verifyfile(self, cctx, cset, contents, standin, verified):
53 def _verifyfile(self, cctx, cset, contents, standin, verified):
54 filename = lfutil.splitstandin(standin)
54 filename = lfutil.splitstandin(standin)
55 if not filename:
55 if not filename:
56 return False
56 return False
57 fctx = cctx[standin]
57 fctx = cctx[standin]
58 key = (filename, fctx.filenode())
58 key = (filename, fctx.filenode())
59 if key in verified:
59 if key in verified:
60 return False
60 return False
61
61
62 expecthash = fctx.data()[0:40]
62 expecthash = fctx.data()[0:40]
63 verified.add(key)
63 verified.add(key)
64 if not lfutil.instore(self.remote, expecthash):
64 if not lfutil.instore(self.remote, expecthash):
65 self.ui.warn(
65 self.ui.warn(
66 _('changeset %s: %s missing\n'
66 _('changeset %s: %s missing\n'
67 ' (looked for hash %s)\n')
67 ' (looked for hash %s)\n')
68 % (cset, filename, expecthash))
68 % (cset, filename, expecthash))
69 return True # failed
69 return True # failed
70
70
71 if contents:
71 if contents:
72 storepath = lfutil.storepath(self.remote, expecthash)
72 storepath = lfutil.storepath(self.remote, expecthash)
73 actualhash = lfutil.hashfile(storepath)
73 actualhash = lfutil.hashfile(storepath)
74 if actualhash != expecthash:
74 if actualhash != expecthash:
75 self.ui.warn(
75 self.ui.warn(
76 _('changeset %s: %s: contents differ\n'
76 _('changeset %s: %s: contents differ\n'
77 ' (%s:\n'
77 ' (%s:\n'
78 ' expected hash %s,\n'
78 ' expected hash %s,\n'
79 ' but got %s)\n')
79 ' but got %s)\n')
80 % (cset, filename, storepath, expecthash, actualhash))
80 % (cset, filename, storepath, expecthash, actualhash))
81 return True # failed
81 return True # failed
82 return False
82 return False
@@ -1,123 +1,123
1 $ "$TESTDIR/hghave" unix-permissions || exit 80
1 $ "$TESTDIR/hghave" unix-permissions || exit 80
2
2
3 Create user cache directory
3 Create user cache directory
4
4
5 $ USERCACHE=`pwd`/cache; export USERCACHE
5 $ USERCACHE=`pwd`/cache; export USERCACHE
6 $ cat <<EOF >> ${HGRCPATH}
6 $ cat <<EOF >> ${HGRCPATH}
7 > [extensions]
7 > [extensions]
8 > hgext.largefiles=
8 > hgext.largefiles=
9 > [largefiles]
9 > [largefiles]
10 > usercache=${USERCACHE}
10 > usercache=${USERCACHE}
11 > EOF
11 > EOF
12 $ mkdir -p ${USERCACHE}
12 $ mkdir -p ${USERCACHE}
13
13
14 Create source repo, and commit adding largefile.
14 Create source repo, and commit adding largefile.
15
15
16 $ hg init src
16 $ hg init src
17 $ cd src
17 $ cd src
18 $ echo large > large
18 $ echo large > large
19 $ hg add --large large
19 $ hg add --large large
20 $ hg commit -m 'add largefile'
20 $ hg commit -m 'add largefile'
21 $ cd ..
21 $ cd ..
22
22
23 Discard all cached largefiles in USERCACHE
23 Discard all cached largefiles in USERCACHE
24
24
25 $ rm -rf ${USERCACHE}
25 $ rm -rf ${USERCACHE}
26
26
27 Create mirror repo, and pull from source without largefile:
27 Create mirror repo, and pull from source without largefile:
28 "pull" is used instead of "clone" for suppression of (1) updating to
28 "pull" is used instead of "clone" for suppression of (1) updating to
29 tip (= cahcing largefile from source repo), and (2) recording source
29 tip (= cahcing largefile from source repo), and (2) recording source
30 repo as "default" path in .hg/hgrc.
30 repo as "default" path in .hg/hgrc.
31
31
32 $ hg init mirror
32 $ hg init mirror
33 $ cd mirror
33 $ cd mirror
34 $ hg pull ../src
34 $ hg pull ../src
35 pulling from ../src
35 pulling from ../src
36 requesting all changes
36 requesting all changes
37 adding changesets
37 adding changesets
38 adding manifests
38 adding manifests
39 adding file changes
39 adding file changes
40 added 1 changesets with 1 changes to 1 files
40 added 1 changesets with 1 changes to 1 files
41 (run 'hg update' to get a working copy)
41 (run 'hg update' to get a working copy)
42 caching new largefiles
42 caching new largefiles
43 0 largefiles cached
43 0 largefiles cached
44
44
45 Update working directory to "tip", which requires largefile("large"),
45 Update working directory to "tip", which requires largefile("large"),
46 but there is no cache file for it. So, hg must treat it as
46 but there is no cache file for it. So, hg must treat it as
47 "missing"(!) file.
47 "missing"(!) file.
48
48
49 $ hg update
49 $ hg update
50 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
50 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
51 getting changed largefiles
51 getting changed largefiles
52 large: Can't get file locally
52 large: can't get file locally
53 (no default or default-push path set in hgrc)
53 (no default or default-push path set in hgrc)
54 0 largefiles updated, 0 removed
54 0 largefiles updated, 0 removed
55 $ hg status
55 $ hg status
56 ! large
56 ! large
57
57
58 Update working directory to null: this cleanup .hg/largefiles/dirstate
58 Update working directory to null: this cleanup .hg/largefiles/dirstate
59
59
60 $ hg update null
60 $ hg update null
61 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
61 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
62 getting changed largefiles
62 getting changed largefiles
63 0 largefiles updated, 0 removed
63 0 largefiles updated, 0 removed
64
64
65 Update working directory to tip, again.
65 Update working directory to tip, again.
66
66
67 $ hg update
67 $ hg update
68 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
68 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
69 getting changed largefiles
69 getting changed largefiles
70 large: Can't get file locally
70 large: can't get file locally
71 (no default or default-push path set in hgrc)
71 (no default or default-push path set in hgrc)
72 0 largefiles updated, 0 removed
72 0 largefiles updated, 0 removed
73 $ hg status
73 $ hg status
74 ! large
74 ! large
75
75
76 Portable way to print file permissions:
76 Portable way to print file permissions:
77
77
78 $ cd ..
78 $ cd ..
79 $ cat > ls-l.py <<EOF
79 $ cat > ls-l.py <<EOF
80 > #!/usr/bin/env python
80 > #!/usr/bin/env python
81 > import sys, os
81 > import sys, os
82 > path = sys.argv[1]
82 > path = sys.argv[1]
83 > print '%03o' % (os.lstat(path).st_mode & 0777)
83 > print '%03o' % (os.lstat(path).st_mode & 0777)
84 > EOF
84 > EOF
85 $ chmod +x ls-l.py
85 $ chmod +x ls-l.py
86
86
87 Test that files in .hg/largefiles inherit mode from .hg/store, not
87 Test that files in .hg/largefiles inherit mode from .hg/store, not
88 from file in working copy:
88 from file in working copy:
89
89
90 $ cd src
90 $ cd src
91 $ chmod 750 .hg/store
91 $ chmod 750 .hg/store
92 $ chmod 660 large
92 $ chmod 660 large
93 $ echo change >> large
93 $ echo change >> large
94 $ hg commit -m change
94 $ hg commit -m change
95 $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
95 $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
96 640
96 640
97
97
98 Test permission of with files in .hg/largefiles created by update:
98 Test permission of with files in .hg/largefiles created by update:
99
99
100 $ cd ../mirror
100 $ cd ../mirror
101 $ rm -r "$USERCACHE" .hg/largefiles # avoid links
101 $ rm -r "$USERCACHE" .hg/largefiles # avoid links
102 $ chmod 750 .hg/store
102 $ chmod 750 .hg/store
103 $ hg pull ../src --update -q
103 $ hg pull ../src --update -q
104 $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
104 $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
105 640
105 640
106
106
107 Test permission of files created by push:
107 Test permission of files created by push:
108
108
109 $ hg serve -R ../src -d -p $HGPORT --pid-file hg.pid \
109 $ hg serve -R ../src -d -p $HGPORT --pid-file hg.pid \
110 > --config "web.allow_push=*" --config web.push_ssl=no
110 > --config "web.allow_push=*" --config web.push_ssl=no
111 $ cat hg.pid >> $DAEMON_PIDS
111 $ cat hg.pid >> $DAEMON_PIDS
112
112
113 $ echo change >> large
113 $ echo change >> large
114 $ hg commit -m change
114 $ hg commit -m change
115
115
116 $ rm -r "$USERCACHE"
116 $ rm -r "$USERCACHE"
117
117
118 $ hg push -q http://localhost:$HGPORT/
118 $ hg push -q http://localhost:$HGPORT/
119
119
120 $ ../ls-l.py ../src/.hg/largefiles/b734e14a0971e370408ab9bce8d56d8485e368a9
120 $ ../ls-l.py ../src/.hg/largefiles/b734e14a0971e370408ab9bce8d56d8485e368a9
121 640
121 640
122
122
123 $ cd ..
123 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now