##// END OF EJS Templates
largefiles: refactoring - create destination dir in lfutil.link
Mads Kiilerich -
r18998:d035c390 default
parent child Browse files
Show More
@@ -1,419 +1,418 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import os
11 import os
12 import errno
12 import errno
13 import platform
13 import platform
14 import shutil
14 import shutil
15 import stat
15 import stat
16
16
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 from mercurial.i18n import _
18 from mercurial.i18n import _
19
19
20 shortname = '.hglf'
20 shortname = '.hglf'
21 shortnameslash = shortname + '/'
21 shortnameslash = shortname + '/'
22 longname = 'largefiles'
22 longname = 'largefiles'
23
23
24
24
25 # -- Private worker functions ------------------------------------------
25 # -- Private worker functions ------------------------------------------
26
26
def getminsize(ui, assumelfiles, opt, default=10):
    '''Determine the minimum size (in megabytes) above which files are
    treated as largefiles.  An explicit command-line value (opt) wins;
    otherwise, when largefiles are assumed, fall back to the configured
    [largefiles] minsize.  A missing or non-numeric value aborts.'''
    lfsize = opt
    if assumelfiles and not lfsize:
        lfsize = ui.config(longname, 'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise util.Abort(_('largefiles: size must be number (not %s)\n')
                             % lfsize)
    if lfsize is None:
        raise util.Abort(_('minimum size for largefiles must be specified'))
    return lfsize
40
40
def link(src, dest):
    '''Hard link src to dest, creating dest's parent directory first.
    If hard linking fails (e.g. cross-device or unsupported filesystem),
    fall back to an atomic copy that preserves src's permission bits.'''
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        # fix: close the source file explicitly instead of leaking the
        # handle to the garbage collector
        srcf = open(src, 'rb')
        try:
            dst = util.atomictempfile(dest)
            for chunk in util.filechunkiter(srcf):
                dst.write(chunk)
            dst.close()
        finally:
            srcf.close()
        os.chmod(dest, os.stat(src).st_mode)
51
52
def usercachepath(ui, hash):
    '''Return the user-cache path for the largefile with the given hash,
    or a falsy value when no cache location can be determined.  An
    explicit [largefiles] usercache setting wins; otherwise a
    platform-specific default location is used.'''
    path = ui.configpath(longname, 'usercache', None)
    if path:
        return os.path.join(path, hash)
    if os.name == 'nt':
        # prefer the local (non-roaming) application data directory
        appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
        if appdata:
            path = os.path.join(appdata, longname, hash)
    elif platform.system() == 'Darwin':
        home = os.getenv('HOME')
        if home:
            path = os.path.join(home, 'Library', 'Caches',
                                longname, hash)
    elif os.name == 'posix':
        # honor the XDG base-directory spec, defaulting to ~/.cache
        path = os.getenv('XDG_CACHE_HOME')
        if path:
            path = os.path.join(path, longname, hash)
        else:
            home = os.getenv('HOME')
            if home:
                path = os.path.join(home, '.cache', longname, hash)
    else:
        raise util.Abort(_('unknown operating system: %s\n') % os.name)
    return path
77
78
def inusercache(ui, hash):
    '''Return a truthy value when the largefile with the given hash is
    present in the user cache.'''
    cached = usercachepath(ui, hash)
    return cached and os.path.exists(cached)
81
82
def findfile(repo, hash):
    '''Locate the largefile with the given hash.  Prefer the repo store;
    fall back to the user cache, linking the file into the store on the
    way.  Return the store path, or None when the file is in neither
    place.'''
    if instore(repo, hash):
        repo.ui.note(_('found %s in store\n') % hash)
        return storepath(repo, hash)
    if inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        # link() creates the destination directory as needed
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
93
93
class largefilesdirstate(dirstate.dirstate):
    '''dirstate subclass used for tracking largefiles.

    Every path-taking method normalizes its argument with unixpath()
    before delegating to the base dirstate, so callers may pass
    OS-native paths.  Ignore handling is disabled entirely.
    '''
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def _ignore(self):
        # largefiles are never ignored by this dirstate
        return False
111
111
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    # the largefiles dirstate lives under .hg/largefiles/
    lfstoredir = repo.join(longname)
    opener = scmutil.opener(lfstoredir)
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not os.path.exists(os.path.join(lfstoredir, 'dirstate')):
        util.makedirs(lfstoredir)
        matcher = getstandinmatcher(repo)
        # seed the largefiles dirstate from the standins recorded in
        # the main dirstate
        for standin in repo.dirstate.walk(matcher, [], False, False):
            lfile = splitstandin(standin)
            hash = readstandin(repo, lfile)
            lfdirstate.normallookup(lfile)
            try:
                # mark clean only when the working copy content matches
                # the hash recorded in the standin
                if hash == hashfile(repo.wjoin(lfile)):
                    lfdirstate.normal(lfile)
            except OSError, err:
                # a missing working-copy file is fine; re-raise the rest
                if err.errno != errno.ENOENT:
                    raise
    return lfdirstate
139
139
def lfdirstatestatus(lfdirstate, repo, rev):
    '''Compute the status of the tracked largefiles against rev,
    re-hashing each "unsure" file to classify it as modified or clean,
    and marking clean files as normal in the largefiles dirstate.
    Return the usual 7-tuple (modified, added, removed, missing,
    unknown, ignored, clean).'''
    match = match_.always(repo.root, repo.getcwd())
    unsure, modified, added, removed, missing, unknown, ignored, clean = \
        lfdirstate.status(match, [], False, False, False)
    for lfile in unsure:
        try:
            fctx = repo[rev][standin(lfile)]
        except LookupError:
            fctx = None
        # clean only when the standin exists and its recorded hash
        # matches the working-copy content
        if fctx and fctx.data().strip() == hashfile(repo.wjoin(lfile)):
            clean.append(lfile)
            lfdirstate.normal(lfile)
        else:
            modified.append(lfile)
    return (modified, added, removed, missing, unknown, ignored, clean)
155
155
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''
    if matcher is None:
        matcher = getstandinmatcher(repo)

    ctx = repo[rev]
    # when listing the working copy (rev is None), skip files the
    # dirstate considers unknown ('?')
    return [splitstandin(f)
            for f in ctx.walk(matcher)
            if rev is not None or repo.dirstate[f] != '?']
167
167
def instore(repo, hash):
    '''Return True if the largefile with this hash is in the repo store.'''
    return os.path.exists(storepath(repo, hash))
170
170
def storepath(repo, hash):
    '''Return the absolute path of largefile `hash` inside the repo's
    .hg/largefiles store.'''
    return repo.join(os.path.join(longname, hash))
173
173
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happened:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    path = findfile(repo, hash)
    if path is None:
        return False
    wcpath = repo.wjoin(filename)
    util.makedirs(os.path.dirname(wcpath))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    shutil.copy(path, wcpath)
    return True
188
188
def copytostore(repo, rev, file, uploaded=False):
    '''Copy the largefile standing in for `file` at `rev` into the repo
    store, unless it is already present.  (`uploaded` is unused here;
    kept for interface compatibility.)'''
    hash = readstandin(repo, file, rev)
    if not instore(repo, hash):
        copytostoreabsolute(repo, repo.wjoin(file), hash)
194
194
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    for filename in ctx.files():
        # only standins present in the manifest are of interest
        if isstandin(filename) and filename in ctx.manifest():
            copytostore(repo, ctx.node(), splitstandin(filename))
203
203
204
204
def copytostoreabsolute(repo, file, hash):
    '''Copy the largefile at absolute path `file` into the repo store
    under `hash`.  Prefer hard-linking from the user cache; when not
    cached (and the repo is not mid-conversion), copy atomically into
    the store and then link the result back into the user cache.'''
    if inusercache(repo.ui, hash):
        # link() creates the store directory as needed
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    elif not getattr(repo, "_isconverting", False):
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        dst = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        # fix: close the source file explicitly instead of leaking the
        # handle to the garbage collector
        srcf = open(file, 'rb')
        try:
            for chunk in util.filechunkiter(srcf):
                dst.write(chunk)
        finally:
            srcf.close()
        dst.close()
        linktousercache(repo, hash)
216
216
def linktousercache(repo, hash):
    '''Hard-link the stored largefile `hash` into the user cache, when
    a user cache location is available.'''
    path = usercachepath(repo.ui, hash)
    if not path:
        return
    # link() creates the cache directory as needed
    link(storepath(repo, hash), path)
222
221
def getstandinmatcher(repo, pats=None, opts=None):
    '''Return a match object that applies pats to the standin directory'''
    # fix: avoid mutable default arguments ([] / {}) -- they are shared
    # across calls, so an accidental mutation would leak between callers.
    # None defaults are backward-compatible with the old behavior.
    if opts is None:
        opts = {}
    standindir = repo.wjoin(shortname)
    if pats:
        # anchor user patterns inside the standin directory
        pats = [os.path.join(standindir, pat) for pat in pats]
    else:
        # no patterns: relative to repo root
        pats = [standindir]
    # no warnings about missing files or directories
    match = scmutil.match(repo[None], pats, opts)
    match.bad = lambda f, msg: None
    return match
235
234
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher.files())
    # keep the original standin-dir matchfn; note the old local name
    # shadowed the module-level isstandin()
    standinmatch = smatcher.matchfn
    def composedmatchfn(f):
        # accept f only when it is a standin whose real file rmatcher
        # also accepts
        return standinmatch(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composedmatchfn

    return smatcher
247
246
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Callers wanting an absolute path must wjoin() the result
    # themselves; addlargefiles, for one, needs it repo-relative so it
    # can be passed to repo[None].add().  Join with '/' because that is
    # what dirstate always uses, even on Windows; pconvert() first
    # normalizes names arriving with the OS separator (e.g. from the
    # command line).
    return shortnameslash + util.pconvert(filename)
259
258
def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    # shortnameslash is '.hglf/'
    return filename.startswith(shortnameslash)
264
263
def splitstandin(filename):
    '''Return the big-file name for a standin path, or None when
    filename is not a standin.  Split on '/' because that is what
    dirstate always uses, even on Windows; pconvert() first normalizes
    names coming from external sources (like the command line).'''
    bits = util.pconvert(filename).split('/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    return None
274
273
def updatestandin(repo, standin):
    '''Rewrite the standin from the current content of its big file in
    the working directory; no-op when the big file is absent.'''
    file = repo.wjoin(splitstandin(standin))
    if not os.path.exists(file):
        return
    hash = hashfile(file)
    executable = getexecutable(file)
    writestandin(repo, standin, hash, executable)
281
280
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    standinfile = standin(filename)
    return repo[node][standinfile].data().strip()
286
285
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    # the standin file carries the executable bit of the big file
    writehash(hash, repo.wjoin(standin), executable)
290
289
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Close outfile
    when done and return the binary hash.'''
    hasher = util.sha1('')
    write = outfile.write
    for data in instream:
        hasher.update(data)
        write(data)

    # Blecch: closing a file that somebody else opened is rude and
    # wrong. But it's so darn convenient and practical! After all,
    # outfile was opened just to copy and hash.
    outfile.close()

    return hasher.digest()
306
305
def hashrepofile(repo, file):
    '''Return the hex SHA-1 of the repo-relative file in the working copy.'''
    return hashfile(repo.wjoin(file))
309
308
def hashfile(file):
    '''Return the hex SHA-1 of the file at path `file`, or the empty
    string when the file does not exist.'''
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    # blockstream() closes fd for us once it is exhausted
    for data in blockstream(fd):
        hasher.update(data)
    fd.close()
    return hasher.hexdigest()
319
318
class limitreader(object):
    '''File-like wrapper exposing at most `limit` bytes of `f`.

    Reads beyond the limit return the empty string.  close() is a
    no-op: the underlying file is owned by the caller.
    '''
    def __init__(self, f, limit):
        self.f = f          # wrapped file-like object
        self.limit = limit  # bytes still allowed to be read

    def read(self, length):
        '''Read up to `length` bytes, clamped to the remaining budget.'''
        if self.limit == 0:
            return ''
        # clamp with min() instead of the old `x and a or b` conditional
        # hack, which silently misbehaves when the selected value is falsy
        length = min(length, self.limit)
        self.limit -= length
        return self.f.read(length)

    def close(self):
        # the caller owns self.f; never close it here
        pass
334
333
def blockstream(infile, blocksize=128 * 1024):
    """Generator that yields blocks of data from infile and closes infile."""
    while True:
        block = infile.read(blocksize)
        if not block:
            # same blecch as copyandhash() above
            infile.close()
            return
        yield block
344
343
def writehash(hash, filename, executable):
    '''Write hash (plus a trailing newline) to filename, creating parent
    directories as needed and setting the file mode per `executable`.'''
    util.makedirs(os.path.dirname(filename))
    util.writefile(filename, hash + '\n')
    os.chmod(filename, getmode(executable))
349
348
def getexecutable(filename):
    '''Truthy when filename has the executable bit set for user, group
    and other.'''
    mode = os.stat(filename).st_mode
    return (mode & stat.S_IXUSR and
            mode & stat.S_IXGRP and
            mode & stat.S_IXOTH)
355
354
def getmode(executable):
    # File mode for standin/working files: rwxr-xr-x when the big file
    # is executable, rw-r--r-- otherwise.  (Python 2 octal literals.)
    if executable:
        return 0755
    else:
        return 0644
361
360
def urljoin(first, second, *arg):
    '''Join URL components with single '/' separators, dropping one
    leading slash from each appended piece.  Unlike posixpath.join,
    absolute pieces never discard what came before.'''
    def join(left, right):
        sep = '' if left.endswith('/') else '/'
        if right.startswith('/'):
            right = right[1:]
        return left + sep + right

    url = join(first, second)
    for piece in arg:
        url = join(url, piece)
    return url
374
373
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    h = util.sha1()
    # stream in chunks to avoid loading the whole file into memory
    for chunk in util.filechunkiter(data):
        h.update(chunk)
    return h.hexdigest()
382
381
def httpsendfile(ui, filename):
    '''Return a file-like object suitable for uploading filename over HTTP.'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
385
384
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    # normpath collapses '.' and '..'; pconvert switches to '/' separators
    return util.pconvert(os.path.normpath(path))
389
388
def islfilesrepo(repo):
    '''Truthy when the repo uses largefiles: either the requirement is
    set and at least one stored file lives under the standin directory,
    or the largefiles dirstate is non-empty.'''
    if 'largefiles' in repo.requirements:
        if util.any(shortnameslash in f[0] for f in repo.store.datafiles()):
            return True

    return util.any(openlfdirstate(repo.ui, repo, False))
396
395
class storeprotonotcapable(Exception):
    '''Raised when a remote store supports none of the requested store types.'''
    def __init__(self, storetypes):
        # store types the local side would have accepted
        self.storetypes = storetypes
400
399
def getstandinsstate(repo):
    '''Return a list of (lfile, hash) pairs for every standin tracked by
    the dirstate; hash is None when the standin cannot be read.'''
    state = []
    matcher = getstandinmatcher(repo)
    # note: the old loop variable shadowed the module-level standin()
    for standinfile in repo.dirstate.walk(matcher, [], False, False):
        lfile = splitstandin(standinfile)
        try:
            hash = readstandin(repo, lfile)
        except IOError:
            hash = None
        state.append((lfile, hash))
    return state
412
411
def getlfilestoupdate(oldstandins, newstandins):
    '''Return the names of largefiles whose (name, hash) standin entry
    differs between oldstandins and newstandins, without duplicates.'''
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    seen = set()  # O(1) dedup instead of scanning filelist (was O(n^2))
    for f in changedstandins:
        if f[0] not in seen:
            seen.add(f[0])
            filelist.append(f[0])
    return filelist
@@ -1,81 +1,77 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''store class for local filesystem'''
9 '''store class for local filesystem'''
10
10
11 import os
12
13 from mercurial import util
14 from mercurial.i18n import _
11 from mercurial.i18n import _
15
12
16 import lfutil
13 import lfutil
17 import basestore
14 import basestore
18
15
class localstore(basestore.basestore):
    '''localstore first attempts to grab files out of the store in the remote
    Mercurial repository. Failing that, it attempts to grab the files from
    the user cache.'''

    def __init__(self, ui, repo, remote):
        # the "remote" here is another repository reachable on the local
        # filesystem
        self.remote = remote.local()
        super(localstore, self).__init__(ui, repo, self.remote.url())

    def put(self, source, hash):
        # `source` is unused: the file is hard-linked straight from our
        # own store into the remote's store.
        # NOTE(review): presumably the caller guarantees the local store
        # already holds `hash` -- confirm against basestore callers.
        if lfutil.instore(self.remote, hash):
            return
        lfutil.link(lfutil.storepath(self.repo, hash),
                    lfutil.storepath(self.remote, hash))

    def exists(self, hashes):
        # map each requested hash to whether the remote store has it
        retval = {}
        for hash in hashes:
            retval[hash] = lfutil.instore(self.remote, hash)
        return retval


    def _getfile(self, tmpfile, filename, hash):
        # prefer the remote store, then fall back to the user cache
        if lfutil.instore(self.remote, hash):
            path = lfutil.storepath(self.remote, hash)
        elif lfutil.inusercache(self.ui, hash):
            path = lfutil.usercachepath(self.ui, hash)
        else:
            raise basestore.StoreError(filename, hash, self.url,
                _("can't get file locally"))
        fd = open(path, 'rb')
        try:
            return lfutil.copyandhash(fd, tmpfile)
        finally:
            fd.close()

    def _verifyfile(self, cctx, cset, contents, standin, verified):
        # Return True when verification FAILED for this standin,
        # False when it passed or was skipped (already verified /
        # not a standin).
        filename = lfutil.splitstandin(standin)
        if not filename:
            return False
        fctx = cctx[standin]
        key = (filename, fctx.filenode())
        if key in verified:
            return False

        # a standin's content is the 40-char hex hash of the big file
        expecthash = fctx.data()[0:40]
        storepath = lfutil.storepath(self.remote, expecthash)
        verified.add(key)
        if not lfutil.instore(self.remote, expecthash):
            self.ui.warn(
                _('changeset %s: %s references missing %s\n')
                % (cset, filename, storepath))
            return True # failed

        if contents:
            # re-hash the stored file to detect corruption
            actualhash = lfutil.hashfile(storepath)
            if actualhash != expecthash:
                self.ui.warn(
                    _('changeset %s: %s references corrupted %s\n')
                    % (cset, filename, storepath))
                return True # failed
        return False
General Comments 0
You need to be logged in to leave comments. Login now