##// END OF EJS Templates
largefiles: rename 'admin' to more descriptive 'lfstoredir'
Mads Kiilerich -
r18147:79f24931 default
parent child Browse files
Show More
@@ -1,457 +1,457 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import os
11 import os
12 import errno
12 import errno
13 import platform
13 import platform
14 import shutil
14 import shutil
15 import stat
15 import stat
16
16
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 from mercurial.i18n import _
18 from mercurial.i18n import _
19
19
# Standin files for largefiles live under this directory in the working copy.
shortname = '.hglf'
# Used for the store directory name, the config section and the cache subdir.
longname = 'largefiles'
22
22
23
23
24 # -- Portability wrappers ----------------------------------------------
24 # -- Portability wrappers ----------------------------------------------
25
25
def dirstatewalk(dirstate, matcher, unknown=False, ignored=False):
    '''Walk the dirstate, yielding files accepted by matcher.

    The empty list is the subrepos argument; unknown/ignored default to
    off so only tracked files are reported.  NOTE: the 'dirstate'
    parameter shadows the imported dirstate module inside this function.
    '''
    return dirstate.walk(matcher, [], unknown, ignored)
28
28
def repoadd(repo, list):
    '''Schedule the given working-copy files for addition.'''
    # NOTE: 'list' shadows the builtin; kept for interface compatibility.
    add = repo[None].add
    return add(list)
32
32
def reporemove(repo, list, unlink=False):
    '''Forget the given working-copy files, optionally also unlinking
    them from disk, under the working-copy lock.  Returns None.

    The original wrapped the body in a pointless inner closure that was
    defined and immediately called once; it is inlined here with
    identical behavior (the closure returned None, as does this).
    '''
    # NOTE: 'list' shadows the builtin; kept for interface compatibility.
    wlock = repo.wlock()
    try:
        if unlink:
            for f in list:
                # ignoremissing: the file may already be gone from disk
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(list)
    finally:
        wlock.release()
44
44
def repoforget(repo, list):
    '''Stop tracking the given files without removing them from disk.'''
    # NOTE: 'list' shadows the builtin; kept for interface compatibility.
    forget = repo[None].forget
    return forget(list)
48
48
def findoutgoing(repo, remote, force):
    '''Return the list of changesets present locally but missing from
    remote.'''
    # NOTE(review): deferred import — the reason is not visible here;
    # kept as-is.
    from mercurial import discovery
    outgoing = discovery.findcommonoutgoing(repo, remote.peer(), force=force)
    return outgoing.missing
53
53
54 # -- Private worker functions ------------------------------------------
54 # -- Private worker functions ------------------------------------------
55
55
def getminsize(ui, assumelfiles, opt, default=10):
    '''Determine the minimum size threshold for largefiles.

    The command-line value 'opt' wins when given; otherwise, when
    'assumelfiles' is set, fall back to the largefiles.minsize config
    value (defaulting to 'default').  The result is returned as a
    float; util.Abort is raised for non-numeric or missing values.
    '''
    size = opt
    if assumelfiles and not size:
        size = ui.config(longname, 'minsize', default=default)
    if size:
        try:
            size = float(size)
        except ValueError:
            raise util.Abort(_('largefiles: size must be number (not %s)\n')
                             % size)
    if size is None:
        raise util.Abort(_('minimum size for largefiles must be specified'))
    return size
69
69
def link(src, dest):
    '''Hardlink src to dest; when hardlinking fails, fall back to an
    atomic copy that preserves src's permission bits.

    Fix: the original opened src for the fallback copy and never closed
    the file object, leaking the handle until garbage collection; the
    handle is now closed deterministically.
    '''
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        dst = util.atomictempfile(dest)
        fp = open(src, 'rb')
        try:
            for chunk in util.filechunkiter(fp):
                dst.write(chunk)
        finally:
            fp.close()
        dst.close()
        os.chmod(dest, os.stat(src).st_mode)
80
80
def usercachepath(ui, hash):
    '''Return the per-user cache path for the largefile with the given
    hash, or None when no cache location can be determined.

    An explicit largefiles.usercache setting wins; otherwise a
    per-platform default is used.  The result may be None when the
    relevant environment variables are unset.
    '''
    path = ui.configpath(longname, 'usercache', None)
    if path:
        # explicit configuration wins
        path = os.path.join(path, hash)
    else:
        if os.name == 'nt':
            # prefer the local (non-roaming) application data directory
            appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
            if appdata:
                path = os.path.join(appdata, longname, hash)
        elif platform.system() == 'Darwin':
            home = os.getenv('HOME')
            if home:
                path = os.path.join(home, 'Library', 'Caches',
                                    longname, hash)
        elif os.name == 'posix':
            # follow the XDG base directory convention when configured
            path = os.getenv('XDG_CACHE_HOME')
            if path:
                path = os.path.join(path, longname, hash)
            else:
                home = os.getenv('HOME')
                if home:
                    path = os.path.join(home, '.cache', longname, hash)
        else:
            raise util.Abort(_('unknown operating system: %s\n') % os.name)
    return path
106
106
def inusercache(ui, hash):
    '''Return true if the largefile exists in the per-user cache.'''
    path = usercachepath(ui, hash)
    # path may be None when no cache location could be determined
    return path and os.path.exists(path)
110
110
def findfile(repo, hash):
    '''Return the store path of the largefile with the given hash,
    promoting it from the user cache into the store when needed, or
    None when it is in neither place.'''
    if instore(repo, hash):
        repo.ui.note(_('found %s in store\n') % hash)
        return storepath(repo, hash)
    elif inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        util.makedirs(os.path.dirname(path))
        # promote the cached copy into the store (hardlink if possible)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
122
122
class largefilesdirstate(dirstate.dirstate):
    '''A dirstate subclass that normalizes every incoming path with
    unixpath() before delegating to the base class, since the
    largefiles dirstate stores slash-separated repo-relative paths.'''
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
138
138
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    lfstoredir = repo.join(longname)
    opener = scmutil.opener(lfstoredir)
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not os.path.exists(os.path.join(lfstoredir, 'dirstate')):
        util.makedirs(lfstoredir)
        matcher = getstandinmatcher(repo)
        for standin in dirstatewalk(repo.dirstate, matcher):
            lfile = splitstandin(standin)
            hash = readstandin(repo, lfile)
            lfdirstate.normallookup(lfile)
            try:
                # mark clean only when the working copy content still
                # matches the hash recorded in the standin
                if hash == hashfile(repo.wjoin(lfile)):
                    lfdirstate.normal(lfile)
            except OSError, err:
                # a missing largefile is fine; anything else is real
                if err.errno != errno.ENOENT:
                    raise
    return lfdirstate
166
166
def lfdirstatestatus(lfdirstate, repo, rev):
    '''Compute status of largefiles against revision rev, resolving
    entries the dirstate reports as "unsure" by comparing content
    hashes.  Returns the tuple
    (modified, added, removed, missing, unknown, ignored, clean).'''
    match = match_.always(repo.root, repo.getcwd())
    s = lfdirstate.status(match, [], False, False, False)
    unsure, modified, added, removed, missing, unknown, ignored, clean = s
    for lfile in unsure:
        # compare the standin's recorded hash with the working copy's
        if repo[rev][standin(lfile)].data().strip() != \
                hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            # remember the verification so the next status is cheap
            lfdirstate.normal(lfile)
    return (modified, added, removed, missing, unknown, ignored, clean)
179
179
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [splitstandin(f)
            for f in repo[rev].walk(matcher)
            if rev is not None or repo.dirstate[f] != '?']
191
191
def instore(repo, hash):
    '''Return true if a largefile with the given hash exists in the
    repository-local store.'''
    return os.path.exists(storepath(repo, hash))
194
194
def storepath(repo, hash):
    '''Return the path in the repository-local store (.hg/largefiles)
    for the largefile with the given hash.'''
    return repo.join(os.path.join(longname, hash))
197
197
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    path = findfile(repo, hash)
    if path is None:
        return False
    util.makedirs(os.path.dirname(repo.wjoin(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    shutil.copy(path, repo.wjoin(filename))
    return True
212
212
def copytostore(repo, rev, file, uploaded=False):
    '''Copy the largefile for 'file' (hash read from its standin at
    revision 'rev') from the working copy into the local store, unless
    it is already there.'''
    # NOTE: 'uploaded' is unused here; kept for interface compatibility.
    hash = readstandin(repo, file, rev)
    if instore(repo, hash):
        return
    copytostoreabsolute(repo, repo.wjoin(file), hash)
218
218
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''

    ctx = repo[node]
    for filename in ctx.files():
        # only standins that still exist in the manifest (i.e. were not
        # removed by this changeset)
        if isstandin(filename) and filename in ctx.manifest():
            realfile = splitstandin(filename)
            copytostore(repo, ctx.node(), realfile)
227
227
228
228
def copytostoreabsolute(repo, file, hash):
    '''Copy the largefile at absolute path 'file' into the local store
    under its hash, then link it into the user cache.

    When the hash is already in the user cache, a link/copy from there
    is used instead of re-reading the file.  During a convert
    (repo._isconverting) nothing is copied.

    Fix: the original opened 'file' for reading and never closed the
    file object; the handle is now closed deterministically.
    '''
    util.makedirs(os.path.dirname(storepath(repo, hash)))
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    elif not getattr(repo, "_isconverting", False):
        dst = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        fp = open(file, 'rb')
        try:
            for chunk in util.filechunkiter(fp):
                dst.write(chunk)
        finally:
            fp.close()
        dst.close()
        linktousercache(repo, hash)
240
240
def linktousercache(repo, hash):
    '''Link the stored largefile with the given hash into the per-user
    cache, if a user cache location is known.'''
    path = usercachepath(repo.ui, hash)
    if path:
        util.makedirs(os.path.dirname(path))
        link(storepath(repo, hash), path)
246
246
def getstandinmatcher(repo, pats=None, opts=None):
    '''Return a match object that applies pats to the standin directory.

    Fix: the defaults were the mutable literals [] and {}, which are
    shared between calls; None sentinels are used instead.  Behavior is
    unchanged: an empty pats list was already treated as "no patterns"
    by the truthiness test below, and opts is only passed through.
    '''
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    standindir = repo.pathto(shortname)
    if pats:
        # patterns supplied: search standin directory relative to current dir
        cwd = repo.getcwd()
        if os.path.isabs(cwd):
            # cwd is an absolute path for hg -R <reponame>
            # work relative to the repository root in this case
            cwd = ''
        pats = [os.path.join(standindir, cwd, pat) for pat in pats]
    elif os.path.isdir(standindir):
        # no patterns: relative to repo root
        pats = [standindir]
    else:
        # no patterns and no standin dir: return matcher that matches nothing
        return match_.match(repo.root, None, [], exact=True)

    # no warnings about missing files or directories
    match = scmutil.match(repo[None], pats, opts)
    match.bad = lambda f, msg: None
    return match
269
269
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher.files())
    isstandin = smatcher.matchfn
    def composedmatchfn(f):
        # f must be a standin AND its largefile must match rmatcher
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composedmatchfn

    return smatcher
281
281
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repoadd().  So leave
    #    it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortname + '/' + util.pconvert(filename)
293
293
def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    return filename.startswith(shortname + '/')
298
298
def splitstandin(filename):
    '''Return the largefile name for the given standin path, or None
    when filename is not a standin.'''
    # Split on '/' because that's what dirstate always uses, even on
    # Windows; convert the local separator first in case the name came
    # from an external source (like the command line).
    parts = util.pconvert(filename).split('/', 1)
    if len(parts) == 2 and parts[0] == shortname:
        return parts[1]
    return None
308
308
def updatestandin(repo, standin):
    '''Re-hash the working-copy largefile for the given standin and
    rewrite the standin file.  Does nothing when the largefile is
    missing from the working copy.'''
    file = repo.wjoin(splitstandin(standin))
    if os.path.exists(file):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
315
315
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    return repo[node][standin(filename)].data().strip()
320
320
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    # writehash also applies the permission bits implied by executable
    writehash(hash, repo.wjoin(standin), executable)
324
324
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Close outfile
    when done and return the binary hash.'''
    hasher = util.sha1('')
    for data in instream:
        hasher.update(data)
        outfile.write(data)

    # Blecch: closing a file that somebody else opened is rude and
    # wrong. But it's so darn convenient and practical! After all,
    # outfile was opened just to copy and hash.
    outfile.close()

    return hasher.digest()
340
340
def hashrepofile(repo, file):
    '''Return the hex hash of the repo-relative file in the working copy.'''
    return hashfile(repo.wjoin(file))
343
343
def hashfile(file):
    '''Return the hex SHA-1 hash of the given file, or the empty string
    when the file does not exist.'''
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    for data in blockstream(fd):
        hasher.update(data)
    # blockstream() already closed fd when it was exhausted; this extra
    # close is a harmless no-op
    fd.close()
    return hasher.hexdigest()
353
353
class limitreader(object):
    '''Wrap a file-like object so that at most 'limit' bytes in total
    can be read through it.'''
    def __init__(self, f, limit):
        self.f = f          # underlying file-like object
        self.limit = limit  # bytes still allowed to be read

    def read(self, length):
        '''Read up to 'length' bytes, never exceeding the remaining
        limit; return the empty string once the limit is exhausted.'''
        if self.limit == 0:
            return ''
        length = min(length, self.limit)
        self.limit -= length
        return self.f.read(length)

    def close(self):
        '''Deliberately leave the underlying file open.'''
        pass
368
368
def blockstream(infile, blocksize=128 * 1024):
    """Generator that yields blocks of data from infile and closes infile."""
    data = infile.read(blocksize)
    while data:
        yield data
        data = infile.read(blocksize)
    # same blecch as copyandhash() above: closing a file somebody else
    # opened, purely for convenience
    infile.close()
378
378
def writehash(hash, filename, executable):
    '''Write hash plus a trailing newline to filename, creating parent
    directories as needed, and set its permission bits according to
    'executable' (see getmode()).'''
    util.makedirs(os.path.dirname(filename))
    util.writefile(filename, hash + '\n')
    os.chmod(filename, getmode(executable))
383
383
def getexecutable(filename):
    '''Return a truthy value when filename is executable by owner,
    group AND other; a falsy value otherwise.'''
    mode = os.stat(filename).st_mode
    # short-circuit exactly like the original chained 'and': the result
    # is the first zero bit-test, or the last one when all are set
    result = mode & stat.S_IXUSR
    if result:
        result = mode & stat.S_IXGRP
    if result:
        result = mode & stat.S_IXOTH
    return result
389
389
def getmode(executable):
    '''Return the permission bits for a standin file: rwxr-xr-x when
    executable, rw-r--r-- otherwise.'''
    # spelled with stat constants instead of octal literals; the values
    # are identical (0755 and 0644)
    if executable:
        return (stat.S_IRWXU |
                stat.S_IRGRP | stat.S_IXGRP |
                stat.S_IROTH | stat.S_IXOTH)
    else:
        return (stat.S_IRUSR | stat.S_IWUSR |
                stat.S_IRGRP | stat.S_IROTH)
395
395
def urljoin(first, second, *arg):
    '''Join two or more URL fragments so that consecutive fragments are
    separated by exactly one slash.'''
    def _join(left, right):
        if not left.endswith('/'):
            left = left + '/'
        if right.startswith('/'):
            right = right[1:]
        return left + right
    url = first
    for piece in (second,) + arg:
        url = _join(url, piece)
    return url
408
408
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    h = util.sha1()
    for chunk in util.filechunkiter(data):
        h.update(chunk)
    return h.hexdigest()
416
416
def httpsendfile(ui, filename):
    '''Return an httpsendfile object for uploading filename, opened in
    binary mode.'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
419
419
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    return util.pconvert(os.path.normpath(path))
423
423
def islfilesrepo(repo):
    '''Return whether the repository uses largefiles: either the
    requirement is set and standins exist in the store, or the
    largefiles dirstate tracks something.'''
    if ('largefiles' in repo.requirements and
        util.any(shortname + '/' in f[0] for f in repo.store.datafiles())):
        return True

    # note: only the first branch returns a literal True; this one
    # returns whatever util.any() yields
    return util.any(openlfdirstate(repo.ui, repo, False))
430
430
class storeprotonotcapable(Exception):
    '''Exception carrying the store types that could not be satisfied
    by a remote store protocol.'''
    def __init__(self, storetypes):
        # the store types that were requested but not available
        self.storetypes = storetypes
434
434
def getcurrentheads(repo):
    '''Return the list of the heads of all branches in the repository.'''
    heads = []
    for name in repo.branchmap():
        heads.extend(repo.branchheads(name))
    return heads
442
442
def getstandinsstate(repo):
    '''Return a list of (lfile, hash) pairs for every standin tracked
    by the dirstate.'''
    standins = []
    matcher = getstandinmatcher(repo)
    for standin in dirstatewalk(repo.dirstate, matcher):
        lfile = splitstandin(standin)
        standins.append((lfile, readstandin(repo, lfile)))
    return standins
450
450
def getlfilestoupdate(oldstandins, newstandins):
    '''Return the largefile names whose (name, hash) standin entries
    differ between the two lists, without duplicates.'''
    changed = set(oldstandins) ^ set(newstandins)
    names = []
    for entry in changed:
        if entry[0] not in names:
            names.append(entry[0])
    return names
General Comments 0
You need to be logged in to leave comments. Login now