##// END OF EJS Templates
largefiles: use constant for '.hglf/'...
Mads Kiilerich -
r18151:90ad387d default
parent child Browse files
Show More
@@ -1,459 +1,460 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import os
11 import os
12 import errno
12 import errno
13 import platform
13 import platform
14 import shutil
14 import shutil
15 import stat
15 import stat
16
16
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 from mercurial.i18n import _
18 from mercurial.i18n import _
19
19
# Directory (relative to the repo root) that holds the standin files
# which track largefiles in the working copy.
shortname = '.hglf'
# Precomputed with a trailing '/' for prefix tests and path building.
shortnameslash = shortname + '/'
# Extension name; also used as the config section and store directory name.
longname = 'largefiles'
22
23
23
24
24 # -- Portability wrappers ----------------------------------------------
25 # -- Portability wrappers ----------------------------------------------
25
26
def dirstatewalk(dirstate, matcher, unknown=False, ignored=False):
    '''Walk the dirstate with the given matcher.  Unknown and ignored
    files are skipped unless explicitly requested; no subrepos ([]).'''
    return dirstate.walk(matcher, [], unknown, ignored)
28
29
def repoadd(repo, list):
    '''Schedule the given files for addition in the working context.'''
    return repo[None].add(list)
32
33
def reporemove(repo, list, unlink=False):
    '''Forget the given files in the working context under the wlock.

    When unlink is True the files are also deleted from the working
    directory (missing files are silently ignored).  Returns None, as the
    original implementation did.'''
    # The original wrapped this body in a nested single-use closure
    # (`remove`) that was defined and immediately called; inlined here.
    wlock = repo.wlock()
    try:
        if unlink:
            for f in list:
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(list)
    finally:
        wlock.release()
44
45
def repoforget(repo, list):
    '''Forget the given files in the working context without deleting
    them from the working directory.'''
    return repo[None].forget(list)
48
49
def findoutgoing(repo, remote, force):
    '''Return the list of changeset nodes present in repo but missing from
    remote.  discovery is imported lazily, inside the function.'''
    from mercurial import discovery
    outgoing = discovery.findcommonoutgoing(repo, remote.peer(), force=force)
    return outgoing.missing
53
54
54 # -- Private worker functions ------------------------------------------
55 # -- Private worker functions ------------------------------------------
55
56
def getminsize(ui, assumelfiles, opt, default=10):
    '''Return the minimum size (as a float) above which files are treated
    as largefiles.

    The explicit opt value wins; otherwise, when assumelfiles is set, fall
    back to the [largefiles] minsize config value (or default).  Raises
    util.Abort if the value is not numeric, or if no value could be
    determined at all.'''
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, 'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise util.Abort(_('largefiles: size must be number (not %s)\n')
                             % lfsize)
    if lfsize is None:
        raise util.Abort(_('minimum size for largefiles must be specified'))
    return lfsize
69
70
def link(src, dest):
    '''Hardlink src to dest; if hardlinking fails, fall back to an atomic
    copy that also replicates src's permission bits.'''
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        dst = util.atomictempfile(dest)
        for chunk in util.filechunkiter(open(src, 'rb')):
            dst.write(chunk)
        dst.close()
        os.chmod(dest, os.stat(src).st_mode)
80
81
def usercachepath(ui, hash):
    '''Return the user-cache path for the largefile with the given hash,
    or None when no cache location can be determined.

    The [largefiles] usercache config wins; otherwise the platform
    convention is used: %LOCALAPPDATA% (or %APPDATA%) on Windows,
    ~/Library/Caches on Darwin, $XDG_CACHE_HOME or ~/.cache on other
    POSIX systems.'''
    path = ui.configpath(longname, 'usercache', None)
    if path:
        path = os.path.join(path, hash)
    else:
        if os.name == 'nt':
            appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
            if appdata:
                path = os.path.join(appdata, longname, hash)
        elif platform.system() == 'Darwin':
            home = os.getenv('HOME')
            if home:
                path = os.path.join(home, 'Library', 'Caches',
                                    longname, hash)
        elif os.name == 'posix':
            path = os.getenv('XDG_CACHE_HOME')
            if path:
                path = os.path.join(path, longname, hash)
            else:
                home = os.getenv('HOME')
                if home:
                    path = os.path.join(home, '.cache', longname, hash)
        else:
            raise util.Abort(_('unknown operating system: %s\n') % os.name)
    return path
106
107
def inusercache(ui, hash):
    '''Return whether the largefile with the given hash exists in the user
    cache (falsy when no cache path is configured or derivable).'''
    path = usercachepath(ui, hash)
    return path and os.path.exists(path)
110
111
def findfile(repo, hash):
    '''Return a store path for the largefile with the given hash, pulling
    it into the store from the user cache if necessary; None when it is in
    neither cache.'''
    if instore(repo, hash):
        repo.ui.note(_('found %s in store\n') % hash)
        return storepath(repo, hash)
    elif inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        util.makedirs(os.path.dirname(path))
        # populate the repo store from the user cache (hardlink or copy)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
122
123
class largefilesdirstate(dirstate.dirstate):
    '''dirstate subclass used for tracking largefiles: every path is
    normalized via unixpath() (slash-separated, normpath'd) before being
    handed to the base class, and ignore handling is disabled so that no
    largefile is ever treated as ignored.'''
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def _ignore(self):
        # largefiles are never ignored
        return False
140
141
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    lfstoredir = repo.join(longname)
    opener = scmutil.opener(lfstoredir)
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not os.path.exists(os.path.join(lfstoredir, 'dirstate')):
        util.makedirs(lfstoredir)
        matcher = getstandinmatcher(repo)
        for standin in dirstatewalk(repo.dirstate, matcher):
            lfile = splitstandin(standin)
            hash = readstandin(repo, lfile)
            lfdirstate.normallookup(lfile)
            try:
                # mark clean only when the working copy matches the standin
                if hash == hashfile(repo.wjoin(lfile)):
                    lfdirstate.normal(lfile)
            except OSError, err:
                # a missing working copy file is fine; anything else is not
                if err.errno != errno.ENOENT:
                    raise
    return lfdirstate
168
169
def lfdirstatestatus(lfdirstate, repo, rev):
    '''Compute the status of all largefiles against revision rev,
    resolving 'unsure' entries by comparing the working-copy hash with the
    committed standin.  Returns the tuple
    (modified, added, removed, missing, unknown, ignored, clean).'''
    match = match_.always(repo.root, repo.getcwd())
    s = lfdirstate.status(match, [], False, False, False)
    unsure, modified, added, removed, missing, unknown, ignored, clean = s
    for lfile in unsure:
        if repo[rev][standin(lfile)].data().strip() != \
                hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            # remember the verified state so the next status is cheap
            lfdirstate.normal(lfile)
    return (modified, added, removed, missing, unknown, ignored, clean)
181
182
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [splitstandin(f)
            for f in repo[rev].walk(matcher)
            if rev is not None or repo.dirstate[f] != '?']
193
194
def instore(repo, hash):
    '''Return whether the largefile with this hash exists in the repo
    store.'''
    return os.path.exists(storepath(repo, hash))
196
197
def storepath(repo, hash):
    '''Return the path of the largefile with the given hash in the repo
    store (.hg/largefiles/<hash>).'''
    return repo.join(os.path.join(longname, hash))
199
200
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happened:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    path = findfile(repo, hash)
    if path is None:
        return False
    util.makedirs(os.path.dirname(repo.wjoin(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    shutil.copy(path, repo.wjoin(filename))
    return True
214
215
def copytostore(repo, rev, file, uploaded=False):
    '''Copy the largefile referenced by file's standin at rev into the
    repo store, unless it is already there.  (uploaded is accepted for
    interface compatibility but not used here.)'''
    hash = readstandin(repo, file, rev)
    if instore(repo, hash):
        return
    copytostoreabsolute(repo, repo.wjoin(file), hash)
220
221
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''

    ctx = repo[node]
    for filename in ctx.files():
        # only files that are standins AND still exist in this revision
        if isstandin(filename) and filename in ctx.manifest():
            realfile = splitstandin(filename)
            copytostore(repo, ctx.node(), realfile)
229
230
230
231
def copytostoreabsolute(repo, file, hash):
    '''Copy the file at absolute path `file` into the repo store under
    `hash`.  Prefer linking from the user cache; otherwise copy atomically
    and link the result back into the user cache.  When the repo is being
    converted (_isconverting), nothing is copied.'''
    util.makedirs(os.path.dirname(storepath(repo, hash)))
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    elif not getattr(repo, "_isconverting", False):
        dst = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        for chunk in util.filechunkiter(open(file, 'rb')):
            dst.write(chunk)
        dst.close()
        linktousercache(repo, hash)
242
243
def linktousercache(repo, hash):
    '''Link (or copy) the store copy of the largefile into the user cache,
    when a user cache location is available.'''
    path = usercachepath(repo.ui, hash)
    if path:
        util.makedirs(os.path.dirname(path))
        link(storepath(repo, hash), path)
248
249
def getstandinmatcher(repo, pats=[], opts={}):
    '''Return a match object that applies pats to the standin directory'''
    # NOTE(review): the mutable default arguments are never mutated in this
    # function, so they are safe as written.
    standindir = repo.wjoin(shortname)
    if pats:
        # patterns supplied: search standin directory relative to current dir
        cwd = repo.getcwd()
        if os.path.isabs(cwd):
            # cwd is an absolute path for hg -R <reponame>
            # work relative to the repository root in this case
            cwd = ''
        pats = [os.path.join(standindir, cwd, pat) for pat in pats]
    elif os.path.isdir(standindir):
        # no patterns: relative to repo root
        pats = [standindir]
    else:
        # no patterns and no standin dir: return matcher that matches nothing
        return match_.match(repo.root, None, [], exact=True)

    # no warnings about missing files or directories
    match = scmutil.match(repo[None], pats, opts)
    match.bad = lambda f, msg: None
    return match
271
272
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher.files())
    isstandin = smatcher.matchfn
    def composedmatchfn(f):
        # f must both look like a standin and, once stripped of the
        # standin prefix, be accepted by the caller's matcher
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composedmatchfn

    return smatcher
283
284
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repoadd(). So leave
    #    it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)
295
296
def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    return filename.startswith(shortnameslash)
300
301
def splitstandin(filename):
    '''Return the largefile path that the standin `filename` refers to,
    or None if filename is not a standin.

    Split on '/' because that's what dirstate always uses, even on
    Windows; convert the local separator first in case the name came from
    an external source (like the command line).'''
    parts = util.pconvert(filename).split('/', 1)
    if len(parts) != 2 or parts[0] != shortname:
        return None
    return parts[1]
310
311
def updatestandin(repo, standin):
    '''Re-hash the working copy of the largefile behind `standin` and
    rewrite the standin file accordingly; no-op when the largefile is
    absent from the working directory.'''
    file = repo.wjoin(splitstandin(standin))
    if os.path.exists(file):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
317
318
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    return repo[node][standin(filename)].data().strip()
322
323
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    writehash(hash, repo.wjoin(standin), executable)
326
327
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Close outfile
    when done and return the binary hash.'''
    hasher = util.sha1('')
    for data in instream:
        hasher.update(data)
        outfile.write(data)

    # Blecch: closing a file that somebody else opened is rude and
    # wrong. But it's so darn convenient and practical! After all,
    # outfile was opened just to copy and hash.
    outfile.close()

    return hasher.digest()
342
343
def hashrepofile(repo, file):
    '''Return the hex hash of the working copy of the repo-relative
    path `file`.'''
    return hashfile(repo.wjoin(file))
345
346
def hashfile(file):
    '''Return the hex SHA-1 hash of the file at the given path, or the
    empty string when the file does not exist.'''
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    for data in blockstream(fd):
        hasher.update(data)
    # blockstream() already closed fd; this extra close is harmless
    fd.close()
    return hasher.hexdigest()
355
356
class limitreader(object):
    '''File-like wrapper exposing at most `limit` bytes of `f` through
    read().  close() is deliberately a no-op: the underlying file stays
    open for its owner.'''
    def __init__(self, f, limit):
        self.f = f
        self.limit = limit

    def read(self, length):
        '''Read up to `length` bytes, never exceeding the remaining
        limit.  Returns '' once the limit is exhausted.'''
        if self.limit == 0:
            return ''
        # min() replaces the fragile `a > b and b or a` pseudo-ternary;
        # equivalent here because self.limit > 0 on this path.
        length = min(length, self.limit)
        self.limit -= length
        return self.f.read(length)

    def close(self):
        # do not close self.f -- the caller owns it
        pass
370
371
def blockstream(infile, blocksize=128 * 1024):
    """Generator that yields blocks of data from infile and closes infile."""
    block = infile.read(blocksize)
    while block:
        yield block
        block = infile.read(blocksize)
    # same blecch as copyandhash() above: we close a file we did not open
    infile.close()
380
381
def writehash(hash, filename, executable):
    '''Write hash plus a trailing newline to filename, creating parent
    directories as needed, with mode 0755 or 0644 depending on
    executable.'''
    util.makedirs(os.path.dirname(filename))
    util.writefile(filename, hash + '\n')
    os.chmod(filename, getmode(executable))
385
386
def getexecutable(filename):
    '''Return a truthy value iff filename has all three execute bits
    (user, group and other) set.'''
    st_mode = os.stat(filename).st_mode
    user_x = st_mode & stat.S_IXUSR
    group_x = st_mode & stat.S_IXGRP
    other_x = st_mode & stat.S_IXOTH
    return user_x and group_x and other_x
391
392
def getmode(executable):
    '''Return the permission mode for a written file: 0o755 when
    executable is truthy, else 0o644.'''
    # 0oNNN notation (valid since Python 2.6, required on Python 3)
    # replaces the legacy 0NNN literals, which are a py3 syntax error.
    if executable:
        return 0o755
    else:
        return 0o644
397
398
def urljoin(first, second, *arg):
    '''Join URL components so that exactly one '/' separates each
    adjacent pair (extra leading slashes on the right side are dropped,
    a missing trailing slash on the left side is added).'''
    url = first
    for piece in (second,) + arg:
        if not url.endswith('/'):
            url += '/'
        if piece.startswith('/'):
            piece = piece[1:]
        url += piece
    return url
410
411
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    h = util.sha1()
    for chunk in util.filechunkiter(data):
        h.update(chunk)
    return h.hexdigest()
418
419
def httpsendfile(ui, filename):
    '''Return a file-like object suitable for sending filename over an
    HTTP connection (opened in binary mode).'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
421
422
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    # normpath collapses '.' and '..'; pconvert forces '/' separators so
    # the same file always maps to the same dirstate key on any platform
    return util.pconvert(os.path.normpath(path))
425
426
def islfilesrepo(repo):
    '''Return True if repo actually uses largefiles: either the
    'largefiles' requirement is set and the store contains standin data
    files, or the largefiles dirstate is non-empty.'''
    if ('largefiles' in repo.requirements and
            util.any(shortnameslash in f[0] for f in repo.store.datafiles())):
        return True

    return util.any(openlfdirstate(repo.ui, repo, False))
432
433
class storeprotonotcapable(Exception):
    '''Raised when no remote store supports any of the requested store
    types.'''
    def __init__(self, storetypes):
        # the store types that were requested but not supported
        self.storetypes = storetypes
436
437
def getcurrentheads(repo):
    '''Return the heads of every branch in the repo as one flat list.'''
    heads = []
    for branch in repo.branchmap():
        heads.extend(repo.branchheads(branch))
    return heads
444
445
def getstandinsstate(repo):
    '''Return a list of (lfile, hash) pairs for every standin tracked in
    the dirstate.'''
    standins = []
    matcher = getstandinmatcher(repo)
    for standin in dirstatewalk(repo.dirstate, matcher):
        lfile = splitstandin(standin)
        standins.append((lfile, readstandin(repo, lfile)))
    return standins
452
453
def getlfilestoupdate(oldstandins, newstandins):
    '''Given two lists of (lfile, hash) pairs, return the largefile names
    whose standin entry appears in exactly one of the two lists (i.e.
    whose content or presence changed), without duplicates.'''
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    # seen-set makes dedup O(1) per file instead of the original O(n)
    # `f[0] not in filelist` list scan; output order is unchanged.
    seen = set()
    for f in changedstandins:
        if f[0] not in seen:
            seen.add(f[0])
            filelist.append(f[0])
    return filelist
General Comments 0
You need to be logged in to leave comments. Login now