##// END OF EJS Templates
largefiles: update lfutil.findoutgoing() discovery method...
Matt Harbison -
r17794:a03cca2c default
parent child Browse files
Show More
@@ -1,470 +1,469 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import os
11 import os
12 import errno
12 import errno
13 import platform
13 import platform
14 import shutil
14 import shutil
15 import stat
15 import stat
16
16
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 from mercurial.i18n import _
18 from mercurial.i18n import _
19
19
20 shortname = '.hglf'
20 shortname = '.hglf'
21 longname = 'largefiles'
21 longname = 'largefiles'
22
22
23
23
24 # -- Portability wrappers ----------------------------------------------
24 # -- Portability wrappers ----------------------------------------------
25
25
def dirstatewalk(dirstate, matcher, unknown=False, ignored=False):
    '''Walk *dirstate* with *matcher* and no explicit subrepos.

    Thin portability wrapper that pins the positional-argument order of
    dirstate.walk() in one place.
    '''
    return dirstate.walk(matcher, [], unknown, ignored)
28
28
def repoadd(repo, list):
    '''Schedule *list* of files for addition in the working context.'''
    return repo[None].add(list)
32
32
def reporemove(repo, list, unlink=False):
    '''Forget *list* of files in the working context, under the wlock.

    When *unlink* is true, also delete the files from the working
    directory; files that are already missing are silently skipped.

    (The original defined an inner ``remove`` closure only to call it
    immediately with the same arguments; the indirection is removed.)
    '''
    wlock = repo.wlock()
    try:
        if unlink:
            for f in list:
                try:
                    util.unlinkpath(repo.wjoin(f))
                except OSError as inst:
                    # already gone: nothing to unlink
                    if inst.errno != errno.ENOENT:
                        raise
        repo[None].forget(list)
    finally:
        wlock.release()
48
48
def repoforget(repo, list):
    '''Forget *list* of files in the working context without deleting
    them from the working directory.'''
    return repo[None].forget(list)
52
52
def findoutgoing(repo, remote, force):
    '''Return the list of changeset nodes in *repo* that are missing
    from *remote*.'''
    # imported here to avoid a module-level import cycle
    from mercurial import discovery
    out = discovery.findcommonoutgoing(repo, remote.peer(), force=force)
    return out.missing
58
57
59 # -- Private worker functions ------------------------------------------
58 # -- Private worker functions ------------------------------------------
60
59
def getminsize(ui, assumelfiles, opt, default=10):
    '''Resolve the minimum largefile size as a float.

    The explicit *opt* value wins; otherwise, when *assumelfiles* is
    set, fall back to the 'largefiles.minsize' config value (or
    *default*).  Raises util.Abort when the value is non-numeric or
    no size could be determined at all.
    '''
    size = opt
    if not size and assumelfiles:
        size = ui.config(longname, 'minsize', default=default)
    if size:
        try:
            size = float(size)
        except ValueError:
            raise util.Abort(_('largefiles: size must be number (not %s)\n')
                             % size)
    if size is None:
        raise util.Abort(_('minimum size for largefiles must be specified'))
    return size
74
73
def link(src, dest):
    '''Hardlink *src* to *dest*; when hardlinking fails (cross-device,
    unsupported filesystem, ...) fall back to an atomic copy that also
    preserves the source's permission bits.'''
    try:
        util.oslink(src, dest)
        return
    except OSError:
        pass
    # fallback: atomic copy
    out = util.atomictempfile(dest)
    for chunk in util.filechunkiter(open(src, 'rb')):
        out.write(chunk)
    out.close()
    os.chmod(dest, os.stat(src).st_mode)
85
84
def usercachepath(ui, hash):
    '''Return the per-user cache location for largefile *hash*, or None
    when no suitable base directory can be determined.

    An explicit 'largefiles.usercache' config path wins; otherwise the
    platform convention is used (LOCALAPPDATA/APPDATA on Windows,
    ~/Library/Caches on Mac OS X, XDG_CACHE_HOME or ~/.cache on other
    POSIX systems).  Raises util.Abort on an unknown platform.
    '''
    configured = ui.configpath(longname, 'usercache', None)
    if configured:
        return os.path.join(configured, hash)
    path = None
    if os.name == 'nt':
        appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
        if appdata:
            path = os.path.join(appdata, longname, hash)
    elif platform.system() == 'Darwin':
        home = os.getenv('HOME')
        if home:
            path = os.path.join(home, 'Library', 'Caches', longname, hash)
    elif os.name == 'posix':
        base = os.getenv('XDG_CACHE_HOME')
        if base:
            path = os.path.join(base, longname, hash)
        else:
            home = os.getenv('HOME')
            if home:
                path = os.path.join(home, '.cache', longname, hash)
    else:
        raise util.Abort(_('unknown operating system: %s\n') % os.name)
    return path
111
110
def inusercache(ui, hash):
    '''Truthy when largefile *hash* is present in the user cache
    (falsy as well when no user-cache path exists at all).'''
    cached = usercachepath(ui, hash)
    return cached and os.path.exists(cached)
115
114
def findfile(repo, hash):
    '''Return a local filesystem path holding largefile *hash*.

    Prefers the repo-local store; falls back to the user cache, in
    which case the file is first (hard)linked into the store.  Returns
    None when the file is in neither cache.
    '''
    if instore(repo, hash):
        repo.ui.note(_('found %s in store\n') % hash)
        return storepath(repo, hash)
    if inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        util.makedirs(os.path.dirname(path))
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
127
126
class largefilesdirstate(dirstate.dirstate):
    '''A dirstate subclass for tracking largefiles.

    Every file argument is normalized with unixpath() before being
    delegated to the base class, so callers may pass OS-native paths.
    '''
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
143
142
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    admin = repo.join(longname)
    opener = scmutil.opener(admin)
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not os.path.exists(os.path.join(admin, 'dirstate')):
        util.makedirs(admin)
        matcher = getstandinmatcher(repo)
        for standin in dirstatewalk(repo.dirstate, matcher):
            lfile = splitstandin(standin)
            hash = readstandin(repo, lfile)
            # assume the largefile needs checking ...
            lfdirstate.normallookup(lfile)
            try:
                # ... unless its content already matches the standin's
                # recorded hash, in which case it is known clean
                if hash == hashfile(repo.wjoin(lfile)):
                    lfdirstate.normal(lfile)
            except OSError, err:
                # a largefile missing from the working directory is fine
                if err.errno != errno.ENOENT:
                    raise
    return lfdirstate
171
170
def lfdirstatestatus(lfdirstate, repo, rev):
    '''Return (modified, added, removed, missing, unknown, ignored,
    clean) for the largefiles dirstate, resolving "unsure" entries by
    comparing each standin hash at *rev* against the working copy.'''
    match = match_.always(repo.root, repo.getcwd())
    (unsure, modified, added, removed, missing, unknown, ignored,
     clean) = lfdirstate.status(match, [], False, False, False)
    for lfile in unsure:
        expected = repo[rev][standin(lfile)].data().strip()
        if expected != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            # record it clean so the next status is cheaper
            lfdirstate.normal(lfile)
    return (modified, added, removed, missing, unknown, ignored, clean)
184
183
def listlfiles(repo, rev=None, matcher=None):
    '''Return the names of all largefiles in the working copy, or in
    changeset *rev* when given.'''
    if matcher is None:
        matcher = getstandinmatcher(repo)
    ctx = repo[rev]
    # files unknown ('?') to the dirstate are skipped when looking at
    # the working directory
    return [splitstandin(f)
            for f in ctx.walk(matcher)
            if rev is not None or repo.dirstate[f] != '?']
196
195
def instore(repo, hash):
    '''True when largefile *hash* exists in the repo-local store.'''
    return os.path.exists(storepath(repo, hash))
199
198
def storepath(repo, hash):
    '''Path under .hg/ where largefile *hash* is (or would be) stored.'''
    return repo.join(os.path.join(longname, hash))
202
201
def copyfromcache(repo, hash, filename):
    '''Copy largefile *hash* from the repo store or user cache into the
    working directory as *filename*.

    Return True on success, False when the file is in neither cache
    (which should not happen: callers are expected to ensure the
    largefile is cached first).
    '''
    cached = findfile(repo, hash)
    if cached is None:
        return False
    util.makedirs(os.path.dirname(repo.wjoin(filename)))
    # the write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    shutil.copy(cached, repo.wjoin(filename))
    return True
217
216
def copytostore(repo, rev, file, uploaded=False):
    '''Copy the working-directory largefile *file* into the repo store,
    unless its current standin hash is already present there.
    (*uploaded* is accepted for interface compatibility and unused.)'''
    hash = readstandin(repo, file)
    if not instore(repo, hash):
        copytostoreabsolute(repo, repo.wjoin(file), hash)
223
222
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    manifest = ctx.manifest()
    for filename in ctx.files():
        # only standins that are actually present in the manifest
        if isstandin(filename) and filename in manifest:
            copytostore(repo, ctx.node(), splitstandin(filename))
232
231
233
232
def copytostoreabsolute(repo, file, hash):
    '''Copy *file* (an absolute path) into the repo store under *hash*.

    When the user cache already has the file it is hardlinked in;
    otherwise the content is written atomically and then linked back
    out to the user cache.
    '''
    util.makedirs(os.path.dirname(storepath(repo, hash)))
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        dst = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        for chunk in util.filechunkiter(open(file, 'rb')):
            dst.write(chunk)
        dst.close()
        linktousercache(repo, hash)
245
244
def linktousercache(repo, hash):
    '''Hardlink largefile *hash* from the repo store into the user
    cache; a no-op when no user-cache path exists for this platform.'''
    target = usercachepath(repo.ui, hash)
    if target:
        util.makedirs(os.path.dirname(target))
        link(storepath(repo, hash), target)
251
250
def getstandinmatcher(repo, pats=None, opts=None):
    '''Return a match object that applies pats to the standin directory.

    Fix: the original used mutable default arguments (pats=[], opts={});
    defaults are now None with the same observable behavior.
    '''
    if opts is None:
        opts = {}
    standindir = repo.pathto(shortname)
    if pats:
        # patterns supplied: search standin directory relative to current dir
        cwd = repo.getcwd()
        if os.path.isabs(cwd):
            # cwd is an absolute path for hg -R <reponame>
            # work relative to the repository root in this case
            cwd = ''
        pats = [os.path.join(standindir, cwd, pat) for pat in pats]
    elif os.path.isdir(standindir):
        # no patterns: relative to repo root
        pats = [standindir]
    else:
        # no patterns and no standin dir: return matcher that matches nothing
        match = match_.match(repo.root, None, [], exact=True)
        match.matchfn = lambda f: False
        return match
    return getmatcher(repo, pats, opts, showbad=False)
272
271
def getmatcher(repo, pats=None, opts=None, showbad=True):
    '''Wrapper around scmutil.match() that adds showbad: if false,
    neuter the match object's bad() method so it does not print any
    warnings about missing files or directories.

    Fix: the original used mutable default arguments (pats=[], opts={});
    defaults are now None with the same observable behavior.
    '''
    match = scmutil.match(repo[None], pats or [], opts or {})
    if not showbad:
        match.bad = lambda f, msg: None
    return match
282
281
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts exactly the standins whose
    largefile counterparts are accepted by *rmatcher*; rmatcher's file
    list is passed along as the user-specified paths.'''
    standinmatch = getstandinmatcher(repo, rmatcher.files())
    plainmatch = standinmatch.matchfn
    def bothmatch(f):
        # must be a standin AND its largefile must match rmatcher
        return plainmatch(f) and rmatcher.matchfn(splitstandin(f))
    standinmatch.matchfn = bothmatch
    return standinmatch
294
293
def standin(filename):
    '''Return the repo-relative path of the standin for largefile
    *filename*.

    The result is deliberately repo-relative -- callers needing an
    absolute path use repo.wjoin() -- and components are joined with
    '/' because that is what the dirstate uses on every platform; any
    native separators in *filename* are converted first since the name
    may come from an external source such as the command line.
    '''
    return shortname + '/' + util.pconvert(filename)
306
305
def isstandin(filename):
    '''True when *filename* (in Mercurial's slash-separated internal
    form) lives under the standin directory.'''
    prefix = shortname + '/'
    return filename.startswith(prefix)
311
310
def splitstandin(filename):
    '''Return the largefile name for standin *filename*, or None when
    *filename* is not a standin.

    Split on '/' because that is what the dirstate uses on every
    platform; native separators are converted first since the name may
    come from an external source such as the command line.
    '''
    parts = util.pconvert(filename).split('/', 1)
    if len(parts) == 2 and parts[0] == shortname:
        return parts[1]
    return None
321
320
def updatestandin(repo, standin):
    '''Rewrite *standin* from the current working-directory largefile;
    a no-op when the largefile does not exist in the working copy.'''
    lfile = repo.wjoin(splitstandin(standin))
    if os.path.exists(lfile):
        writestandin(repo, standin, hashfile(lfile), getexecutable(lfile))
328
327
def readstandin(repo, filename, node=None):
    '''Return the hex hash recorded in the standin for *filename* at
    *node* (working directory when node is None).'''
    return repo[node][standin(filename)].data().strip()
333
332
def writestandin(repo, standin, hash, executable):
    '''Record *hash* in <repo.root>/<standin>, with permissions taken
    from *executable*.'''
    writehash(hash, repo.wjoin(standin), executable)
337
336
def copyandhash(instream, outfile):
    '''Copy every chunk of *instream* (an iterable of byte strings) to
    *outfile* while computing its SHA-1; close *outfile* and return the
    binary digest.'''
    hasher = util.sha1('')
    for chunk in instream:
        hasher.update(chunk)
        outfile.write(chunk)
    # Blecch: closing a file that somebody else opened is rude and
    # wrong. But it's so darn convenient and practical! After all,
    # outfile was opened just to copy and hash.
    outfile.close()
    return hasher.digest()
353
352
def hashrepofile(repo, file):
    '''Hash the working-directory copy of repo-relative *file*.'''
    return hashfile(repo.wjoin(file))
356
355
def hashfile(file):
    '''Return the hex SHA-1 of *file*'s contents, or the empty string
    when the file does not exist.'''
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    for chunk in blockstream(fd):
        hasher.update(chunk)
    fd.close()
    return hasher.hexdigest()
366
365
class limitreader(object):
    '''File-like wrapper exposing at most *limit* characters/bytes of
    the underlying file object *f*.'''

    def __init__(self, f, limit):
        self.f = f
        self.limit = limit

    def read(self, length):
        '''Read up to min(length, remaining limit); '' once exhausted.'''
        if self.limit == 0:
            return ''
        length = min(length, self.limit)
        self.limit -= length
        return self.f.read(length)

    def close(self):
        # deliberately leave the underlying file open
        pass
381
380
def blockstream(infile, blocksize=128 * 1024):
    """Generator that yields blocksize chunks of data read from infile
    and closes infile when it is exhausted."""
    while True:
        block = infile.read(blocksize)
        if not block:
            break
        yield block
    # same blecch as copyandhash() above: we close what we didn't open
    infile.close()
391
390
def writehash(hash, filename, executable):
    '''Write *hash* plus a newline to *filename*, creating parent
    directories as needed and setting permissions per *executable*.'''
    util.makedirs(os.path.dirname(filename))
    util.writefile(filename, hash + '\n')
    os.chmod(filename, getmode(executable))
396
395
def getexecutable(filename):
    '''Truthy when *filename* is executable by user, group AND other.'''
    mode = os.stat(filename).st_mode
    # and-chain preserved: result is truthy only with all three x bits
    return ((mode & stat.S_IXUSR) and
            (mode & stat.S_IXGRP) and
            (mode & stat.S_IXOTH))
402
401
def getmode(executable):
    '''Permission bits for a standin: 0755 when executable, else 0644.'''
    return 0o755 if executable else 0o644
408
407
def urljoin(first, second, *arg):
    '''Join two or more URL components, ensuring a single '/' between
    adjacent components (one leading slash on the right-hand component
    is dropped).'''
    url = first
    for piece in (second,) + arg:
        if not url.endswith('/'):
            url += '/'
        if piece.startswith('/'):
            piece = piece[1:]
        url += piece
    return url
421
420
def hexsha1(data):
    """Return the hex-encoded SHA-1 digest of the file-like object
    *data*."""
    digest = util.sha1()
    for chunk in util.filechunkiter(data):
        digest.update(chunk)
    return digest.hexdigest()
429
428
def httpsendfile(ui, filename):
    '''Wrap *filename* in an httpsendfile opened for binary reading.'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
432
431
def unixpath(path):
    '''Return *path* normalized and slash-separated, the form the
    lfdirstate expects.'''
    return util.pconvert(os.path.normpath(path))
436
435
def islfilesrepo(repo):
    '''Truthy when *repo* actually contains largefiles, either in store
    history or in the largefiles dirstate.'''
    if 'largefiles' in repo.requirements:
        if util.any(shortname + '/' in f[0] for f in repo.store.datafiles()):
            return True
    # requirement alone is not enough: check the lfdirstate too
    return util.any(openlfdirstate(repo.ui, repo, False))
443
442
class storeprotonotcapable(Exception):
    '''Raised when a remote store supports none of the requested
    largefile store types.'''
    def __init__(self, storetypes):
        # the store protocol types the caller would have accepted
        self.storetypes = storetypes
447
446
def getcurrentheads(repo):
    '''Return the heads of every branch as one flat list.'''
    heads = []
    for branch in repo.branchmap():
        heads.extend(repo.branchheads(branch))
    return heads
455
454
def getstandinsstate(repo):
    '''Return a list of (largefile, standin hash) pairs for every
    standin tracked by the dirstate.'''
    matcher = getstandinmatcher(repo)
    state = []
    for name in dirstatewalk(repo.dirstate, matcher):
        lfile = splitstandin(name)
        state.append((lfile, readstandin(repo, lfile)))
    return state
463
462
def getlfilestoupdate(oldstandins, newstandins):
    '''Given two lists of (largefile, hash) pairs, return the names of
    the largefiles whose entries differ, without duplicates.'''
    changed = set(oldstandins) ^ set(newstandins)
    names = []
    for lfile, _hash in changed:
        if lfile not in names:
            names.append(lfile)
    return names
General Comments 0
You need to be logged in to leave comments. Login now