doc: trim newline at the end of exception message
FUJIWARA Katsunori
r29644:ce4ac5d1 stable
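
Context for the change below (a hedged sketch, not part of the changeset): Mercurial's command dispatcher renders error.Abort as "abort: <message>" and appends its own newline, so a trailing '\n' embedded in the message text prints a stray blank line. This hypothetical snippet mimics that rendering to show the effect; it assumes a Mercurial checkout on the import path:

    # Hedged illustration only; 'longname' mirrors the module-level
    # constant defined in lfutil.py below.
    from mercurial import error
    from mercurial.i18n import _

    longname = 'largefiles'

    try:
        # after this change: no trailing '\n' inside the message itself
        raise error.Abort(_('unknown %s usercache location') % longname)
    except error.Abort as inst:
        # the dispatcher supplies the single trailing newline
        print('abort: %s' % inst)
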
@@ -1,662 +1,662 @@
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''largefiles utility code: must not import other modules in this package.'''
from __future__ import absolute_import

import copy
import hashlib
import os
import platform
import stat

from mercurial.i18n import _

from mercurial import (
    dirstate,
    error,
    httpconnection,
    match as matchmod,
    node,
    scmutil,
    util,
)

shortname = '.hglf'
shortnameslash = shortname + '/'
longname = 'largefiles'

# -- Private worker functions ------------------------------------------

def getminsize(ui, assumelfiles, opt, default=10):
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, 'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise error.Abort(_('largefiles: size must be number (not %s)\n')
                              % lfsize)
    if lfsize is None:
        raise error.Abort(_('minimum size for largefiles must be specified'))
    return lfsize

def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fall back on atomic copy
        dst = util.atomictempfile(dest)
        for chunk in util.filechunkiter(open(src, 'rb')):
            dst.write(chunk)
        dst.close()
        os.chmod(dest, os.stat(src).st_mode)

def usercachepath(ui, hash):
    '''Return the correct location in the "global" largefiles cache for a file
    with the given hash.
    This cache is used for sharing of largefiles across repositories - both
    to preserve download bandwidth and storage space.'''
    return os.path.join(_usercachedir(ui), hash)

def _usercachedir(ui):
    '''Return the location of the "global" largefiles cache.'''
    path = ui.configpath(longname, 'usercache', None)
    if path:
        return path
    if os.name == 'nt':
        appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
        if appdata:
            return os.path.join(appdata, longname)
    elif platform.system() == 'Darwin':
        home = os.getenv('HOME')
        if home:
            return os.path.join(home, 'Library', 'Caches', longname)
    elif os.name == 'posix':
        path = os.getenv('XDG_CACHE_HOME')
        if path:
            return os.path.join(path, longname)
        home = os.getenv('HOME')
        if home:
            return os.path.join(home, '.cache', longname)
    else:
        raise error.Abort(_('unknown operating system: %s\n') % os.name)
-    raise error.Abort(_('unknown %s usercache location\n') % longname)
+    raise error.Abort(_('unknown %s usercache location') % longname)

def inusercache(ui, hash):
    path = usercachepath(ui, hash)
    return os.path.exists(path)

def findfile(repo, hash):
    '''Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally.'''
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_('found %s in store\n') % hash)
        return path
    elif inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None

class largefilesdirstate(dirstate.dirstate):
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def _ignore(self, f):
        return False
    def write(self, tr=False):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)

def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    vfs = repo.vfs
    lfstoredir = longname
    opener = scmutil.opener(vfs.join(lfstoredir))
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, 'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(matcher, [], False, False)

        if len(standins) > 0:
            vfs.makedirs(lfstoredir)

        for standin in standins:
            lfile = splitstandin(standin)
            lfdirstate.normallookup(lfile)
    return lfdirstate

def lfdirstatestatus(lfdirstate, repo):
    wctx = repo['.']
    match = matchmod.always(repo.root, repo.getcwd())
    unsure, s = lfdirstate.status(match, [], False, False, False)
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = wctx[standin(lfile)]
        except LookupError:
            fctx = None
        if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return s

def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [splitstandin(f)
            for f in repo[rev].walk(matcher)
            if rev is not None or repo.dirstate[f] != '?']

def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    return os.path.exists(storepath(repo, hash, forcelocal))

def storepath(repo, hash, forcelocal=False):
    '''Return the correct location in the repository largefiles store for a
    file with the given hash.'''
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.join(longname, hash)

def findstorepath(repo, hash):
    '''Search through the local store path(s) to find the file for the given
    hash. If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    '''
    # For shared repos, the primary store is in the share source. But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
        return storepath(repo, hash, True), True

    return (path, False)

def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, 'rb') as srcfd:
        with wvfs(filename, 'wb') as destfd:
            gothash = copyandhash(srcfd, destfd)
    if gothash != hash:
        repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
                     % (filename, path, gothash))
        wvfs.unlink(filename)
        return False
    return True

def copytostore(repo, rev, file, uploaded=False):
    wvfs = repo.wvfs
    hash = readstandin(repo, file, rev)
    if instore(repo, hash):
        return
    if wvfs.exists(file):
        copytostoreabsolute(repo, wvfs.join(file), hash)
    else:
        repo.ui.warn(_("%s: largefile %s not available from local store\n") %
                     (file, hash))

def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''

    ctx = repo[node]
    for filename in ctx.files():
        if isstandin(filename) and filename in ctx.manifest():
            realfile = splitstandin(filename)
            copytostore(repo, ctx.node(), realfile)

def copytostoreabsolute(repo, file, hash):
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        dst = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        for chunk in util.filechunkiter(open(file, 'rb')):
            dst.write(chunk)
        dst.close()
        linktousercache(repo, hash)

def linktousercache(repo, hash):
    '''Link / copy the largefile with the specified hash from the store
    to the cache.'''
    path = usercachepath(repo.ui, hash)
    link(storepath(repo, hash), path)

def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
        # if pats is empty, it would incorrectly always match, so clear _always
        match._always = False
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match

def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn
    def composedmatchfn(f):
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composedmatchfn

    return smatcher

def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add(). So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)

def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    return filename.startswith(shortnameslash)

def splitstandin(filename):
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = util.pconvert(filename).split('/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    else:
        return None

def updatestandin(repo, standin):
    file = repo.wjoin(splitstandin(standin))
    if repo.wvfs.exists(splitstandin(standin)):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
    else:
        raise error.Abort(_('%s: file not found!') % splitstandin(standin))

def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    return repo[node][standin(filename)].data().strip()

def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    repo.wwrite(standin, hash + '\n', executable and 'x' or '')

def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash.'''
    hasher = hashlib.sha1('')
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hasher.hexdigest()

def hashrepofile(repo, file):
    return hashfile(repo.wjoin(file))

def hashfile(file):
    if not os.path.exists(file):
        return ''
    hasher = hashlib.sha1('')
    fd = open(file, 'rb')
    for data in util.filechunkiter(fd, 128 * 1024):
        hasher.update(data)
    fd.close()
    return hasher.hexdigest()

def getexecutable(filename):
    mode = os.stat(filename).st_mode
    return ((mode & stat.S_IXUSR) and
            (mode & stat.S_IXGRP) and
            (mode & stat.S_IXOTH))

def urljoin(first, second, *arg):
    def join(left, right):
        if not left.endswith('/'):
            left += '/'
        if right.startswith('/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for a in arg:
        url = join(url, a)
    return url

def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    h = hashlib.sha1()
    for chunk in util.filechunkiter(data):
        h.update(chunk)
    return h.hexdigest()

def httpsendfile(ui, filename):
    return httpconnection.httpsendfile(ui, filename, 'rb')

def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    return util.pconvert(os.path.normpath(path))

def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    if ('largefiles' in repo.requirements and
        any(shortnameslash in f[0] for f in repo.store.datafiles())):
        return True

    return any(openlfdirstate(repo.ui, repo, False))

class storeprotonotcapable(Exception):
    def __init__(self, storetypes):
        self.storetypes = storetypes

def getstandinsstate(repo):
    standins = []
    matcher = getstandinmatcher(repo)
    for standin in repo.dirstate.walk(matcher, [], False, False):
        lfile = splitstandin(standin)
        try:
            hash = readstandin(repo, lfile)
        except IOError:
            hash = None
        standins.append((lfile, hash))
    return standins

def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        stat = repo.dirstate._map[lfstandin]
        state, mtime = stat[0], stat[3]
    else:
        state, mtime = '?', -1
    if state == 'n':
        if (normallookup or mtime < 0 or
            not repo.wvfs.exists(lfile)):
            # state 'n' doesn't ensure 'clean' in this case
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'm':
        lfdirstate.normallookup(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)

def markcommitted(orig, ctx, node):
    repo = ctx.repo()

    orig(node)

    # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
    # because files coming from the 2nd parent are omitted in the latter.
    #
    # The former should be used to get targets of "synclfdirstate",
    # because such files:
    # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
    # - have to be marked as "n" after commit, but
    # - aren't listed in "repo[node].files()"

    lfdirstate = openlfdirstate(repo.ui, repo)
    for f in ctx.files():
        if isstandin(f):
            lfile = splitstandin(f)
            synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write()

    # As part of committing, copy all of the largefiles into the cache.
    copyalltostore(repo, node)

def getlfilestoupdate(oldstandins, newstandins):
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    for f in changedstandins:
        if f[0] not in filelist:
            filelist.append(f[0])
    return filelist

def getlfilestoupload(repo, missing, addfunc):
    for i, n in enumerate(missing):
        repo.ui.progress(_('finding outgoing largefiles'), i,
                         unit=_('revisions'), total=len(missing))
        parents = [p for p in repo[n].parents() if p != node.nullid]

        oldlfstatus = repo.lfstatus
        repo.lfstatus = False
        try:
            ctx = repo[n]
        finally:
            repo.lfstatus = oldlfstatus

        files = set(ctx.files())
        if len(parents) == 2:
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        for fn in files:
            if isstandin(fn) and fn in ctx:
                addfunc(fn, ctx[fn].data().strip())
    repo.ui.progress(_('finding outgoing largefiles'), None)

def updatestandinsbymatch(repo, match):
    '''Update standins in the working directory according to the specified
    match.

    This returns a (possibly modified) ``match`` object to be used for
    the subsequent commit process.
    '''

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = matchmod.always(repo.root, repo.getcwd())
        unsure, s = lfdirstate.status(dirtymatch, [], False, False,
                                      False)
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                if repo.wvfs.exists(standin(lfile)):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo,
                                      standin(lfile))

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(smatcher, [], False, False)

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate[lfile] != 'r':
            updatestandin(repo, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove). In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if repo.dirstate[fstandin] != 'r':
                if repo.dirstate[f] != 'r':
                    continue
            elif repo.dirstate[f] == '?':
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match

class automatedcommithook(object):
    '''Stateful hook to update standins at the 1st commit of resuming

    For efficiency, updating standins in the working directory should
    be avoided during automated committing (like rebase, transplant and
    so on), because they should already be updated before committing.

    But the 1st commit of resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may be
    modified manually.
    '''
    def __init__(self, resuming):
        self.resuming = resuming

    def __call__(self, repo, match):
        if self.resuming:
            self.resuming = False # avoids updating at subsequent commits
            return updatestandinsbymatch(repo, match)
        else:
            return match

def getstatuswriter(ui, repo, forcibly=None):
    '''Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as "default" writer function.

    Otherwise, this returns the function to always write out (or
    ignore if ``not forcibly``) status.
    '''
    if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    else:
        if forcibly:
            return ui.status # forcibly WRITE OUT
        else:
            return lambda *msg, **opts: None # forcibly IGNORE