largefiles: fix support for local largefiles while using share extension...
Henrik Stuart
r29329:f359cdc9 stable
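The actual change is one line and is easy to miss in the full-file diff below: findstorepath() documents that it returns a (path, exists) tuple, but the backward-compatibility branch for shared repositories returned a bare path string, so callers that unpack the pair, such as findfile(), broke whenever a largefile existed only in the share's local store. A minimal standalone sketch of the contract being fixed (the names and paths are illustrative stand-ins, not Mercurial's actual API):

    # Sketch of the return-type bug this commit fixes; `primary` plays the
    # role of the share source's store, `fallback` the repo-local store.
    def findstorepath(primary, fallback, exists):
        """Always return a (path, exists) pair, as the fixed lfutil code does."""
        if exists(primary):
            return (primary, True)
        elif exists(fallback):
            # Before the fix this branch returned the bare string `fallback`,
            # so callers unpacking `path, found = findstorepath(...)` failed.
            return fallback, True
        return (primary, False)

    # A caller in the style of lfutil.findfile() unpacks the pair:
    hit = '/src/.hg/largefiles/e2fb5f21'    # only the local store has the file
    path, found = findstorepath('/share-source/store/e2fb5f21', hit,
                                lambda p: p == hit)
    assert found and path == hit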
@@ -1,655 +1,655 @@
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''largefiles utility code: must not import other modules in this package.'''

import os
import platform
import stat
import copy

from mercurial import dirstate, httpconnection, match as match_, util, scmutil
from mercurial.i18n import _
from mercurial import node, error

shortname = '.hglf'
shortnameslash = shortname + '/'
longname = 'largefiles'


# -- Private worker functions ------------------------------------------

def getminsize(ui, assumelfiles, opt, default=10):
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, 'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise error.Abort(_('largefiles: size must be number (not %s)\n')
                              % lfsize)
    if lfsize is None:
        raise error.Abort(_('minimum size for largefiles must be specified'))
    return lfsize

def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        dst = util.atomictempfile(dest)
        for chunk in util.filechunkiter(open(src, 'rb')):
            dst.write(chunk)
        dst.close()
        os.chmod(dest, os.stat(src).st_mode)

def usercachepath(ui, hash):
    '''Return the correct location in the "global" largefiles cache for a file
    with the given hash.
    This cache is used for sharing of largefiles across repositories - both
    to preserve download bandwidth and storage space.'''
    return os.path.join(_usercachedir(ui), hash)

def _usercachedir(ui):
    '''Return the location of the "global" largefiles cache.'''
    path = ui.configpath(longname, 'usercache', None)
    if path:
        return path
    if os.name == 'nt':
        appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
        if appdata:
            return os.path.join(appdata, longname)
    elif platform.system() == 'Darwin':
        home = os.getenv('HOME')
        if home:
            return os.path.join(home, 'Library', 'Caches', longname)
    elif os.name == 'posix':
        path = os.getenv('XDG_CACHE_HOME')
        if path:
            return os.path.join(path, longname)
        home = os.getenv('HOME')
        if home:
            return os.path.join(home, '.cache', longname)
    else:
        raise error.Abort(_('unknown operating system: %s\n') % os.name)
    raise error.Abort(_('unknown %s usercache location\n') % longname)

def inusercache(ui, hash):
    path = usercachepath(ui, hash)
    return os.path.exists(path)

def findfile(repo, hash):
    '''Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally.'''
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_('found %s in store\n') % hash)
        return path
    elif inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None

class largefilesdirstate(dirstate.dirstate):
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def _ignore(self, f):
        return False
    def write(self, tr=False):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)

def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    vfs = repo.vfs
    lfstoredir = longname
    opener = scmutil.opener(vfs.join(lfstoredir))
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, 'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(matcher, [], False, False)

        if len(standins) > 0:
            vfs.makedirs(lfstoredir)

        for standin in standins:
            lfile = splitstandin(standin)
            lfdirstate.normallookup(lfile)
    return lfdirstate

def lfdirstatestatus(lfdirstate, repo):
    wctx = repo['.']
    match = match_.always(repo.root, repo.getcwd())
    unsure, s = lfdirstate.status(match, [], False, False, False)
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = wctx[standin(lfile)]
        except LookupError:
            fctx = None
        if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return s

def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [splitstandin(f)
            for f in repo[rev].walk(matcher)
            if rev is not None or repo.dirstate[f] != '?']

def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store.'''
    return os.path.exists(storepath(repo, hash, forcelocal))

def storepath(repo, hash, forcelocal=False):
    '''Return the correct location in the repository largefiles store for a
    file with the given hash.'''
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.join(longname, hash)

def findstorepath(repo, hash):
    '''Search through the local store path(s) to find the file for the given
    hash. If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    '''
    # For shared repos, the primary store is in the share source. But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
-        return storepath(repo, hash, True)
+        return storepath(repo, hash, True), True

    return (path, False)

def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, 'rb') as srcfd:
        with wvfs(filename, 'wb') as destfd:
            gothash = copyandhash(srcfd, destfd)
    if gothash != hash:
        repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
                     % (filename, path, gothash))
        wvfs.unlink(filename)
        return False
    return True

def copytostore(repo, rev, file, uploaded=False):
    wvfs = repo.wvfs
    hash = readstandin(repo, file, rev)
    if instore(repo, hash):
        return
    if wvfs.exists(file):
        copytostoreabsolute(repo, wvfs.join(file), hash)
    else:
        repo.ui.warn(_("%s: largefile %s not available from local store\n") %
                     (file, hash))

def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''

    ctx = repo[node]
    for filename in ctx.files():
        if isstandin(filename) and filename in ctx.manifest():
            realfile = splitstandin(filename)
            copytostore(repo, ctx.node(), realfile)


def copytostoreabsolute(repo, file, hash):
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        dst = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        for chunk in util.filechunkiter(open(file, 'rb')):
            dst.write(chunk)
        dst.close()
        linktousercache(repo, hash)

def linktousercache(repo, hash):
    '''Link / copy the largefile with the specified hash from the store
    to the cache.'''
    path = usercachepath(repo.ui, hash)
    link(storepath(repo, hash), path)

def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
        # if pats is empty, it would incorrectly always match, so clear _always
        match._always = False
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match

def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn
    def composedmatchfn(f):
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composedmatchfn

    return smatcher

def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add(). So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)

def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    return filename.startswith(shortnameslash)

def splitstandin(filename):
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = util.pconvert(filename).split('/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    else:
        return None

def updatestandin(repo, standin):
    file = repo.wjoin(splitstandin(standin))
    if repo.wvfs.exists(splitstandin(standin)):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
    else:
        raise error.Abort(_('%s: file not found!') % splitstandin(standin))

def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    return repo[node][standin(filename)].data().strip()

def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    repo.wwrite(standin, hash + '\n', executable and 'x' or '')

def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash.'''
    hasher = util.sha1('')
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hasher.hexdigest()

def hashrepofile(repo, file):
    return hashfile(repo.wjoin(file))

def hashfile(file):
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    for data in util.filechunkiter(fd, 128 * 1024):
        hasher.update(data)
    fd.close()
    return hasher.hexdigest()

def getexecutable(filename):
    mode = os.stat(filename).st_mode
    return ((mode & stat.S_IXUSR) and
            (mode & stat.S_IXGRP) and
            (mode & stat.S_IXOTH))

def urljoin(first, second, *arg):
    def join(left, right):
        if not left.endswith('/'):
            left += '/'
        if right.startswith('/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for a in arg:
        url = join(url, a)
    return url

def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    h = util.sha1()
    for chunk in util.filechunkiter(data):
        h.update(chunk)
    return h.hexdigest()

def httpsendfile(ui, filename):
    return httpconnection.httpsendfile(ui, filename, 'rb')

def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    return util.pconvert(os.path.normpath(path))

def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    if ('largefiles' in repo.requirements and
        any(shortnameslash in f[0] for f in repo.store.datafiles())):
        return True

    return any(openlfdirstate(repo.ui, repo, False))

class storeprotonotcapable(Exception):
    def __init__(self, storetypes):
        self.storetypes = storetypes

def getstandinsstate(repo):
    standins = []
    matcher = getstandinmatcher(repo)
    for standin in repo.dirstate.walk(matcher, [], False, False):
        lfile = splitstandin(standin)
        try:
            hash = readstandin(repo, lfile)
        except IOError:
            hash = None
        standins.append((lfile, hash))
    return standins

def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        stat = repo.dirstate._map[lfstandin]
        state, mtime = stat[0], stat[3]
    else:
        state, mtime = '?', -1
    if state == 'n':
        if (normallookup or mtime < 0 or
            not repo.wvfs.exists(lfile)):
            # state 'n' doesn't ensure 'clean' in this case
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'm':
        lfdirstate.normallookup(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)

def markcommitted(orig, ctx, node):
    repo = ctx.repo()

    orig(node)

    # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
    # because files coming from the 2nd parent are omitted in the latter.
    #
    # The former should be used to get targets of "synclfdirstate",
    # because such files:
    # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
    # - have to be marked as "n" after commit, but
    # - aren't listed in "repo[node].files()"

    lfdirstate = openlfdirstate(repo.ui, repo)
    for f in ctx.files():
        if isstandin(f):
            lfile = splitstandin(f)
            synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write()

    # As part of committing, copy all of the largefiles into the cache.
    copyalltostore(repo, node)

def getlfilestoupdate(oldstandins, newstandins):
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    for f in changedstandins:
        if f[0] not in filelist:
            filelist.append(f[0])
    return filelist

def getlfilestoupload(repo, missing, addfunc):
    for i, n in enumerate(missing):
        repo.ui.progress(_('finding outgoing largefiles'), i,
                         unit=_('revisions'), total=len(missing))
        parents = [p for p in repo[n].parents() if p != node.nullid]

        oldlfstatus = repo.lfstatus
        repo.lfstatus = False
        try:
            ctx = repo[n]
        finally:
            repo.lfstatus = oldlfstatus

        files = set(ctx.files())
        if len(parents) == 2:
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        for fn in files:
            if isstandin(fn) and fn in ctx:
                addfunc(fn, ctx[fn].data().strip())
    repo.ui.progress(_('finding outgoing largefiles'), None)

def updatestandinsbymatch(repo, match):
    '''Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    '''

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = match_.always(repo.root, repo.getcwd())
        unsure, s = lfdirstate.status(dirtymatch, [], False, False,
                                      False)
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                if repo.wvfs.exists(standin(lfile)):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo,
                                      standin(lfile))

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(smatcher, [], False, False)

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate[lfile] != 'r':
            updatestandin(repo, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove). In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if repo.dirstate[fstandin] != 'r':
                if repo.dirstate[f] != 'r':
                    continue
            elif repo.dirstate[f] == '?':
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match

class automatedcommithook(object):
    '''Stateful hook to update standins at the 1st commit of resuming

    For efficiency, updating standins in the working directory should
    be avoided while automated committing (like rebase, transplant and
    so on), because they should be updated before committing.

    But the 1st commit of resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may be
    modified manually.
    '''
    def __init__(self, resuming):
        self.resuming = resuming

    def __call__(self, repo, match):
        if self.resuming:
            self.resuming = False # avoids updating at subsequent commits
            return updatestandinsbymatch(repo, match)
        else:
            return match

def getstatuswriter(ui, repo, forcibly=None):
    '''Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as "default" writer function.

    Otherwise, this returns the function to always write out (or
    ignore if ``not forcibly``) status.
    '''
    if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    else:
        if forcibly:
            return ui.status # forcibly WRITE OUT
        else:
            return lambda *msg, **opts: None # forcibly IGNORE
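For reference, the overall lookup order that findstorepath() and findfile() implement for a shared repository: the share source's store first, then the repository's own store for backward compatibility with the old layout, and finally the user cache (from which findfile() links the file back into the store). The new test lines below exercise the middle, backward-compatibility step. A rough standalone summary, using hypothetical directory layouts rather than Mercurial's internals:

    import os

    def resolve_largefile(hash, sharesource, localstore, usercache):
        # Rough sketch of the lookup order, assuming plain directories.
        for store in (sharesource, localstore):  # primary, then legacy local
            path = os.path.join(store, hash)
            if os.path.exists(path):
                return path
        cached = os.path.join(usercache, hash)
        if os.path.exists(cached):
            return cached    # findfile() would also link this into the store
        return None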
@@ -1,254 +1,258 @@
Create user cache directory

  $ USERCACHE=`pwd`/cache; export USERCACHE
  $ cat <<EOF >> ${HGRCPATH}
  > [extensions]
  > hgext.largefiles=
  > [largefiles]
  > usercache=${USERCACHE}
  > EOF
  $ mkdir -p ${USERCACHE}

Create source repo, and commit adding largefile.

  $ hg init src
  $ cd src
  $ echo large > large
  $ hg add --large large
  $ hg commit -m 'add largefile'
  $ hg rm large
  $ hg commit -m 'branchhead without largefile' large
  $ hg up -qr 0
  $ rm large
  $ echo "0000000000000000000000000000000000000000" > .hglf/large
  $ hg commit -m 'commit missing file with corrupt standin' large
  abort: large: file not found!
  [255]
  $ hg up -Cqr 0
  $ cd ..

Discard all cached largefiles in USERCACHE

  $ rm -rf ${USERCACHE}

Create mirror repo, and pull from source without largefile:
"pull" is used instead of "clone" for suppression of (1) updating to
tip (= caching largefile from source repo), and (2) recording source
repo as "default" path in .hg/hgrc.

  $ hg init mirror
  $ cd mirror
  $ hg pull ../src
  pulling from ../src
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 2 changesets with 1 changes to 1 files
  (run 'hg update' to get a working copy)

Update working directory to "tip", which requires the largefile ("large"),
but there is no cache file for it. So, hg must treat it as a
"missing"(!) file.

  $ hg update -r0
  getting changed largefiles
  large: largefile 7f7097b041ccf68cc5561e9600da4655d21c6d18 not available from file:/*/$TESTTMP/mirror (glob)
  0 largefiles updated, 0 removed
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg status
  ! large

Update working directory to null: this cleans up .hg/largefiles/dirstate

  $ hg update null
  getting changed largefiles
  0 largefiles updated, 0 removed
  0 files updated, 0 files merged, 1 files removed, 0 files unresolved

Update working directory to tip, again.

  $ hg update -r0
  getting changed largefiles
  large: largefile 7f7097b041ccf68cc5561e9600da4655d21c6d18 not available from file:/*/$TESTTMP/mirror (glob)
  0 largefiles updated, 0 removed
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg status
  ! large
  $ cd ..

Verify that largefiles from pulled branchheads are fetched, also to an empty repo

  $ hg init mirror2
  $ hg -R mirror2 pull src -r0
  pulling from src
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files
  (run 'hg update' to get a working copy)

#if unix-permissions

Portable way to print file permissions:

  $ cat > ls-l.py <<EOF
  > #!/usr/bin/env python
  > import sys, os
  > path = sys.argv[1]
  > print '%03o' % (os.lstat(path).st_mode & 0777)
  > EOF
  $ chmod +x ls-l.py

Test that files in .hg/largefiles inherit mode from .hg/store, not
from file in working copy:

  $ cd src
  $ chmod 750 .hg/store
  $ chmod 660 large
  $ echo change >> large
  $ hg commit -m change
  created new head
  $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
  640

Test permission of files in .hg/largefiles created by update:

  $ cd ../mirror
  $ rm -r "$USERCACHE" .hg/largefiles # avoid links
  $ chmod 750 .hg/store
  $ hg pull ../src --update -q
  $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
  640

Test permission of files created by push:

  $ hg serve -R ../src -d -p $HGPORT --pid-file hg.pid \
  >   --config "web.allow_push=*" --config web.push_ssl=no
  $ cat hg.pid >> $DAEMON_PIDS

  $ echo change >> large
  $ hg commit -m change

  $ rm -r "$USERCACHE"

  $ hg push -q http://localhost:$HGPORT/

  $ ../ls-l.py ../src/.hg/largefiles/b734e14a0971e370408ab9bce8d56d8485e368a9
  640

  $ cd ..

#endif

Test issue 4053 (remove --after on a deleted, uncommitted file shouldn't say
it is missing, but a remove on a nonexistent unknown file still should. Same
for a forget.)

  $ cd src
  $ touch x
  $ hg add x
  $ mv x y
  $ hg remove -A x y ENOENT
  ENOENT: * (glob)
  not removing y: file is untracked
  [1]
  $ hg add y
  $ mv y z
  $ hg forget y z ENOENT
  ENOENT: * (glob)
  not removing z: file is already untracked
  [1]

Largefiles are accessible from the share's store
  $ cd ..
  $ hg share -q src share_dst --config extensions.share=
  $ hg -R share_dst update -r0
  getting changed largefiles
  1 largefiles updated, 0 removed
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved

  $ echo modified > share_dst/large
  $ hg -R share_dst ci -m modified
  created new head

Only dirstate is in the local store for the share, and the largefile is in the
share source's local store. Avoid the extra largefiles added in the unix
conditional above.
  $ hash=`hg -R share_dst cat share_dst/.hglf/large`
  $ echo $hash
  e2fb5f2139d086ded2cb600d5a91a196e76bf020

  $ find share_dst/.hg/largefiles/* | sort
  share_dst/.hg/largefiles/dirstate

  $ find src/.hg/largefiles/* | egrep "(dirstate|$hash)" | sort
  src/.hg/largefiles/dirstate
  src/.hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020

+Verify that backwards compatibility is maintained for old storage layout
+  $ mv src/.hg/largefiles/$hash share_dst/.hg/largefiles
+  $ hg verify --quiet --lfa -R share_dst --config largefiles.usercache=
+
Inject corruption into the largefiles store and see how update handles that:

  $ cd src
  $ hg up -qC tip
  $ cat large
  modified
  $ rm large
  $ cat .hglf/large
  e2fb5f2139d086ded2cb600d5a91a196e76bf020
  $ mv .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020 ..
  $ echo corruption > .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
  $ hg up -C
  getting changed largefiles
  large: data corruption in $TESTTMP/src/.hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020 with hash 6a7bb2556144babe3899b25e5428123735bb1e27 (glob)
  0 largefiles updated, 0 removed
  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
  [12] other heads for branch "default" (re)
  $ hg st
  ! large
  ? z
  $ rm .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020

#if serve

Test coverage of error handling from putlfile:

  $ mkdir $TESTTMP/mirrorcache
  $ hg serve -R ../mirror -d -p $HGPORT1 --pid-file hg.pid --config largefiles.usercache=$TESTTMP/mirrorcache
  $ cat hg.pid >> $DAEMON_PIDS

  $ hg push http://localhost:$HGPORT1 -f --config files.usercache=nocache
  pushing to http://localhost:$HGPORT1/
  searching for changes
  abort: remotestore: could not open file $TESTTMP/src/.hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020: HTTP Error 403: ssl required
  [255]

  $ rm .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020

Test coverage of 'missing from store':

  $ hg serve -R ../mirror -d -p $HGPORT2 --pid-file hg.pid --config largefiles.usercache=$TESTTMP/mirrorcache --config "web.allow_push=*" --config web.push_ssl=no
  $ cat hg.pid >> $DAEMON_PIDS

  $ hg push http://localhost:$HGPORT2 -f --config largefiles.usercache=nocache
  pushing to http://localhost:$HGPORT2/
  searching for changes
  abort: largefile e2fb5f2139d086ded2cb600d5a91a196e76bf020 missing from store (needs to be uploaded)
  [255]

Verify that --lfrev controls which revisions are checked for largefiles to push

  $ hg push http://localhost:$HGPORT2 -f --config largefiles.usercache=nocache --lfrev tip
  pushing to http://localhost:$HGPORT2/
  searching for changes
  abort: largefile e2fb5f2139d086ded2cb600d5a91a196e76bf020 missing from store (needs to be uploaded)
  [255]

  $ hg push http://localhost:$HGPORT2 -f --config largefiles.usercache=nocache --lfrev null
  pushing to http://localhost:$HGPORT2/
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: added 1 changesets with 1 changes to 1 files (+1 heads)

#endif