##// END OF EJS Templates
largefiles: fix an explicit largefile commit after a remove (issue4969)...
Matt Harbison -
r27942:eb1135d5 stable
parent child Browse files
Show More
@@ -1,633 +1,637 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import os
11 import os
12 import platform
12 import platform
13 import stat
13 import stat
14 import copy
14 import copy
15
15
16 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
16 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18 from mercurial import node, error
18 from mercurial import node, error
19
19
20 shortname = '.hglf'
20 shortname = '.hglf'
21 shortnameslash = shortname + '/'
21 shortnameslash = shortname + '/'
22 longname = 'largefiles'
22 longname = 'largefiles'
23
23
24
24
25 # -- Private worker functions ------------------------------------------
25 # -- Private worker functions ------------------------------------------
26
26
def getminsize(ui, assumelfiles, opt, default=10):
    '''Return the minimum size (in MB, as a float) for a file to be
    handled as a largefile.

    The explicit command line value ``opt`` wins; otherwise, when
    largefiles handling is assumed, fall back to the
    ``largefiles.minsize`` configuration (or ``default``).  Abort when
    no usable value can be determined.'''
    lfsize = opt
    if assumelfiles and not lfsize:
        lfsize = ui.config(longname, 'minsize', default=default)
    if lfsize:
        try:
            return float(lfsize)
        except ValueError:
            raise error.Abort(_('largefiles: size must be number (not %s)\n')
                              % lfsize)
    if lfsize is None:
        raise error.Abort(_('minimum size for largefiles must be specified'))
    return lfsize
40
40
def link(src, dest):
    '''Hardlink src to dest, creating parent directories of dest as
    needed.  If hardlinking fails, fall back to an atomic copy that
    preserves the source's permission bits.'''
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy; close the source
        # explicitly (the previous code leaked the file handle)
        with open(src, 'rb') as srcf:
            dst = util.atomictempfile(dest)
            for chunk in util.filechunkiter(srcf):
                dst.write(chunk)
            dst.close()
        os.chmod(dest, os.stat(src).st_mode)
52
52
53 def usercachepath(ui, hash):
53 def usercachepath(ui, hash):
54 path = ui.configpath(longname, 'usercache', None)
54 path = ui.configpath(longname, 'usercache', None)
55 if path:
55 if path:
56 path = os.path.join(path, hash)
56 path = os.path.join(path, hash)
57 else:
57 else:
58 if os.name == 'nt':
58 if os.name == 'nt':
59 appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
59 appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
60 if appdata:
60 if appdata:
61 path = os.path.join(appdata, longname, hash)
61 path = os.path.join(appdata, longname, hash)
62 elif platform.system() == 'Darwin':
62 elif platform.system() == 'Darwin':
63 home = os.getenv('HOME')
63 home = os.getenv('HOME')
64 if home:
64 if home:
65 path = os.path.join(home, 'Library', 'Caches',
65 path = os.path.join(home, 'Library', 'Caches',
66 longname, hash)
66 longname, hash)
67 elif os.name == 'posix':
67 elif os.name == 'posix':
68 path = os.getenv('XDG_CACHE_HOME')
68 path = os.getenv('XDG_CACHE_HOME')
69 if path:
69 if path:
70 path = os.path.join(path, longname, hash)
70 path = os.path.join(path, longname, hash)
71 else:
71 else:
72 home = os.getenv('HOME')
72 home = os.getenv('HOME')
73 if home:
73 if home:
74 path = os.path.join(home, '.cache', longname, hash)
74 path = os.path.join(home, '.cache', longname, hash)
75 else:
75 else:
76 raise error.Abort(_('unknown operating system: %s\n') % os.name)
76 raise error.Abort(_('unknown operating system: %s\n') % os.name)
77 return path
77 return path
78
78
def inusercache(ui, hash):
    '''Report whether the largefile with the given hash is present in
    the per-user cache.'''
    cached = usercachepath(ui, hash)
    return cached and os.path.exists(cached)
82
82
def findfile(repo, hash):
    '''Return a path to the largefile with the given hash, copying it
    from the user cache into the store when necessary.  Return None if
    the file is available in neither place.'''
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_('found %s in store\n') % hash)
        return path
    if inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
94
94
class largefilesdirstate(dirstate.dirstate):
    '''dirstate subclass used for tracking largefiles.

    Every path handed to the standard dirstate API is first normalized
    with unixpath() so callers may pass platform-native paths.  Ignore
    handling is disabled (largefiles are tracked explicitly), and
    writes never use the transaction/PENDING machinery.
    '''
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def _ignore(self, f):
        # largefiles are never ignored; they are tracked explicitly
        return False
    def write(self, tr=False):
        # (1) disable PENDING mode always
        # (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)
117
117
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    lfstoredir = repo.join(longname)
    opener = scmutil.opener(lfstoredir)
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # On the first meaningful largefiles operation in a new clone the
    # dirstate file does not exist yet; populate it from the standins
    # recorded in the main dirstate.
    if create and not os.path.exists(os.path.join(lfstoredir, 'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(matcher, [], False, False)

        if standins:
            util.makedirs(lfstoredir)

        for standinfile in standins:
            lfdirstate.normallookup(splitstandin(standinfile))
    return lfdirstate
142
142
def lfdirstatestatus(lfdirstate, repo):
    '''Compute status for the largefiles dirstate, resolving 'unsure'
    entries by comparing working-copy hashes with the standins in the
    '.' changeset, and return the resulting status object.'''
    pctx = repo['.']
    match = match_.always(repo.root, repo.getcwd())
    unsure, s = lfdirstate.status(match, [], False, False, False)
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = pctx[standin(lfile)]
        except LookupError:
            fctx = None
        if fctx and fctx.data().strip() == hashfile(repo.wjoin(lfile)):
            clean.append(lfile)
            lfdirstate.normal(lfile)
        else:
            modified.append(lfile)
    return s
159
159
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''
    if matcher is None:
        matcher = getstandinmatcher(repo)

    lfiles = []
    for f in repo[rev].walk(matcher):
        # ignore unknown files in working directory
        if rev is None and repo.dirstate[f] == '?':
            continue
        lfiles.append(splitstandin(f))
    return lfiles
171
171
def instore(repo, hash, forcelocal=False):
    '''Report whether the largefile with the given hash exists in the
    (possibly shared) store; forcelocal restricts the check to this
    repo's own store.'''
    return os.path.exists(storepath(repo, hash, forcelocal))
174
174
def storepath(repo, hash, forcelocal=False):
    '''Return the store path for the given hash.  A shared repo uses the
    share source's store unless forcelocal is set.'''
    if repo.shared() and not forcelocal:
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.join(longname, hash)
179
179
def findstorepath(repo, hash):
    '''Search through the local store path(s) to find the file for the given
    hash.  If the file is not found, its path in the primary store is
    returned.  The return value is a tuple of (path, exists(path)).
    '''
    # For shared repos, the primary store is in the share source.  But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
        # Fix: the shared-store branch previously returned a bare path,
        # violating the documented (path, exists) contract that every
        # caller unpacks (see findfile above).
        return (storepath(repo, hash, True), True)

    return (path, False)
196
196
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository.  Return true on success or false if the
    file was not found in either cache (which should not happen: this is
    meant to be called only after ensuring that the needed largefile
    exists in the cache).'''
    path = findfile(repo, hash)
    if path is None:
        return False
    dest = repo.wjoin(filename)
    util.makedirs(os.path.dirname(dest))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, 'rb') as srcfd:
        with open(dest, 'wb') as destfd:
            gothash = copyandhash(srcfd, destfd)
    if gothash != hash:
        repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
                     % (filename, path, gothash))
        util.unlink(dest)
        return False
    return True
219
219
def copytostore(repo, rev, file, uploaded=False):
    '''Copy the largefile standing in for ``file`` at ``rev`` into the
    store, unless it is already there.  Warn when the working copy of
    the largefile is missing.'''
    hash = readstandin(repo, file, rev)
    if instore(repo, hash):
        return
    absfile = repo.wjoin(file)
    if not os.path.exists(absfile):
        repo.ui.warn(_("%s: largefile %s not available from local store\n") %
                     (file, hash))
        return
    copytostoreabsolute(repo, absfile, hash)
230
230
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    for filename in ctx.files():
        realfile = splitstandin(filename)
        # only standins that still exist in this revision's manifest
        if realfile is not None and filename in ctx.manifest():
            copytostore(repo, ctx.node(), realfile)
239
239
240
240
def copytostoreabsolute(repo, file, hash):
    '''Copy the largefile at absolute path ``file`` into the store under
    ``hash``, hardlinking from the user cache when the file is already
    there; otherwise copy atomically and link the result back into the
    user cache.'''
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        dst = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        # close the source explicitly (the previous code leaked the handle)
        with open(file, 'rb') as srcf:
            for chunk in util.filechunkiter(srcf):
                dst.write(chunk)
        dst.close()
        linktousercache(repo, hash)
252
252
def linktousercache(repo, hash):
    '''Hardlink the store copy of ``hash`` into the user cache, when a
    user cache location is available.'''
    usercache = usercachepath(repo.ui, hash)
    if usercache:
        link(storepath(repo, hash), usercache)
257
257
def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    standindir = repo.wjoin(shortname)

    # no warnings about missing files or directories
    def badfn(f, msg):
        pass

    if rmatcher and not rmatcher.always():
        pats = [os.path.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [standindir]
        match = scmutil.match(repo[None], pats, badfn=badfn)
        # if pats is empty, it would incorrectly always match, so clear _always
        match._always = False
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [standindir], badfn=badfn)
    return match
276
276
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher)
    standinmatchfn = smatcher.matchfn

    def composedmatchfn(f):
        # accept only standins whose underlying file rmatcher accepts
        return standinmatchfn(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composedmatchfn

    return smatcher
288
288
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Callers that need an absolute path must wrap this in repo.wjoin()
    # themselves; addlargefiles, for one, needs the repo-relative form
    # so it can be passed to repo[None].add().  Join with '/' because
    # that is what dirstate always uses, even on Windows; any existing
    # platform separator (e.g. from command line input) is converted
    # first.
    return shortnameslash + util.pconvert(filename)
300
300
def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    return filename.startswith(shortnameslash)
305
305
def splitstandin(filename):
    '''Return the largefile path for the given standin path, or None
    when filename is not a standin.'''
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = util.pconvert(filename).split('/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    return None
315
315
def updatestandin(repo, standin):
    '''Rewrite the given standin file with the current hash and
    executable bit of the corresponding largefile in the working
    directory, if that largefile exists.'''
    lfile = repo.wjoin(splitstandin(standin))
    if not os.path.exists(lfile):
        return
    hash = hashfile(lfile)
    executable = getexecutable(lfile)
    writestandin(repo, standin, hash, executable)
322
322
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    fctx = repo[node][standin(filename)]
    return fctx.data().strip()
327
327
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    flags = 'x' if executable else ''
    repo.wwrite(standin, hash + '\n', flags)
331
331
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash.'''
    hasher = util.sha1('')
    for chunk in instream:
        hasher.update(chunk)
        outfile.write(chunk)
    return hasher.hexdigest()
340
340
def hashrepofile(repo, file):
    '''Return the hash of the repo-relative file in the working
    directory (empty string when the file does not exist).'''
    return hashfile(repo.wjoin(file))
343
343
def hashfile(file):
    '''Return the hex SHA-1 hash of the named file, or the empty string
    when the file does not exist.'''
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    # a with-block guarantees the descriptor is released even when
    # reading raises (the previous code leaked it on exception)
    with open(file, 'rb') as fd:
        for data in util.filechunkiter(fd, 128 * 1024):
            hasher.update(data)
    return hasher.hexdigest()
353
353
def getexecutable(filename):
    '''Report whether filename is executable by user, group and other.'''
    st_mode = os.stat(filename).st_mode
    return (st_mode & stat.S_IXUSR and
            st_mode & stat.S_IXGRP and
            st_mode & stat.S_IXOTH)
359
359
def urljoin(first, second, *arg):
    '''Join two or more URL fragments, ensuring a single '/' is
    inserted between each adjacent pair.'''
    def _join(left, right):
        sep = '' if left.endswith('/') else '/'
        if right.startswith('/'):
            right = right[1:]
        return left + sep + right

    url = _join(first, second)
    for piece in arg:
        url = _join(url, piece)
    return url
372
372
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    hasher = util.sha1()
    for piece in util.filechunkiter(data):
        hasher.update(piece)
    return hasher.hexdigest()
380
380
def httpsendfile(ui, filename):
    '''Return an httpconnection.httpsendfile for the named file, opened
    for binary reading.'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
383
383
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    normalized = os.path.normpath(path)
    return util.pconvert(normalized)
387
387
def islfilesrepo(repo):
    '''Report whether the repository actually contains largefiles.'''
    if 'largefiles' in repo.requirements:
        # any store data file living under the standin directory
        if any(shortnameslash in f[0] for f in repo.store.datafiles()):
            return True

    # fall back to checking the largefiles dirstate for entries
    return any(openlfdirstate(repo.ui, repo, False))
394
394
class storeprotonotcapable(Exception):
    '''Exception carrying the store types that could not be satisfied
    (raised by store-selection code; see callers for exact semantics).'''
    def __init__(self, storetypes):
        # the requested store types, kept for the handler to inspect
        self.storetypes = storetypes
398
398
def getstandinsstate(repo):
    '''Return a list of (lfile, hash) pairs for every standin tracked in
    the dirstate; hash is None when the standin cannot be read.'''
    standins = []
    matcher = getstandinmatcher(repo)
    for standinfile in repo.dirstate.walk(matcher, [], False, False):
        lfile = splitstandin(standinfile)
        try:
            hash = readstandin(repo, lfile)
        except IOError:
            hash = None
        standins.append((lfile, hash))
    return standins
410
410
def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    '''Update the lfdirstate entry for lfile to match the state of its
    standin in repo.dirstate.  When normallookup is true, entries in
    'n' state are marked for re-verification rather than clean.'''
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        # renamed from 'stat': the old local shadowed the imported
        # `stat` module; fields used here: [0] = state, [3] = mtime
        entry = repo.dirstate._map[lfstandin]
        state, mtime = entry[0], entry[3]
    else:
        state, mtime = '?', -1
    if state == 'n':
        if (normallookup or mtime < 0 or
            not os.path.exists(repo.wjoin(lfile))):
            # state 'n' doesn't ensure 'clean' in this case
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'm':
        lfdirstate.normallookup(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)
433
433
def markcommitted(orig, ctx, node):
    '''Wrapper run after a commit: sync the largefiles dirstate with the
    committed standins and copy the committed largefiles to the store.'''
    repo = ctx.repo()

    orig(node)

    # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
    # because files coming from the 2nd parent are omitted in the latter.
    #
    # The former should be used to get targets of "synclfdirstate",
    # because such files:
    # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
    # - have to be marked as "n" after commit, but
    # - aren't listed in "repo[node].files()"

    lfdirstate = openlfdirstate(repo.ui, repo)
    for f in ctx.files():
        lfile = splitstandin(f)
        if lfile is not None:
            synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write()

    # As part of committing, copy all of the largefiles into the cache.
    copyalltostore(repo, node)
457
457
def getlfilestoupdate(oldstandins, newstandins):
    '''Return the filenames whose (filename, hash) entry differs between
    the two standin lists, without duplicates.'''
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    # track membership in a set: the old `f[0] not in filelist` list scan
    # made the dedupe loop accidentally quadratic
    seen = set()
    for f in changedstandins:
        if f[0] not in seen:
            seen.add(f[0])
            filelist.append(f[0])
    return filelist
465
465
def getlfilestoupload(repo, missing, addfunc):
    '''For each changeset node in ``missing``, call
    addfunc(standinpath, hexhash) for every standin (and its content
    hash) referenced by that changeset.'''
    for i, n in enumerate(missing):
        repo.ui.progress(_('finding outgoing largefiles'), i,
                         unit=_('revision'), total=len(missing))
        parents = [p for p in repo.changelog.parents(n) if p != node.nullid]

        # read the changectx with largefiles status processing disabled,
        # restoring the previous flag afterwards
        oldlfstatus = repo.lfstatus
        repo.lfstatus = False
        try:
            ctx = repo[n]
        finally:
            repo.lfstatus = oldlfstatus

        files = set(ctx.files())
        if len(parents) == 2:
            # for merges, compare manifests directly to also pick up files
            # that are absent from, or differ against, either parent and
            # would be missed by ctx.files()
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        for fn in files:
            # only standins actually present in this changeset
            if isstandin(fn) and fn in ctx:
                addfunc(fn, ctx[fn].data().strip())
    repo.ui.progress(_('finding outgoing largefiles'), None)
497
497
498 def updatestandinsbymatch(repo, match):
498 def updatestandinsbymatch(repo, match):
499 '''Update standins in the working directory according to specified match
499 '''Update standins in the working directory according to specified match
500
500
501 This returns (possibly modified) ``match`` object to be used for
501 This returns (possibly modified) ``match`` object to be used for
502 subsequent commit process.
502 subsequent commit process.
503 '''
503 '''
504
504
505 ui = repo.ui
505 ui = repo.ui
506
506
507 # Case 1: user calls commit with no specific files or
507 # Case 1: user calls commit with no specific files or
508 # include/exclude patterns: refresh and commit all files that
508 # include/exclude patterns: refresh and commit all files that
509 # are "dirty".
509 # are "dirty".
510 if match is None or match.always():
510 if match is None or match.always():
511 # Spend a bit of time here to get a list of files we know
511 # Spend a bit of time here to get a list of files we know
512 # are modified so we can compare only against those.
512 # are modified so we can compare only against those.
513 # It can cost a lot of time (several seconds)
513 # It can cost a lot of time (several seconds)
514 # otherwise to update all standins if the largefiles are
514 # otherwise to update all standins if the largefiles are
515 # large.
515 # large.
516 lfdirstate = openlfdirstate(ui, repo)
516 lfdirstate = openlfdirstate(ui, repo)
517 dirtymatch = match_.always(repo.root, repo.getcwd())
517 dirtymatch = match_.always(repo.root, repo.getcwd())
518 unsure, s = lfdirstate.status(dirtymatch, [], False, False,
518 unsure, s = lfdirstate.status(dirtymatch, [], False, False,
519 False)
519 False)
520 modifiedfiles = unsure + s.modified + s.added + s.removed
520 modifiedfiles = unsure + s.modified + s.added + s.removed
521 lfiles = listlfiles(repo)
521 lfiles = listlfiles(repo)
522 # this only loops through largefiles that exist (not
522 # this only loops through largefiles that exist (not
523 # removed/renamed)
523 # removed/renamed)
524 for lfile in lfiles:
524 for lfile in lfiles:
525 if lfile in modifiedfiles:
525 if lfile in modifiedfiles:
526 if os.path.exists(
526 if os.path.exists(
527 repo.wjoin(standin(lfile))):
527 repo.wjoin(standin(lfile))):
528 # this handles the case where a rebase is being
528 # this handles the case where a rebase is being
529 # performed and the working copy is not updated
529 # performed and the working copy is not updated
530 # yet.
530 # yet.
531 if os.path.exists(repo.wjoin(lfile)):
531 if os.path.exists(repo.wjoin(lfile)):
532 updatestandin(repo,
532 updatestandin(repo,
533 standin(lfile))
533 standin(lfile))
534
534
535 return match
535 return match
536
536
537 lfiles = listlfiles(repo)
537 lfiles = listlfiles(repo)
538 match._files = repo._subdirlfs(match.files(), lfiles)
538 match._files = repo._subdirlfs(match.files(), lfiles)
539
539
540 # Case 2: user calls commit with specified patterns: refresh
540 # Case 2: user calls commit with specified patterns: refresh
541 # any matching big files.
541 # any matching big files.
542 smatcher = composestandinmatcher(repo, match)
542 smatcher = composestandinmatcher(repo, match)
543 standins = repo.dirstate.walk(smatcher, [], False, False)
543 standins = repo.dirstate.walk(smatcher, [], False, False)
544
544
545 # No matching big files: get out of the way and pass control to
545 # No matching big files: get out of the way and pass control to
546 # the usual commit() method.
546 # the usual commit() method.
547 if not standins:
547 if not standins:
548 return match
548 return match
549
549
550 # Refresh all matching big files. It's possible that the
550 # Refresh all matching big files. It's possible that the
551 # commit will end up failing, in which case the big files will
551 # commit will end up failing, in which case the big files will
552 # stay refreshed. No harm done: the user modified them and
552 # stay refreshed. No harm done: the user modified them and
553 # asked to commit them, so sooner or later we're going to
553 # asked to commit them, so sooner or later we're going to
554 # refresh the standins. Might as well leave them refreshed.
554 # refresh the standins. Might as well leave them refreshed.
555 lfdirstate = openlfdirstate(ui, repo)
555 lfdirstate = openlfdirstate(ui, repo)
556 for fstandin in standins:
556 for fstandin in standins:
557 lfile = splitstandin(fstandin)
557 lfile = splitstandin(fstandin)
558 if lfdirstate[lfile] != 'r':
558 if lfdirstate[lfile] != 'r':
559 updatestandin(repo, fstandin)
559 updatestandin(repo, fstandin)
560
560
561 # Cook up a new matcher that only matches regular files or
561 # Cook up a new matcher that only matches regular files or
562 # standins corresponding to the big files requested by the
562 # standins corresponding to the big files requested by the
563 # user. Have to modify _files to prevent commit() from
563 # user. Have to modify _files to prevent commit() from
564 # complaining "not tracked" for big files.
564 # complaining "not tracked" for big files.
565 match = copy.copy(match)
565 match = copy.copy(match)
566 origmatchfn = match.matchfn
566 origmatchfn = match.matchfn
567
567
568 # Check both the list of largefiles and the list of
568 # Check both the list of largefiles and the list of
569 # standins because if a largefile was removed, it
569 # standins because if a largefile was removed, it
570 # won't be in the list of largefiles at this point
570 # won't be in the list of largefiles at this point
571 match._files += sorted(standins)
571 match._files += sorted(standins)
572
572
573 actualfiles = []
573 actualfiles = []
574 for f in match._files:
574 for f in match._files:
575 fstandin = standin(f)
575 fstandin = standin(f)
576
576
577 # For largefiles, only one of the normal and standin should be
577 # For largefiles, only one of the normal and standin should be
578 # committed (except if one of them is a remove).
578 # committed (except if one of them is a remove). In the case of a
579 # standin removal, drop the normal file if it is unknown to dirstate.
579 # Thus, skip plain largefile names but keep the standin.
580 # Thus, skip plain largefile names but keep the standin.
580 if (f in lfiles or fstandin in standins) and \
581 if f in lfiles or fstandin in standins:
581 repo.dirstate[f] != 'r' and repo.dirstate[fstandin] != 'r':
582 if repo.dirstate[fstandin] != 'r':
583 if repo.dirstate[f] != 'r':
584 continue
585 elif repo.dirstate[f] == '?':
582 continue
586 continue
583
587
584 actualfiles.append(f)
588 actualfiles.append(f)
585 match._files = actualfiles
589 match._files = actualfiles
586
590
587 def matchfn(f):
591 def matchfn(f):
588 if origmatchfn(f):
592 if origmatchfn(f):
589 return f not in lfiles
593 return f not in lfiles
590 else:
594 else:
591 return f in standins
595 return f in standins
592
596
593 match.matchfn = matchfn
597 match.matchfn = matchfn
594
598
595 return match
599 return match
596
600
597 class automatedcommithook(object):
601 class automatedcommithook(object):
598 '''Stateful hook to update standins at the 1st commit of resuming
602 '''Stateful hook to update standins at the 1st commit of resuming
599
603
600 For efficiency, updating standins in the working directory should
604 For efficiency, updating standins in the working directory should
601 be avoided while automated committing (like rebase, transplant and
605 be avoided while automated committing (like rebase, transplant and
602 so on), because they should be updated before committing.
606 so on), because they should be updated before committing.
603
607
604 But the 1st commit of resuming automated committing (e.g. ``rebase
608 But the 1st commit of resuming automated committing (e.g. ``rebase
605 --continue``) should update them, because largefiles may be
609 --continue``) should update them, because largefiles may be
606 modified manually.
610 modified manually.
607 '''
611 '''
608 def __init__(self, resuming):
612 def __init__(self, resuming):
609 self.resuming = resuming
613 self.resuming = resuming
610
614
611 def __call__(self, repo, match):
615 def __call__(self, repo, match):
612 if self.resuming:
616 if self.resuming:
613 self.resuming = False # avoids updating at subsequent commits
617 self.resuming = False # avoids updating at subsequent commits
614 return updatestandinsbymatch(repo, match)
618 return updatestandinsbymatch(repo, match)
615 else:
619 else:
616 return match
620 return match
617
621
618 def getstatuswriter(ui, repo, forcibly=None):
622 def getstatuswriter(ui, repo, forcibly=None):
619 '''Return the function to write largefiles specific status out
623 '''Return the function to write largefiles specific status out
620
624
621 If ``forcibly`` is ``None``, this returns the last element of
625 If ``forcibly`` is ``None``, this returns the last element of
622 ``repo._lfstatuswriters`` as "default" writer function.
626 ``repo._lfstatuswriters`` as "default" writer function.
623
627
624 Otherwise, this returns the function to always write out (or
628 Otherwise, this returns the function to always write out (or
625 ignore if ``not forcibly``) status.
629 ignore if ``not forcibly``) status.
626 '''
630 '''
627 if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
631 if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
628 return repo._lfstatuswriters[-1]
632 return repo._lfstatuswriters[-1]
629 else:
633 else:
630 if forcibly:
634 if forcibly:
631 return ui.status # forcibly WRITE OUT
635 return ui.status # forcibly WRITE OUT
632 else:
636 else:
633 return lambda *msg, **opts: None # forcibly IGNORE
637 return lambda *msg, **opts: None # forcibly IGNORE
@@ -1,231 +1,231 b''
1 Create user cache directory
1 Create user cache directory
2
2
3 $ USERCACHE=`pwd`/cache; export USERCACHE
3 $ USERCACHE=`pwd`/cache; export USERCACHE
4 $ cat <<EOF >> ${HGRCPATH}
4 $ cat <<EOF >> ${HGRCPATH}
5 > [extensions]
5 > [extensions]
6 > hgext.largefiles=
6 > hgext.largefiles=
7 > [largefiles]
7 > [largefiles]
8 > usercache=${USERCACHE}
8 > usercache=${USERCACHE}
9 > EOF
9 > EOF
10 $ mkdir -p ${USERCACHE}
10 $ mkdir -p ${USERCACHE}
11
11
12 Create source repo, and commit adding largefile.
12 Create source repo, and commit adding largefile.
13
13
14 $ hg init src
14 $ hg init src
15 $ cd src
15 $ cd src
16 $ echo large > large
16 $ echo large > large
17 $ hg add --large large
17 $ hg add --large large
18 $ hg commit -m 'add largefile'
18 $ hg commit -m 'add largefile'
19 $ hg rm large
19 $ hg rm large
20 $ hg commit -m 'branchhead without largefile'
20 $ hg commit -m 'branchhead without largefile' large
21 $ hg up -qr 0
21 $ hg up -qr 0
22 $ cd ..
22 $ cd ..
23
23
24 Discard all cached largefiles in USERCACHE
24 Discard all cached largefiles in USERCACHE
25
25
26 $ rm -rf ${USERCACHE}
26 $ rm -rf ${USERCACHE}
27
27
28 Create mirror repo, and pull from source without largefile:
28 Create mirror repo, and pull from source without largefile:
29 "pull" is used instead of "clone" for suppression of (1) updating to
29 "pull" is used instead of "clone" for suppression of (1) updating to
30 tip (= caching largefile from source repo), and (2) recording source
30 tip (= caching largefile from source repo), and (2) recording source
31 repo as "default" path in .hg/hgrc.
31 repo as "default" path in .hg/hgrc.
32
32
33 $ hg init mirror
33 $ hg init mirror
34 $ cd mirror
34 $ cd mirror
35 $ hg pull ../src
35 $ hg pull ../src
36 pulling from ../src
36 pulling from ../src
37 requesting all changes
37 requesting all changes
38 adding changesets
38 adding changesets
39 adding manifests
39 adding manifests
40 adding file changes
40 adding file changes
41 added 2 changesets with 1 changes to 1 files
41 added 2 changesets with 1 changes to 1 files
42 (run 'hg update' to get a working copy)
42 (run 'hg update' to get a working copy)
43
43
44 Update working directory to "tip", which requires largefile("large"),
44 Update working directory to "tip", which requires largefile("large"),
45 but there is no cache file for it. So, hg must treat it as
45 but there is no cache file for it. So, hg must treat it as
46 "missing"(!) file.
46 "missing"(!) file.
47
47
48 $ hg update -r0
48 $ hg update -r0
49 getting changed largefiles
49 getting changed largefiles
50 large: largefile 7f7097b041ccf68cc5561e9600da4655d21c6d18 not available from file:/*/$TESTTMP/mirror (glob)
50 large: largefile 7f7097b041ccf68cc5561e9600da4655d21c6d18 not available from file:/*/$TESTTMP/mirror (glob)
51 0 largefiles updated, 0 removed
51 0 largefiles updated, 0 removed
52 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
52 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
53 $ hg status
53 $ hg status
54 ! large
54 ! large
55
55
56 Update working directory to null: this cleanup .hg/largefiles/dirstate
56 Update working directory to null: this cleanup .hg/largefiles/dirstate
57
57
58 $ hg update null
58 $ hg update null
59 getting changed largefiles
59 getting changed largefiles
60 0 largefiles updated, 0 removed
60 0 largefiles updated, 0 removed
61 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
61 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
62
62
63 Update working directory to tip, again.
63 Update working directory to tip, again.
64
64
65 $ hg update -r0
65 $ hg update -r0
66 getting changed largefiles
66 getting changed largefiles
67 large: largefile 7f7097b041ccf68cc5561e9600da4655d21c6d18 not available from file:/*/$TESTTMP/mirror (glob)
67 large: largefile 7f7097b041ccf68cc5561e9600da4655d21c6d18 not available from file:/*/$TESTTMP/mirror (glob)
68 0 largefiles updated, 0 removed
68 0 largefiles updated, 0 removed
69 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
69 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
70 $ hg status
70 $ hg status
71 ! large
71 ! large
72 $ cd ..
72 $ cd ..
73
73
74 Verify that largefiles from pulled branchheads are fetched, also to an empty repo
74 Verify that largefiles from pulled branchheads are fetched, also to an empty repo
75
75
76 $ hg init mirror2
76 $ hg init mirror2
77 $ hg -R mirror2 pull src -r0
77 $ hg -R mirror2 pull src -r0
78 pulling from src
78 pulling from src
79 adding changesets
79 adding changesets
80 adding manifests
80 adding manifests
81 adding file changes
81 adding file changes
82 added 1 changesets with 1 changes to 1 files
82 added 1 changesets with 1 changes to 1 files
83 (run 'hg update' to get a working copy)
83 (run 'hg update' to get a working copy)
84
84
85 #if unix-permissions
85 #if unix-permissions
86
86
87 Portable way to print file permissions:
87 Portable way to print file permissions:
88
88
89 $ cat > ls-l.py <<EOF
89 $ cat > ls-l.py <<EOF
90 > #!/usr/bin/env python
90 > #!/usr/bin/env python
91 > import sys, os
91 > import sys, os
92 > path = sys.argv[1]
92 > path = sys.argv[1]
93 > print '%03o' % (os.lstat(path).st_mode & 0777)
93 > print '%03o' % (os.lstat(path).st_mode & 0777)
94 > EOF
94 > EOF
95 $ chmod +x ls-l.py
95 $ chmod +x ls-l.py
96
96
97 Test that files in .hg/largefiles inherit mode from .hg/store, not
97 Test that files in .hg/largefiles inherit mode from .hg/store, not
98 from file in working copy:
98 from file in working copy:
99
99
100 $ cd src
100 $ cd src
101 $ chmod 750 .hg/store
101 $ chmod 750 .hg/store
102 $ chmod 660 large
102 $ chmod 660 large
103 $ echo change >> large
103 $ echo change >> large
104 $ hg commit -m change
104 $ hg commit -m change
105 created new head
105 created new head
106 $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
106 $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
107 640
107 640
108
108
109 Test permission of with files in .hg/largefiles created by update:
109 Test permission of with files in .hg/largefiles created by update:
110
110
111 $ cd ../mirror
111 $ cd ../mirror
112 $ rm -r "$USERCACHE" .hg/largefiles # avoid links
112 $ rm -r "$USERCACHE" .hg/largefiles # avoid links
113 $ chmod 750 .hg/store
113 $ chmod 750 .hg/store
114 $ hg pull ../src --update -q
114 $ hg pull ../src --update -q
115 $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
115 $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
116 640
116 640
117
117
118 Test permission of files created by push:
118 Test permission of files created by push:
119
119
120 $ hg serve -R ../src -d -p $HGPORT --pid-file hg.pid \
120 $ hg serve -R ../src -d -p $HGPORT --pid-file hg.pid \
121 > --config "web.allow_push=*" --config web.push_ssl=no
121 > --config "web.allow_push=*" --config web.push_ssl=no
122 $ cat hg.pid >> $DAEMON_PIDS
122 $ cat hg.pid >> $DAEMON_PIDS
123
123
124 $ echo change >> large
124 $ echo change >> large
125 $ hg commit -m change
125 $ hg commit -m change
126
126
127 $ rm -r "$USERCACHE"
127 $ rm -r "$USERCACHE"
128
128
129 $ hg push -q http://localhost:$HGPORT/
129 $ hg push -q http://localhost:$HGPORT/
130
130
131 $ ../ls-l.py ../src/.hg/largefiles/b734e14a0971e370408ab9bce8d56d8485e368a9
131 $ ../ls-l.py ../src/.hg/largefiles/b734e14a0971e370408ab9bce8d56d8485e368a9
132 640
132 640
133
133
134 $ cd ..
134 $ cd ..
135
135
136 #endif
136 #endif
137
137
138 Test issue 4053 (remove --after on a deleted, uncommitted file shouldn't say
138 Test issue 4053 (remove --after on a deleted, uncommitted file shouldn't say
139 it is missing, but a remove on a nonexistent unknown file still should. Same
139 it is missing, but a remove on a nonexistent unknown file still should. Same
140 for a forget.)
140 for a forget.)
141
141
142 $ cd src
142 $ cd src
143 $ touch x
143 $ touch x
144 $ hg add x
144 $ hg add x
145 $ mv x y
145 $ mv x y
146 $ hg remove -A x y ENOENT
146 $ hg remove -A x y ENOENT
147 ENOENT: * (glob)
147 ENOENT: * (glob)
148 not removing y: file is untracked
148 not removing y: file is untracked
149 [1]
149 [1]
150 $ hg add y
150 $ hg add y
151 $ mv y z
151 $ mv y z
152 $ hg forget y z ENOENT
152 $ hg forget y z ENOENT
153 ENOENT: * (glob)
153 ENOENT: * (glob)
154 not removing z: file is already untracked
154 not removing z: file is already untracked
155 [1]
155 [1]
156
156
157 Largefiles are accessible from the share's store
157 Largefiles are accessible from the share's store
158 $ cd ..
158 $ cd ..
159 $ hg share -q src share_dst --config extensions.share=
159 $ hg share -q src share_dst --config extensions.share=
160 $ hg -R share_dst update -r0
160 $ hg -R share_dst update -r0
161 getting changed largefiles
161 getting changed largefiles
162 1 largefiles updated, 0 removed
162 1 largefiles updated, 0 removed
163 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
163 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
164
164
165 $ echo modified > share_dst/large
165 $ echo modified > share_dst/large
166 $ hg -R share_dst ci -m modified
166 $ hg -R share_dst ci -m modified
167 created new head
167 created new head
168
168
169 Only dirstate is in the local store for the share, and the largefile is in the
169 Only dirstate is in the local store for the share, and the largefile is in the
170 share source's local store. Avoid the extra largefiles added in the unix
170 share source's local store. Avoid the extra largefiles added in the unix
171 conditional above.
171 conditional above.
172 $ hash=`hg -R share_dst cat share_dst/.hglf/large`
172 $ hash=`hg -R share_dst cat share_dst/.hglf/large`
173 $ echo $hash
173 $ echo $hash
174 e2fb5f2139d086ded2cb600d5a91a196e76bf020
174 e2fb5f2139d086ded2cb600d5a91a196e76bf020
175
175
176 $ find share_dst/.hg/largefiles/* | sort
176 $ find share_dst/.hg/largefiles/* | sort
177 share_dst/.hg/largefiles/dirstate
177 share_dst/.hg/largefiles/dirstate
178
178
179 $ find src/.hg/largefiles/* | egrep "(dirstate|$hash)" | sort
179 $ find src/.hg/largefiles/* | egrep "(dirstate|$hash)" | sort
180 src/.hg/largefiles/dirstate
180 src/.hg/largefiles/dirstate
181 src/.hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
181 src/.hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
182
182
183 Inject corruption into the largefiles store and see how update handles that:
183 Inject corruption into the largefiles store and see how update handles that:
184
184
185 $ cd src
185 $ cd src
186 $ hg up -qC
186 $ hg up -qC
187 $ cat large
187 $ cat large
188 modified
188 modified
189 $ rm large
189 $ rm large
190 $ cat .hglf/large
190 $ cat .hglf/large
191 e2fb5f2139d086ded2cb600d5a91a196e76bf020
191 e2fb5f2139d086ded2cb600d5a91a196e76bf020
192 $ mv .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020 ..
192 $ mv .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020 ..
193 $ echo corruption > .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
193 $ echo corruption > .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
194 $ hg up -C
194 $ hg up -C
195 getting changed largefiles
195 getting changed largefiles
196 large: data corruption in $TESTTMP/src/.hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020 with hash 6a7bb2556144babe3899b25e5428123735bb1e27 (glob)
196 large: data corruption in $TESTTMP/src/.hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020 with hash 6a7bb2556144babe3899b25e5428123735bb1e27 (glob)
197 0 largefiles updated, 0 removed
197 0 largefiles updated, 0 removed
198 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
198 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
199 $ hg st
199 $ hg st
200 ! large
200 ! large
201 ? z
201 ? z
202 $ rm .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
202 $ rm .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
203
203
204 #if serve
204 #if serve
205
205
206 Test coverage of error handling from putlfile:
206 Test coverage of error handling from putlfile:
207
207
208 $ mkdir $TESTTMP/mirrorcache
208 $ mkdir $TESTTMP/mirrorcache
209 $ hg serve -R ../mirror -d -p $HGPORT1 --pid-file hg.pid --config largefiles.usercache=$TESTTMP/mirrorcache
209 $ hg serve -R ../mirror -d -p $HGPORT1 --pid-file hg.pid --config largefiles.usercache=$TESTTMP/mirrorcache
210 $ cat hg.pid >> $DAEMON_PIDS
210 $ cat hg.pid >> $DAEMON_PIDS
211
211
212 $ hg push http://localhost:$HGPORT1 -f --config files.usercache=nocache
212 $ hg push http://localhost:$HGPORT1 -f --config files.usercache=nocache
213 pushing to http://localhost:$HGPORT1/
213 pushing to http://localhost:$HGPORT1/
214 searching for changes
214 searching for changes
215 abort: remotestore: could not open file $TESTTMP/src/.hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020: HTTP Error 403: ssl required
215 abort: remotestore: could not open file $TESTTMP/src/.hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020: HTTP Error 403: ssl required
216 [255]
216 [255]
217
217
218 $ rm .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
218 $ rm .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
219
219
220 Test coverage of 'missing from store':
220 Test coverage of 'missing from store':
221
221
222 $ hg serve -R ../mirror -d -p $HGPORT2 --pid-file hg.pid --config largefiles.usercache=$TESTTMP/mirrorcache --config "web.allow_push=*" --config web.push_ssl=no
222 $ hg serve -R ../mirror -d -p $HGPORT2 --pid-file hg.pid --config largefiles.usercache=$TESTTMP/mirrorcache --config "web.allow_push=*" --config web.push_ssl=no
223 $ cat hg.pid >> $DAEMON_PIDS
223 $ cat hg.pid >> $DAEMON_PIDS
224
224
225 $ hg push http://localhost:$HGPORT2 -f --config largefiles.usercache=nocache
225 $ hg push http://localhost:$HGPORT2 -f --config largefiles.usercache=nocache
226 pushing to http://localhost:$HGPORT2/
226 pushing to http://localhost:$HGPORT2/
227 searching for changes
227 searching for changes
228 abort: largefile e2fb5f2139d086ded2cb600d5a91a196e76bf020 missing from store (needs to be uploaded)
228 abort: largefile e2fb5f2139d086ded2cb600d5a91a196e76bf020 missing from store (needs to be uploaded)
229 [255]
229 [255]
230
230
231 #endif
231 #endif
General Comments 0
You need to be logged in to leave comments. Login now