##// END OF EJS Templates
largefiles: prevent committing a missing largefile...
Matt Harbison -
r27947:571ba161 stable
parent child Browse files
Show More
@@ -1,637 +1,639 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import os
11 import os
12 import platform
12 import platform
13 import stat
13 import stat
14 import copy
14 import copy
15
15
16 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
16 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18 from mercurial import node, error
18 from mercurial import node, error
19
19
20 shortname = '.hglf'
20 shortname = '.hglf'
21 shortnameslash = shortname + '/'
21 shortnameslash = shortname + '/'
22 longname = 'largefiles'
22 longname = 'largefiles'
23
23
24
24
25 # -- Private worker functions ------------------------------------------
25 # -- Private worker functions ------------------------------------------
26
26
def getminsize(ui, assumelfiles, opt, default=10):
    '''Return the minimum size (in MB) above which files are treated as
    largefiles.

    Precedence: the explicit command-line value ``opt`` wins; otherwise,
    if ``assumelfiles`` is set, fall back to the ``largefiles.minsize``
    config value (or ``default``). Aborts if no size can be determined
    or the value is not numeric.
    '''
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, 'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            # NOTE(review): the trailing '\n' inside an Abort message is
            # unusual (Abort adds its own newline) -- confirm intent.
            raise error.Abort(_('largefiles: size must be number (not %s)\n')
                              % lfsize)
    if lfsize is None:
        raise error.Abort(_('minimum size for largefiles must be specified'))
    return lfsize
40
40
def link(src, dest):
    '''Hardlink src to dest, creating the destination directory first.

    If hardlinking fails (e.g. cross-device link), fall back to an
    atomic copy that preserves the source file's mode.
    '''
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        # Fix: the original opened src without ever closing it, leaking
        # the file descriptor; a context manager guarantees closure.
        with open(src, 'rb') as srcf:
            dst = util.atomictempfile(dest)
            for chunk in util.filechunkiter(srcf):
                dst.write(chunk)
            dst.close()
        os.chmod(dest, os.stat(src).st_mode)
52
52
def usercachepath(ui, hash):
    '''Return the per-user cache path for the largefile with the given
    hash, or None if no suitable cache directory can be determined.

    The ``largefiles.usercache`` config takes precedence; otherwise a
    platform-specific default is used (LOCALAPPDATA/APPDATA on Windows,
    ~/Library/Caches on Darwin, XDG_CACHE_HOME or ~/.cache on POSIX).
    '''
    path = ui.configpath(longname, 'usercache', None)
    if path:
        path = os.path.join(path, hash)
    else:
        if os.name == 'nt':
            # prefer the local (non-roaming) app data dir when available
            appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
            if appdata:
                path = os.path.join(appdata, longname, hash)
        elif platform.system() == 'Darwin':
            home = os.getenv('HOME')
            if home:
                path = os.path.join(home, 'Library', 'Caches',
                                    longname, hash)
        elif os.name == 'posix':
            path = os.getenv('XDG_CACHE_HOME')
            if path:
                path = os.path.join(path, longname, hash)
            else:
                home = os.getenv('HOME')
                if home:
                    path = os.path.join(home, '.cache', longname, hash)
        else:
            raise error.Abort(_('unknown operating system: %s\n') % os.name)
    # may still be None if the relevant environment variables are unset
    return path
78
78
def inusercache(ui, hash):
    '''Report whether the largefile with this hash exists in the
    per-user cache.'''
    cached = usercachepath(ui, hash)
    return cached and os.path.exists(cached)
82
82
def findfile(repo, hash):
    '''Locate the largefile with the given hash.

    Look first in the repo-local store, then in the per-user cache
    (hardlinking it into the store on a hit). Return the store path,
    or None if the file is in neither location.
    '''
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_('found %s in store\n') % hash)
        return path
    elif inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        # populate the local store from the user cache
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
94
94
class largefilesdirstate(dirstate.dirstate):
    '''A dirstate subclass that tracks largefiles.

    All path-taking methods normalize their argument with unixpath()
    before delegating, because the lfdirstate always stores
    slash-separated, normalized paths.
    '''
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def _ignore(self, f):
        # largefiles are never ignored
        return False
    def write(self, tr=False):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)
117
117
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.

    When ``create`` is true and no dirstate file exists yet, populate it
    from the standins currently tracked by the repo dirstate.
    '''
    lfstoredir = repo.join(longname)
    opener = scmutil.opener(lfstoredir)
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not os.path.exists(os.path.join(lfstoredir, 'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(matcher, [], False, False)

        if len(standins) > 0:
            util.makedirs(lfstoredir)

        for standin in standins:
            lfile = splitstandin(standin)
            # normallookup: force a later status check of each file
            lfdirstate.normallookup(lfile)
    return lfdirstate
142
142
def lfdirstatestatus(lfdirstate, repo):
    '''Resolve "unsure" entries in the largefiles dirstate against the
    working parent and return the resulting status object.

    Each unsure file is re-hashed and compared to its standin in the
    '.' changeset; matches are marked clean (and normalized in the
    dirstate), mismatches are moved to modified.
    '''
    wctx = repo['.']
    match = match_.always(repo.root, repo.getcwd())
    unsure, s = lfdirstate.status(match, [], False, False, False)
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = wctx[standin(lfile)]
        except LookupError:
            # standin not present in the parent changeset
            fctx = None
        if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return s
159
159
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset

    The returned names are the largefile names (standin prefix
    stripped), not the standin paths.
    '''

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [splitstandin(f)
            for f in repo[rev].walk(matcher)
            if rev is not None or repo.dirstate[f] != '?']
171
171
def instore(repo, hash, forcelocal=False):
    '''Report whether the largefile with this hash exists in the store
    (the local store only, when forcelocal is set).'''
    candidate = storepath(repo, hash, forcelocal)
    return os.path.exists(candidate)
174
174
def storepath(repo, hash, forcelocal=False):
    '''Return the path in the store for the largefile with this hash.

    For shared repositories the share source's store is used, unless
    forcelocal requests the local store.
    '''
    if repo.shared() and not forcelocal:
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.join(longname, hash)
179
179
def findstorepath(repo, hash):
    '''Search through the local store path(s) to find the file for the given
    hash.  If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    '''
    # For shared repos, the primary store is in the share source.  But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
        # Fix: the original returned the bare path here, violating the
        # documented (path, exists) contract and breaking callers that
        # unpack the result (e.g. findfile).
        return (storepath(repo, hash, True), True)

    return (path, False)
196
196
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happened:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).

    The copied data is re-hashed on the way out; on a hash mismatch the
    destination is removed and False is returned.
    '''
    path = findfile(repo, hash)
    if path is None:
        return False
    util.makedirs(os.path.dirname(repo.wjoin(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    dest = repo.wjoin(filename)
    with open(path, 'rb') as srcfd:
        with open(dest, 'wb') as destfd:
            gothash = copyandhash(srcfd, destfd)
    if gothash != hash:
        # cache corruption: discard the bad copy
        repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
                     % (filename, path, gothash))
        util.unlink(dest)
        return False
    return True
219
219
def copytostore(repo, rev, file, uploaded=False):
    '''Copy the largefile for ``file`` at revision ``rev`` from the
    working directory into the store, unless it is already there.

    Warns (without aborting) when the working-directory copy of the
    largefile is missing.
    '''
    hash = readstandin(repo, file, rev)
    if instore(repo, hash):
        return
    absfile = repo.wjoin(file)
    if os.path.exists(absfile):
        copytostoreabsolute(repo, absfile, hash)
    else:
        repo.ui.warn(_("%s: largefile %s not available from local store\n") %
                     (file, hash))
230
230
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''

    ctx = repo[node]
    for filename in ctx.files():
        # only consider standins that actually exist in this revision's
        # manifest (ctx.files() may list removed files too)
        if isstandin(filename) and filename in ctx.manifest():
            realfile = splitstandin(filename)
            copytostore(repo, ctx.node(), realfile)
239
239
240
240
def copytostoreabsolute(repo, file, hash):
    '''Copy the largefile at absolute path ``file`` into the store under
    ``hash``, preferring a hardlink from the user cache when possible,
    and mirror the result into the user cache.
    '''
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        # Fix: the original opened ``file`` without closing it, leaking
        # the descriptor; a context manager guarantees closure.
        with open(file, 'rb') as srcf:
            dst = util.atomictempfile(storepath(repo, hash),
                                      createmode=repo.store.createmode)
            for chunk in util.filechunkiter(srcf):
                dst.write(chunk)
            dst.close()
        linktousercache(repo, hash)
252
252
def linktousercache(repo, hash):
    '''Mirror the stored largefile into the per-user cache, if a cache
    location is available on this platform.'''
    target = usercachepath(repo.ui, hash)
    if target:
        link(storepath(repo, hash), target)
257
257
def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    standindir = repo.wjoin(shortname)

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        # rebase rmatcher's patterns under the standin directory
        pats = [os.path.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [standindir]
        match = scmutil.match(repo[None], pats, badfn=badfn)
        # if pats is empty, it would incorrectly always match, so clear _always
        match._always = False
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [standindir], badfn=badfn)
    return match
276
276
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn
    def composedmatchfn(f):
        # a path matches only if it is a standin AND its largefile name
        # matches the original matcher
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composedmatchfn

    return smatcher
288
288
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add().  So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    normalized = util.pconvert(filename)
    return shortnameslash + normalized
300
300
def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    prefix = shortnameslash
    return filename.startswith(prefix)
305
305
def splitstandin(filename):
    '''Return the largefile name for a standin path, or None when the
    path is not a standin.'''
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    parts = util.pconvert(filename).split('/', 1)
    if len(parts) != 2 or parts[0] != shortname:
        return None
    return parts[1]
315
315
def updatestandin(repo, standin):
    '''Re-hash the working-directory largefile for ``standin`` and
    rewrite the standin file with the new hash and executable bit.

    Aborts when the largefile is missing from the working directory, so
    a missing largefile cannot be silently committed.
    '''
    file = repo.wjoin(splitstandin(standin))
    if os.path.exists(file):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
    else:
        raise error.Abort(_('%s: file not found!') % splitstandin(standin))
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    fctx = repo[node][standin(filename)]
    return fctx.data().strip()
327
329
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    # a standin file is just the hash plus a trailing newline; the
    # executable bit mirrors the largefile's own mode
    repo.wwrite(standin, hash + '\n', executable and 'x' or '')
331
333
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash.'''
    digest = util.sha1('')
    for chunk in instream:
        digest.update(chunk)
        outfile.write(chunk)
    return digest.hexdigest()
340
342
def hashrepofile(repo, file):
    '''Return the hash of the repo-relative largefile ``file`` in the
    working directory.'''
    abspath = repo.wjoin(file)
    return hashfile(abspath)
343
345
def hashfile(file):
    '''Return the hex SHA-1 of the file's contents, or the empty string
    when the file does not exist.'''
    if not os.path.exists(file):
        return ''
    digest = util.sha1('')
    fd = open(file, 'rb')
    # hash in 128k chunks to bound memory use
    for chunk in util.filechunkiter(fd, 128 * 1024):
        digest.update(chunk)
    fd.close()
    return digest.hexdigest()
353
355
def getexecutable(filename):
    '''Report whether the file is executable by user, group and other.'''
    mode = os.stat(filename).st_mode
    required = (stat.S_IXUSR, stat.S_IXGRP, stat.S_IXOTH)
    return all(mode & bit for bit in required)
359
361
def urljoin(first, second, *arg):
    '''Join URL components with exactly one slash between each pair.'''
    def join(left, right):
        # normalize so the boundary carries a single '/'
        sep = '' if left.endswith('/') else '/'
        trimmed = right[1:] if right.startswith('/') else right
        return left + sep + trimmed

    url = join(first, second)
    for piece in arg:
        url = join(url, piece)
    return url
372
374
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    digest = util.sha1()
    for piece in util.filechunkiter(data):
        digest.update(piece)
    return digest.hexdigest()
380
382
def httpsendfile(ui, filename):
    '''Wrap ``filename`` in an httpsendfile opened for binary reading.'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
383
385
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    normalized = os.path.normpath(path)
    return util.pconvert(normalized)
387
389
def islfilesrepo(repo):
    '''Report whether the repo actually uses largefiles: either the
    requirement is set and standins exist in the store, or the
    largefiles dirstate is non-empty.'''
    if ('largefiles' in repo.requirements and
            any(shortnameslash in f[0] for f in repo.store.datafiles())):
        return True

    # fall back to the lfdirstate (without creating it)
    return any(openlfdirstate(repo.ui, repo, False))
394
396
class storeprotonotcapable(Exception):
    '''Raised when no remote store supports any of the required store
    types; ``storetypes`` records what was requested.'''
    def __init__(self, storetypes):
        super(storeprotonotcapable, self).__init__()
        self.storetypes = storetypes
398
400
def getstandinsstate(repo):
    '''Return a list of (largefile, hash) pairs for every standin tracked
    by the repo dirstate; hash is None when the standin is unreadable.'''
    standins = []
    matcher = getstandinmatcher(repo)
    for standin in repo.dirstate.walk(matcher, [], False, False):
        lfile = splitstandin(standin)
        try:
            hash = readstandin(repo, lfile)
        except IOError:
            # standin file missing/unreadable in the working directory
            hash = None
        standins.append((lfile, hash))
    return standins
410
412
def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    '''Sync the lfdirstate entry for ``lfile`` with the state of its
    standin in the repo dirstate.

    ``normallookup`` forces a later status check even for 'n' entries.
    '''
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        # raw dirstate tuple: [0] is state, [3] is mtime
        stat = repo.dirstate._map[lfstandin]
        state, mtime = stat[0], stat[3]
    else:
        state, mtime = '?', -1
    if state == 'n':
        if (normallookup or mtime < 0 or
            not os.path.exists(repo.wjoin(lfile))):
            # state 'n' doesn't ensure 'clean' in this case
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'm':
        lfdirstate.normallookup(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)
433
435
def markcommitted(orig, ctx, node):
    '''Wrapper for ctx.markcommitted(): after the original runs, sync
    the lfdirstate for every standin touched by the commit and copy the
    committed largefiles into the store.'''
    repo = ctx.repo()

    orig(node)

    # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
    # because files coming from the 2nd parent are omitted in the latter.
    #
    # The former should be used to get targets of "synclfdirstate",
    # because such files:
    # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
    # - have to be marked as "n" after commit, but
    # - aren't listed in "repo[node].files()"

    lfdirstate = openlfdirstate(repo.ui, repo)
    for f in ctx.files():
        if isstandin(f):
            lfile = splitstandin(f)
            synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write()

    # As part of committing, copy all of the largefiles into the cache.
    copyalltostore(repo, node)
457
459
def getlfilestoupdate(oldstandins, newstandins):
    '''Return the largefile names whose (name, hash) entries differ
    between the two standin lists, without duplicates.'''
    changed = set(oldstandins) ^ set(newstandins)
    filelist = []
    for name, _hash in changed:
        if name not in filelist:
            filelist.append(name)
    return filelist
465
467
def getlfilestoupload(repo, missing, addfunc):
    '''For every outgoing revision in ``missing``, find the standins it
    touches and call ``addfunc(standin, hash)`` for each.

    Progress is reported per revision. For merge commits, files that
    exist in either parent but not in the merge, and files whose
    manifest entry differs from either parent, are also considered.
    '''
    for i, n in enumerate(missing):
        repo.ui.progress(_('finding outgoing largefiles'), i,
                         unit=_('revision'), total=len(missing))
        parents = [p for p in repo.changelog.parents(n) if p != node.nullid]

        # temporarily disable lfstatus so repo[n] sees raw standins
        oldlfstatus = repo.lfstatus
        repo.lfstatus = False
        try:
            ctx = repo[n]
        finally:
            repo.lfstatus = oldlfstatus

        files = set(ctx.files())
        if len(parents) == 2:
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            # include files dropped by the merge...
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            # ...and files that differ from either parent
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        for fn in files:
            if isstandin(fn) and fn in ctx:
                addfunc(fn, ctx[fn].data().strip())
    repo.ui.progress(_('finding outgoing largefiles'), None)
497
499
498 def updatestandinsbymatch(repo, match):
500 def updatestandinsbymatch(repo, match):
499 '''Update standins in the working directory according to specified match
501 '''Update standins in the working directory according to specified match
500
502
501 This returns (possibly modified) ``match`` object to be used for
503 This returns (possibly modified) ``match`` object to be used for
502 subsequent commit process.
504 subsequent commit process.
503 '''
505 '''
504
506
505 ui = repo.ui
507 ui = repo.ui
506
508
507 # Case 1: user calls commit with no specific files or
509 # Case 1: user calls commit with no specific files or
508 # include/exclude patterns: refresh and commit all files that
510 # include/exclude patterns: refresh and commit all files that
509 # are "dirty".
511 # are "dirty".
510 if match is None or match.always():
512 if match is None or match.always():
511 # Spend a bit of time here to get a list of files we know
513 # Spend a bit of time here to get a list of files we know
512 # are modified so we can compare only against those.
514 # are modified so we can compare only against those.
513 # It can cost a lot of time (several seconds)
515 # It can cost a lot of time (several seconds)
514 # otherwise to update all standins if the largefiles are
516 # otherwise to update all standins if the largefiles are
515 # large.
517 # large.
516 lfdirstate = openlfdirstate(ui, repo)
518 lfdirstate = openlfdirstate(ui, repo)
517 dirtymatch = match_.always(repo.root, repo.getcwd())
519 dirtymatch = match_.always(repo.root, repo.getcwd())
518 unsure, s = lfdirstate.status(dirtymatch, [], False, False,
520 unsure, s = lfdirstate.status(dirtymatch, [], False, False,
519 False)
521 False)
520 modifiedfiles = unsure + s.modified + s.added + s.removed
522 modifiedfiles = unsure + s.modified + s.added + s.removed
521 lfiles = listlfiles(repo)
523 lfiles = listlfiles(repo)
522 # this only loops through largefiles that exist (not
524 # this only loops through largefiles that exist (not
523 # removed/renamed)
525 # removed/renamed)
524 for lfile in lfiles:
526 for lfile in lfiles:
525 if lfile in modifiedfiles:
527 if lfile in modifiedfiles:
526 if os.path.exists(
528 if os.path.exists(
527 repo.wjoin(standin(lfile))):
529 repo.wjoin(standin(lfile))):
528 # this handles the case where a rebase is being
530 # this handles the case where a rebase is being
529 # performed and the working copy is not updated
531 # performed and the working copy is not updated
530 # yet.
532 # yet.
531 if os.path.exists(repo.wjoin(lfile)):
533 if os.path.exists(repo.wjoin(lfile)):
532 updatestandin(repo,
534 updatestandin(repo,
533 standin(lfile))
535 standin(lfile))
534
536
535 return match
537 return match
536
538
537 lfiles = listlfiles(repo)
539 lfiles = listlfiles(repo)
538 match._files = repo._subdirlfs(match.files(), lfiles)
540 match._files = repo._subdirlfs(match.files(), lfiles)
539
541
540 # Case 2: user calls commit with specified patterns: refresh
542 # Case 2: user calls commit with specified patterns: refresh
541 # any matching big files.
543 # any matching big files.
542 smatcher = composestandinmatcher(repo, match)
544 smatcher = composestandinmatcher(repo, match)
543 standins = repo.dirstate.walk(smatcher, [], False, False)
545 standins = repo.dirstate.walk(smatcher, [], False, False)
544
546
545 # No matching big files: get out of the way and pass control to
547 # No matching big files: get out of the way and pass control to
546 # the usual commit() method.
548 # the usual commit() method.
547 if not standins:
549 if not standins:
548 return match
550 return match
549
551
550 # Refresh all matching big files. It's possible that the
552 # Refresh all matching big files. It's possible that the
551 # commit will end up failing, in which case the big files will
553 # commit will end up failing, in which case the big files will
552 # stay refreshed. No harm done: the user modified them and
554 # stay refreshed. No harm done: the user modified them and
553 # asked to commit them, so sooner or later we're going to
555 # asked to commit them, so sooner or later we're going to
554 # refresh the standins. Might as well leave them refreshed.
556 # refresh the standins. Might as well leave them refreshed.
555 lfdirstate = openlfdirstate(ui, repo)
557 lfdirstate = openlfdirstate(ui, repo)
556 for fstandin in standins:
558 for fstandin in standins:
557 lfile = splitstandin(fstandin)
559 lfile = splitstandin(fstandin)
558 if lfdirstate[lfile] != 'r':
560 if lfdirstate[lfile] != 'r':
559 updatestandin(repo, fstandin)
561 updatestandin(repo, fstandin)
560
562
561 # Cook up a new matcher that only matches regular files or
563 # Cook up a new matcher that only matches regular files or
562 # standins corresponding to the big files requested by the
564 # standins corresponding to the big files requested by the
563 # user. Have to modify _files to prevent commit() from
565 # user. Have to modify _files to prevent commit() from
564 # complaining "not tracked" for big files.
566 # complaining "not tracked" for big files.
565 match = copy.copy(match)
567 match = copy.copy(match)
566 origmatchfn = match.matchfn
568 origmatchfn = match.matchfn
567
569
568 # Check both the list of largefiles and the list of
570 # Check both the list of largefiles and the list of
569 # standins because if a largefile was removed, it
571 # standins because if a largefile was removed, it
570 # won't be in the list of largefiles at this point
572 # won't be in the list of largefiles at this point
571 match._files += sorted(standins)
573 match._files += sorted(standins)
572
574
573 actualfiles = []
575 actualfiles = []
574 for f in match._files:
576 for f in match._files:
575 fstandin = standin(f)
577 fstandin = standin(f)
576
578
577 # For largefiles, only one of the normal and standin should be
579 # For largefiles, only one of the normal and standin should be
578 # committed (except if one of them is a remove). In the case of a
580 # committed (except if one of them is a remove). In the case of a
579 # standin removal, drop the normal file if it is unknown to dirstate.
581 # standin removal, drop the normal file if it is unknown to dirstate.
580 # Thus, skip plain largefile names but keep the standin.
582 # Thus, skip plain largefile names but keep the standin.
581 if f in lfiles or fstandin in standins:
583 if f in lfiles or fstandin in standins:
582 if repo.dirstate[fstandin] != 'r':
584 if repo.dirstate[fstandin] != 'r':
583 if repo.dirstate[f] != 'r':
585 if repo.dirstate[f] != 'r':
584 continue
586 continue
585 elif repo.dirstate[f] == '?':
587 elif repo.dirstate[f] == '?':
586 continue
588 continue
587
589
588 actualfiles.append(f)
590 actualfiles.append(f)
589 match._files = actualfiles
591 match._files = actualfiles
590
592
591 def matchfn(f):
593 def matchfn(f):
592 if origmatchfn(f):
594 if origmatchfn(f):
593 return f not in lfiles
595 return f not in lfiles
594 else:
596 else:
595 return f in standins
597 return f in standins
596
598
597 match.matchfn = matchfn
599 match.matchfn = matchfn
598
600
599 return match
601 return match
600
602
601 class automatedcommithook(object):
603 class automatedcommithook(object):
602 '''Stateful hook to update standins at the 1st commit of resuming
604 '''Stateful hook to update standins at the 1st commit of resuming
603
605
604 For efficiency, updating standins in the working directory should
606 For efficiency, updating standins in the working directory should
605 be avoided while automated committing (like rebase, transplant and
607 be avoided while automated committing (like rebase, transplant and
606 so on), because they should be updated before committing.
608 so on), because they should be updated before committing.
607
609
608 But the 1st commit of resuming automated committing (e.g. ``rebase
610 But the 1st commit of resuming automated committing (e.g. ``rebase
609 --continue``) should update them, because largefiles may be
611 --continue``) should update them, because largefiles may be
610 modified manually.
612 modified manually.
611 '''
613 '''
612 def __init__(self, resuming):
614 def __init__(self, resuming):
613 self.resuming = resuming
615 self.resuming = resuming
614
616
615 def __call__(self, repo, match):
617 def __call__(self, repo, match):
616 if self.resuming:
618 if self.resuming:
617 self.resuming = False # avoids updating at subsequent commits
619 self.resuming = False # avoids updating at subsequent commits
618 return updatestandinsbymatch(repo, match)
620 return updatestandinsbymatch(repo, match)
619 else:
621 else:
620 return match
622 return match
621
623
622 def getstatuswriter(ui, repo, forcibly=None):
624 def getstatuswriter(ui, repo, forcibly=None):
623 '''Return the function to write largefiles specific status out
625 '''Return the function to write largefiles specific status out
624
626
625 If ``forcibly`` is ``None``, this returns the last element of
627 If ``forcibly`` is ``None``, this returns the last element of
626 ``repo._lfstatuswriters`` as "default" writer function.
628 ``repo._lfstatuswriters`` as "default" writer function.
627
629
628 Otherwise, this returns the function to always write out (or
630 Otherwise, this returns the function to always write out (or
629 ignore if ``not forcibly``) status.
631 ignore if ``not forcibly``) status.
630 '''
632 '''
631 if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
633 if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
632 return repo._lfstatuswriters[-1]
634 return repo._lfstatuswriters[-1]
633 else:
635 else:
634 if forcibly:
636 if forcibly:
635 return ui.status # forcibly WRITE OUT
637 return ui.status # forcibly WRITE OUT
636 else:
638 else:
637 return lambda *msg, **opts: None # forcibly IGNORE
639 return lambda *msg, **opts: None # forcibly IGNORE
@@ -1,231 +1,237 b''
1 Create user cache directory
1 Create user cache directory
2
2
3 $ USERCACHE=`pwd`/cache; export USERCACHE
3 $ USERCACHE=`pwd`/cache; export USERCACHE
4 $ cat <<EOF >> ${HGRCPATH}
4 $ cat <<EOF >> ${HGRCPATH}
5 > [extensions]
5 > [extensions]
6 > hgext.largefiles=
6 > hgext.largefiles=
7 > [largefiles]
7 > [largefiles]
8 > usercache=${USERCACHE}
8 > usercache=${USERCACHE}
9 > EOF
9 > EOF
10 $ mkdir -p ${USERCACHE}
10 $ mkdir -p ${USERCACHE}
11
11
12 Create source repo, and commit adding largefile.
12 Create source repo, and commit adding largefile.
13
13
14 $ hg init src
14 $ hg init src
15 $ cd src
15 $ cd src
16 $ echo large > large
16 $ echo large > large
17 $ hg add --large large
17 $ hg add --large large
18 $ hg commit -m 'add largefile'
18 $ hg commit -m 'add largefile'
19 $ hg rm large
19 $ hg rm large
20 $ hg commit -m 'branchhead without largefile' large
20 $ hg commit -m 'branchhead without largefile' large
21 $ hg up -qr 0
21 $ hg up -qr 0
22 $ rm large
23 $ echo "0000000000000000000000000000000000000000" > .hglf/large
24 $ hg commit -m 'commit missing file with corrupt standin' large
25 abort: large: file not found!
26 [255]
27 $ hg up -Cqr 0
22 $ cd ..
28 $ cd ..
23
29
24 Discard all cached largefiles in USERCACHE
30 Discard all cached largefiles in USERCACHE
25
31
26 $ rm -rf ${USERCACHE}
32 $ rm -rf ${USERCACHE}
27
33
28 Create mirror repo, and pull from source without largefile:
34 Create mirror repo, and pull from source without largefile:
29 "pull" is used instead of "clone" for suppression of (1) updating to
35 "pull" is used instead of "clone" for suppression of (1) updating to
30 tip (= caching largefile from source repo), and (2) recording source
36 tip (= caching largefile from source repo), and (2) recording source
31 repo as "default" path in .hg/hgrc.
37 repo as "default" path in .hg/hgrc.
32
38
33 $ hg init mirror
39 $ hg init mirror
34 $ cd mirror
40 $ cd mirror
35 $ hg pull ../src
41 $ hg pull ../src
36 pulling from ../src
42 pulling from ../src
37 requesting all changes
43 requesting all changes
38 adding changesets
44 adding changesets
39 adding manifests
45 adding manifests
40 adding file changes
46 adding file changes
41 added 2 changesets with 1 changes to 1 files
47 added 2 changesets with 1 changes to 1 files
42 (run 'hg update' to get a working copy)
48 (run 'hg update' to get a working copy)
43
49
44 Update working directory to "tip", which requires largefile("large"),
50 Update working directory to "tip", which requires largefile("large"),
45 but there is no cache file for it. So, hg must treat it as
51 but there is no cache file for it. So, hg must treat it as
46 "missing"(!) file.
52 "missing"(!) file.
47
53
48 $ hg update -r0
54 $ hg update -r0
49 getting changed largefiles
55 getting changed largefiles
50 large: largefile 7f7097b041ccf68cc5561e9600da4655d21c6d18 not available from file:/*/$TESTTMP/mirror (glob)
56 large: largefile 7f7097b041ccf68cc5561e9600da4655d21c6d18 not available from file:/*/$TESTTMP/mirror (glob)
51 0 largefiles updated, 0 removed
57 0 largefiles updated, 0 removed
52 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
58 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
53 $ hg status
59 $ hg status
54 ! large
60 ! large
55
61
56 Update working directory to null: this cleanup .hg/largefiles/dirstate
62 Update working directory to null: this cleanup .hg/largefiles/dirstate
57
63
58 $ hg update null
64 $ hg update null
59 getting changed largefiles
65 getting changed largefiles
60 0 largefiles updated, 0 removed
66 0 largefiles updated, 0 removed
61 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
67 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
62
68
63 Update working directory to tip, again.
69 Update working directory to tip, again.
64
70
65 $ hg update -r0
71 $ hg update -r0
66 getting changed largefiles
72 getting changed largefiles
67 large: largefile 7f7097b041ccf68cc5561e9600da4655d21c6d18 not available from file:/*/$TESTTMP/mirror (glob)
73 large: largefile 7f7097b041ccf68cc5561e9600da4655d21c6d18 not available from file:/*/$TESTTMP/mirror (glob)
68 0 largefiles updated, 0 removed
74 0 largefiles updated, 0 removed
69 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
75 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
70 $ hg status
76 $ hg status
71 ! large
77 ! large
72 $ cd ..
78 $ cd ..
73
79
74 Verify that largefiles from pulled branchheads are fetched, also to an empty repo
80 Verify that largefiles from pulled branchheads are fetched, also to an empty repo
75
81
76 $ hg init mirror2
82 $ hg init mirror2
77 $ hg -R mirror2 pull src -r0
83 $ hg -R mirror2 pull src -r0
78 pulling from src
84 pulling from src
79 adding changesets
85 adding changesets
80 adding manifests
86 adding manifests
81 adding file changes
87 adding file changes
82 added 1 changesets with 1 changes to 1 files
88 added 1 changesets with 1 changes to 1 files
83 (run 'hg update' to get a working copy)
89 (run 'hg update' to get a working copy)
84
90
85 #if unix-permissions
91 #if unix-permissions
86
92
87 Portable way to print file permissions:
93 Portable way to print file permissions:
88
94
89 $ cat > ls-l.py <<EOF
95 $ cat > ls-l.py <<EOF
90 > #!/usr/bin/env python
96 > #!/usr/bin/env python
91 > import sys, os
97 > import sys, os
92 > path = sys.argv[1]
98 > path = sys.argv[1]
93 > print '%03o' % (os.lstat(path).st_mode & 0777)
99 > print '%03o' % (os.lstat(path).st_mode & 0777)
94 > EOF
100 > EOF
95 $ chmod +x ls-l.py
101 $ chmod +x ls-l.py
96
102
97 Test that files in .hg/largefiles inherit mode from .hg/store, not
103 Test that files in .hg/largefiles inherit mode from .hg/store, not
98 from file in working copy:
104 from file in working copy:
99
105
100 $ cd src
106 $ cd src
101 $ chmod 750 .hg/store
107 $ chmod 750 .hg/store
102 $ chmod 660 large
108 $ chmod 660 large
103 $ echo change >> large
109 $ echo change >> large
104 $ hg commit -m change
110 $ hg commit -m change
105 created new head
111 created new head
106 $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
112 $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
107 640
113 640
108
114
109 Test permission of with files in .hg/largefiles created by update:
115 Test permission of with files in .hg/largefiles created by update:
110
116
111 $ cd ../mirror
117 $ cd ../mirror
112 $ rm -r "$USERCACHE" .hg/largefiles # avoid links
118 $ rm -r "$USERCACHE" .hg/largefiles # avoid links
113 $ chmod 750 .hg/store
119 $ chmod 750 .hg/store
114 $ hg pull ../src --update -q
120 $ hg pull ../src --update -q
115 $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
121 $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
116 640
122 640
117
123
118 Test permission of files created by push:
124 Test permission of files created by push:
119
125
120 $ hg serve -R ../src -d -p $HGPORT --pid-file hg.pid \
126 $ hg serve -R ../src -d -p $HGPORT --pid-file hg.pid \
121 > --config "web.allow_push=*" --config web.push_ssl=no
127 > --config "web.allow_push=*" --config web.push_ssl=no
122 $ cat hg.pid >> $DAEMON_PIDS
128 $ cat hg.pid >> $DAEMON_PIDS
123
129
124 $ echo change >> large
130 $ echo change >> large
125 $ hg commit -m change
131 $ hg commit -m change
126
132
127 $ rm -r "$USERCACHE"
133 $ rm -r "$USERCACHE"
128
134
129 $ hg push -q http://localhost:$HGPORT/
135 $ hg push -q http://localhost:$HGPORT/
130
136
131 $ ../ls-l.py ../src/.hg/largefiles/b734e14a0971e370408ab9bce8d56d8485e368a9
137 $ ../ls-l.py ../src/.hg/largefiles/b734e14a0971e370408ab9bce8d56d8485e368a9
132 640
138 640
133
139
134 $ cd ..
140 $ cd ..
135
141
136 #endif
142 #endif
137
143
138 Test issue 4053 (remove --after on a deleted, uncommitted file shouldn't say
144 Test issue 4053 (remove --after on a deleted, uncommitted file shouldn't say
139 it is missing, but a remove on a nonexistent unknown file still should. Same
145 it is missing, but a remove on a nonexistent unknown file still should. Same
140 for a forget.)
146 for a forget.)
141
147
142 $ cd src
148 $ cd src
143 $ touch x
149 $ touch x
144 $ hg add x
150 $ hg add x
145 $ mv x y
151 $ mv x y
146 $ hg remove -A x y ENOENT
152 $ hg remove -A x y ENOENT
147 ENOENT: * (glob)
153 ENOENT: * (glob)
148 not removing y: file is untracked
154 not removing y: file is untracked
149 [1]
155 [1]
150 $ hg add y
156 $ hg add y
151 $ mv y z
157 $ mv y z
152 $ hg forget y z ENOENT
158 $ hg forget y z ENOENT
153 ENOENT: * (glob)
159 ENOENT: * (glob)
154 not removing z: file is already untracked
160 not removing z: file is already untracked
155 [1]
161 [1]
156
162
157 Largefiles are accessible from the share's store
163 Largefiles are accessible from the share's store
158 $ cd ..
164 $ cd ..
159 $ hg share -q src share_dst --config extensions.share=
165 $ hg share -q src share_dst --config extensions.share=
160 $ hg -R share_dst update -r0
166 $ hg -R share_dst update -r0
161 getting changed largefiles
167 getting changed largefiles
162 1 largefiles updated, 0 removed
168 1 largefiles updated, 0 removed
163 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
169 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
164
170
165 $ echo modified > share_dst/large
171 $ echo modified > share_dst/large
166 $ hg -R share_dst ci -m modified
172 $ hg -R share_dst ci -m modified
167 created new head
173 created new head
168
174
169 Only dirstate is in the local store for the share, and the largefile is in the
175 Only dirstate is in the local store for the share, and the largefile is in the
170 share source's local store. Avoid the extra largefiles added in the unix
176 share source's local store. Avoid the extra largefiles added in the unix
171 conditional above.
177 conditional above.
172 $ hash=`hg -R share_dst cat share_dst/.hglf/large`
178 $ hash=`hg -R share_dst cat share_dst/.hglf/large`
173 $ echo $hash
179 $ echo $hash
174 e2fb5f2139d086ded2cb600d5a91a196e76bf020
180 e2fb5f2139d086ded2cb600d5a91a196e76bf020
175
181
176 $ find share_dst/.hg/largefiles/* | sort
182 $ find share_dst/.hg/largefiles/* | sort
177 share_dst/.hg/largefiles/dirstate
183 share_dst/.hg/largefiles/dirstate
178
184
179 $ find src/.hg/largefiles/* | egrep "(dirstate|$hash)" | sort
185 $ find src/.hg/largefiles/* | egrep "(dirstate|$hash)" | sort
180 src/.hg/largefiles/dirstate
186 src/.hg/largefiles/dirstate
181 src/.hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
187 src/.hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
182
188
183 Inject corruption into the largefiles store and see how update handles that:
189 Inject corruption into the largefiles store and see how update handles that:
184
190
185 $ cd src
191 $ cd src
186 $ hg up -qC
192 $ hg up -qC
187 $ cat large
193 $ cat large
188 modified
194 modified
189 $ rm large
195 $ rm large
190 $ cat .hglf/large
196 $ cat .hglf/large
191 e2fb5f2139d086ded2cb600d5a91a196e76bf020
197 e2fb5f2139d086ded2cb600d5a91a196e76bf020
192 $ mv .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020 ..
198 $ mv .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020 ..
193 $ echo corruption > .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
199 $ echo corruption > .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
194 $ hg up -C
200 $ hg up -C
195 getting changed largefiles
201 getting changed largefiles
196 large: data corruption in $TESTTMP/src/.hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020 with hash 6a7bb2556144babe3899b25e5428123735bb1e27 (glob)
202 large: data corruption in $TESTTMP/src/.hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020 with hash 6a7bb2556144babe3899b25e5428123735bb1e27 (glob)
197 0 largefiles updated, 0 removed
203 0 largefiles updated, 0 removed
198 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
204 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
199 $ hg st
205 $ hg st
200 ! large
206 ! large
201 ? z
207 ? z
202 $ rm .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
208 $ rm .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
203
209
204 #if serve
210 #if serve
205
211
206 Test coverage of error handling from putlfile:
212 Test coverage of error handling from putlfile:
207
213
208 $ mkdir $TESTTMP/mirrorcache
214 $ mkdir $TESTTMP/mirrorcache
209 $ hg serve -R ../mirror -d -p $HGPORT1 --pid-file hg.pid --config largefiles.usercache=$TESTTMP/mirrorcache
215 $ hg serve -R ../mirror -d -p $HGPORT1 --pid-file hg.pid --config largefiles.usercache=$TESTTMP/mirrorcache
210 $ cat hg.pid >> $DAEMON_PIDS
216 $ cat hg.pid >> $DAEMON_PIDS
211
217
212 $ hg push http://localhost:$HGPORT1 -f --config files.usercache=nocache
218 $ hg push http://localhost:$HGPORT1 -f --config files.usercache=nocache
213 pushing to http://localhost:$HGPORT1/
219 pushing to http://localhost:$HGPORT1/
214 searching for changes
220 searching for changes
215 abort: remotestore: could not open file $TESTTMP/src/.hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020: HTTP Error 403: ssl required
221 abort: remotestore: could not open file $TESTTMP/src/.hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020: HTTP Error 403: ssl required
216 [255]
222 [255]
217
223
218 $ rm .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
224 $ rm .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
219
225
220 Test coverage of 'missing from store':
226 Test coverage of 'missing from store':
221
227
222 $ hg serve -R ../mirror -d -p $HGPORT2 --pid-file hg.pid --config largefiles.usercache=$TESTTMP/mirrorcache --config "web.allow_push=*" --config web.push_ssl=no
228 $ hg serve -R ../mirror -d -p $HGPORT2 --pid-file hg.pid --config largefiles.usercache=$TESTTMP/mirrorcache --config "web.allow_push=*" --config web.push_ssl=no
223 $ cat hg.pid >> $DAEMON_PIDS
229 $ cat hg.pid >> $DAEMON_PIDS
224
230
225 $ hg push http://localhost:$HGPORT2 -f --config largefiles.usercache=nocache
231 $ hg push http://localhost:$HGPORT2 -f --config largefiles.usercache=nocache
226 pushing to http://localhost:$HGPORT2/
232 pushing to http://localhost:$HGPORT2/
227 searching for changes
233 searching for changes
228 abort: largefile e2fb5f2139d086ded2cb600d5a91a196e76bf020 missing from store (needs to be uploaded)
234 abort: largefile e2fb5f2139d086ded2cb600d5a91a196e76bf020 missing from store (needs to be uploaded)
229 [255]
235 [255]
230
236
231 #endif
237 #endif
General Comments 0
You need to be logged in to leave comments. Login now