##// END OF EJS Templates
largefiles: use the share source as the primary local store (issue4471)...
Matt Harbison -
r24631:2a3f2478 default
parent child Browse files
Show More
@@ -1,593 +1,606 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import os
11 import os
12 import platform
12 import platform
13 import shutil
13 import shutil
14 import stat
14 import stat
15 import copy
15 import copy
16
16
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 from mercurial.i18n import _
18 from mercurial.i18n import _
19 from mercurial import node
19 from mercurial import node
20
20
# Directory (relative to the repo root) holding the standin files that
# stand in for largefiles in the normal history.
shortname = '.hglf'
shortnameslash = shortname + '/'
# Name used for the config section, the store directory and the cache
# directory of this extension.
longname = 'largefiles'
24
24
25
25
26 # -- Private worker functions ------------------------------------------
26 # -- Private worker functions ------------------------------------------
27
27
def getminsize(ui, assumelfiles, opt, default=10):
    """Resolve the minimum size (in MB) for tracking a file as a largefile.

    ``opt`` (a command line value) wins when set; otherwise, when
    ``assumelfiles`` is true, the ``largefiles.minsize`` config value is
    consulted with ``default`` as a fallback.  Returns a float; aborts
    when no size was specified or the value is not numeric.
    """
    size = opt
    if assumelfiles and not size:
        size = ui.config(longname, 'minsize', default=default)
    if size is None:
        raise util.Abort(_('minimum size for largefiles must be specified'))
    if size:
        try:
            return float(size)
        except ValueError:
            raise util.Abort(_('largefiles: size must be number (not %s)\n')
                             % size)
    return size
41
41
def link(src, dest):
    '''Hardlink src to dest, falling back to an atomic copy.

    The destination directory is created if necessary.  When hardlinking
    fails (e.g. a cross-device link or a filesystem without hardlink
    support), the content is copied through an atomic temp file and the
    source's permission bits are replicated onto the copy.
    '''
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        dst = util.atomictempfile(dest)
        fp = open(src, 'rb')
        try:
            for chunk in util.filechunkiter(fp):
                dst.write(chunk)
        finally:
            # bug fix: the original never closed the source file object,
            # leaking the descriptor (relying on GC is unsafe on e.g. PyPy)
            fp.close()
        dst.close()
        os.chmod(dest, os.stat(src).st_mode)
53
53
54 def usercachepath(ui, hash):
54 def usercachepath(ui, hash):
55 path = ui.configpath(longname, 'usercache', None)
55 path = ui.configpath(longname, 'usercache', None)
56 if path:
56 if path:
57 path = os.path.join(path, hash)
57 path = os.path.join(path, hash)
58 else:
58 else:
59 if os.name == 'nt':
59 if os.name == 'nt':
60 appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
60 appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
61 if appdata:
61 if appdata:
62 path = os.path.join(appdata, longname, hash)
62 path = os.path.join(appdata, longname, hash)
63 elif platform.system() == 'Darwin':
63 elif platform.system() == 'Darwin':
64 home = os.getenv('HOME')
64 home = os.getenv('HOME')
65 if home:
65 if home:
66 path = os.path.join(home, 'Library', 'Caches',
66 path = os.path.join(home, 'Library', 'Caches',
67 longname, hash)
67 longname, hash)
68 elif os.name == 'posix':
68 elif os.name == 'posix':
69 path = os.getenv('XDG_CACHE_HOME')
69 path = os.getenv('XDG_CACHE_HOME')
70 if path:
70 if path:
71 path = os.path.join(path, longname, hash)
71 path = os.path.join(path, longname, hash)
72 else:
72 else:
73 home = os.getenv('HOME')
73 home = os.getenv('HOME')
74 if home:
74 if home:
75 path = os.path.join(home, '.cache', longname, hash)
75 path = os.path.join(home, '.cache', longname, hash)
76 else:
76 else:
77 raise util.Abort(_('unknown operating system: %s\n') % os.name)
77 raise util.Abort(_('unknown operating system: %s\n') % os.name)
78 return path
78 return path
79
79
def inusercache(ui, hash):
    '''Report whether the largefile with the given hash is present in the
    user cache (falsy when no user cache location exists).'''
    cached = usercachepath(ui, hash)
    return cached and os.path.exists(cached)
83
83
def findfile(repo, hash):
    '''Return a local filesystem path holding the largefile with the given
    hash, linking it into the store from the user cache if needed, or
    None when it is available in neither place.'''
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_('found %s in store\n') % hash)
        return path
    if inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        # materialize the file in the store so later lookups hit directly
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
94
95
class largefilesdirstate(dirstate.dirstate):
    '''dirstate subclass used for the largefiles dirstate.

    Every path is normalized to the slash-separated form used internally
    (via unixpath) before delegating to the base class, so callers may
    pass OS-native paths.'''
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def _ignore(self, f):
        # largefiles are never ignored
        return False
112
113
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    lfstoredir = repo.join(longname)
    opener = scmutil.opener(lfstoredir)
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not os.path.exists(os.path.join(lfstoredir, 'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(matcher, [], False, False)

        # idiom fix: truthiness instead of len(...) > 0
        if standins:
            util.makedirs(lfstoredir)

        for standin in standins:
            lfile = splitstandin(standin)
            lfdirstate.normallookup(lfile)
    return lfdirstate
137
138
def lfdirstatestatus(lfdirstate, repo):
    '''Return the status of largefiles against the '.' revision,
    resolving entries the lfdirstate reports as "unsure" by comparing
    the standin's recorded hash with the file's actual content hash.'''
    wctx = repo['.']
    match = match_.always(repo.root, repo.getcwd())
    unsure, s = lfdirstate.status(match, [], False, False, False)
    # NOTE: modified/clean alias the lists inside s, so the appends
    # below update the returned status object in place
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = wctx[standin(lfile)]
        except LookupError:
            fctx = None
        # a missing standin or a hash mismatch means the largefile changed
        if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return s
154
155
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''
    if matcher is None:
        matcher = getstandinmatcher(repo)

    # for the working directory (rev is None), skip standins the
    # dirstate considers unknown
    return [splitstandin(standin)
            for standin in repo[rev].walk(matcher)
            if rev is not None or repo.dirstate[standin] != '?']
166
167
def instore(repo, hash, forcelocal=False):
    '''Report whether the largefile with the given hash exists in the
    store (the share source's store by default for shared repos; the
    local store when ``forcelocal`` is set).'''
    return os.path.exists(storepath(repo, hash, forcelocal))
169
170
def storepath(repo, hash, forcelocal=False):
    '''Return the path of the largefile with the given hash inside the
    repository's store.  For shared repositories the share source's
    store is the primary store, unless ``forcelocal`` is set.'''
    if repo.shared() and not forcelocal:
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.join(longname, hash)
172
175
def findstorepath(repo, hash):
    '''Search through the local store path(s) to find the file for the given
    hash.  If the file is not found, its path in the primary store is
    returned.  The return value is a tuple of (path, exists(path)).
    '''
    # For shared repos, the primary store is in the share source.  But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
        # bug fix: the original returned a bare path here, violating the
        # documented (path, exists) contract and breaking callers that
        # unpack the result (e.g. findfile)
        return (storepath(repo, hash, True), True)

    return (path, False)
179
192
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happened:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    cached = findfile(repo, hash)
    if cached is None:
        return False
    wfile = repo.wjoin(filename)
    util.makedirs(os.path.dirname(wfile))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    shutil.copy(cached, wfile)
    return True
194
207
def copytostore(repo, rev, file, uploaded=False):
    '''Copy the largefile behind the standin for ``file`` at ``rev``
    into the local store, unless it is already there.  (``uploaded``
    is accepted for interface compatibility and unused here.)'''
    hash = readstandin(repo, file, rev)
    if not instore(repo, hash):
        copytostoreabsolute(repo, repo.wjoin(file), hash)
200
213
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    manifest = ctx.manifest()
    for filename in ctx.files():
        # only standins that actually exist in this revision's manifest
        if isstandin(filename) and filename in manifest:
            copytostore(repo, ctx.node(), splitstandin(filename))
209
222
210
223
def copytostoreabsolute(repo, file, hash):
    '''Copy the largefile at the absolute path ``file`` into the store
    under ``hash``.

    When the user cache already holds the content, a hardlink from the
    cache is preferred; otherwise the content is copied atomically into
    the store and then linked back into the user cache.'''
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        dst = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        fp = open(file, 'rb')
        try:
            for chunk in util.filechunkiter(fp):
                dst.write(chunk)
        finally:
            # bug fix: the original leaked the source file object when
            # the copy raised; always close it
            fp.close()
        dst.close()
        linktousercache(repo, hash)
222
235
def linktousercache(repo, hash):
    '''Hardlink the stored largefile into the user cache, when a user
    cache location is available for this platform/config.'''
    target = usercachepath(repo.ui, hash)
    if target:
        link(storepath(repo, hash), target)
227
240
def getstandinmatcher(repo, pats=None, opts=None):
    '''Return a match object that applies pats to the standin directory'''
    # bug fix: the original used mutable default arguments ([], {}),
    # which are shared across calls; None sentinels are safe and
    # backward compatible (empty list/dict and None are equally falsy)
    if opts is None:
        opts = {}
    standindir = repo.wjoin(shortname)
    if pats:
        pats = [os.path.join(standindir, pat) for pat in pats]
    else:
        # no patterns: relative to repo root
        pats = [standindir]
    # no warnings about missing files or directories
    match = scmutil.match(repo[None], pats, opts)
    match.bad = lambda f, msg: None
    return match
240
253
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher.files())
    # keep a reference to the original matchfn; the name also avoids
    # shadowing the module-level isstandin()
    standinmatchfn = smatcher.matchfn
    def composedmatchfn(f):
        # accept only standins whose largefile name rmatcher accepts
        return standinmatchfn(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composedmatchfn

    return smatcher
252
265
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add().  So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)
264
277
def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    # the standin prefix includes the trailing slash, so '.hglf' itself
    # (or e.g. '.hglfoo') does not match
    return filename.startswith(shortnameslash)
269
282
def splitstandin(filename):
    '''Return the largefile name for the given standin path, or None when
    the path is not inside the standin directory.'''
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    parts = util.pconvert(filename).split('/', 1)
    if len(parts) == 2 and parts[0] == shortname:
        return parts[1]
    return None
279
292
def updatestandin(repo, standin):
    '''Re-hash the working copy largefile behind ``standin`` and rewrite
    the standin with the new hash and executable bit.  No-op when the
    largefile is absent from the working copy.'''
    lfile = repo.wjoin(splitstandin(standin))
    if os.path.exists(lfile):
        writestandin(repo, standin, hashfile(lfile), getexecutable(lfile))
286
299
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    # the standin file's entire content is the hash plus a newline
    return repo[node][standin(filename)].data().strip()
291
304
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    flags = 'x' if executable else ''
    repo.wwrite(standin, hash + '\n', flags)
295
308
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash.'''
    hasher = util.sha1('')
    for chunk in instream:
        hasher.update(chunk)
        outfile.write(chunk)
    return hasher.hexdigest()
304
317
def hashrepofile(repo, file):
    '''Return the content hash of the repo-relative ``file`` in the
    working copy ('' when it does not exist).'''
    return hashfile(repo.wjoin(file))
307
320
def hashfile(file):
    '''Return the hex SHA-1 digest of the file's content, or '' when the
    file does not exist.'''
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    try:
        for data in util.filechunkiter(fd, 128 * 1024):
            hasher.update(data)
    finally:
        # bug fix: close the file even when reading raises; the original
        # only closed on the success path
        fd.close()
    return hasher.hexdigest()
317
330
def getexecutable(filename):
    '''Truthy when the file carries the executable bit for owner, group
    AND others (the largefiles notion of "executable").'''
    st_mode = os.stat(filename).st_mode
    return ((st_mode & stat.S_IXUSR) and
            (st_mode & stat.S_IXGRP) and
            (st_mode & stat.S_IXOTH))
323
336
def urljoin(first, second, *arg):
    '''Join two or more URL fragments, ensuring exactly one slash at each
    boundary (slashes inside fragments are left untouched).'''
    def join(left, right):
        sep = '' if left.endswith('/') else '/'
        if right.startswith('/'):
            right = right[1:]
        return left + sep + right

    url = first
    for piece in (second,) + arg:
        url = join(url, piece)
    return url
336
349
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    hasher = util.sha1()
    for chunk in util.filechunkiter(data):
        hasher.update(chunk)
    return hasher.hexdigest()
344
357
def httpsendfile(ui, filename):
    '''Wrap ``filename`` (opened binary) in a file-like object suitable
    for use as an HTTP request body.'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
347
360
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    # normalize '.'/'..' segments first, then force forward slashes
    return util.pconvert(os.path.normpath(path))
351
364
def islfilesrepo(repo):
    '''Report whether the repository actually uses largefiles: either the
    requirement is set and standin revisions exist in the store, or the
    largefiles dirstate has entries.'''
    if ('largefiles' in repo.requirements and
        util.any(shortnameslash in f[0] for f in repo.store.datafiles())):
        return True

    # the requirement may be present without any standins in the store
    # (e.g. a fresh repo); fall back to the largefiles dirstate
    return util.any(openlfdirstate(repo.ui, repo, False))
358
371
class storeprotonotcapable(Exception):
    '''Raised when no remote store is capable of any of the requested
    store protocol types.'''
    def __init__(self, storetypes):
        # the store types that were requested but not supported
        self.storetypes = storetypes
362
375
def getstandinsstate(repo):
    '''Return a list of (lfile, hash) pairs for every standin tracked in
    the dirstate; hash is None when the standin cannot be read.'''
    matcher = getstandinmatcher(repo)
    result = []
    for standinfile in repo.dirstate.walk(matcher, [], False, False):
        lfile = splitstandin(standinfile)
        try:
            expectedhash = readstandin(repo, lfile)
        except IOError:
            expectedhash = None
        result.append((lfile, expectedhash))
    return result
374
387
def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    '''Bring the lfdirstate entry for ``lfile`` in sync with the state of
    its standin in the main dirstate.

    When ``normallookup`` is true, a clean ('n') standin is still marked
    for re-examination rather than clean.'''
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        # dirstate entry tuple layout: (state, mode, size, mtime)
        stat = repo.dirstate._map[lfstandin]
        state, mtime = stat[0], stat[3]
    else:
        state, mtime = '?', -1
    if state == 'n':
        if normallookup or mtime < 0:
            # state 'n' doesn't ensure 'clean' in this case
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'm':
        lfdirstate.normallookup(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)
396
409
def markcommitted(orig, ctx, node):
    '''Wrapper around context.markcommitted that additionally syncs the
    largefiles dirstate for the committed standins and copies the
    committed largefiles into the store.'''
    repo = ctx.repo()

    orig(node)

    # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
    # because files coming from the 2nd parent are omitted in the latter.
    #
    # The former should be used to get targets of "synclfdirstate",
    # because such files:
    # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
    # - have to be marked as "n" after commit, but
    # - aren't listed in "repo[node].files()"

    lfdirstate = openlfdirstate(repo.ui, repo)
    for f in ctx.files():
        if isstandin(f):
            lfile = splitstandin(f)
            synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write()

    # As part of committing, copy all of the largefiles into the cache.
    copyalltostore(repo, node)
420
433
def getlfilestoupdate(oldstandins, newstandins):
    '''Return the filenames whose (lfile, hash) standin entry differs
    between the two lists, without duplicates.'''
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    # perf fix: track seen names in a set instead of testing membership
    # in the result list (which was O(n) per entry, O(n^2) overall)
    seen = set()
    for f in changedstandins:
        if f[0] not in seen:
            seen.add(f[0])
            filelist.append(f[0])
    return filelist
428
441
def getlfilestoupload(repo, missing, addfunc):
    '''For every revision in ``missing``, find the standins it touches
    and feed (standin name, expected hash) pairs to ``addfunc``.

    A progress bar is shown while the revisions are scanned.'''
    for i, n in enumerate(missing):
        repo.ui.progress(_('finding outgoing largefiles'), i,
                         unit=_('revision'), total=len(missing))
        parents = [p for p in repo.changelog.parents(n) if p != node.nullid]

        # read the changectx with largefiles status processing disabled
        # so the raw standins are visible
        oldlfstatus = repo.lfstatus
        repo.lfstatus = False
        try:
            ctx = repo[n]
        finally:
            repo.lfstatus = oldlfstatus

        files = set(ctx.files())
        if len(parents) == 2:
            # for merges, ctx.files() can miss files; also include every
            # file that differs from either parent's manifest
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        for fn in files:
            if isstandin(fn) and fn in ctx:
                addfunc(fn, ctx[fn].data().strip())
    repo.ui.progress(_('finding outgoing largefiles'), None)
460
473
461 def updatestandinsbymatch(repo, match):
474 def updatestandinsbymatch(repo, match):
462 '''Update standins in the working directory according to specified match
475 '''Update standins in the working directory according to specified match
463
476
464 This returns (possibly modified) ``match`` object to be used for
477 This returns (possibly modified) ``match`` object to be used for
465 subsequent commit process.
478 subsequent commit process.
466 '''
479 '''
467
480
468 ui = repo.ui
481 ui = repo.ui
469
482
470 # Case 1: user calls commit with no specific files or
483 # Case 1: user calls commit with no specific files or
471 # include/exclude patterns: refresh and commit all files that
484 # include/exclude patterns: refresh and commit all files that
472 # are "dirty".
485 # are "dirty".
473 if match is None or match.always():
486 if match is None or match.always():
474 # Spend a bit of time here to get a list of files we know
487 # Spend a bit of time here to get a list of files we know
475 # are modified so we can compare only against those.
488 # are modified so we can compare only against those.
476 # It can cost a lot of time (several seconds)
489 # It can cost a lot of time (several seconds)
477 # otherwise to update all standins if the largefiles are
490 # otherwise to update all standins if the largefiles are
478 # large.
491 # large.
479 lfdirstate = openlfdirstate(ui, repo)
492 lfdirstate = openlfdirstate(ui, repo)
480 dirtymatch = match_.always(repo.root, repo.getcwd())
493 dirtymatch = match_.always(repo.root, repo.getcwd())
481 unsure, s = lfdirstate.status(dirtymatch, [], False, False,
494 unsure, s = lfdirstate.status(dirtymatch, [], False, False,
482 False)
495 False)
483 modifiedfiles = unsure + s.modified + s.added + s.removed
496 modifiedfiles = unsure + s.modified + s.added + s.removed
484 lfiles = listlfiles(repo)
497 lfiles = listlfiles(repo)
485 # this only loops through largefiles that exist (not
498 # this only loops through largefiles that exist (not
486 # removed/renamed)
499 # removed/renamed)
487 for lfile in lfiles:
500 for lfile in lfiles:
488 if lfile in modifiedfiles:
501 if lfile in modifiedfiles:
489 if os.path.exists(
502 if os.path.exists(
490 repo.wjoin(standin(lfile))):
503 repo.wjoin(standin(lfile))):
491 # this handles the case where a rebase is being
504 # this handles the case where a rebase is being
492 # performed and the working copy is not updated
505 # performed and the working copy is not updated
493 # yet.
506 # yet.
494 if os.path.exists(repo.wjoin(lfile)):
507 if os.path.exists(repo.wjoin(lfile)):
495 updatestandin(repo,
508 updatestandin(repo,
496 standin(lfile))
509 standin(lfile))
497
510
498 return match
511 return match
499
512
500 lfiles = listlfiles(repo)
513 lfiles = listlfiles(repo)
501 match._files = repo._subdirlfs(match.files(), lfiles)
514 match._files = repo._subdirlfs(match.files(), lfiles)
502
515
503 # Case 2: user calls commit with specified patterns: refresh
516 # Case 2: user calls commit with specified patterns: refresh
504 # any matching big files.
517 # any matching big files.
505 smatcher = composestandinmatcher(repo, match)
518 smatcher = composestandinmatcher(repo, match)
506 standins = repo.dirstate.walk(smatcher, [], False, False)
519 standins = repo.dirstate.walk(smatcher, [], False, False)
507
520
508 # No matching big files: get out of the way and pass control to
521 # No matching big files: get out of the way and pass control to
509 # the usual commit() method.
522 # the usual commit() method.
510 if not standins:
523 if not standins:
511 return match
524 return match
512
525
513 # Refresh all matching big files. It's possible that the
526 # Refresh all matching big files. It's possible that the
514 # commit will end up failing, in which case the big files will
527 # commit will end up failing, in which case the big files will
515 # stay refreshed. No harm done: the user modified them and
528 # stay refreshed. No harm done: the user modified them and
516 # asked to commit them, so sooner or later we're going to
529 # asked to commit them, so sooner or later we're going to
517 # refresh the standins. Might as well leave them refreshed.
530 # refresh the standins. Might as well leave them refreshed.
518 lfdirstate = openlfdirstate(ui, repo)
531 lfdirstate = openlfdirstate(ui, repo)
519 for fstandin in standins:
532 for fstandin in standins:
520 lfile = splitstandin(fstandin)
533 lfile = splitstandin(fstandin)
521 if lfdirstate[lfile] != 'r':
534 if lfdirstate[lfile] != 'r':
522 updatestandin(repo, fstandin)
535 updatestandin(repo, fstandin)
523
536
524 # Cook up a new matcher that only matches regular files or
537 # Cook up a new matcher that only matches regular files or
525 # standins corresponding to the big files requested by the
538 # standins corresponding to the big files requested by the
526 # user. Have to modify _files to prevent commit() from
539 # user. Have to modify _files to prevent commit() from
527 # complaining "not tracked" for big files.
540 # complaining "not tracked" for big files.
528 match = copy.copy(match)
541 match = copy.copy(match)
529 origmatchfn = match.matchfn
542 origmatchfn = match.matchfn
530
543
531 # Check both the list of largefiles and the list of
544 # Check both the list of largefiles and the list of
532 # standins because if a largefile was removed, it
545 # standins because if a largefile was removed, it
533 # won't be in the list of largefiles at this point
546 # won't be in the list of largefiles at this point
534 match._files += sorted(standins)
547 match._files += sorted(standins)
535
548
536 actualfiles = []
549 actualfiles = []
537 for f in match._files:
550 for f in match._files:
538 fstandin = standin(f)
551 fstandin = standin(f)
539
552
540 # ignore known largefiles and standins
553 # ignore known largefiles and standins
541 if f in lfiles or fstandin in standins:
554 if f in lfiles or fstandin in standins:
542 continue
555 continue
543
556
544 actualfiles.append(f)
557 actualfiles.append(f)
545 match._files = actualfiles
558 match._files = actualfiles
546
559
547 def matchfn(f):
560 def matchfn(f):
548 if origmatchfn(f):
561 if origmatchfn(f):
549 return f not in lfiles
562 return f not in lfiles
550 else:
563 else:
551 return f in standins
564 return f in standins
552
565
553 match.matchfn = matchfn
566 match.matchfn = matchfn
554
567
555 return match
568 return match
556
569
557 class automatedcommithook(object):
570 class automatedcommithook(object):
558 '''Stateful hook to update standins at the 1st commit of resuming
571 '''Stateful hook to update standins at the 1st commit of resuming
559
572
560 For efficiency, updating standins in the working directory should
573 For efficiency, updating standins in the working directory should
561 be avoided while automated committing (like rebase, transplant and
574 be avoided while automated committing (like rebase, transplant and
562 so on), because they should be updated before committing.
575 so on), because they should be updated before committing.
563
576
564 But the 1st commit of resuming automated committing (e.g. ``rebase
577 But the 1st commit of resuming automated committing (e.g. ``rebase
565 --continue``) should update them, because largefiles may be
578 --continue``) should update them, because largefiles may be
566 modified manually.
579 modified manually.
567 '''
580 '''
568 def __init__(self, resuming):
581 def __init__(self, resuming):
569 self.resuming = resuming
582 self.resuming = resuming
570
583
571 def __call__(self, repo, match):
584 def __call__(self, repo, match):
572 if self.resuming:
585 if self.resuming:
573 self.resuming = False # avoids updating at subsequent commits
586 self.resuming = False # avoids updating at subsequent commits
574 return updatestandinsbymatch(repo, match)
587 return updatestandinsbymatch(repo, match)
575 else:
588 else:
576 return match
589 return match
577
590
578 def getstatuswriter(ui, repo, forcibly=None):
591 def getstatuswriter(ui, repo, forcibly=None):
579 '''Return the function to write largefiles specific status out
592 '''Return the function to write largefiles specific status out
580
593
581 If ``forcibly`` is ``None``, this returns the last element of
594 If ``forcibly`` is ``None``, this returns the last element of
582 ``repo._lfstatuswriters`` as "default" writer function.
595 ``repo._lfstatuswriters`` as "default" writer function.
583
596
584 Otherwise, this returns the function to always write out (or
597 Otherwise, this returns the function to always write out (or
585 ignore if ``not forcibly``) status.
598 ignore if ``not forcibly``) status.
586 '''
599 '''
587 if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
600 if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
588 return repo._lfstatuswriters[-1]
601 return repo._lfstatuswriters[-1]
589 else:
602 else:
590 if forcibly:
603 if forcibly:
591 return ui.status # forcibly WRITE OUT
604 return ui.status # forcibly WRITE OUT
592 else:
605 else:
593 return lambda *msg, **opts: None # forcibly IGNORE
606 return lambda *msg, **opts: None # forcibly IGNORE
@@ -1,155 +1,181 b''
1 Create user cache directory
1 Create user cache directory
2
2
3 $ USERCACHE=`pwd`/cache; export USERCACHE
3 $ USERCACHE=`pwd`/cache; export USERCACHE
4 $ cat <<EOF >> ${HGRCPATH}
4 $ cat <<EOF >> ${HGRCPATH}
5 > [extensions]
5 > [extensions]
6 > hgext.largefiles=
6 > hgext.largefiles=
7 > [largefiles]
7 > [largefiles]
8 > usercache=${USERCACHE}
8 > usercache=${USERCACHE}
9 > EOF
9 > EOF
10 $ mkdir -p ${USERCACHE}
10 $ mkdir -p ${USERCACHE}
11
11
12 Create source repo, and commit adding largefile.
12 Create source repo, and commit adding largefile.
13
13
14 $ hg init src
14 $ hg init src
15 $ cd src
15 $ cd src
16 $ echo large > large
16 $ echo large > large
17 $ hg add --large large
17 $ hg add --large large
18 $ hg commit -m 'add largefile'
18 $ hg commit -m 'add largefile'
19 $ hg rm large
19 $ hg rm large
20 $ hg commit -m 'branchhead without largefile'
20 $ hg commit -m 'branchhead without largefile'
21 $ hg up -qr 0
21 $ hg up -qr 0
22 $ cd ..
22 $ cd ..
23
23
24 Discard all cached largefiles in USERCACHE
24 Discard all cached largefiles in USERCACHE
25
25
26 $ rm -rf ${USERCACHE}
26 $ rm -rf ${USERCACHE}
27
27
28 Create mirror repo, and pull from source without largefile:
28 Create mirror repo, and pull from source without largefile:
29 "pull" is used instead of "clone" for suppression of (1) updating to
29 "pull" is used instead of "clone" for suppression of (1) updating to
30 tip (= caching largefile from source repo), and (2) recording source
30 tip (= caching largefile from source repo), and (2) recording source
31 repo as "default" path in .hg/hgrc.
31 repo as "default" path in .hg/hgrc.
32
32
33 $ hg init mirror
33 $ hg init mirror
34 $ cd mirror
34 $ cd mirror
35 $ hg pull ../src
35 $ hg pull ../src
36 pulling from ../src
36 pulling from ../src
37 requesting all changes
37 requesting all changes
38 adding changesets
38 adding changesets
39 adding manifests
39 adding manifests
40 adding file changes
40 adding file changes
41 added 2 changesets with 1 changes to 1 files
41 added 2 changesets with 1 changes to 1 files
42 (run 'hg update' to get a working copy)
42 (run 'hg update' to get a working copy)
43
43
44 Update working directory to "tip", which requires largefile("large"),
44 Update working directory to "tip", which requires largefile("large"),
45 but there is no cache file for it. So, hg must treat it as
45 but there is no cache file for it. So, hg must treat it as
46 "missing"(!) file.
46 "missing"(!) file.
47
47
48 $ hg update -r0
48 $ hg update -r0
49 getting changed largefiles
49 getting changed largefiles
50 large: largefile 7f7097b041ccf68cc5561e9600da4655d21c6d18 not available from file:/*/$TESTTMP/mirror (glob)
50 large: largefile 7f7097b041ccf68cc5561e9600da4655d21c6d18 not available from file:/*/$TESTTMP/mirror (glob)
51 0 largefiles updated, 0 removed
51 0 largefiles updated, 0 removed
52 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
52 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
53 $ hg status
53 $ hg status
54 ! large
54 ! large
55
55
56 Update working directory to null: this cleanup .hg/largefiles/dirstate
56 Update working directory to null: this cleanup .hg/largefiles/dirstate
57
57
58 $ hg update null
58 $ hg update null
59 getting changed largefiles
59 getting changed largefiles
60 0 largefiles updated, 0 removed
60 0 largefiles updated, 0 removed
61 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
61 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
62
62
63 Update working directory to tip, again.
63 Update working directory to tip, again.
64
64
65 $ hg update -r0
65 $ hg update -r0
66 getting changed largefiles
66 getting changed largefiles
67 large: largefile 7f7097b041ccf68cc5561e9600da4655d21c6d18 not available from file:/*/$TESTTMP/mirror (glob)
67 large: largefile 7f7097b041ccf68cc5561e9600da4655d21c6d18 not available from file:/*/$TESTTMP/mirror (glob)
68 0 largefiles updated, 0 removed
68 0 largefiles updated, 0 removed
69 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
69 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
70 $ hg status
70 $ hg status
71 ! large
71 ! large
72 $ cd ..
72 $ cd ..
73
73
74 Verify that largefiles from pulled branchheads are fetched, also to an empty repo
74 Verify that largefiles from pulled branchheads are fetched, also to an empty repo
75
75
76 $ hg init mirror2
76 $ hg init mirror2
77 $ hg -R mirror2 pull src -r0
77 $ hg -R mirror2 pull src -r0
78 pulling from src
78 pulling from src
79 adding changesets
79 adding changesets
80 adding manifests
80 adding manifests
81 adding file changes
81 adding file changes
82 added 1 changesets with 1 changes to 1 files
82 added 1 changesets with 1 changes to 1 files
83 (run 'hg update' to get a working copy)
83 (run 'hg update' to get a working copy)
84
84
85 #if unix-permissions
85 #if unix-permissions
86
86
87 Portable way to print file permissions:
87 Portable way to print file permissions:
88
88
89 $ cat > ls-l.py <<EOF
89 $ cat > ls-l.py <<EOF
90 > #!/usr/bin/env python
90 > #!/usr/bin/env python
91 > import sys, os
91 > import sys, os
92 > path = sys.argv[1]
92 > path = sys.argv[1]
93 > print '%03o' % (os.lstat(path).st_mode & 0777)
93 > print '%03o' % (os.lstat(path).st_mode & 0777)
94 > EOF
94 > EOF
95 $ chmod +x ls-l.py
95 $ chmod +x ls-l.py
96
96
97 Test that files in .hg/largefiles inherit mode from .hg/store, not
97 Test that files in .hg/largefiles inherit mode from .hg/store, not
98 from file in working copy:
98 from file in working copy:
99
99
100 $ cd src
100 $ cd src
101 $ chmod 750 .hg/store
101 $ chmod 750 .hg/store
102 $ chmod 660 large
102 $ chmod 660 large
103 $ echo change >> large
103 $ echo change >> large
104 $ hg commit -m change
104 $ hg commit -m change
105 created new head
105 created new head
106 $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
106 $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
107 640
107 640
108
108
109 Test permission of with files in .hg/largefiles created by update:
109 Test permission of with files in .hg/largefiles created by update:
110
110
111 $ cd ../mirror
111 $ cd ../mirror
112 $ rm -r "$USERCACHE" .hg/largefiles # avoid links
112 $ rm -r "$USERCACHE" .hg/largefiles # avoid links
113 $ chmod 750 .hg/store
113 $ chmod 750 .hg/store
114 $ hg pull ../src --update -q
114 $ hg pull ../src --update -q
115 $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
115 $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
116 640
116 640
117
117
118 Test permission of files created by push:
118 Test permission of files created by push:
119
119
120 $ hg serve -R ../src -d -p $HGPORT --pid-file hg.pid \
120 $ hg serve -R ../src -d -p $HGPORT --pid-file hg.pid \
121 > --config "web.allow_push=*" --config web.push_ssl=no
121 > --config "web.allow_push=*" --config web.push_ssl=no
122 $ cat hg.pid >> $DAEMON_PIDS
122 $ cat hg.pid >> $DAEMON_PIDS
123
123
124 $ echo change >> large
124 $ echo change >> large
125 $ hg commit -m change
125 $ hg commit -m change
126
126
127 $ rm -r "$USERCACHE"
127 $ rm -r "$USERCACHE"
128
128
129 $ hg push -q http://localhost:$HGPORT/
129 $ hg push -q http://localhost:$HGPORT/
130
130
131 $ ../ls-l.py ../src/.hg/largefiles/b734e14a0971e370408ab9bce8d56d8485e368a9
131 $ ../ls-l.py ../src/.hg/largefiles/b734e14a0971e370408ab9bce8d56d8485e368a9
132 640
132 640
133
133
134 $ cd ..
134 $ cd ..
135
135
136 #endif
136 #endif
137
137
138 Test issue 4053 (remove --after on a deleted, uncommitted file shouldn't say
138 Test issue 4053 (remove --after on a deleted, uncommitted file shouldn't say
139 it is missing, but a remove on a nonexistent unknown file still should. Same
139 it is missing, but a remove on a nonexistent unknown file still should. Same
140 for a forget.)
140 for a forget.)
141
141
142 $ cd src
142 $ cd src
143 $ touch x
143 $ touch x
144 $ hg add x
144 $ hg add x
145 $ mv x y
145 $ mv x y
146 $ hg remove -A x y ENOENT
146 $ hg remove -A x y ENOENT
147 ENOENT: * (glob)
147 ENOENT: * (glob)
148 not removing y: file is untracked
148 not removing y: file is untracked
149 [1]
149 [1]
150 $ hg add y
150 $ hg add y
151 $ mv y z
151 $ mv y z
152 $ hg forget y z ENOENT
152 $ hg forget y z ENOENT
153 ENOENT: * (glob)
153 ENOENT: * (glob)
154 not removing z: file is already untracked
154 not removing z: file is already untracked
155 [1]
155 [1]
156
157 Largefiles are accessible from the share's store
158 $ cd ..
159 $ hg share -q src share_dst --config extensions.share=
160 $ hg -R share_dst update -r0
161 getting changed largefiles
162 1 largefiles updated, 0 removed
163 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
164
165 $ echo modified > share_dst/large
166 $ hg -R share_dst ci -m modified
167 created new head
168
169 Only dirstate is in the local store for the share, and the largefile is in the
170 share source's local store. Avoid the extra largefiles added in the unix
171 conditional above.
172 $ hash=`hg -R share_dst cat share_dst/.hglf/large`
173 $ echo $hash
174 e2fb5f2139d086ded2cb600d5a91a196e76bf020
175
176 $ find share_dst/.hg/largefiles/* | sort
177 share_dst/.hg/largefiles/dirstate
178
179 $ find src/.hg/largefiles/* | egrep "(dirstate|$hash)" | sort
180 src/.hg/largefiles/dirstate
181 src/.hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
General Comments 0
You need to be logged in to leave comments. Login now