largefiles: getstandinmatcher should not depend on existence of directories...
Mads Kiilerich
r18724:894a5897 stable
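The change below removes getstandinmatcher's dependence on the standin directory existing in the working copy: with no patterns given, the matcher is now always built relative to the standin directory, instead of falling back to a matcher that matches nothing when .hglf/ is missing on disk. A minimal standalone sketch of the old versus new pattern selection, using only plain Python and os (choose_pats_old and choose_pats_new are hypothetical names, not part of the Mercurial API):

import os

def choose_pats_old(repo_root, pats):
    # pre-change behaviour, as in the removed lines of the diff
    standindir = os.path.join(repo_root, '.hglf')
    if pats:
        return [os.path.join(standindir, pat) for pat in pats]
    elif os.path.isdir(standindir):
        return [standindir]   # only when the directory exists on disk
    else:
        return None           # old code returned a matcher matching nothing

def choose_pats_new(repo_root, pats):
    # post-change behaviour: always relative to the standin directory,
    # whether or not it currently exists in the working copy
    standindir = os.path.join(repo_root, '.hglf')
    if pats:
        return [os.path.join(standindir, pat) for pat in pats]
    return [standindir]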
@@ -1,431 +1,427 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import os
11 import os
12 import errno
12 import errno
13 import platform
13 import platform
14 import shutil
14 import shutil
15 import stat
15 import stat
16
16
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 from mercurial.i18n import _
18 from mercurial.i18n import _
19
19
20 shortname = '.hglf'
20 shortname = '.hglf'
21 shortnameslash = shortname + '/'
21 shortnameslash = shortname + '/'
22 longname = 'largefiles'
22 longname = 'largefiles'
23
23
24
24
25 # -- Private worker functions ------------------------------------------
25 # -- Private worker functions ------------------------------------------
26
26
27 def getminsize(ui, assumelfiles, opt, default=10):
27 def getminsize(ui, assumelfiles, opt, default=10):
28 lfsize = opt
28 lfsize = opt
29 if not lfsize and assumelfiles:
29 if not lfsize and assumelfiles:
30 lfsize = ui.config(longname, 'minsize', default=default)
30 lfsize = ui.config(longname, 'minsize', default=default)
31 if lfsize:
31 if lfsize:
32 try:
32 try:
33 lfsize = float(lfsize)
33 lfsize = float(lfsize)
34 except ValueError:
34 except ValueError:
35 raise util.Abort(_('largefiles: size must be number (not %s)\n')
35 raise util.Abort(_('largefiles: size must be number (not %s)\n')
36 % lfsize)
36 % lfsize)
37 if lfsize is None:
37 if lfsize is None:
38 raise util.Abort(_('minimum size for largefiles must be specified'))
38 raise util.Abort(_('minimum size for largefiles must be specified'))
39 return lfsize
39 return lfsize
40
40
41 def link(src, dest):
41 def link(src, dest):
42 try:
42 try:
43 util.oslink(src, dest)
43 util.oslink(src, dest)
44 except OSError:
44 except OSError:
45 # if hardlinks fail, fallback on atomic copy
45 # if hardlinks fail, fallback on atomic copy
46 dst = util.atomictempfile(dest)
46 dst = util.atomictempfile(dest)
47 for chunk in util.filechunkiter(open(src, 'rb')):
47 for chunk in util.filechunkiter(open(src, 'rb')):
48 dst.write(chunk)
48 dst.write(chunk)
49 dst.close()
49 dst.close()
50 os.chmod(dest, os.stat(src).st_mode)
50 os.chmod(dest, os.stat(src).st_mode)
51
51
52 def usercachepath(ui, hash):
52 def usercachepath(ui, hash):
53 path = ui.configpath(longname, 'usercache', None)
53 path = ui.configpath(longname, 'usercache', None)
54 if path:
54 if path:
55 path = os.path.join(path, hash)
55 path = os.path.join(path, hash)
56 else:
56 else:
57 if os.name == 'nt':
57 if os.name == 'nt':
58 appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
58 appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
59 if appdata:
59 if appdata:
60 path = os.path.join(appdata, longname, hash)
60 path = os.path.join(appdata, longname, hash)
61 elif platform.system() == 'Darwin':
61 elif platform.system() == 'Darwin':
62 home = os.getenv('HOME')
62 home = os.getenv('HOME')
63 if home:
63 if home:
64 path = os.path.join(home, 'Library', 'Caches',
64 path = os.path.join(home, 'Library', 'Caches',
65 longname, hash)
65 longname, hash)
66 elif os.name == 'posix':
66 elif os.name == 'posix':
67 path = os.getenv('XDG_CACHE_HOME')
67 path = os.getenv('XDG_CACHE_HOME')
68 if path:
68 if path:
69 path = os.path.join(path, longname, hash)
69 path = os.path.join(path, longname, hash)
70 else:
70 else:
71 home = os.getenv('HOME')
71 home = os.getenv('HOME')
72 if home:
72 if home:
73 path = os.path.join(home, '.cache', longname, hash)
73 path = os.path.join(home, '.cache', longname, hash)
74 else:
74 else:
75 raise util.Abort(_('unknown operating system: %s\n') % os.name)
75 raise util.Abort(_('unknown operating system: %s\n') % os.name)
76 return path
76 return path
77
77
78 def inusercache(ui, hash):
78 def inusercache(ui, hash):
79 path = usercachepath(ui, hash)
79 path = usercachepath(ui, hash)
80 return path and os.path.exists(path)
80 return path and os.path.exists(path)
81
81
82 def findfile(repo, hash):
82 def findfile(repo, hash):
83 if instore(repo, hash):
83 if instore(repo, hash):
84 repo.ui.note(_('found %s in store\n') % hash)
84 repo.ui.note(_('found %s in store\n') % hash)
85 return storepath(repo, hash)
85 return storepath(repo, hash)
86 elif inusercache(repo.ui, hash):
86 elif inusercache(repo.ui, hash):
87 repo.ui.note(_('found %s in system cache\n') % hash)
87 repo.ui.note(_('found %s in system cache\n') % hash)
88 path = storepath(repo, hash)
88 path = storepath(repo, hash)
89 util.makedirs(os.path.dirname(path))
89 util.makedirs(os.path.dirname(path))
90 link(usercachepath(repo.ui, hash), path)
90 link(usercachepath(repo.ui, hash), path)
91 return path
91 return path
92 return None
92 return None
93
93
94 class largefilesdirstate(dirstate.dirstate):
94 class largefilesdirstate(dirstate.dirstate):
95 def __getitem__(self, key):
95 def __getitem__(self, key):
96 return super(largefilesdirstate, self).__getitem__(unixpath(key))
96 return super(largefilesdirstate, self).__getitem__(unixpath(key))
97 def normal(self, f):
97 def normal(self, f):
98 return super(largefilesdirstate, self).normal(unixpath(f))
98 return super(largefilesdirstate, self).normal(unixpath(f))
99 def remove(self, f):
99 def remove(self, f):
100 return super(largefilesdirstate, self).remove(unixpath(f))
100 return super(largefilesdirstate, self).remove(unixpath(f))
101 def add(self, f):
101 def add(self, f):
102 return super(largefilesdirstate, self).add(unixpath(f))
102 return super(largefilesdirstate, self).add(unixpath(f))
103 def drop(self, f):
103 def drop(self, f):
104 return super(largefilesdirstate, self).drop(unixpath(f))
104 return super(largefilesdirstate, self).drop(unixpath(f))
105 def forget(self, f):
105 def forget(self, f):
106 return super(largefilesdirstate, self).forget(unixpath(f))
106 return super(largefilesdirstate, self).forget(unixpath(f))
107 def normallookup(self, f):
107 def normallookup(self, f):
108 return super(largefilesdirstate, self).normallookup(unixpath(f))
108 return super(largefilesdirstate, self).normallookup(unixpath(f))
109 def _ignore(self):
109 def _ignore(self):
110 return False
110 return False
111
111
112 def openlfdirstate(ui, repo, create=True):
112 def openlfdirstate(ui, repo, create=True):
113 '''
113 '''
114 Return a dirstate object that tracks largefiles: i.e. its root is
114 Return a dirstate object that tracks largefiles: i.e. its root is
115 the repo root, but it is saved in .hg/largefiles/dirstate.
115 the repo root, but it is saved in .hg/largefiles/dirstate.
116 '''
116 '''
117 lfstoredir = repo.join(longname)
117 lfstoredir = repo.join(longname)
118 opener = scmutil.opener(lfstoredir)
118 opener = scmutil.opener(lfstoredir)
119 lfdirstate = largefilesdirstate(opener, ui, repo.root,
119 lfdirstate = largefilesdirstate(opener, ui, repo.root,
120 repo.dirstate._validate)
120 repo.dirstate._validate)
121
121
122 # If the largefiles dirstate does not exist, populate and create
122 # If the largefiles dirstate does not exist, populate and create
123 # it. This ensures that we create it on the first meaningful
123 # it. This ensures that we create it on the first meaningful
124 # largefiles operation in a new clone.
124 # largefiles operation in a new clone.
125 if create and not os.path.exists(os.path.join(lfstoredir, 'dirstate')):
125 if create and not os.path.exists(os.path.join(lfstoredir, 'dirstate')):
126 util.makedirs(lfstoredir)
126 util.makedirs(lfstoredir)
127 matcher = getstandinmatcher(repo)
127 matcher = getstandinmatcher(repo)
128 for standin in repo.dirstate.walk(matcher, [], False, False):
128 for standin in repo.dirstate.walk(matcher, [], False, False):
129 lfile = splitstandin(standin)
129 lfile = splitstandin(standin)
130 hash = readstandin(repo, lfile)
130 hash = readstandin(repo, lfile)
131 lfdirstate.normallookup(lfile)
131 lfdirstate.normallookup(lfile)
132 try:
132 try:
133 if hash == hashfile(repo.wjoin(lfile)):
133 if hash == hashfile(repo.wjoin(lfile)):
134 lfdirstate.normal(lfile)
134 lfdirstate.normal(lfile)
135 except OSError, err:
135 except OSError, err:
136 if err.errno != errno.ENOENT:
136 if err.errno != errno.ENOENT:
137 raise
137 raise
138 return lfdirstate
138 return lfdirstate
139
139
140 def lfdirstatestatus(lfdirstate, repo, rev):
140 def lfdirstatestatus(lfdirstate, repo, rev):
141 match = match_.always(repo.root, repo.getcwd())
141 match = match_.always(repo.root, repo.getcwd())
142 s = lfdirstate.status(match, [], False, False, False)
142 s = lfdirstate.status(match, [], False, False, False)
143 unsure, modified, added, removed, missing, unknown, ignored, clean = s
143 unsure, modified, added, removed, missing, unknown, ignored, clean = s
144 for lfile in unsure:
144 for lfile in unsure:
145 try:
145 try:
146 fctx = repo[rev][standin(lfile)]
146 fctx = repo[rev][standin(lfile)]
147 except LookupError:
147 except LookupError:
148 fctx = None
148 fctx = None
149 if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
149 if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
150 modified.append(lfile)
150 modified.append(lfile)
151 else:
151 else:
152 clean.append(lfile)
152 clean.append(lfile)
153 lfdirstate.normal(lfile)
153 lfdirstate.normal(lfile)
154 return (modified, added, removed, missing, unknown, ignored, clean)
154 return (modified, added, removed, missing, unknown, ignored, clean)
155
155
156 def listlfiles(repo, rev=None, matcher=None):
156 def listlfiles(repo, rev=None, matcher=None):
157 '''return a list of largefiles in the working copy or the
157 '''return a list of largefiles in the working copy or the
158 specified changeset'''
158 specified changeset'''
159
159
160 if matcher is None:
160 if matcher is None:
161 matcher = getstandinmatcher(repo)
161 matcher = getstandinmatcher(repo)
162
162
163 # ignore unknown files in working directory
163 # ignore unknown files in working directory
164 return [splitstandin(f)
164 return [splitstandin(f)
165 for f in repo[rev].walk(matcher)
165 for f in repo[rev].walk(matcher)
166 if rev is not None or repo.dirstate[f] != '?']
166 if rev is not None or repo.dirstate[f] != '?']
167
167
168 def instore(repo, hash):
168 def instore(repo, hash):
169 return os.path.exists(storepath(repo, hash))
169 return os.path.exists(storepath(repo, hash))
170
170
171 def storepath(repo, hash):
171 def storepath(repo, hash):
172 return repo.join(os.path.join(longname, hash))
172 return repo.join(os.path.join(longname, hash))
173
173
174 def copyfromcache(repo, hash, filename):
174 def copyfromcache(repo, hash, filename):
175 '''Copy the specified largefile from the repo or system cache to
175 '''Copy the specified largefile from the repo or system cache to
176 filename in the repository. Return true on success or false if the
176 filename in the repository. Return true on success or false if the
177 file was not found in either cache (which should not happen:
177 file was not found in either cache (which should not happen:
178 this is meant to be called only after ensuring that the needed
178 this is meant to be called only after ensuring that the needed
179 largefile exists in the cache).'''
179 largefile exists in the cache).'''
180 path = findfile(repo, hash)
180 path = findfile(repo, hash)
181 if path is None:
181 if path is None:
182 return False
182 return False
183 util.makedirs(os.path.dirname(repo.wjoin(filename)))
183 util.makedirs(os.path.dirname(repo.wjoin(filename)))
184 # The write may fail before the file is fully written, but we
184 # The write may fail before the file is fully written, but we
185 # don't use atomic writes in the working copy.
185 # don't use atomic writes in the working copy.
186 shutil.copy(path, repo.wjoin(filename))
186 shutil.copy(path, repo.wjoin(filename))
187 return True
187 return True
188
188
189 def copytostore(repo, rev, file, uploaded=False):
189 def copytostore(repo, rev, file, uploaded=False):
190 hash = readstandin(repo, file, rev)
190 hash = readstandin(repo, file, rev)
191 if instore(repo, hash):
191 if instore(repo, hash):
192 return
192 return
193 copytostoreabsolute(repo, repo.wjoin(file), hash)
193 copytostoreabsolute(repo, repo.wjoin(file), hash)
194
194
195 def copyalltostore(repo, node):
195 def copyalltostore(repo, node):
196 '''Copy all largefiles in a given revision to the store'''
196 '''Copy all largefiles in a given revision to the store'''
197
197
198 ctx = repo[node]
198 ctx = repo[node]
199 for filename in ctx.files():
199 for filename in ctx.files():
200 if isstandin(filename) and filename in ctx.manifest():
200 if isstandin(filename) and filename in ctx.manifest():
201 realfile = splitstandin(filename)
201 realfile = splitstandin(filename)
202 copytostore(repo, ctx.node(), realfile)
202 copytostore(repo, ctx.node(), realfile)
203
203
204
204
205 def copytostoreabsolute(repo, file, hash):
205 def copytostoreabsolute(repo, file, hash):
206 util.makedirs(os.path.dirname(storepath(repo, hash)))
206 util.makedirs(os.path.dirname(storepath(repo, hash)))
207 if inusercache(repo.ui, hash):
207 if inusercache(repo.ui, hash):
208 link(usercachepath(repo.ui, hash), storepath(repo, hash))
208 link(usercachepath(repo.ui, hash), storepath(repo, hash))
209 elif not getattr(repo, "_isconverting", False):
209 elif not getattr(repo, "_isconverting", False):
210 dst = util.atomictempfile(storepath(repo, hash),
210 dst = util.atomictempfile(storepath(repo, hash),
211 createmode=repo.store.createmode)
211 createmode=repo.store.createmode)
212 for chunk in util.filechunkiter(open(file, 'rb')):
212 for chunk in util.filechunkiter(open(file, 'rb')):
213 dst.write(chunk)
213 dst.write(chunk)
214 dst.close()
214 dst.close()
215 linktousercache(repo, hash)
215 linktousercache(repo, hash)
216
216
217 def linktousercache(repo, hash):
217 def linktousercache(repo, hash):
218 path = usercachepath(repo.ui, hash)
218 path = usercachepath(repo.ui, hash)
219 if path:
219 if path:
220 util.makedirs(os.path.dirname(path))
220 util.makedirs(os.path.dirname(path))
221 link(storepath(repo, hash), path)
221 link(storepath(repo, hash), path)
222
222
223 def getstandinmatcher(repo, pats=[], opts={}):
223 def getstandinmatcher(repo, pats=[], opts={}):
224 '''Return a match object that applies pats to the standin directory'''
224 '''Return a match object that applies pats to the standin directory'''
225 standindir = repo.wjoin(shortname)
225 standindir = repo.wjoin(shortname)
226 if pats:
226 if pats:
227 pats = [os.path.join(standindir, pat) for pat in pats]
227 pats = [os.path.join(standindir, pat) for pat in pats]
228 elif os.path.isdir(standindir):
228 else:
229 # no patterns: relative to repo root
229 # no patterns: relative to repo root
230 pats = [standindir]
230 pats = [standindir]
231 else:
232 # no patterns and no standin dir: return matcher that matches nothing
233 return match_.match(repo.root, None, [], exact=True)
234
235 # no warnings about missing files or directories
231 # no warnings about missing files or directories
236 match = scmutil.match(repo[None], pats, opts)
232 match = scmutil.match(repo[None], pats, opts)
237 match.bad = lambda f, msg: None
233 match.bad = lambda f, msg: None
238 return match
234 return match
239
235
240 def composestandinmatcher(repo, rmatcher):
236 def composestandinmatcher(repo, rmatcher):
241 '''Return a matcher that accepts standins corresponding to the
237 '''Return a matcher that accepts standins corresponding to the
242 files accepted by rmatcher. Pass the list of files in the matcher
238 files accepted by rmatcher. Pass the list of files in the matcher
243 as the paths specified by the user.'''
239 as the paths specified by the user.'''
244 smatcher = getstandinmatcher(repo, rmatcher.files())
240 smatcher = getstandinmatcher(repo, rmatcher.files())
245 isstandin = smatcher.matchfn
241 isstandin = smatcher.matchfn
246 def composedmatchfn(f):
242 def composedmatchfn(f):
247 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
243 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
248 smatcher.matchfn = composedmatchfn
244 smatcher.matchfn = composedmatchfn
249
245
250 return smatcher
246 return smatcher
251
247
252 def standin(filename):
248 def standin(filename):
253 '''Return the repo-relative path to the standin for the specified big
249 '''Return the repo-relative path to the standin for the specified big
254 file.'''
250 file.'''
255 # Notes:
251 # Notes:
256 # 1) Some callers want an absolute path, but for instance addlargefiles
252 # 1) Some callers want an absolute path, but for instance addlargefiles
257 # needs it repo-relative so it can be passed to repo[None].add(). So
253 # needs it repo-relative so it can be passed to repo[None].add(). So
258 # leave it up to the caller to use repo.wjoin() to get an absolute path.
254 # leave it up to the caller to use repo.wjoin() to get an absolute path.
259 # 2) Join with '/' because that's what dirstate always uses, even on
255 # 2) Join with '/' because that's what dirstate always uses, even on
260 # Windows. Change existing separator to '/' first in case we are
256 # Windows. Change existing separator to '/' first in case we are
261 # passed filenames from an external source (like the command line).
257 # passed filenames from an external source (like the command line).
262 return shortnameslash + util.pconvert(filename)
258 return shortnameslash + util.pconvert(filename)
263
259
264 def isstandin(filename):
260 def isstandin(filename):
265 '''Return true if filename is a big file standin. filename must be
261 '''Return true if filename is a big file standin. filename must be
266 in Mercurial's internal form (slash-separated).'''
262 in Mercurial's internal form (slash-separated).'''
267 return filename.startswith(shortnameslash)
263 return filename.startswith(shortnameslash)
268
264
269 def splitstandin(filename):
265 def splitstandin(filename):
270 # Split on / because that's what dirstate always uses, even on Windows.
266 # Split on / because that's what dirstate always uses, even on Windows.
271 # Change local separator to / first just in case we are passed filenames
267 # Change local separator to / first just in case we are passed filenames
272 # from an external source (like the command line).
268 # from an external source (like the command line).
273 bits = util.pconvert(filename).split('/', 1)
269 bits = util.pconvert(filename).split('/', 1)
274 if len(bits) == 2 and bits[0] == shortname:
270 if len(bits) == 2 and bits[0] == shortname:
275 return bits[1]
271 return bits[1]
276 else:
272 else:
277 return None
273 return None
278
274
279 def updatestandin(repo, standin):
275 def updatestandin(repo, standin):
280 file = repo.wjoin(splitstandin(standin))
276 file = repo.wjoin(splitstandin(standin))
281 if os.path.exists(file):
277 if os.path.exists(file):
282 hash = hashfile(file)
278 hash = hashfile(file)
283 executable = getexecutable(file)
279 executable = getexecutable(file)
284 writestandin(repo, standin, hash, executable)
280 writestandin(repo, standin, hash, executable)
285
281
286 def readstandin(repo, filename, node=None):
282 def readstandin(repo, filename, node=None):
287 '''read hex hash from standin for filename at given node, or working
283 '''read hex hash from standin for filename at given node, or working
288 directory if no node is given'''
284 directory if no node is given'''
289 return repo[node][standin(filename)].data().strip()
285 return repo[node][standin(filename)].data().strip()
290
286
291 def writestandin(repo, standin, hash, executable):
287 def writestandin(repo, standin, hash, executable):
292 '''write hash to <repo.root>/<standin>'''
288 '''write hash to <repo.root>/<standin>'''
293 writehash(hash, repo.wjoin(standin), executable)
289 writehash(hash, repo.wjoin(standin), executable)
294
290
295 def copyandhash(instream, outfile):
291 def copyandhash(instream, outfile):
296 '''Read bytes from instream (iterable) and write them to outfile,
292 '''Read bytes from instream (iterable) and write them to outfile,
297 computing the SHA-1 hash of the data along the way. Close outfile
293 computing the SHA-1 hash of the data along the way. Close outfile
298 when done and return the binary hash.'''
294 when done and return the binary hash.'''
299 hasher = util.sha1('')
295 hasher = util.sha1('')
300 for data in instream:
296 for data in instream:
301 hasher.update(data)
297 hasher.update(data)
302 outfile.write(data)
298 outfile.write(data)
303
299
304 # Blecch: closing a file that somebody else opened is rude and
300 # Blecch: closing a file that somebody else opened is rude and
305 # wrong. But it's so darn convenient and practical! After all,
301 # wrong. But it's so darn convenient and practical! After all,
306 # outfile was opened just to copy and hash.
302 # outfile was opened just to copy and hash.
307 outfile.close()
303 outfile.close()
308
304
309 return hasher.digest()
305 return hasher.digest()
310
306
311 def hashrepofile(repo, file):
307 def hashrepofile(repo, file):
312 return hashfile(repo.wjoin(file))
308 return hashfile(repo.wjoin(file))
313
309
314 def hashfile(file):
310 def hashfile(file):
315 if not os.path.exists(file):
311 if not os.path.exists(file):
316 return ''
312 return ''
317 hasher = util.sha1('')
313 hasher = util.sha1('')
318 fd = open(file, 'rb')
314 fd = open(file, 'rb')
319 for data in blockstream(fd):
315 for data in blockstream(fd):
320 hasher.update(data)
316 hasher.update(data)
321 fd.close()
317 fd.close()
322 return hasher.hexdigest()
318 return hasher.hexdigest()
323
319
324 class limitreader(object):
320 class limitreader(object):
325 def __init__(self, f, limit):
321 def __init__(self, f, limit):
326 self.f = f
322 self.f = f
327 self.limit = limit
323 self.limit = limit
328
324
329 def read(self, length):
325 def read(self, length):
330 if self.limit == 0:
326 if self.limit == 0:
331 return ''
327 return ''
332 length = length > self.limit and self.limit or length
328 length = length > self.limit and self.limit or length
333 self.limit -= length
329 self.limit -= length
334 return self.f.read(length)
330 return self.f.read(length)
335
331
336 def close(self):
332 def close(self):
337 pass
333 pass
338
334
339 def blockstream(infile, blocksize=128 * 1024):
335 def blockstream(infile, blocksize=128 * 1024):
340 """Generator that yields blocks of data from infile and closes infile."""
336 """Generator that yields blocks of data from infile and closes infile."""
341 while True:
337 while True:
342 data = infile.read(blocksize)
338 data = infile.read(blocksize)
343 if not data:
339 if not data:
344 break
340 break
345 yield data
341 yield data
346 # same blecch as copyandhash() above
342 # same blecch as copyandhash() above
347 infile.close()
343 infile.close()
348
344
349 def writehash(hash, filename, executable):
345 def writehash(hash, filename, executable):
350 util.makedirs(os.path.dirname(filename))
346 util.makedirs(os.path.dirname(filename))
351 util.writefile(filename, hash + '\n')
347 util.writefile(filename, hash + '\n')
352 os.chmod(filename, getmode(executable))
348 os.chmod(filename, getmode(executable))
353
349
354 def getexecutable(filename):
350 def getexecutable(filename):
355 mode = os.stat(filename).st_mode
351 mode = os.stat(filename).st_mode
356 return ((mode & stat.S_IXUSR) and
352 return ((mode & stat.S_IXUSR) and
357 (mode & stat.S_IXGRP) and
353 (mode & stat.S_IXGRP) and
358 (mode & stat.S_IXOTH))
354 (mode & stat.S_IXOTH))
359
355
360 def getmode(executable):
356 def getmode(executable):
361 if executable:
357 if executable:
362 return 0755
358 return 0755
363 else:
359 else:
364 return 0644
360 return 0644
365
361
366 def urljoin(first, second, *arg):
362 def urljoin(first, second, *arg):
367 def join(left, right):
363 def join(left, right):
368 if not left.endswith('/'):
364 if not left.endswith('/'):
369 left += '/'
365 left += '/'
370 if right.startswith('/'):
366 if right.startswith('/'):
371 right = right[1:]
367 right = right[1:]
372 return left + right
368 return left + right
373
369
374 url = join(first, second)
370 url = join(first, second)
375 for a in arg:
371 for a in arg:
376 url = join(url, a)
372 url = join(url, a)
377 return url
373 return url
378
374
379 def hexsha1(data):
375 def hexsha1(data):
380 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
376 """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
381 object data"""
377 object data"""
382 h = util.sha1()
378 h = util.sha1()
383 for chunk in util.filechunkiter(data):
379 for chunk in util.filechunkiter(data):
384 h.update(chunk)
380 h.update(chunk)
385 return h.hexdigest()
381 return h.hexdigest()
386
382
387 def httpsendfile(ui, filename):
383 def httpsendfile(ui, filename):
388 return httpconnection.httpsendfile(ui, filename, 'rb')
384 return httpconnection.httpsendfile(ui, filename, 'rb')
389
385
390 def unixpath(path):
386 def unixpath(path):
391 '''Return a version of path normalized for use with the lfdirstate.'''
387 '''Return a version of path normalized for use with the lfdirstate.'''
392 return util.pconvert(os.path.normpath(path))
388 return util.pconvert(os.path.normpath(path))
393
389
394 def islfilesrepo(repo):
390 def islfilesrepo(repo):
395 if ('largefiles' in repo.requirements and
391 if ('largefiles' in repo.requirements and
396 util.any(shortnameslash in f[0] for f in repo.store.datafiles())):
392 util.any(shortnameslash in f[0] for f in repo.store.datafiles())):
397 return True
393 return True
398
394
399 return util.any(openlfdirstate(repo.ui, repo, False))
395 return util.any(openlfdirstate(repo.ui, repo, False))
400
396
401 class storeprotonotcapable(Exception):
397 class storeprotonotcapable(Exception):
402 def __init__(self, storetypes):
398 def __init__(self, storetypes):
403 self.storetypes = storetypes
399 self.storetypes = storetypes
404
400
405 def getcurrentheads(repo):
401 def getcurrentheads(repo):
406 branches = repo.branchmap()
402 branches = repo.branchmap()
407 heads = []
403 heads = []
408 for branch in branches:
404 for branch in branches:
409 newheads = repo.branchheads(branch)
405 newheads = repo.branchheads(branch)
410 heads = heads + newheads
406 heads = heads + newheads
411 return heads
407 return heads
412
408
413 def getstandinsstate(repo):
409 def getstandinsstate(repo):
414 standins = []
410 standins = []
415 matcher = getstandinmatcher(repo)
411 matcher = getstandinmatcher(repo)
416 for standin in repo.dirstate.walk(matcher, [], False, False):
412 for standin in repo.dirstate.walk(matcher, [], False, False):
417 lfile = splitstandin(standin)
413 lfile = splitstandin(standin)
418 try:
414 try:
419 hash = readstandin(repo, lfile)
415 hash = readstandin(repo, lfile)
420 except IOError:
416 except IOError:
421 hash = None
417 hash = None
422 standins.append((lfile, hash))
418 standins.append((lfile, hash))
423 return standins
419 return standins
424
420
425 def getlfilestoupdate(oldstandins, newstandins):
421 def getlfilestoupdate(oldstandins, newstandins):
426 changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
422 changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
427 filelist = []
423 filelist = []
428 for f in changedstandins:
424 for f in changedstandins:
429 if f[0] not in filelist:
425 if f[0] not in filelist:
430 filelist.append(f[0])
426 filelist.append(f[0])
431 return filelist
427 return filelist
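The standin path helpers in the hunk above (standin, isstandin, splitstandin) are pure string manipulation. A standalone round-trip sketch, with the shortname constants copied from the module so it runs without Mercurial installed; replace(os.sep, '/') stands in here for util.pconvert:

import os

shortname = '.hglf'
shortnameslash = shortname + '/'

def standin(filename):
    # join with '/', because dirstate always uses '/', even on Windows
    return shortnameslash + filename.replace(os.sep, '/')

def splitstandin(filename):
    bits = filename.replace(os.sep, '/').split('/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    return None

assert standin('sub/big.bin') == '.hglf/sub/big.bin'
assert splitstandin('.hglf/sub/big.bin') == 'sub/big.bin'
assert splitstandin('sub/big.bin') is None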
@@ -1,110 +1,112 b''
1
1
2 $ echo "[extensions]" >> $HGRCPATH
2 $ echo "[extensions]" >> $HGRCPATH
3 $ echo "largefiles =" >> $HGRCPATH
3 $ echo "largefiles =" >> $HGRCPATH
4
4
5 Create the repository outside $HOME since largefiles write to
5 Create the repository outside $HOME since largefiles write to
6 $HOME/.cache/largefiles.
6 $HOME/.cache/largefiles.
7
7
8 $ hg init test
8 $ hg init test
9 $ cd test
9 $ cd test
10 $ echo "root" > root
10 $ echo "root" > root
11 $ hg add root
11 $ hg add root
12 $ hg commit -m "Root commit"
12 $ hg commit -m "Root commit"
13
13
14 $ echo "large" > foo
14 $ echo "large" > foo
15 $ hg add --large foo
15 $ hg add --large foo
16 $ hg commit -m "Add foo as a largefile"
16 $ hg commit -m "Add foo as a largefile"
17
17
18 $ hg update -r 0
18 $ hg update -r 0
19 getting changed largefiles
19 getting changed largefiles
20 0 largefiles updated, 1 removed
20 0 largefiles updated, 1 removed
21 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
21 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
22
22
23 $ echo "normal" > foo
23 $ echo "normal" > foo
24 $ hg add foo
24 $ hg add foo
25 $ hg commit -m "Add foo as normal file"
25 $ hg commit -m "Add foo as normal file"
26 created new head
26 created new head
27
27
28 Normal file in the working copy, keeping the normal version:
28 Normal file in the working copy, keeping the normal version:
29
29
30 $ echo "n" | hg merge --config ui.interactive=Yes
30 $ echo "n" | hg merge --config ui.interactive=Yes
31 foo has been turned into a largefile
31 foo has been turned into a largefile
32 use (l)argefile or keep as (n)ormal file? 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
32 use (l)argefile or keep as (n)ormal file? 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
33 (branch merge, don't forget to commit)
33 (branch merge, don't forget to commit)
34 getting changed largefiles
35 0 largefiles updated, 0 removed
34
36
35 $ hg status
37 $ hg status
36 $ cat foo
38 $ cat foo
37 normal
39 normal
38
40
39 Normal file in the working copy, keeping the largefile version:
41 Normal file in the working copy, keeping the largefile version:
40
42
41 $ hg update -q -C
43 $ hg update -q -C
42 $ echo "l" | hg merge --config ui.interactive=Yes
44 $ echo "l" | hg merge --config ui.interactive=Yes
43 foo has been turned into a largefile
45 foo has been turned into a largefile
44 use (l)argefile or keep as (n)ormal file? 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
46 use (l)argefile or keep as (n)ormal file? 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
45 (branch merge, don't forget to commit)
47 (branch merge, don't forget to commit)
46 getting changed largefiles
48 getting changed largefiles
47 1 largefiles updated, 0 removed
49 1 largefiles updated, 0 removed
48
50
49 $ hg status
51 $ hg status
50 M foo
52 M foo
51
53
52 $ hg diff --nodates
54 $ hg diff --nodates
53 diff -r fa129ab6b5a7 .hglf/foo
55 diff -r fa129ab6b5a7 .hglf/foo
54 --- /dev/null
56 --- /dev/null
55 +++ b/.hglf/foo
57 +++ b/.hglf/foo
56 @@ -0,0 +1,1 @@
58 @@ -0,0 +1,1 @@
57 +7f7097b041ccf68cc5561e9600da4655d21c6d18
59 +7f7097b041ccf68cc5561e9600da4655d21c6d18
58 diff -r fa129ab6b5a7 foo
60 diff -r fa129ab6b5a7 foo
59 --- a/foo
61 --- a/foo
60 +++ /dev/null
62 +++ /dev/null
61 @@ -1,1 +0,0 @@
63 @@ -1,1 +0,0 @@
62 -normal
64 -normal
63
65
64 $ cat foo
66 $ cat foo
65 large
67 large
66
68
67 Largefile in the working copy, keeping the normal version:
69 Largefile in the working copy, keeping the normal version:
68
70
69 $ hg update -q -C -r 1
71 $ hg update -q -C -r 1
70 $ echo "n" | hg merge --config ui.interactive=Yes
72 $ echo "n" | hg merge --config ui.interactive=Yes
71 foo has been turned into a normal file
73 foo has been turned into a normal file
72 keep as (l)argefile or use (n)ormal file? 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
74 keep as (l)argefile or use (n)ormal file? 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
73 (branch merge, don't forget to commit)
75 (branch merge, don't forget to commit)
74 getting changed largefiles
76 getting changed largefiles
75 0 largefiles updated, 0 removed
77 0 largefiles updated, 0 removed
76
78
77 $ hg status
79 $ hg status
78 M foo
80 M foo
79
81
80 $ hg diff --nodates
82 $ hg diff --nodates
81 diff -r ff521236428a .hglf/foo
83 diff -r ff521236428a .hglf/foo
82 --- a/.hglf/foo
84 --- a/.hglf/foo
83 +++ /dev/null
85 +++ /dev/null
84 @@ -1,1 +0,0 @@
86 @@ -1,1 +0,0 @@
85 -7f7097b041ccf68cc5561e9600da4655d21c6d18
87 -7f7097b041ccf68cc5561e9600da4655d21c6d18
86 diff -r ff521236428a foo
88 diff -r ff521236428a foo
87 --- /dev/null
89 --- /dev/null
88 +++ b/foo
90 +++ b/foo
89 @@ -0,0 +1,1 @@
91 @@ -0,0 +1,1 @@
90 +normal
92 +normal
91
93
92 $ cat foo
94 $ cat foo
93 normal
95 normal
94
96
95 Largefile in the working copy, keeping the largefile version:
97 Largefile in the working copy, keeping the largefile version:
96
98
97 $ hg update -q -C -r 1
99 $ hg update -q -C -r 1
98 $ echo "l" | hg merge --config ui.interactive=Yes
100 $ echo "l" | hg merge --config ui.interactive=Yes
99 foo has been turned into a normal file
101 foo has been turned into a normal file
100 keep as (l)argefile or use (n)ormal file? 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
102 keep as (l)argefile or use (n)ormal file? 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
101 (branch merge, don't forget to commit)
103 (branch merge, don't forget to commit)
102 getting changed largefiles
104 getting changed largefiles
103 1 largefiles updated, 0 removed
105 1 largefiles updated, 0 removed
104
106
105 $ hg status
107 $ hg status
106
108
107 $ cat foo
109 $ cat foo
108 large
110 large
109
111
110 $ cd ..
112 $ cd ..
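The 7f7097b041cc... identifiers in the test output above are largefile ids: the standin under .hglf/ records the SHA-1 hex digest of the file contents, computed blockwise as in hashfile() and blockstream() from lfutil.py. A small sketch of that derivation (hashcontent is a hypothetical helper, not Mercurial code):

import hashlib

def hashcontent(data, blocksize=128 * 1024):
    # SHA-1 over the file contents, fed in fixed-size blocks like hashfile()
    hasher = hashlib.sha1()
    for start in range(0, len(data), blocksize):
        hasher.update(data[start:start + blocksize])
    return hasher.hexdigest()

# the test writes "large" plus a newline into the largefile, so its standin stores:
print(hashcontent(b'large\n'))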
@@ -1,127 +1,141 b''
1 Create user cache directory
1 Create user cache directory
2
2
3 $ USERCACHE=`pwd`/cache; export USERCACHE
3 $ USERCACHE=`pwd`/cache; export USERCACHE
4 $ cat <<EOF >> ${HGRCPATH}
4 $ cat <<EOF >> ${HGRCPATH}
5 > [extensions]
5 > [extensions]
6 > hgext.largefiles=
6 > hgext.largefiles=
7 > [largefiles]
7 > [largefiles]
8 > usercache=${USERCACHE}
8 > usercache=${USERCACHE}
9 > EOF
9 > EOF
10 $ mkdir -p ${USERCACHE}
10 $ mkdir -p ${USERCACHE}
11
11
12 Create source repo, and commit adding largefile.
12 Create source repo, and commit adding largefile.
13
13
14 $ hg init src
14 $ hg init src
15 $ cd src
15 $ cd src
16 $ echo large > large
16 $ echo large > large
17 $ hg add --large large
17 $ hg add --large large
18 $ hg commit -m 'add largefile'
18 $ hg commit -m 'add largefile'
19 $ hg rm large
19 $ hg rm large
20 $ hg commit -m 'branchhead without largefile'
20 $ hg commit -m 'branchhead without largefile'
21 $ hg up -qr 0
21 $ hg up -qr 0
22 $ cd ..
22 $ cd ..
23
23
24 Discard all cached largefiles in USERCACHE
24 Discard all cached largefiles in USERCACHE
25
25
26 $ rm -rf ${USERCACHE}
26 $ rm -rf ${USERCACHE}
27
27
28 Create mirror repo, and pull from source without largefile:
28 Create mirror repo, and pull from source without largefile:
29 "pull" is used instead of "clone" for suppression of (1) updating to
29 "pull" is used instead of "clone" for suppression of (1) updating to
30 tip (= caching largefile from source repo), and (2) recording source
30 tip (= caching largefile from source repo), and (2) recording source
31 repo as "default" path in .hg/hgrc.
31 repo as "default" path in .hg/hgrc.
32
32
33 $ hg init mirror
33 $ hg init mirror
34 $ cd mirror
34 $ cd mirror
35 $ hg pull ../src
35 $ hg pull ../src
36 pulling from ../src
36 pulling from ../src
37 requesting all changes
37 requesting all changes
38 adding changesets
38 adding changesets
39 adding manifests
39 adding manifests
40 adding file changes
40 adding file changes
41 added 2 changesets with 1 changes to 1 files
41 added 2 changesets with 1 changes to 1 files
42 (run 'hg update' to get a working copy)
42 (run 'hg update' to get a working copy)
43 caching new largefiles
43 caching new largefiles
44 0 largefiles cached
44 0 largefiles cached
45
45
46 Update working directory to "tip", which requires largefile("large"),
46 Update working directory to "tip", which requires largefile("large"),
47 but there is no cache file for it. So, hg must treat it as
47 but there is no cache file for it. So, hg must treat it as
48 "missing"(!) file.
48 "missing"(!) file.
49
49
50 $ hg update -r0
50 $ hg update -r0
51 getting changed largefiles
51 getting changed largefiles
52 error getting id 7f7097b041ccf68cc5561e9600da4655d21c6d18 from url file:$TESTTMP/mirror for file large: can't get file locally (glob)
52 error getting id 7f7097b041ccf68cc5561e9600da4655d21c6d18 from url file:$TESTTMP/mirror for file large: can't get file locally (glob)
53 0 largefiles updated, 0 removed
53 0 largefiles updated, 0 removed
54 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
54 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
55 $ hg status
55 $ hg status
56 ! large
56 ! large
57
57
58 Update working directory to null: this cleans up .hg/largefiles/dirstate
58 Update working directory to null: this cleans up .hg/largefiles/dirstate
59
59
60 $ hg update null
60 $ hg update null
61 getting changed largefiles
61 getting changed largefiles
62 0 largefiles updated, 0 removed
62 0 largefiles updated, 0 removed
63 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
63 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
64
64
65 Update working directory to tip, again.
65 Update working directory to tip, again.
66
66
67 $ hg update -r0
67 $ hg update -r0
68 getting changed largefiles
68 getting changed largefiles
69 error getting id 7f7097b041ccf68cc5561e9600da4655d21c6d18 from url file:$TESTTMP/mirror for file large: can't get file locally (glob)
69 error getting id 7f7097b041ccf68cc5561e9600da4655d21c6d18 from url file:$TESTTMP/mirror for file large: can't get file locally (glob)
70 0 largefiles updated, 0 removed
70 0 largefiles updated, 0 removed
71 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
71 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
72 $ hg status
72 $ hg status
73 ! large
73 ! large
74 $ cd ..
74 $ cd ..
75
75
76 Verify that largefiles from pulled branchheads are fetched, also to an empty repo
77
78 $ hg init mirror2
79 $ hg -R mirror2 pull src -r0
80 pulling from src
81 adding changesets
82 adding manifests
83 adding file changes
84 added 1 changesets with 1 changes to 1 files
85 (run 'hg update' to get a working copy)
86 caching new largefiles
87 abort: *: '$TESTTMP/mirror2/.hg/largefiles/.7f7097b041ccf68cc5561e9600da4655d21c6d18.*' (glob)
88 [255]
89
76 #if unix-permissions
90 #if unix-permissions
77
91
78 Portable way to print file permissions:
92 Portable way to print file permissions:
79
93
80 $ cat > ls-l.py <<EOF
94 $ cat > ls-l.py <<EOF
81 > #!/usr/bin/env python
95 > #!/usr/bin/env python
82 > import sys, os
96 > import sys, os
83 > path = sys.argv[1]
97 > path = sys.argv[1]
84 > print '%03o' % (os.lstat(path).st_mode & 0777)
98 > print '%03o' % (os.lstat(path).st_mode & 0777)
85 > EOF
99 > EOF
86 $ chmod +x ls-l.py
100 $ chmod +x ls-l.py
87
101
88 Test that files in .hg/largefiles inherit mode from .hg/store, not
102 Test that files in .hg/largefiles inherit mode from .hg/store, not
89 from file in working copy:
103 from file in working copy:
90
104
91 $ cd src
105 $ cd src
92 $ chmod 750 .hg/store
106 $ chmod 750 .hg/store
93 $ chmod 660 large
107 $ chmod 660 large
94 $ echo change >> large
108 $ echo change >> large
95 $ hg commit -m change
109 $ hg commit -m change
96 created new head
110 created new head
97 $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
111 $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
98 640
112 640
99
113
100 Test permission of files in .hg/largefiles created by update:
114 Test permission of files in .hg/largefiles created by update:
101
115
102 $ cd ../mirror
116 $ cd ../mirror
103 $ rm -r "$USERCACHE" .hg/largefiles # avoid links
117 $ rm -r "$USERCACHE" .hg/largefiles # avoid links
104 $ chmod 750 .hg/store
118 $ chmod 750 .hg/store
105 $ hg pull ../src --update -q
119 $ hg pull ../src --update -q
106 $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
120 $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
107 640
121 640
108
122
109 Test permission of files created by push:
123 Test permission of files created by push:
110
124
111 $ hg serve -R ../src -d -p $HGPORT --pid-file hg.pid \
125 $ hg serve -R ../src -d -p $HGPORT --pid-file hg.pid \
112 > --config "web.allow_push=*" --config web.push_ssl=no
126 > --config "web.allow_push=*" --config web.push_ssl=no
113 $ cat hg.pid >> $DAEMON_PIDS
127 $ cat hg.pid >> $DAEMON_PIDS
114
128
115 $ echo change >> large
129 $ echo change >> large
116 $ hg commit -m change
130 $ hg commit -m change
117
131
118 $ rm -r "$USERCACHE"
132 $ rm -r "$USERCACHE"
119
133
120 $ hg push -q http://localhost:$HGPORT/
134 $ hg push -q http://localhost:$HGPORT/
121
135
122 $ ../ls-l.py ../src/.hg/largefiles/b734e14a0971e370408ab9bce8d56d8485e368a9
136 $ ../ls-l.py ../src/.hg/largefiles/b734e14a0971e370408ab9bce8d56d8485e368a9
123 640
137 640
124
138
125 $ cd ..
139 $ cd ..
126
140
127 #endif
141 #endif
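A note on the expected 640 output in the permission tests above: files written into .hg/largefiles are created with the store's createmode, which (assuming Mercurial's usual store behaviour) is the mode of the .hg/store directory with the execute bits masked off, so a 0750 store yields 0640 files regardless of the 660 mode on the working-copy file. A tiny sketch of that masking (expected_filemode is a hypothetical helper):

def expected_filemode(storedirmode):
    # permission bits of .hg/store, minus the execute bits
    return storedirmode & 0o666

assert expected_filemode(0o750) == 0o640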