##// END OF EJS Templates
largefiles: respect the rev when reading standins in copytostore() (issue3630)...
Matt Harbison -
r17877:92bbb21d stable
parent child Browse files
Show More
@@ -1,469 +1,469 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import os
11 import os
12 import errno
12 import errno
13 import platform
13 import platform
14 import shutil
14 import shutil
15 import stat
15 import stat
16
16
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 from mercurial.i18n import _
18 from mercurial.i18n import _
19
19
20 shortname = '.hglf'
20 shortname = '.hglf'
21 longname = 'largefiles'
21 longname = 'largefiles'
22
22
23
23
24 # -- Portability wrappers ----------------------------------------------
24 # -- Portability wrappers ----------------------------------------------
25
25
def dirstatewalk(dirstate, matcher, unknown=False, ignored=False):
    '''Walk the given dirstate with matcher; portability wrapper around
    dirstate.walk() that fixes the (empty) subrepos argument.'''
    results = dirstate.walk(matcher, [], unknown, ignored)
    return results
28
28
def repoadd(repo, list):
    '''Schedule the given files for addition in the working context.'''
    wctx = repo[None]
    return wctx.add(list)
32
32
33 def reporemove(repo, list, unlink=False):
33 def reporemove(repo, list, unlink=False):
34 def remove(list, unlink):
34 def remove(list, unlink):
35 wlock = repo.wlock()
35 wlock = repo.wlock()
36 try:
36 try:
37 if unlink:
37 if unlink:
38 for f in list:
38 for f in list:
39 try:
39 try:
40 util.unlinkpath(repo.wjoin(f))
40 util.unlinkpath(repo.wjoin(f))
41 except OSError, inst:
41 except OSError, inst:
42 if inst.errno != errno.ENOENT:
42 if inst.errno != errno.ENOENT:
43 raise
43 raise
44 repo[None].forget(list)
44 repo[None].forget(list)
45 finally:
45 finally:
46 wlock.release()
46 wlock.release()
47 return remove(list, unlink=unlink)
47 return remove(list, unlink=unlink)
48
48
def repoforget(repo, list):
    '''Stop tracking the given files without touching them on disk.'''
    wctx = repo[None]
    return wctx.forget(list)
52
52
def findoutgoing(repo, remote, force):
    '''Return the changesets present locally but missing from remote.'''
    from mercurial import discovery
    common = discovery.findcommonoutgoing(repo, remote.peer(), force=force)
    return common.missing
57
57
58 # -- Private worker functions ------------------------------------------
58 # -- Private worker functions ------------------------------------------
59
59
def getminsize(ui, assumelfiles, opt, default=10):
    '''Determine the minimum size (in MB) above which files are treated
    as largefiles: the command-line option wins, then the config value,
    then the default.  Aborts if no usable size can be determined.'''
    lfsize = opt
    if assumelfiles and not lfsize:
        lfsize = ui.config(longname, 'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise util.Abort(_('largefiles: size must be number (not %s)\n')
                             % lfsize)
    if lfsize is None:
        raise util.Abort(_('minimum size for largefiles must be specified'))
    return lfsize
73
73
def link(src, dest):
    '''Hardlink src to dest; if hardlinking fails (e.g. across
    filesystems), fall back to an atomic copy preserving the mode.'''
    try:
        util.oslink(src, dest)
        return
    except OSError:
        pass
    # hardlinks failed: fall back on atomic copy
    dst = util.atomictempfile(dest)
    for chunk in util.filechunkiter(open(src, 'rb')):
        dst.write(chunk)
    dst.close()
    os.chmod(dest, os.stat(src).st_mode)
84
84
def usercachepath(ui, hash):
    '''Return the path for hash in the per-user largefile cache, honoring
    the [largefiles] usercache setting first and then falling back to the
    platform's conventional cache location.  Returns None when no cache
    directory can be determined for this user.'''
    configured = ui.configpath(longname, 'usercache', None)
    if configured:
        return os.path.join(configured, hash)
    if os.name == 'nt':
        appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
        if appdata:
            return os.path.join(appdata, longname, hash)
        return None
    if platform.system() == 'Darwin':
        home = os.getenv('HOME')
        if home:
            return os.path.join(home, 'Library', 'Caches',
                                longname, hash)
        return None
    if os.name == 'posix':
        xdgcache = os.getenv('XDG_CACHE_HOME')
        if xdgcache:
            return os.path.join(xdgcache, longname, hash)
        home = os.getenv('HOME')
        if home:
            return os.path.join(home, '.cache', longname, hash)
        return None
    raise util.Abort(_('unknown operating system: %s\n') % os.name)
110
110
def inusercache(ui, hash):
    '''True if the largefile with this hash exists in the user cache.'''
    cached = usercachepath(ui, hash)
    return cached and os.path.exists(cached)
114
114
def findfile(repo, hash):
    '''Locate the largefile with the given hash: prefer the repo-local
    store, then hardlink it in from the user cache.  Return the store
    path, or None if the file is in neither cache.'''
    if instore(repo, hash):
        repo.ui.note(_('found %s in store\n') % hash)
        return storepath(repo, hash)
    if inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        util.makedirs(os.path.dirname(path))
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
126
126
class largefilesdirstate(dirstate.dirstate):
    '''A dirstate subclass whose entries are keyed by largefile name.

    Every accessor normalizes its path argument with unixpath() so that
    callers may pass OS-native paths; storage always uses '/'-separated
    repo-relative paths, matching dirstate's internal form.
    '''
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
142
142
143 def openlfdirstate(ui, repo, create=True):
143 def openlfdirstate(ui, repo, create=True):
144 '''
144 '''
145 Return a dirstate object that tracks largefiles: i.e. its root is
145 Return a dirstate object that tracks largefiles: i.e. its root is
146 the repo root, but it is saved in .hg/largefiles/dirstate.
146 the repo root, but it is saved in .hg/largefiles/dirstate.
147 '''
147 '''
148 admin = repo.join(longname)
148 admin = repo.join(longname)
149 opener = scmutil.opener(admin)
149 opener = scmutil.opener(admin)
150 lfdirstate = largefilesdirstate(opener, ui, repo.root,
150 lfdirstate = largefilesdirstate(opener, ui, repo.root,
151 repo.dirstate._validate)
151 repo.dirstate._validate)
152
152
153 # If the largefiles dirstate does not exist, populate and create
153 # If the largefiles dirstate does not exist, populate and create
154 # it. This ensures that we create it on the first meaningful
154 # it. This ensures that we create it on the first meaningful
155 # largefiles operation in a new clone.
155 # largefiles operation in a new clone.
156 if create and not os.path.exists(os.path.join(admin, 'dirstate')):
156 if create and not os.path.exists(os.path.join(admin, 'dirstate')):
157 util.makedirs(admin)
157 util.makedirs(admin)
158 matcher = getstandinmatcher(repo)
158 matcher = getstandinmatcher(repo)
159 for standin in dirstatewalk(repo.dirstate, matcher):
159 for standin in dirstatewalk(repo.dirstate, matcher):
160 lfile = splitstandin(standin)
160 lfile = splitstandin(standin)
161 hash = readstandin(repo, lfile)
161 hash = readstandin(repo, lfile)
162 lfdirstate.normallookup(lfile)
162 lfdirstate.normallookup(lfile)
163 try:
163 try:
164 if hash == hashfile(repo.wjoin(lfile)):
164 if hash == hashfile(repo.wjoin(lfile)):
165 lfdirstate.normal(lfile)
165 lfdirstate.normal(lfile)
166 except OSError, err:
166 except OSError, err:
167 if err.errno != errno.ENOENT:
167 if err.errno != errno.ENOENT:
168 raise
168 raise
169 return lfdirstate
169 return lfdirstate
170
170
def lfdirstatestatus(lfdirstate, repo, rev):
    '''Run status on the largefiles dirstate and resolve its 'unsure'
    entries by comparing standin hashes at rev against the working
    copy; returns the usual 7-tuple of status lists.'''
    match = match_.always(repo.root, repo.getcwd())
    (unsure, modified, added, removed,
     missing, unknown, ignored, clean) = lfdirstate.status(
        match, [], False, False, False)
    for lfile in unsure:
        standinhash = repo[rev][standin(lfile)].data().strip()
        if standinhash != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return (modified, added, removed, missing, unknown, ignored, clean)
183
183
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''
    if matcher is None:
        matcher = getstandinmatcher(repo)

    lfiles = []
    for f in repo[rev].walk(matcher):
        # ignore unknown files in working directory
        if rev is not None or repo.dirstate[f] != '?':
            lfiles.append(splitstandin(f))
    return lfiles
195
195
def instore(repo, hash):
    '''True if the largefile with this hash is in the repo-local store.'''
    return os.path.exists(storepath(repo, hash))
198
198
def storepath(repo, hash):
    '''Return the repo-local store path for the largefile with hash.'''
    return repo.join(os.path.join(longname, hash))
201
201
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository.  Return True on success, or False if
    the file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    cachedpath = findfile(repo, hash)
    if cachedpath is None:
        return False
    absdest = repo.wjoin(filename)
    util.makedirs(os.path.dirname(absdest))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    shutil.copy(cachedpath, absdest)
    return True
216
216
def copytostore(repo, rev, file, uploaded=False):
    '''Copy the largefile for `file` at revision `rev` into the local
    store, unless it is already there.

    The standin must be read at `rev`, not from the working directory:
    otherwise a commit that changes a largefile can store the content
    under the wrong (working-directory) hash (issue3630).
    '''
    hash = readstandin(repo, file, rev)
    if instore(repo, hash):
        return
    copytostoreabsolute(repo, repo.wjoin(file), hash)
222
222
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    for filename in ctx.files():
        # only standins that still exist in this revision's manifest
        if isstandin(filename) and filename in ctx.manifest():
            copytostore(repo, ctx.node(), splitstandin(filename))
231
231
232
232
def copytostoreabsolute(repo, file, hash):
    '''Copy the file at the given absolute path into the local store
    under hash, hardlinking from the user cache when possible and
    mirroring back into the user cache after an atomic copy.'''
    util.makedirs(os.path.dirname(storepath(repo, hash)))
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        dst = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        for chunk in util.filechunkiter(open(file, 'rb')):
            dst.write(chunk)
        dst.close()
        linktousercache(repo, hash)
244
244
def linktousercache(repo, hash):
    '''Mirror a store file into the per-user cache, if one is available.'''
    usercache = usercachepath(repo.ui, hash)
    if usercache:
        util.makedirs(os.path.dirname(usercache))
        link(storepath(repo, hash), usercache)
250
250
def getstandinmatcher(repo, pats=None, opts=None):
    '''Return a match object that applies pats to the standin directory.

    Note: the defaults are None rather than []/{} to avoid the shared
    mutable-default-argument pitfall.
    '''
    pats = pats or []
    opts = opts or {}
    standindir = repo.pathto(shortname)
    if pats:
        # patterns supplied: search standin directory relative to current dir
        cwd = repo.getcwd()
        if os.path.isabs(cwd):
            # cwd is an absolute path for hg -R <reponame>
            # work relative to the repository root in this case
            cwd = ''
        pats = [os.path.join(standindir, cwd, pat) for pat in pats]
    elif os.path.isdir(standindir):
        # no patterns: relative to repo root
        pats = [standindir]
    else:
        # no patterns and no standin dir: return matcher that matches nothing
        match = match_.match(repo.root, None, [], exact=True)
        match.matchfn = lambda f: False
        return match
    return getmatcher(repo, pats, opts, showbad=False)
271
271
def getmatcher(repo, pats=None, opts=None, showbad=True):
    '''Wrapper around scmutil.match() that adds showbad: if false,
    neuter the match object's bad() method so it does not print any
    warnings about missing files or directories.

    Defaults are None instead of []/{} to avoid the shared
    mutable-default-argument pitfall.
    '''
    match = scmutil.match(repo[None], pats or [], opts or {})
    if not showbad:
        match.bad = lambda f, msg: None
    return match
281
281
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher.files())
    # keep a reference to the raw standin test before we override it
    isstandin = smatcher.matchfn
    smatcher.matchfn = (
        lambda f: isstandin(f) and rmatcher.matchfn(splitstandin(f)))
    return smatcher
293
293
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repoadd().  So leave
    #    it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows.  Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortname + '/' + util.pconvert(filename)
305
305
def isstandin(filename):
    '''Return true if filename is a big file standin.  filename must
    be in Mercurial's internal form (slash-separated).'''
    return filename.startswith(shortname + '/')
310
310
def splitstandin(filename):
    '''Return the largefile path for the given standin, or None if the
    path is not a standin.'''
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    parts = util.pconvert(filename).split('/', 1)
    if len(parts) == 2 and parts[0] == shortname:
        return parts[1]
    return None
320
320
def updatestandin(repo, standin):
    '''Re-hash the working copy of a largefile and rewrite its standin;
    a missing working copy is silently ignored.'''
    lfile = repo.wjoin(splitstandin(standin))
    if os.path.exists(lfile):
        hash = hashfile(lfile)
        executable = getexecutable(lfile)
        writestandin(repo, standin, hash, executable)
327
327
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    fctx = repo[node][standin(filename)]
    return fctx.data().strip()
332
332
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    writehash(hash, repo.wjoin(standin), executable)
336
336
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way.  Close outfile
    when done and return the binary hash.'''
    hasher = util.sha1('')
    for data in instream:
        hasher.update(data)
        outfile.write(data)

    # Blecch: closing a file that somebody else opened is rude and
    # wrong.  But it's so darn convenient and practical!  After all,
    # outfile was opened just to copy and hash.
    outfile.close()

    return hasher.digest()
352
352
def hashrepofile(repo, file):
    '''Hash the working-directory copy of a repo-relative file.'''
    return hashfile(repo.wjoin(file))
355
355
def hashfile(file):
    '''Return the hex SHA-1 digest of the file's contents, or '' if the
    file does not exist.

    The descriptor is closed in a finally block so it is not leaked if
    reading raises; blockstream() also closes it on normal exhaustion,
    and a second close() is a harmless no-op.
    '''
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    try:
        for data in blockstream(fd):
            hasher.update(data)
    finally:
        fd.close()
    return hasher.hexdigest()
365
365
class limitreader(object):
    '''File-like wrapper that caps the total number of readable bytes.

    Once the limit is exhausted, read() returns the empty string.
    close() deliberately does NOT close the wrapped file.
    '''
    def __init__(self, f, limit):
        self.f = f
        self.limit = limit

    def read(self, length):
        if self.limit == 0:
            return ''
        if length > self.limit:
            length = self.limit
        self.limit -= length
        return self.f.read(length)

    def close(self):
        # the underlying file is owned by the caller; leave it open
        pass
380
380
def blockstream(infile, blocksize=128 * 1024):
    """Generator that yields blocks of data from infile and closes infile."""
    while True:
        chunk = infile.read(blocksize)
        if not chunk:
            break
        yield chunk
    # same blecch as copyandhash() above
    infile.close()
390
390
def writehash(hash, filename, executable):
    '''Write hash (plus newline) to filename, creating parent
    directories and setting the mode per the executable flag.'''
    util.makedirs(os.path.dirname(filename))
    util.writefile(filename, hash + '\n')
    os.chmod(filename, getmode(executable))
395
395
def getexecutable(filename):
    '''Truthy when filename is executable by user, group AND other.'''
    mode = os.stat(filename).st_mode
    return ((mode & stat.S_IXUSR) and
            (mode & stat.S_IXGRP) and
            (mode & stat.S_IXOTH))
401
401
def getmode(executable):
    '''Return the permission bits for a standin file: rwxr-xr-x for
    executables, rw-r--r-- otherwise.'''
    # 0o prefix is the unambiguous octal spelling (PEP 3127); the
    # values are identical to the legacy 0755/0644 literals.
    if executable:
        return 0o755
    else:
        return 0o644
407
407
def urljoin(first, second, *arg):
    '''Join URL components left to right, ensuring exactly one slash is
    contributed at each seam (one leading slash of the right-hand part
    is dropped; additional slashes are preserved).'''
    def _join(left, right):
        sep = '' if left.endswith('/') else '/'
        if right.startswith('/'):
            right = right[1:]
        return left + sep + right

    url = _join(first, second)
    for piece in arg:
        url = _join(url, piece)
    return url
420
420
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the
    file-like object data"""
    hasher = util.sha1()
    for chunk in util.filechunkiter(data):
        hasher.update(chunk)
    return hasher.hexdigest()
428
428
def httpsendfile(ui, filename):
    '''Open filename (binary mode) wrapped for HTTP upload.'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
431
431
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    normalized = os.path.normpath(path)
    return util.pconvert(normalized)
435
435
def islfilesrepo(repo):
    '''True if this repo actually uses largefiles: either the store
    contains standin revlogs, or the largefiles dirstate is non-empty.'''
    if ('largefiles' in repo.requirements and
            util.any(shortname + '/' in f[0]
                     for f in repo.store.datafiles())):
        return True

    return util.any(openlfdirstate(repo.ui, repo, False))
442
442
class storeprotonotcapable(Exception):
    '''Raised when no remote store supports any of the required
    store types.'''
    def __init__(self, storetypes):
        # remember the unsupported types so callers can report them
        self.storetypes = storetypes
446
446
def getcurrentheads(repo):
    '''Return a flat list of the heads of every branch in the repo.'''
    heads = []
    for branch in repo.branchmap():
        heads.extend(repo.branchheads(branch))
    return heads
454
454
def getstandinsstate(repo):
    '''Return a list of (lfile, standin hash) pairs for every standin
    tracked by the dirstate.'''
    state = []
    matcher = getstandinmatcher(repo)
    for standin in dirstatewalk(repo.dirstate, matcher):
        lfile = splitstandin(standin)
        state.append((lfile, readstandin(repo, lfile)))
    return state
462
462
def getlfilestoupdate(oldstandins, newstandins):
    '''Return the largefile names whose (name, hash) entries differ
    between the two standin lists, without duplicates.'''
    changed = set(oldstandins) ^ set(newstandins)
    filelist = []
    for entry in changed:
        if entry[0] not in filelist:
            filelist.append(entry[0])
    return filelist
@@ -1,297 +1,338 b''
1 $ USERCACHE="$TESTTMP/cache"; export USERCACHE
1 $ USERCACHE="$TESTTMP/cache"; export USERCACHE
2 $ mkdir "${USERCACHE}"
2 $ mkdir "${USERCACHE}"
3 $ cat >> $HGRCPATH <<EOF
3 $ cat >> $HGRCPATH <<EOF
4 > [extensions]
4 > [extensions]
5 > largefiles =
5 > largefiles =
6 > share =
6 > share =
7 > graphlog =
7 > graphlog =
8 > mq =
8 > mq =
9 > convert =
9 > [largefiles]
10 > [largefiles]
10 > minsize = 0.5
11 > minsize = 0.5
11 > patterns = **.other
12 > patterns = **.other
12 > **.dat
13 > **.dat
13 > usercache=${USERCACHE}
14 > usercache=${USERCACHE}
14 > EOF
15 > EOF
15
16
16 "lfconvert" works
17 "lfconvert" works
17 $ hg init bigfile-repo
18 $ hg init bigfile-repo
18 $ cd bigfile-repo
19 $ cd bigfile-repo
19 $ cat >> .hg/hgrc <<EOF
20 $ cat >> .hg/hgrc <<EOF
20 > [extensions]
21 > [extensions]
21 > largefiles = !
22 > largefiles = !
22 > EOF
23 > EOF
23 $ mkdir sub
24 $ mkdir sub
24 $ dd if=/dev/zero bs=1k count=256 > large 2> /dev/null
25 $ dd if=/dev/zero bs=1k count=256 > large 2> /dev/null
25 $ dd if=/dev/zero bs=1k count=256 > large2 2> /dev/null
26 $ dd if=/dev/zero bs=1k count=256 > large2 2> /dev/null
26 $ echo normal > normal1
27 $ echo normal > normal1
27 $ echo alsonormal > sub/normal2
28 $ echo alsonormal > sub/normal2
28 $ dd if=/dev/zero bs=1k count=10 > sub/maybelarge.dat 2> /dev/null
29 $ dd if=/dev/zero bs=1k count=10 > sub/maybelarge.dat 2> /dev/null
29 $ hg addremove
30 $ hg addremove
30 adding large
31 adding large
31 adding large2
32 adding large2
32 adding normal1
33 adding normal1
33 adding sub/maybelarge.dat
34 adding sub/maybelarge.dat
34 adding sub/normal2
35 adding sub/normal2
35 $ hg commit -m"add large, normal1" large normal1
36 $ hg commit -m"add large, normal1" large normal1
36 $ hg commit -m"add sub/*" sub
37 $ hg commit -m"add sub/*" sub
37
38
38 Test tag parsing
39 Test tag parsing
39 $ cat >> .hgtags <<EOF
40 $ cat >> .hgtags <<EOF
40 > IncorrectlyFormattedTag!
41 > IncorrectlyFormattedTag!
41 > invalidhash sometag
42 > invalidhash sometag
42 > 0123456789abcdef anothertag
43 > 0123456789abcdef anothertag
43 > EOF
44 > EOF
44 $ hg add .hgtags
45 $ hg add .hgtags
45 $ hg commit -m"add large2" large2 .hgtags
46 $ hg commit -m"add large2" large2 .hgtags
46
47
47 Test link+rename largefile codepath
48 Test link+rename largefile codepath
48 $ [ -d .hg/largefiles ] && echo fail || echo pass
49 $ [ -d .hg/largefiles ] && echo fail || echo pass
49 pass
50 pass
50 $ cd ..
51 $ cd ..
51 $ hg lfconvert --size 0.2 bigfile-repo largefiles-repo
52 $ hg lfconvert --size 0.2 bigfile-repo largefiles-repo
52 initializing destination largefiles-repo
53 initializing destination largefiles-repo
53 skipping incorrectly formatted tag IncorrectlyFormattedTag!
54 skipping incorrectly formatted tag IncorrectlyFormattedTag!
54 skipping incorrectly formatted id invalidhash
55 skipping incorrectly formatted id invalidhash
55 no mapping for id 0123456789abcdef
56 no mapping for id 0123456789abcdef
56 #if symlink
57 #if symlink
57 $ hg --cwd bigfile-repo rename large2 large3
58 $ hg --cwd bigfile-repo rename large2 large3
58 $ ln -sf large bigfile-repo/large3
59 $ ln -sf large bigfile-repo/large3
59 $ hg --cwd bigfile-repo commit -m"make large2 a symlink" large2 large3
60 $ hg --cwd bigfile-repo commit -m"make large2 a symlink" large2 large3
60 $ hg lfconvert --size 0.2 bigfile-repo largefiles-repo-symlink
61 $ hg lfconvert --size 0.2 bigfile-repo largefiles-repo-symlink
61 initializing destination largefiles-repo-symlink
62 initializing destination largefiles-repo-symlink
62 skipping incorrectly formatted tag IncorrectlyFormattedTag!
63 skipping incorrectly formatted tag IncorrectlyFormattedTag!
63 skipping incorrectly formatted id invalidhash
64 skipping incorrectly formatted id invalidhash
64 no mapping for id 0123456789abcdef
65 no mapping for id 0123456789abcdef
65 abort: renamed/copied largefile large3 becomes symlink
66 abort: renamed/copied largefile large3 becomes symlink
66 [255]
67 [255]
67 #endif
68 #endif
68 $ cd bigfile-repo
69 $ cd bigfile-repo
69 $ hg strip --no-backup 2
70 $ hg strip --no-backup 2
70 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
71 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
71 $ cd ..
72 $ cd ..
72 $ rm -rf largefiles-repo largefiles-repo-symlink
73 $ rm -rf largefiles-repo largefiles-repo-symlink
73
74
74 $ hg lfconvert --size 0.2 bigfile-repo largefiles-repo
75 $ hg lfconvert --size 0.2 bigfile-repo largefiles-repo
75 initializing destination largefiles-repo
76 initializing destination largefiles-repo
76
77
77 "lfconvert" converts content correctly
78 "lfconvert" converts content correctly
78 $ cd largefiles-repo
79 $ cd largefiles-repo
79 $ hg up
80 $ hg up
80 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
81 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
81 getting changed largefiles
82 getting changed largefiles
82 2 largefiles updated, 0 removed
83 2 largefiles updated, 0 removed
83 $ hg locate
84 $ hg locate
84 .hglf/large
85 .hglf/large
85 .hglf/sub/maybelarge.dat
86 .hglf/sub/maybelarge.dat
86 normal1
87 normal1
87 sub/normal2
88 sub/normal2
88 $ cat normal1
89 $ cat normal1
89 normal
90 normal
90 $ cat sub/normal2
91 $ cat sub/normal2
91 alsonormal
92 alsonormal
92 $ "$TESTDIR/md5sum.py" large sub/maybelarge.dat
93 $ "$TESTDIR/md5sum.py" large sub/maybelarge.dat
93 ec87a838931d4d5d2e94a04644788a55 large
94 ec87a838931d4d5d2e94a04644788a55 large
94 1276481102f218c981e0324180bafd9f sub/maybelarge.dat
95 1276481102f218c981e0324180bafd9f sub/maybelarge.dat
95
96
96 "lfconvert" adds 'largefiles' to .hg/requires.
97 "lfconvert" adds 'largefiles' to .hg/requires.
97 $ cat .hg/requires
98 $ cat .hg/requires
98 largefiles
99 largefiles
99 revlogv1
100 revlogv1
100 fncache
101 fncache
101 store
102 store
102 dotencode
103 dotencode
103
104
104 "lfconvert" includes a newline at the end of the standin files.
105 "lfconvert" includes a newline at the end of the standin files.
105 $ cat .hglf/large .hglf/sub/maybelarge.dat
106 $ cat .hglf/large .hglf/sub/maybelarge.dat
106 2e000fa7e85759c7f4c254d4d9c33ef481e459a7
107 2e000fa7e85759c7f4c254d4d9c33ef481e459a7
107 34e163be8e43c5631d8b92e9c43ab0bf0fa62b9c
108 34e163be8e43c5631d8b92e9c43ab0bf0fa62b9c
108 $ cd ..
109 $ cd ..
109
110
110 add some changesets to rename/remove/merge
111 add some changesets to rename/remove/merge
111 $ cd bigfile-repo
112 $ cd bigfile-repo
112 $ hg mv -q sub stuff
113 $ hg mv -q sub stuff
113 $ hg commit -m"rename sub/ to stuff/"
114 $ hg commit -m"rename sub/ to stuff/"
114 $ hg update -q 1
115 $ hg update -q 1
115 $ echo blah >> normal3
116 $ echo blah >> normal3
116 $ echo blah >> sub/normal2
117 $ echo blah >> sub/normal2
117 $ echo blah >> sub/maybelarge.dat
118 $ echo blah >> sub/maybelarge.dat
118 $ "$TESTDIR/md5sum.py" sub/maybelarge.dat
119 $ "$TESTDIR/md5sum.py" sub/maybelarge.dat
119 1dd0b99ff80e19cff409702a1d3f5e15 sub/maybelarge.dat
120 1dd0b99ff80e19cff409702a1d3f5e15 sub/maybelarge.dat
120 $ hg commit -A -m"add normal3, modify sub/*"
121 $ hg commit -A -m"add normal3, modify sub/*"
121 adding normal3
122 adding normal3
122 created new head
123 created new head
123 $ hg rm large normal3
124 $ hg rm large normal3
124 $ hg commit -q -m"remove large, normal3"
125 $ hg commit -q -m"remove large, normal3"
125 $ hg merge
126 $ hg merge
126 merging sub/maybelarge.dat and stuff/maybelarge.dat to stuff/maybelarge.dat
127 merging sub/maybelarge.dat and stuff/maybelarge.dat to stuff/maybelarge.dat
127 warning: $TESTTMP/bigfile-repo/stuff/maybelarge.dat looks like a binary file. (glob)
128 warning: $TESTTMP/bigfile-repo/stuff/maybelarge.dat looks like a binary file. (glob)
128 merging stuff/maybelarge.dat incomplete! (edit conflicts, then use 'hg resolve --mark')
129 merging stuff/maybelarge.dat incomplete! (edit conflicts, then use 'hg resolve --mark')
129 merging sub/normal2 and stuff/normal2 to stuff/normal2
130 merging sub/normal2 and stuff/normal2 to stuff/normal2
130 0 files updated, 1 files merged, 0 files removed, 1 files unresolved
131 0 files updated, 1 files merged, 0 files removed, 1 files unresolved
131 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
132 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
132 [1]
133 [1]
133 $ hg cat -r . sub/maybelarge.dat > stuff/maybelarge.dat
134 $ hg cat -r . sub/maybelarge.dat > stuff/maybelarge.dat
134 $ hg resolve -m stuff/maybelarge.dat
135 $ hg resolve -m stuff/maybelarge.dat
135 $ hg commit -m"merge"
136 $ hg commit -m"merge"
136 $ hg glog --template "{rev}:{node|short} {desc|firstline}\n"
137 $ hg glog --template "{rev}:{node|short} {desc|firstline}\n"
137 @ 5:4884f215abda merge
138 @ 5:4884f215abda merge
138 |\
139 |\
139 | o 4:7285f817b77e remove large, normal3
140 | o 4:7285f817b77e remove large, normal3
140 | |
141 | |
141 | o 3:67e3892e3534 add normal3, modify sub/*
142 | o 3:67e3892e3534 add normal3, modify sub/*
142 | |
143 | |
143 o | 2:c96c8beb5d56 rename sub/ to stuff/
144 o | 2:c96c8beb5d56 rename sub/ to stuff/
144 |/
145 |/
145 o 1:020c65d24e11 add sub/*
146 o 1:020c65d24e11 add sub/*
146 |
147 |
147 o 0:117b8328f97a add large, normal1
148 o 0:117b8328f97a add large, normal1
148
149
149 $ cd ..
150 $ cd ..
150
151
151 lfconvert with rename, merge, and remove
152 lfconvert with rename, merge, and remove
152 $ rm -rf largefiles-repo
153 $ rm -rf largefiles-repo
153 $ hg lfconvert --size 0.2 bigfile-repo largefiles-repo
154 $ hg lfconvert --size 0.2 bigfile-repo largefiles-repo
154 initializing destination largefiles-repo
155 initializing destination largefiles-repo
155 $ cd largefiles-repo
156 $ cd largefiles-repo
156 $ hg glog --template "{rev}:{node|short} {desc|firstline}\n"
157 $ hg glog --template "{rev}:{node|short} {desc|firstline}\n"
157 o 5:8e05f5f2b77e merge
158 o 5:8e05f5f2b77e merge
158 |\
159 |\
159 | o 4:a5a02de7a8e4 remove large, normal3
160 | o 4:a5a02de7a8e4 remove large, normal3
160 | |
161 | |
161 | o 3:55759520c76f add normal3, modify sub/*
162 | o 3:55759520c76f add normal3, modify sub/*
162 | |
163 | |
163 o | 2:261ad3f3f037 rename sub/ to stuff/
164 o | 2:261ad3f3f037 rename sub/ to stuff/
164 |/
165 |/
165 o 1:334e5237836d add sub/*
166 o 1:334e5237836d add sub/*
166 |
167 |
167 o 0:d4892ec57ce2 add large, normal1
168 o 0:d4892ec57ce2 add large, normal1
168
169
169 $ hg locate -r 2
170 $ hg locate -r 2
170 .hglf/large
171 .hglf/large
171 .hglf/stuff/maybelarge.dat
172 .hglf/stuff/maybelarge.dat
172 normal1
173 normal1
173 stuff/normal2
174 stuff/normal2
174 $ hg locate -r 3
175 $ hg locate -r 3
175 .hglf/large
176 .hglf/large
176 .hglf/sub/maybelarge.dat
177 .hglf/sub/maybelarge.dat
177 normal1
178 normal1
178 normal3
179 normal3
179 sub/normal2
180 sub/normal2
180 $ hg locate -r 4
181 $ hg locate -r 4
181 .hglf/sub/maybelarge.dat
182 .hglf/sub/maybelarge.dat
182 normal1
183 normal1
183 sub/normal2
184 sub/normal2
184 $ hg locate -r 5
185 $ hg locate -r 5
185 .hglf/stuff/maybelarge.dat
186 .hglf/stuff/maybelarge.dat
186 normal1
187 normal1
187 stuff/normal2
188 stuff/normal2
188 $ hg update
189 $ hg update
189 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
190 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
190 getting changed largefiles
191 getting changed largefiles
191 1 largefiles updated, 0 removed
192 1 largefiles updated, 0 removed
192 $ cat stuff/normal2
193 $ cat stuff/normal2
193 alsonormal
194 alsonormal
194 blah
195 blah
195 $ "$TESTDIR/md5sum.py" stuff/maybelarge.dat
196 $ "$TESTDIR/md5sum.py" stuff/maybelarge.dat
196 1dd0b99ff80e19cff409702a1d3f5e15 stuff/maybelarge.dat
197 1dd0b99ff80e19cff409702a1d3f5e15 stuff/maybelarge.dat
197 $ cat .hglf/stuff/maybelarge.dat
198 $ cat .hglf/stuff/maybelarge.dat
198 76236b6a2c6102826c61af4297dd738fb3b1de38
199 76236b6a2c6102826c61af4297dd738fb3b1de38
199 $ cd ..
200 $ cd ..
200
201
201 "lfconvert" error cases
202 "lfconvert" error cases
202 $ hg lfconvert http://localhost/foo foo
203 $ hg lfconvert http://localhost/foo foo
203 abort: http://localhost/foo is not a local Mercurial repo
204 abort: http://localhost/foo is not a local Mercurial repo
204 [255]
205 [255]
205 $ hg lfconvert foo ssh://localhost/foo
206 $ hg lfconvert foo ssh://localhost/foo
206 abort: ssh://localhost/foo is not a local Mercurial repo
207 abort: ssh://localhost/foo is not a local Mercurial repo
207 [255]
208 [255]
208 $ hg lfconvert nosuchrepo foo
209 $ hg lfconvert nosuchrepo foo
209 abort: repository nosuchrepo not found!
210 abort: repository nosuchrepo not found!
210 [255]
211 [255]
211 $ hg share -q -U bigfile-repo shared
212 $ hg share -q -U bigfile-repo shared
212 $ printf 'bogus' > shared/.hg/sharedpath
213 $ printf 'bogus' > shared/.hg/sharedpath
213 $ hg lfconvert shared foo
214 $ hg lfconvert shared foo
214 abort: .hg/sharedpath points to nonexistent directory $TESTTMP/bogus! (glob)
215 abort: .hg/sharedpath points to nonexistent directory $TESTTMP/bogus! (glob)
215 [255]
216 [255]
216 $ hg lfconvert bigfile-repo largefiles-repo
217 $ hg lfconvert bigfile-repo largefiles-repo
217 initializing destination largefiles-repo
218 initializing destination largefiles-repo
218 abort: repository largefiles-repo already exists!
219 abort: repository largefiles-repo already exists!
219 [255]
220 [255]
220
221
221 add another largefile to the new largefiles repo
222 add another largefile to the new largefiles repo
222 $ cd largefiles-repo
223 $ cd largefiles-repo
223 $ dd if=/dev/zero bs=1k count=1k > anotherlarge 2> /dev/null
224 $ dd if=/dev/zero bs=1k count=1k > anotherlarge 2> /dev/null
224 $ hg add --lfsize=1 anotherlarge
225 $ hg add --lfsize=1 anotherlarge
225 $ hg commit -m "add anotherlarge (should be a largefile)"
226 $ hg commit -m "add anotherlarge (should be a largefile)"
226 $ cat .hglf/anotherlarge
227 $ cat .hglf/anotherlarge
227 3b71f43ff30f4b15b5cd85dd9e95ebc7e84eb5a3
228 3b71f43ff30f4b15b5cd85dd9e95ebc7e84eb5a3
228 $ cd ..
229 $ cd ..
229
230
230 round-trip: converting back to a normal (non-largefiles) repo with
231 round-trip: converting back to a normal (non-largefiles) repo with
231 "lfconvert --to-normal" should give the same as ../bigfile-repo
232 "lfconvert --to-normal" should give the same as ../bigfile-repo
232 $ cd largefiles-repo
233 $ cd largefiles-repo
233 $ hg lfconvert --to-normal . ../normal-repo
234 $ hg lfconvert --to-normal . ../normal-repo
234 initializing destination ../normal-repo
235 initializing destination ../normal-repo
235 $ cd ../normal-repo
236 $ cd ../normal-repo
236 $ cat >> .hg/hgrc <<EOF
237 $ cat >> .hg/hgrc <<EOF
237 > [extensions]
238 > [extensions]
238 > largefiles = !
239 > largefiles = !
239 > EOF
240 > EOF
240
241
241 # Hmmm: the changeset ID for rev 5 is different from the original
242 # Hmmm: the changeset ID for rev 5 is different from the original
242 # normal repo (../bigfile-repo), because the changelog filelist
243 # normal repo (../bigfile-repo), because the changelog filelist
243 # differs between the two incarnations of rev 5: this repo includes
244 # differs between the two incarnations of rev 5: this repo includes
244 # 'large' in the list, but ../bigfile-repo does not. Since rev 5
245 # 'large' in the list, but ../bigfile-repo does not. Since rev 5
245 # removes 'large' relative to the first parent in both repos, it seems
246 # removes 'large' relative to the first parent in both repos, it seems
246 # to me that lfconvert is doing a *better* job than
247 # to me that lfconvert is doing a *better* job than
247 # "hg remove" + "hg merge" + "hg commit".
248 # "hg remove" + "hg merge" + "hg commit".
248 # $ hg -R ../bigfile-repo debugdata -c 5
249 # $ hg -R ../bigfile-repo debugdata -c 5
249 # $ hg debugdata -c 5
250 # $ hg debugdata -c 5
250 $ hg glog --template "{rev}:{node|short} {desc|firstline}\n"
251 $ hg glog --template "{rev}:{node|short} {desc|firstline}\n"
251 o 6:1635824e6f59 add anotherlarge (should be a largefile)
252 o 6:1635824e6f59 add anotherlarge (should be a largefile)
252 |
253 |
253 o 5:7215f8deeaaf merge
254 o 5:7215f8deeaaf merge
254 |\
255 |\
255 | o 4:7285f817b77e remove large, normal3
256 | o 4:7285f817b77e remove large, normal3
256 | |
257 | |
257 | o 3:67e3892e3534 add normal3, modify sub/*
258 | o 3:67e3892e3534 add normal3, modify sub/*
258 | |
259 | |
259 o | 2:c96c8beb5d56 rename sub/ to stuff/
260 o | 2:c96c8beb5d56 rename sub/ to stuff/
260 |/
261 |/
261 o 1:020c65d24e11 add sub/*
262 o 1:020c65d24e11 add sub/*
262 |
263 |
263 o 0:117b8328f97a add large, normal1
264 o 0:117b8328f97a add large, normal1
264
265
265 $ hg update
266 $ hg update
266 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
267 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
267 $ hg locate
268 $ hg locate
268 anotherlarge
269 anotherlarge
269 normal1
270 normal1
270 stuff/maybelarge.dat
271 stuff/maybelarge.dat
271 stuff/normal2
272 stuff/normal2
272 $ [ -d .hg/largefiles ] && echo fail || echo pass
273 $ [ -d .hg/largefiles ] && echo fail || echo pass
273 pass
274 pass
274
275
275 $ cd ..
276 $ cd ..
276
277
278 $ hg convert largefiles-repo
279 assuming destination largefiles-repo-hg
280 initializing destination largefiles-repo-hg repository
281 scanning source...
282 sorting...
283 converting...
284 6 add large, normal1
285 5 add sub/*
286 4 rename sub/ to stuff/
287 3 add normal3, modify sub/*
288 2 remove large, normal3
289 1 merge
290 0 add anotherlarge (should be a largefile)
291
292 $ hg -R largefiles-repo-hg glog --template "{rev}:{node|short} {desc|firstline}\n"
293 o 6:17126745edfd add anotherlarge (should be a largefile)
294 |
295 o 5:9cc5aa7204f0 merge
296 |\
297 | o 4:a5a02de7a8e4 remove large, normal3
298 | |
299 | o 3:55759520c76f add normal3, modify sub/*
300 | |
301 o | 2:261ad3f3f037 rename sub/ to stuff/
302 |/
303 o 1:334e5237836d add sub/*
304 |
305 o 0:d4892ec57ce2 add large, normal1
306
307 $ hg -R largefiles-repo-hg verify --large --lfa
308 checking changesets
309 checking manifests
310 crosschecking files in changesets and manifests
311 checking files
312 8 files, 7 changesets, 12 total revisions
313 searching 7 changesets for largefiles
314 verified existence of 6 revisions of 4 largefiles
315 $ hg -R largefiles-repo-hg showconfig paths
316
317
277 Avoid a traceback if a largefile isn't available (issue3519)
318 Avoid a traceback if a largefile isn't available (issue3519)
278
319
279 Ensure the largefile can be cached in the source if necessary
320 Ensure the largefile can be cached in the source if necessary
280 $ hg clone -U largefiles-repo issue3519
321 $ hg clone -U largefiles-repo issue3519
281 $ rm "${USERCACHE}"/*
322 $ rm "${USERCACHE}"/*
282 $ hg lfconvert --to-normal issue3519 normalized3519
323 $ hg lfconvert --to-normal issue3519 normalized3519
283 initializing destination normalized3519
324 initializing destination normalized3519
284
325
285 Ensure the abort message is useful if a largefile is entirely unavailable
326 Ensure the abort message is useful if a largefile is entirely unavailable
286 $ rm -rf normalized3519
327 $ rm -rf normalized3519
287 $ rm "${USERCACHE}"/*
328 $ rm "${USERCACHE}"/*
288 $ rm issue3519/.hg/largefiles/*
329 $ rm issue3519/.hg/largefiles/*
289 $ rm largefiles-repo/.hg/largefiles/*
330 $ rm largefiles-repo/.hg/largefiles/*
290 $ hg lfconvert --to-normal issue3519 normalized3519
331 $ hg lfconvert --to-normal issue3519 normalized3519
291 initializing destination normalized3519
332 initializing destination normalized3519
292 large: can't get file locally
333 large: can't get file locally
293 (no default or default-push path set in hgrc)
334 (no default or default-push path set in hgrc)
294 abort: missing largefile 'large' from revision d4892ec57ce212905215fad1d9018f56b99202ad
335 abort: missing largefile 'large' from revision d4892ec57ce212905215fad1d9018f56b99202ad
295 [255]
336 [255]
296
337
297
338
General Comments 0
You need to be logged in to leave comments. Login now