##// END OF EJS Templates
largefiles: check hash of files in the store before copying to working dir...
Mads Kiilerich -
r26823:45e8bd2f stable
parent child Browse files
Show More
@@ -1,621 +1,628 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import os
11 import os
12 import platform
12 import platform
13 import shutil
14 import stat
13 import stat
15 import copy
14 import copy
16
15
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
16 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 from mercurial.i18n import _
17 from mercurial.i18n import _
19 from mercurial import node, error
18 from mercurial import node, error
20
19
# Name of the working-directory subtree that holds largefile standins.
shortname = '.hglf'
# Same, with a trailing '/' for prefix matching (dirstate always uses '/').
shortnameslash = shortname + '/'
# Name of the largefiles store directory and of the config section.
longname = 'largefiles'
24
23
25
24
26 # -- Private worker functions ------------------------------------------
25 # -- Private worker functions ------------------------------------------
27
26
def getminsize(ui, assumelfiles, opt, default=10):
    '''Return the minimum size (in MB) above which files are handled as
    largefiles.  The command line value ``opt`` wins; otherwise, when
    ``assumelfiles`` is set, fall back to the ``largefiles.minsize``
    config value (or ``default``).  Aborts on a non-numeric or missing
    value.'''
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, 'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise error.Abort(_('largefiles: size must be number (not %s)\n')
                              % lfsize)
    if lfsize is None:
        raise error.Abort(_('minimum size for largefiles must be specified'))
    return lfsize
41
40
def link(src, dest):
    '''Hardlink ``src`` to ``dest``, creating parent directories as
    needed; fall back to an atomic copy (preserving the source's mode)
    when hardlinking is not possible.'''
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        dst = util.atomictempfile(dest)
        # close the source deterministically (the original leaked the
        # descriptor until garbage collection, and on errors)
        with open(src, 'rb') as srcf:
            for chunk in util.filechunkiter(srcf):
                dst.write(chunk)
        dst.close()
        os.chmod(dest, os.stat(src).st_mode)
53
52
def usercachepath(ui, hash):
    '''Return the user-cache path for the largefile with the given hash.

    Honors the ``largefiles.usercache`` config when set; otherwise uses
    the conventional per-platform cache location.  May return None when
    the relevant environment variables are unset.'''
    configured = ui.configpath(longname, 'usercache', None)
    if configured:
        return os.path.join(configured, hash)

    path = None
    if os.name == 'nt':
        appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
        if appdata:
            path = os.path.join(appdata, longname, hash)
    elif platform.system() == 'Darwin':
        home = os.getenv('HOME')
        if home:
            path = os.path.join(home, 'Library', 'Caches',
                                longname, hash)
    elif os.name == 'posix':
        xdg = os.getenv('XDG_CACHE_HOME')
        if xdg:
            path = os.path.join(xdg, longname, hash)
        else:
            home = os.getenv('HOME')
            if home:
                path = os.path.join(home, '.cache', longname, hash)
    else:
        raise error.Abort(_('unknown operating system: %s\n') % os.name)
    return path
79
78
def inusercache(ui, hash):
    '''Report whether the largefile with the given hash is present in
    the user cache (false also when no cache path can be determined).'''
    path = usercachepath(ui, hash)
    return path and os.path.exists(path)
83
82
def findfile(repo, hash):
    '''Locate the largefile with the given hash: first in the store,
    then in the user cache (hardlinking it into the store in that
    case).  Return the store path, or None when not found anywhere.'''
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_('found %s in store\n') % hash)
        return path
    if inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
95
94
class largefilesdirstate(dirstate.dirstate):
    '''A dirstate subclass used to track largefiles.

    Every incoming path is normalized with unixpath() before being
    handed to the base class, and the ignore machinery is disabled
    (largefiles are never ignored).'''
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def _ignore(self, f):
        # largefiles are never ignored
        return False
    def write(self, tr=False):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)
118
117
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.

    When ``create`` is true and no lfdirstate file exists yet, it is
    populated from the standins currently tracked by the repo dirstate.
    '''
    lfstoredir = repo.join(longname)
    opener = scmutil.opener(lfstoredir)
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not os.path.exists(os.path.join(lfstoredir, 'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(matcher, [], False, False)

        # only create the store directory if there is something to track
        if len(standins) > 0:
            util.makedirs(lfstoredir)

        for standin in standins:
            lfile = splitstandin(standin)
            lfdirstate.normallookup(lfile)
    return lfdirstate
143
142
def lfdirstatestatus(lfdirstate, repo):
    '''Compute status for the largefiles dirstate against the '.'
    context.  Files the dirstate reports as "unsure" are resolved by
    comparing the working-copy hash with the standin content; files
    found clean are also marked normal in lfdirstate.  Returns the
    (updated in place) status object.'''
    wctx = repo['.']
    match = match_.always(repo.root, repo.getcwd())
    unsure, s = lfdirstate.status(match, [], False, False, False)
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = wctx[standin(lfile)]
        except LookupError:
            # no standin in '.': treat as modified
            fctx = None
        if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return s
160
159
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''
    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    lfiles = []
    for standinfile in repo[rev].walk(matcher):
        if rev is not None or repo.dirstate[standinfile] != '?':
            lfiles.append(splitstandin(standinfile))
    return lfiles
172
171
def instore(repo, hash, forcelocal=False):
    '''Report whether the largefile with the given hash exists in the
    store; ``forcelocal`` checks a shared repo's local store instead of
    the share source.'''
    return os.path.exists(storepath(repo, hash, forcelocal))
175
174
def storepath(repo, hash, forcelocal=False):
    '''Return the store path for the largefile with the given hash.
    For shared repos the store lives in the share source, unless
    ``forcelocal`` is set.'''
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.join(longname, hash)
180
179
def findstorepath(repo, hash):
    '''Search through the local store path(s) to find the file for the given
    hash.  If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    '''
    # For shared repos, the primary store is in the share source. But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
        # bug fix: this branch used to return a bare path, breaking the
        # documented (path, exists) tuple contract relied on by callers
        # that unpack the result (e.g. findfile)
        return (storepath(repo, hash, True), True)

    return (path, False)
197
196
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happened:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    path = findfile(repo, hash)
    if path is None:
        return False
    util.makedirs(os.path.dirname(repo.wjoin(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    dest = repo.wjoin(filename)
    with open(path, 'rb') as srcfd:
        with open(dest, 'wb') as destfd:
            # feed copyandhash fixed-size chunks rather than the raw file
            # object (which iterates binary data line-by-line), verifying
            # the store content against the expected hash as we copy
            gothash = copyandhash(util.filechunkiter(srcfd), destfd)
    if gothash != hash:
        # store content is corrupt: warn and remove the bad copy
        repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
                     % (filename, path, gothash))
        util.unlink(dest)
        return False
    return True
212
219
def copytostore(repo, rev, file, uploaded=False):
    # Copy the largefile for ``file`` at ``rev`` from the working
    # directory into the store, unless it is already present.
    # NOTE(review): ``uploaded`` is accepted but unused here.
    hash = readstandin(repo, file, rev)
    if instore(repo, hash):
        return
    copytostoreabsolute(repo, repo.wjoin(file), hash)
218
225
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    for filename in ctx.files():
        # only standins that are actually present in this revision
        if isstandin(filename) and filename in ctx.manifest():
            copytostore(repo, ctx.node(), splitstandin(filename))
227
234
228
235
def copytostoreabsolute(repo, file, hash):
    '''Put the file at absolute path ``file`` into the store under
    ``hash``: hardlink from the user cache when the content is already
    there, otherwise copy atomically and mirror the result back into
    the user cache.'''
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        dst = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        # close the source file deterministically instead of leaking the
        # descriptor until garbage collection (or on error)
        with open(file, 'rb') as srcf:
            for chunk in util.filechunkiter(srcf):
                dst.write(chunk)
        dst.close()
        linktousercache(repo, hash)
240
247
def linktousercache(repo, hash):
    '''Hardlink (or copy) the store file for ``hash`` into the user
    cache, when a user cache path can be determined.'''
    path = usercachepath(repo.ui, hash)
    if path:
        link(storepath(repo, hash), path)
245
252
def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    standindir = repo.wjoin(shortname)

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        # translate rmatcher's file patterns into the standin directory
        pats = [os.path.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [standindir]
        match = scmutil.match(repo[None], pats, badfn=badfn)
        # if pats is empty, it would incorrectly always match, so clear _always
        match._always = False
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [standindir], badfn=badfn)
    return match
264
271
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn
    def composedmatchfn(f):
        # accept only standins whose corresponding real file rmatcher accepts
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composedmatchfn

    return smatcher
276
283
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add().  So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)
288
295
def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    # a standin always lives under '.hglf/'
    return filename.startswith(shortnameslash)
293
300
def splitstandin(filename):
    '''Return the largefile name for a standin path, or None when the
    path is not under the standin directory.'''
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    prefix, sep, rest = util.pconvert(filename).partition('/')
    if sep and prefix == shortname:
        return rest
    return None
303
310
def updatestandin(repo, standin):
    # Refresh the standin with the current hash and executable bit of
    # the corresponding largefile in the working directory (no-op when
    # the largefile is absent).
    file = repo.wjoin(splitstandin(standin))
    if os.path.exists(file):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
310
317
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    # standin content is the hex hash followed by a newline
    return repo[node][standin(filename)].data().strip()
315
322
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    # mirror the largefile's executable bit on the standin file
    repo.wwrite(standin, hash + '\n', executable and 'x' or '')
319
326
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash.'''
    hasher = util.sha1('')
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    # hex digest, the same form stored in standins
    return hasher.hexdigest()
328
335
def hashrepofile(repo, file):
    '''Return the SHA-1 hex hash of the given repo-relative file.'''
    return hashfile(repo.wjoin(file))
331
338
def hashfile(file):
    '''Return the SHA-1 hex hash of the file at the given path, or the
    empty string when the file does not exist.'''
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    # read in 128k chunks; the context manager closes the descriptor
    # even if a read raises (the original leaked it in that case)
    with open(file, 'rb') as fd:
        for data in util.filechunkiter(fd, 128 * 1024):
            hasher.update(data)
    return hasher.hexdigest()
341
348
def getexecutable(filename):
    '''Report whether the file is executable by user, group and other.'''
    mode = os.stat(filename).st_mode
    # all three execute bits must be set
    allexec = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
    return (mode & allexec) == allexec
347
354
def urljoin(first, second, *arg):
    '''Join URL pieces, ensuring exactly one '/' between adjacent pieces.'''
    def join(left, right):
        # normalize the boundary between the two pieces to a single '/'
        sep = '' if left.endswith('/') else '/'
        if right.startswith('/'):
            right = right[1:]
        return left + sep + right

    url = join(first, second)
    for piece in arg:
        url = join(url, piece)
    return url
360
367
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    h = util.sha1()
    # hash in chunks so arbitrarily large files use bounded memory
    for chunk in util.filechunkiter(data):
        h.update(chunk)
    return h.hexdigest()
368
375
def httpsendfile(ui, filename):
    '''Return a file-like object suitable for sending filename over HTTP.'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
371
378
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    # normpath collapses separators and dots; pconvert forces '/' separators
    return util.pconvert(os.path.normpath(path))
375
382
def islfilesrepo(repo):
    '''Report whether the repo actually contains largefiles.'''
    # fast path: requirement set and at least one standin in the store
    if ('largefiles' in repo.requirements and
        any(shortnameslash in f[0] for f in repo.store.datafiles())):
        return True

    # fall back: any file tracked by the largefiles dirstate counts
    return any(openlfdirstate(repo.ui, repo, False))
382
389
class storeprotonotcapable(Exception):
    '''Raised when no store supports any of the required protocols;
    ``storetypes`` lists the protocols that were attempted.'''
    def __init__(self, storetypes):
        self.storetypes = storetypes
386
393
def getstandinsstate(repo):
    '''Return a list of (lfile, hash) pairs for every standin tracked
    by the dirstate; hash is None when the standin cannot be read.'''
    standins = []
    matcher = getstandinmatcher(repo)
    for standin in repo.dirstate.walk(matcher, [], False, False):
        lfile = splitstandin(standin)
        try:
            hash = readstandin(repo, lfile)
        except IOError:
            # standin missing from the working directory
            hash = None
        standins.append((lfile, hash))
    return standins
398
405
def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    '''Propagate the dirstate state of lfile's standin into lfdirstate.

    ``normallookup`` forces 'n' entries to be re-checked on next status
    rather than trusted as clean.'''
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        stat = repo.dirstate._map[lfstandin]
        state, mtime = stat[0], stat[3]
    else:
        # standin untracked: treat the largefile as unknown
        state, mtime = '?', -1
    if state == 'n':
        if (normallookup or mtime < 0 or
            not os.path.exists(repo.wjoin(lfile))):
            # state 'n' doesn't ensure 'clean' in this case
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'm':
        lfdirstate.normallookup(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)
421
428
def markcommitted(orig, ctx, node):
    '''Wrapper around context.markcommitted: after the original runs,
    sync the lfdirstate for every standin touched by the commit and
    copy the committed largefiles into the store.'''
    repo = ctx.repo()

    orig(node)

    # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
    # because files coming from the 2nd parent are omitted in the latter.
    #
    # The former should be used to get targets of "synclfdirstate",
    # because such files:
    # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
    # - have to be marked as "n" after commit, but
    # - aren't listed in "repo[node].files()"

    lfdirstate = openlfdirstate(repo.ui, repo)
    for f in ctx.files():
        if isstandin(f):
            lfile = splitstandin(f)
            synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write()

    # As part of committing, copy all of the largefiles into the cache.
    copyalltostore(repo, node)
445
452
def getlfilestoupdate(oldstandins, newstandins):
    '''Return the names of standins that differ between the two
    (lfile, hash) lists, without duplicates.'''
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    # track seen names in a set: O(1) membership instead of the original
    # O(n) scan of the result list per entry
    seen = set()
    for f in changedstandins:
        if f[0] not in seen:
            seen.add(f[0])
            filelist.append(f[0])
    return filelist
453
460
def getlfilestoupload(repo, missing, addfunc):
    '''For each outgoing revision in ``missing``, call
    ``addfunc(standin, hash)`` for every largefile standin it touches.'''
    for i, n in enumerate(missing):
        repo.ui.progress(_('finding outgoing largefiles'), i,
            unit=_('revision'), total=len(missing))
        parents = [p for p in repo.changelog.parents(n) if p != node.nullid]

        # read the changectx with the largefiles machinery disabled so we
        # see raw standins rather than translated largefiles
        oldlfstatus = repo.lfstatus
        repo.lfstatus = False
        try:
            ctx = repo[n]
        finally:
            repo.lfstatus = oldlfstatus

        files = set(ctx.files())
        if len(parents) == 2:
            # for merges, ctx.files() may omit files that are unchanged
            # against one parent; recover them by comparing the manifests
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        for fn in files:
            if isstandin(fn) and fn in ctx:
                addfunc(fn, ctx[fn].data().strip())
    repo.ui.progress(_('finding outgoing largefiles'), None)
485
492
486 def updatestandinsbymatch(repo, match):
493 def updatestandinsbymatch(repo, match):
487 '''Update standins in the working directory according to specified match
494 '''Update standins in the working directory according to specified match
488
495
489 This returns (possibly modified) ``match`` object to be used for
496 This returns (possibly modified) ``match`` object to be used for
490 subsequent commit process.
497 subsequent commit process.
491 '''
498 '''
492
499
493 ui = repo.ui
500 ui = repo.ui
494
501
495 # Case 1: user calls commit with no specific files or
502 # Case 1: user calls commit with no specific files or
496 # include/exclude patterns: refresh and commit all files that
503 # include/exclude patterns: refresh and commit all files that
497 # are "dirty".
504 # are "dirty".
498 if match is None or match.always():
505 if match is None or match.always():
499 # Spend a bit of time here to get a list of files we know
506 # Spend a bit of time here to get a list of files we know
500 # are modified so we can compare only against those.
507 # are modified so we can compare only against those.
501 # It can cost a lot of time (several seconds)
508 # It can cost a lot of time (several seconds)
502 # otherwise to update all standins if the largefiles are
509 # otherwise to update all standins if the largefiles are
503 # large.
510 # large.
504 lfdirstate = openlfdirstate(ui, repo)
511 lfdirstate = openlfdirstate(ui, repo)
505 dirtymatch = match_.always(repo.root, repo.getcwd())
512 dirtymatch = match_.always(repo.root, repo.getcwd())
506 unsure, s = lfdirstate.status(dirtymatch, [], False, False,
513 unsure, s = lfdirstate.status(dirtymatch, [], False, False,
507 False)
514 False)
508 modifiedfiles = unsure + s.modified + s.added + s.removed
515 modifiedfiles = unsure + s.modified + s.added + s.removed
509 lfiles = listlfiles(repo)
516 lfiles = listlfiles(repo)
510 # this only loops through largefiles that exist (not
517 # this only loops through largefiles that exist (not
511 # removed/renamed)
518 # removed/renamed)
512 for lfile in lfiles:
519 for lfile in lfiles:
513 if lfile in modifiedfiles:
520 if lfile in modifiedfiles:
514 if os.path.exists(
521 if os.path.exists(
515 repo.wjoin(standin(lfile))):
522 repo.wjoin(standin(lfile))):
516 # this handles the case where a rebase is being
523 # this handles the case where a rebase is being
517 # performed and the working copy is not updated
524 # performed and the working copy is not updated
518 # yet.
525 # yet.
519 if os.path.exists(repo.wjoin(lfile)):
526 if os.path.exists(repo.wjoin(lfile)):
520 updatestandin(repo,
527 updatestandin(repo,
521 standin(lfile))
528 standin(lfile))
522
529
523 return match
530 return match
524
531
525 lfiles = listlfiles(repo)
532 lfiles = listlfiles(repo)
526 match._files = repo._subdirlfs(match.files(), lfiles)
533 match._files = repo._subdirlfs(match.files(), lfiles)
527
534
528 # Case 2: user calls commit with specified patterns: refresh
535 # Case 2: user calls commit with specified patterns: refresh
529 # any matching big files.
536 # any matching big files.
530 smatcher = composestandinmatcher(repo, match)
537 smatcher = composestandinmatcher(repo, match)
531 standins = repo.dirstate.walk(smatcher, [], False, False)
538 standins = repo.dirstate.walk(smatcher, [], False, False)
532
539
533 # No matching big files: get out of the way and pass control to
540 # No matching big files: get out of the way and pass control to
534 # the usual commit() method.
541 # the usual commit() method.
535 if not standins:
542 if not standins:
536 return match
543 return match
537
544
538 # Refresh all matching big files. It's possible that the
545 # Refresh all matching big files. It's possible that the
539 # commit will end up failing, in which case the big files will
546 # commit will end up failing, in which case the big files will
540 # stay refreshed. No harm done: the user modified them and
547 # stay refreshed. No harm done: the user modified them and
541 # asked to commit them, so sooner or later we're going to
548 # asked to commit them, so sooner or later we're going to
542 # refresh the standins. Might as well leave them refreshed.
549 # refresh the standins. Might as well leave them refreshed.
543 lfdirstate = openlfdirstate(ui, repo)
550 lfdirstate = openlfdirstate(ui, repo)
544 for fstandin in standins:
551 for fstandin in standins:
545 lfile = splitstandin(fstandin)
552 lfile = splitstandin(fstandin)
546 if lfdirstate[lfile] != 'r':
553 if lfdirstate[lfile] != 'r':
547 updatestandin(repo, fstandin)
554 updatestandin(repo, fstandin)
548
555
549 # Cook up a new matcher that only matches regular files or
556 # Cook up a new matcher that only matches regular files or
550 # standins corresponding to the big files requested by the
557 # standins corresponding to the big files requested by the
551 # user. Have to modify _files to prevent commit() from
558 # user. Have to modify _files to prevent commit() from
552 # complaining "not tracked" for big files.
559 # complaining "not tracked" for big files.
553 match = copy.copy(match)
560 match = copy.copy(match)
554 origmatchfn = match.matchfn
561 origmatchfn = match.matchfn
555
562
556 # Check both the list of largefiles and the list of
563 # Check both the list of largefiles and the list of
557 # standins because if a largefile was removed, it
564 # standins because if a largefile was removed, it
558 # won't be in the list of largefiles at this point
565 # won't be in the list of largefiles at this point
559 match._files += sorted(standins)
566 match._files += sorted(standins)
560
567
561 actualfiles = []
568 actualfiles = []
562 for f in match._files:
569 for f in match._files:
563 fstandin = standin(f)
570 fstandin = standin(f)
564
571
565 # For largefiles, only one of the normal and standin should be
572 # For largefiles, only one of the normal and standin should be
566 # committed (except if one of them is a remove).
573 # committed (except if one of them is a remove).
567 # Thus, skip plain largefile names but keep the standin.
574 # Thus, skip plain largefile names but keep the standin.
568 if (f in lfiles or fstandin in standins) and \
575 if (f in lfiles or fstandin in standins) and \
569 repo.dirstate[f] != 'r' and repo.dirstate[fstandin] != 'r':
576 repo.dirstate[f] != 'r' and repo.dirstate[fstandin] != 'r':
570 continue
577 continue
571
578
572 actualfiles.append(f)
579 actualfiles.append(f)
573 match._files = actualfiles
580 match._files = actualfiles
574
581
575 def matchfn(f):
582 def matchfn(f):
576 if origmatchfn(f):
583 if origmatchfn(f):
577 return f not in lfiles
584 return f not in lfiles
578 else:
585 else:
579 return f in standins
586 return f in standins
580
587
581 match.matchfn = matchfn
588 match.matchfn = matchfn
582
589
583 return match
590 return match
584
591
585 class automatedcommithook(object):
592 class automatedcommithook(object):
586 '''Stateful hook to update standins at the 1st commit of resuming
593 '''Stateful hook to update standins at the 1st commit of resuming
587
594
588 For efficiency, updating standins in the working directory should
595 For efficiency, updating standins in the working directory should
589 be avoided while automated committing (like rebase, transplant and
596 be avoided while automated committing (like rebase, transplant and
590 so on), because they should be updated before committing.
597 so on), because they should be updated before committing.
591
598
592 But the 1st commit of resuming automated committing (e.g. ``rebase
599 But the 1st commit of resuming automated committing (e.g. ``rebase
593 --continue``) should update them, because largefiles may be
600 --continue``) should update them, because largefiles may be
594 modified manually.
601 modified manually.
595 '''
602 '''
596 def __init__(self, resuming):
603 def __init__(self, resuming):
597 self.resuming = resuming
604 self.resuming = resuming
598
605
599 def __call__(self, repo, match):
606 def __call__(self, repo, match):
600 if self.resuming:
607 if self.resuming:
601 self.resuming = False # avoids updating at subsequent commits
608 self.resuming = False # avoids updating at subsequent commits
602 return updatestandinsbymatch(repo, match)
609 return updatestandinsbymatch(repo, match)
603 else:
610 else:
604 return match
611 return match
605
612
606 def getstatuswriter(ui, repo, forcibly=None):
613 def getstatuswriter(ui, repo, forcibly=None):
607 '''Return the function to write largefiles specific status out
614 '''Return the function to write largefiles specific status out
608
615
609 If ``forcibly`` is ``None``, this returns the last element of
616 If ``forcibly`` is ``None``, this returns the last element of
610 ``repo._lfstatuswriters`` as "default" writer function.
617 ``repo._lfstatuswriters`` as "default" writer function.
611
618
612 Otherwise, this returns the function to always write out (or
619 Otherwise, this returns the function to always write out (or
613 ignore if ``not forcibly``) status.
620 ignore if ``not forcibly``) status.
614 '''
621 '''
615 if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
622 if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
616 return repo._lfstatuswriters[-1]
623 return repo._lfstatuswriters[-1]
617 else:
624 else:
618 if forcibly:
625 if forcibly:
619 return ui.status # forcibly WRITE OUT
626 return ui.status # forcibly WRITE OUT
620 else:
627 else:
621 return lambda *msg, **opts: None # forcibly IGNORE
628 return lambda *msg, **opts: None # forcibly IGNORE
@@ -1,203 +1,202 b''
1 Create user cache directory
1 Create user cache directory
2
2
3 $ USERCACHE=`pwd`/cache; export USERCACHE
3 $ USERCACHE=`pwd`/cache; export USERCACHE
4 $ cat <<EOF >> ${HGRCPATH}
4 $ cat <<EOF >> ${HGRCPATH}
5 > [extensions]
5 > [extensions]
6 > hgext.largefiles=
6 > hgext.largefiles=
7 > [largefiles]
7 > [largefiles]
8 > usercache=${USERCACHE}
8 > usercache=${USERCACHE}
9 > EOF
9 > EOF
10 $ mkdir -p ${USERCACHE}
10 $ mkdir -p ${USERCACHE}
11
11
12 Create source repo, and commit adding largefile.
12 Create source repo, and commit adding largefile.
13
13
14 $ hg init src
14 $ hg init src
15 $ cd src
15 $ cd src
16 $ echo large > large
16 $ echo large > large
17 $ hg add --large large
17 $ hg add --large large
18 $ hg commit -m 'add largefile'
18 $ hg commit -m 'add largefile'
19 $ hg rm large
19 $ hg rm large
20 $ hg commit -m 'branchhead without largefile'
20 $ hg commit -m 'branchhead without largefile'
21 $ hg up -qr 0
21 $ hg up -qr 0
22 $ cd ..
22 $ cd ..
23
23
24 Discard all cached largefiles in USERCACHE
24 Discard all cached largefiles in USERCACHE
25
25
26 $ rm -rf ${USERCACHE}
26 $ rm -rf ${USERCACHE}
27
27
28 Create mirror repo, and pull from source without largefile:
28 Create mirror repo, and pull from source without largefile:
29 "pull" is used instead of "clone" for suppression of (1) updating to
29 "pull" is used instead of "clone" for suppression of (1) updating to
30 tip (= caching largefile from source repo), and (2) recording source
30 tip (= caching largefile from source repo), and (2) recording source
31 repo as "default" path in .hg/hgrc.
31 repo as "default" path in .hg/hgrc.
32
32
33 $ hg init mirror
33 $ hg init mirror
34 $ cd mirror
34 $ cd mirror
35 $ hg pull ../src
35 $ hg pull ../src
36 pulling from ../src
36 pulling from ../src
37 requesting all changes
37 requesting all changes
38 adding changesets
38 adding changesets
39 adding manifests
39 adding manifests
40 adding file changes
40 adding file changes
41 added 2 changesets with 1 changes to 1 files
41 added 2 changesets with 1 changes to 1 files
42 (run 'hg update' to get a working copy)
42 (run 'hg update' to get a working copy)
43
43
44 Update working directory to "tip", which requires largefile("large"),
44 Update working directory to "tip", which requires largefile("large"),
45 but there is no cache file for it. So, hg must treat it as
45 but there is no cache file for it. So, hg must treat it as
46 "missing"(!) file.
46 "missing"(!) file.
47
47
48 $ hg update -r0
48 $ hg update -r0
49 getting changed largefiles
49 getting changed largefiles
50 large: largefile 7f7097b041ccf68cc5561e9600da4655d21c6d18 not available from file:/*/$TESTTMP/mirror (glob)
50 large: largefile 7f7097b041ccf68cc5561e9600da4655d21c6d18 not available from file:/*/$TESTTMP/mirror (glob)
51 0 largefiles updated, 0 removed
51 0 largefiles updated, 0 removed
52 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
52 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
53 $ hg status
53 $ hg status
54 ! large
54 ! large
55
55
56 Update working directory to null: this cleanup .hg/largefiles/dirstate
56 Update working directory to null: this cleanup .hg/largefiles/dirstate
57
57
58 $ hg update null
58 $ hg update null
59 getting changed largefiles
59 getting changed largefiles
60 0 largefiles updated, 0 removed
60 0 largefiles updated, 0 removed
61 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
61 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
62
62
63 Update working directory to tip, again.
63 Update working directory to tip, again.
64
64
65 $ hg update -r0
65 $ hg update -r0
66 getting changed largefiles
66 getting changed largefiles
67 large: largefile 7f7097b041ccf68cc5561e9600da4655d21c6d18 not available from file:/*/$TESTTMP/mirror (glob)
67 large: largefile 7f7097b041ccf68cc5561e9600da4655d21c6d18 not available from file:/*/$TESTTMP/mirror (glob)
68 0 largefiles updated, 0 removed
68 0 largefiles updated, 0 removed
69 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
69 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
70 $ hg status
70 $ hg status
71 ! large
71 ! large
72 $ cd ..
72 $ cd ..
73
73
74 Verify that largefiles from pulled branchheads are fetched, also to an empty repo
74 Verify that largefiles from pulled branchheads are fetched, also to an empty repo
75
75
76 $ hg init mirror2
76 $ hg init mirror2
77 $ hg -R mirror2 pull src -r0
77 $ hg -R mirror2 pull src -r0
78 pulling from src
78 pulling from src
79 adding changesets
79 adding changesets
80 adding manifests
80 adding manifests
81 adding file changes
81 adding file changes
82 added 1 changesets with 1 changes to 1 files
82 added 1 changesets with 1 changes to 1 files
83 (run 'hg update' to get a working copy)
83 (run 'hg update' to get a working copy)
84
84
85 #if unix-permissions
85 #if unix-permissions
86
86
87 Portable way to print file permissions:
87 Portable way to print file permissions:
88
88
89 $ cat > ls-l.py <<EOF
89 $ cat > ls-l.py <<EOF
90 > #!/usr/bin/env python
90 > #!/usr/bin/env python
91 > import sys, os
91 > import sys, os
92 > path = sys.argv[1]
92 > path = sys.argv[1]
93 > print '%03o' % (os.lstat(path).st_mode & 0777)
93 > print '%03o' % (os.lstat(path).st_mode & 0777)
94 > EOF
94 > EOF
95 $ chmod +x ls-l.py
95 $ chmod +x ls-l.py
96
96
97 Test that files in .hg/largefiles inherit mode from .hg/store, not
97 Test that files in .hg/largefiles inherit mode from .hg/store, not
98 from file in working copy:
98 from file in working copy:
99
99
100 $ cd src
100 $ cd src
101 $ chmod 750 .hg/store
101 $ chmod 750 .hg/store
102 $ chmod 660 large
102 $ chmod 660 large
103 $ echo change >> large
103 $ echo change >> large
104 $ hg commit -m change
104 $ hg commit -m change
105 created new head
105 created new head
106 $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
106 $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
107 640
107 640
108
108
109 Test permission of with files in .hg/largefiles created by update:
109 Test permission of with files in .hg/largefiles created by update:
110
110
111 $ cd ../mirror
111 $ cd ../mirror
112 $ rm -r "$USERCACHE" .hg/largefiles # avoid links
112 $ rm -r "$USERCACHE" .hg/largefiles # avoid links
113 $ chmod 750 .hg/store
113 $ chmod 750 .hg/store
114 $ hg pull ../src --update -q
114 $ hg pull ../src --update -q
115 $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
115 $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
116 640
116 640
117
117
118 Test permission of files created by push:
118 Test permission of files created by push:
119
119
120 $ hg serve -R ../src -d -p $HGPORT --pid-file hg.pid \
120 $ hg serve -R ../src -d -p $HGPORT --pid-file hg.pid \
121 > --config "web.allow_push=*" --config web.push_ssl=no
121 > --config "web.allow_push=*" --config web.push_ssl=no
122 $ cat hg.pid >> $DAEMON_PIDS
122 $ cat hg.pid >> $DAEMON_PIDS
123
123
124 $ echo change >> large
124 $ echo change >> large
125 $ hg commit -m change
125 $ hg commit -m change
126
126
127 $ rm -r "$USERCACHE"
127 $ rm -r "$USERCACHE"
128
128
129 $ hg push -q http://localhost:$HGPORT/
129 $ hg push -q http://localhost:$HGPORT/
130
130
131 $ ../ls-l.py ../src/.hg/largefiles/b734e14a0971e370408ab9bce8d56d8485e368a9
131 $ ../ls-l.py ../src/.hg/largefiles/b734e14a0971e370408ab9bce8d56d8485e368a9
132 640
132 640
133
133
134 $ cd ..
134 $ cd ..
135
135
136 #endif
136 #endif
137
137
138 Test issue 4053 (remove --after on a deleted, uncommitted file shouldn't say
138 Test issue 4053 (remove --after on a deleted, uncommitted file shouldn't say
139 it is missing, but a remove on a nonexistent unknown file still should. Same
139 it is missing, but a remove on a nonexistent unknown file still should. Same
140 for a forget.)
140 for a forget.)
141
141
142 $ cd src
142 $ cd src
143 $ touch x
143 $ touch x
144 $ hg add x
144 $ hg add x
145 $ mv x y
145 $ mv x y
146 $ hg remove -A x y ENOENT
146 $ hg remove -A x y ENOENT
147 ENOENT: * (glob)
147 ENOENT: * (glob)
148 not removing y: file is untracked
148 not removing y: file is untracked
149 [1]
149 [1]
150 $ hg add y
150 $ hg add y
151 $ mv y z
151 $ mv y z
152 $ hg forget y z ENOENT
152 $ hg forget y z ENOENT
153 ENOENT: * (glob)
153 ENOENT: * (glob)
154 not removing z: file is already untracked
154 not removing z: file is already untracked
155 [1]
155 [1]
156
156
157 Largefiles are accessible from the share's store
157 Largefiles are accessible from the share's store
158 $ cd ..
158 $ cd ..
159 $ hg share -q src share_dst --config extensions.share=
159 $ hg share -q src share_dst --config extensions.share=
160 $ hg -R share_dst update -r0
160 $ hg -R share_dst update -r0
161 getting changed largefiles
161 getting changed largefiles
162 1 largefiles updated, 0 removed
162 1 largefiles updated, 0 removed
163 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
163 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
164
164
165 $ echo modified > share_dst/large
165 $ echo modified > share_dst/large
166 $ hg -R share_dst ci -m modified
166 $ hg -R share_dst ci -m modified
167 created new head
167 created new head
168
168
169 Only dirstate is in the local store for the share, and the largefile is in the
169 Only dirstate is in the local store for the share, and the largefile is in the
170 share source's local store. Avoid the extra largefiles added in the unix
170 share source's local store. Avoid the extra largefiles added in the unix
171 conditional above.
171 conditional above.
172 $ hash=`hg -R share_dst cat share_dst/.hglf/large`
172 $ hash=`hg -R share_dst cat share_dst/.hglf/large`
173 $ echo $hash
173 $ echo $hash
174 e2fb5f2139d086ded2cb600d5a91a196e76bf020
174 e2fb5f2139d086ded2cb600d5a91a196e76bf020
175
175
176 $ find share_dst/.hg/largefiles/* | sort
176 $ find share_dst/.hg/largefiles/* | sort
177 share_dst/.hg/largefiles/dirstate
177 share_dst/.hg/largefiles/dirstate
178
178
179 $ find src/.hg/largefiles/* | egrep "(dirstate|$hash)" | sort
179 $ find src/.hg/largefiles/* | egrep "(dirstate|$hash)" | sort
180 src/.hg/largefiles/dirstate
180 src/.hg/largefiles/dirstate
181 src/.hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
181 src/.hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
182
182
183 Inject corruption into the largefiles store and see how update handles that:
183 Inject corruption into the largefiles store and see how update handles that:
184
184
185 $ cd src
185 $ cd src
186 $ hg up -qC
186 $ hg up -qC
187 $ cat large
187 $ cat large
188 modified
188 modified
189 $ rm large
189 $ rm large
190 $ cat .hglf/large
190 $ cat .hglf/large
191 e2fb5f2139d086ded2cb600d5a91a196e76bf020
191 e2fb5f2139d086ded2cb600d5a91a196e76bf020
192 $ mv .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020 ..
192 $ mv .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020 ..
193 $ echo corruption > .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
193 $ echo corruption > .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
194 (the following update will put the corrupted file into the working directory
195 where it will show up as a change)
196 $ hg up -C
194 $ hg up -C
197 getting changed largefiles
195 getting changed largefiles
198 1 largefiles updated, 0 removed
196 large: data corruption in $TESTTMP/src/.hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020 with hash 6a7bb2556144babe3899b25e5428123735bb1e27
197 0 largefiles updated, 0 removed
199 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
198 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
200 $ hg st
199 $ hg st
201 M large
200 ! large
202 ? z
201 ? z
203 $ rm .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
202 $ rm .hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020
General Comments 0
You need to be logged in to leave comments. Login now