##// END OF EJS Templates
largefiles: lowercase messages
Martin Geisler -
r16928:73b9286e default
parent child Browse files
Show More
@@ -1,467 +1,467
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''largefiles utility code: must not import other modules in this package.'''
10 10
11 11 import os
12 12 import errno
13 13 import platform
14 14 import shutil
15 15 import stat
16 16
17 17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 18 from mercurial.i18n import _
19 19
20 20 shortname = '.hglf'
21 21 longname = 'largefiles'
22 22
23 23
24 24 # -- Portability wrappers ----------------------------------------------
25 25
def dirstatewalk(dirstate, matcher, unknown=False, ignored=False):
    '''Portability wrapper: walk the dirstate with matcher, passing no
    subrepos; unknown/ignored files are skipped unless requested.'''
    results = dirstate.walk(matcher, [], unknown, ignored)
    return results
28 28
def repoadd(repo, list):
    '''Portability wrapper: add the given files in the working context.'''
    return repo[None].add(list)
32 32
33 33 def reporemove(repo, list, unlink=False):
34 34 def remove(list, unlink):
35 35 wlock = repo.wlock()
36 36 try:
37 37 if unlink:
38 38 for f in list:
39 39 try:
40 40 util.unlinkpath(repo.wjoin(f))
41 41 except OSError, inst:
42 42 if inst.errno != errno.ENOENT:
43 43 raise
44 44 repo[None].forget(list)
45 45 finally:
46 46 wlock.release()
47 47 return remove(list, unlink=unlink)
48 48
def repoforget(repo, list):
    '''Portability wrapper: forget the given files in the working context.'''
    return repo[None].forget(list)
52 52
def findoutgoing(repo, remote, force):
    '''Return the changesets missing from remote (i.e. outgoing ones),
    computed from the common incoming set.'''
    # imported here to avoid a cycle at module load time
    from mercurial import discovery
    result = discovery.findcommonincoming(repo, remote, force=force)
    common = result[0]
    return repo.changelog.findmissing(common)
58 58
59 59 # -- Private worker functions ------------------------------------------
60 60
def getminsize(ui, assumelfiles, opt, default=10):
    '''Return the minimum largefile size as a float, taken from opt or,
    when files are assumed large, from the largefiles.minsize config.
    Aborts when the value is not numeric or is missing entirely.'''
    size = opt
    if not size and assumelfiles:
        size = ui.config(longname, 'minsize', default=default)
    if size:
        try:
            size = float(size)
        except ValueError:
            raise util.Abort(_('largefiles: size must be number (not %s)\n')
                             % size)
    if size is None:
        raise util.Abort(_('minimum size for largefiles must be specified'))
    return size
74 74
def link(src, dest):
    '''Hardlink src to dest; when hardlinking fails, fall back to an
    atomic copy that also preserves the source's mode bits.'''
    try:
        util.oslink(src, dest)
    except OSError:
        # hardlinks failed (e.g. cross-device): copy atomically instead
        out = util.atomictempfile(dest)
        for block in util.filechunkiter(open(src, 'rb')):
            out.write(block)
        out.close()
        os.chmod(dest, os.stat(src).st_mode)
85 85
def usercachepath(ui, hash):
    '''Return the per-user cache path for the largefile with the given
    hash: the configured largefiles.usercache location, or the
    platform's conventional cache directory.  Returns None when no
    suitable location can be determined.'''
    configured = ui.configpath(longname, 'usercache', None)
    if configured:
        return os.path.join(configured, hash)
    if os.name == 'nt':
        appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
        if appdata:
            return os.path.join(appdata, longname, hash)
    elif platform.system() == 'Darwin':
        home = os.getenv('HOME')
        if home:
            return os.path.join(home, 'Library', 'Caches', longname, hash)
    elif os.name == 'posix':
        xdgcache = os.getenv('XDG_CACHE_HOME')
        if xdgcache:
            return os.path.join(xdgcache, longname, hash)
        home = os.getenv('HOME')
        if home:
            return os.path.join(home, '.cache', longname, hash)
    else:
        raise util.Abort(_('unknown operating system: %s\n') % os.name)
    # known OS, but no usable environment variables were set
    return None
111 111
def inusercache(ui, hash):
    '''True when the largefile with the given hash exists in the user
    cache (falsy when no cache path is available).'''
    cached = usercachepath(ui, hash)
    return cached and os.path.exists(cached)
115 115
def findfile(repo, hash):
    '''Locate the largefile with the given hash.  Return its path in the
    local store, hardlinking it in from the user cache when necessary,
    or None when it is in neither cache.'''
    if instore(repo, hash):
        repo.ui.note(_('found %s in store\n') % hash)
        return storepath(repo, hash)
    if inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        util.makedirs(os.path.dirname(path))
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
127 127
class largefilesdirstate(dirstate.dirstate):
    '''dirstate subclass used for .hg/largefiles/dirstate: every path is
    normalized to the slash-separated form via unixpath() before being
    handed to the regular dirstate machinery.'''
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
143 143
def openlfdirstate(ui, repo):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    admin = repo.join(longname)
    opener = scmutil.opener(admin)
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if not os.path.exists(os.path.join(admin, 'dirstate')):
        util.makedirs(admin)
        matcher = getstandinmatcher(repo)
        for standin in dirstatewalk(repo.dirstate, matcher):
            lfile = splitstandin(standin)
            hash = readstandin(repo, lfile)
            lfdirstate.normallookup(lfile)
            try:
                # mark clean only when the on-disk largefile still
                # matches the hash recorded in its standin
                if hash == hashfile(repo.wjoin(lfile)):
                    lfdirstate.normal(lfile)
            except OSError, err:
                # a missing largefile simply stays in "lookup" state
                if err.errno != errno.ENOENT:
                    raise
    return lfdirstate
171 171
def lfdirstatestatus(lfdirstate, repo, rev):
    '''Run status on the largefiles dirstate and resolve its "unsure"
    entries against revision rev; return the usual seven status lists.'''
    match = match_.always(repo.root, repo.getcwd())
    unsure, modified, added, removed, missing, unknown, ignored, clean = \
        lfdirstate.status(match, [], False, False, False)
    for lfile in unsure:
        expected = repo[rev][standin(lfile)].data().strip()
        if expected != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return (modified, added, removed, missing, unknown, ignored, clean)
184 184
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''
    if matcher is None:
        matcher = getstandinmatcher(repo)

    lfiles = []
    for f in repo[rev].walk(matcher):
        # in the working directory (rev is None), skip unknown files
        if rev is not None or repo.dirstate[f] != '?':
            lfiles.append(splitstandin(f))
    return lfiles
196 196
def instore(repo, hash):
    '''True when the largefile with this hash exists in the local store.'''
    path = storepath(repo, hash)
    return os.path.exists(path)
199 199
def storepath(repo, hash):
    '''Return the path of hash's file inside the repo-local store.'''
    relative = os.path.join(longname, hash)
    return repo.join(relative)
202 202
def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return True on success or False if the
    file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    cached = findfile(repo, hash)
    if cached is None:
        return False
    target = repo.wjoin(filename)
    util.makedirs(os.path.dirname(target))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    shutil.copy(cached, target)
    return True
217 217
def copytostore(repo, rev, file, uploaded=False):
    '''Copy the largefile standing in for file into the local store,
    unless the store already has it.'''
    hash = readstandin(repo, file)
    if not instore(repo, hash):
        copytostoreabsolute(repo, repo.wjoin(file), hash)
223 223
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    for filename in ctx.files():
        if not isstandin(filename):
            continue
        if filename not in ctx.manifest():
            continue
        realfile = splitstandin(filename)
        copytostore(repo, ctx.node(), realfile)
232 232
233 233
def copytostoreabsolute(repo, file, hash):
    '''Copy the file at the given absolute path into the local store
    under hash — hardlinking from the user cache when possible — and
    then link the store copy back into the user cache.'''
    util.makedirs(os.path.dirname(storepath(repo, hash)))
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        # no cached copy: write the store file atomically, chunk by chunk
        out = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        for block in util.filechunkiter(open(file, 'rb')):
            out.write(block)
        out.close()
    linktousercache(repo, hash)
245 245
def linktousercache(repo, hash):
    '''Hardlink the store copy of hash into the user cache; no-op when
    no user cache location is available.'''
    target = usercachepath(repo.ui, hash)
    if not target:
        return
    util.makedirs(os.path.dirname(target))
    link(storepath(repo, hash), target)
251 251
def getstandinmatcher(repo, pats=None, opts=None):
    '''Return a match object that applies pats to the standin directory.

    pats/opts default to None instead of the mutable []/{} defaults the
    function used to declare, which could be shared across calls.'''
    if opts is None:
        opts = {}
    standindir = repo.pathto(shortname)
    if pats:
        # patterns supplied: search standin directory relative to current dir
        cwd = repo.getcwd()
        if os.path.isabs(cwd):
            # cwd is an absolute path for hg -R <reponame>
            # work relative to the repository root in this case
            cwd = ''
        pats = [os.path.join(standindir, cwd, pat) for pat in pats]
    elif os.path.isdir(standindir):
        # no patterns: relative to repo root
        pats = [standindir]
    else:
        # no patterns and no standin dir: return matcher that matches nothing
        match = match_.match(repo.root, None, [], exact=True)
        match.matchfn = lambda f: False
        return match
    return getmatcher(repo, pats, opts, showbad=False)
272 272
def getmatcher(repo, pats=None, opts=None, showbad=True):
    '''Wrapper around scmutil.match() that adds showbad: if false,
    neuter the match object's bad() method so it does not print any
    warnings about missing files or directories.

    pats/opts default to None rather than mutable []/{} defaults,
    which could be shared between calls; they are normalized here.'''
    match = scmutil.match(repo[None], pats or [], opts or {})

    if not showbad:
        match.bad = lambda f, msg: None
    return match
282 282
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher.files())
    # bind the standin matchfn under a name that does not shadow the
    # module-level isstandin() helper
    standinmatchfn = smatcher.matchfn
    def composedmatchfn(f):
        return standinmatchfn(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composedmatchfn

    return smatcher
294 294
def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Most callers want an absolute path, but _createstandin() needs
    #    it repo-relative so lfadd() can pass it to repoadd().  So leave
    #    it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows.  Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return '%s/%s' % (shortname, util.pconvert(filename))
306 306
def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    prefix = shortname + '/'
    return filename.startswith(prefix)
311 311
def splitstandin(filename):
    '''Return the big-file name for a standin path, or None when the
    path is not a standin.  Splits on '/' because that is what dirstate
    uses on every platform; local separators are normalized first in
    case the name came from an external source (e.g. the command line).'''
    parts = util.pconvert(filename).split('/', 1)
    if len(parts) == 2 and parts[0] == shortname:
        return parts[1]
    return None
321 321
def updatestandin(repo, standin):
    '''Rewrite standin from the current contents of its big file in the
    working copy; no-op when the big file does not exist on disk.'''
    file = repo.wjoin(splitstandin(standin))
    if not os.path.exists(file):
        return
    hash = hashfile(file)
    executable = getexecutable(file)
    writestandin(repo, standin, hash, executable)
328 328
def readstandin(repo, filename, node=None):
    '''read hex hash from standin for filename at given node, or working
    directory if no node is given'''
    fctx = repo[node][standin(filename)]
    return fctx.data().strip()
333 333
def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    abspath = repo.wjoin(standin)
    writehash(hash, abspath, executable)
337 337
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Close outfile
    when done and return the binary hash.'''
    hasher = util.sha1('')
    for block in instream:
        hasher.update(block)
        outfile.write(block)

    # Closing a file somebody else opened is rude and wrong — but
    # outfile was opened only to be copied into and hashed, so it is
    # too convenient not to.
    outfile.close()
    return hasher.digest()
353 353
def hashrepofile(repo, file):
    '''Return the SHA-1 hex digest of the given working-copy file.'''
    abspath = repo.wjoin(file)
    return hashfile(abspath)
356 356
def hashfile(file):
    '''Return the SHA-1 hex digest of file's contents, or the empty
    string when the file does not exist.'''
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    for block in blockstream(fd):
        hasher.update(block)
    fd.close()
    return hasher.hexdigest()
366 366
class limitreader(object):
    '''File-like wrapper exposing at most `limit` bytes of f: read()
    returns the empty string once the limit is exhausted, and close()
    is a no-op because the caller owns the underlying file.'''
    def __init__(self, f, limit):
        self.f = f
        self.limit = limit

    def read(self, length):
        if self.limit == 0:
            return ''
        # never hand out more than what remains of the quota
        if length > self.limit:
            length = self.limit
        self.limit -= length
        return self.f.read(length)

    def close(self):
        pass
381 381
def blockstream(infile, blocksize=128 * 1024):
    """Generator that yields blocks of data from infile and closes infile."""
    while True:
        block = infile.read(blocksize)
        if not block:
            # same courtesy-close blecch as copyandhash() above
            infile.close()
            return
        yield block
391 391
def writehash(hash, filename, executable):
    '''Write hash plus a trailing newline to filename, creating parent
    directories as needed and setting the mode from executable.'''
    util.makedirs(os.path.dirname(filename))
    util.writefile(filename, hash + '\n')
    mode = getmode(executable)
    os.chmod(filename, mode)
396 396
def getexecutable(filename):
    '''Return a truthy value when filename is executable by owner,
    group and other alike (all three x bits set), falsy otherwise.'''
    st_mode = os.stat(filename).st_mode
    return ((st_mode & stat.S_IXUSR) and
            (st_mode & stat.S_IXGRP) and
            (st_mode & stat.S_IXOTH))
402 402
def getmode(executable):
    '''Return the file mode for a standin: 0o755 when the largefile is
    executable, 0o644 otherwise.'''
    # 0o notation (PEP 3127) replaces the legacy 0755/0644 literals,
    # which are a syntax error on Python 3; the values are unchanged.
    if executable:
        return 0o755
    else:
        return 0o644
408 408
def urljoin(first, second, *arg):
    '''Join two or more URL components, collapsing duplicate slashes at
    each joint and inserting missing ones.'''
    def _join(left, right):
        sep = '' if left.endswith('/') else '/'
        if right.startswith('/'):
            right = right[1:]
        return left + sep + right

    url = _join(first, second)
    for piece in arg:
        url = _join(url, piece)
    return url
421 421
def hexsha1(data):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    hasher = util.sha1()
    for chunk in util.filechunkiter(data):
        hasher.update(chunk)
    return hasher.hexdigest()
429 429
def httpsendfile(ui, filename):
    '''Return an httpsendfile wrapper for filename opened in binary mode.'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
432 432
def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    normalized = os.path.normpath(path)
    return util.pconvert(normalized)
436 436
def islfilesrepo(repo):
    '''True when repo declares the largefiles requirement and tracks at
    least one standin in its store.'''
    if 'largefiles' not in repo.requirements:
        return False
    return util.any(shortname + '/' in f[0] for f in repo.store.datafiles())
440 440
class storeprotonotcapable(Exception):
    '''Raised when the remote store supports none of the requested
    store types.'''
    # storetypes: the store types that were asked for but unsupported
    def __init__(self, storetypes):
        self.storetypes = storetypes
444 444
def getcurrentheads(repo):
    '''Return the list of all branch heads in repo, across all branches.'''
    heads = []
    for branch in repo.branchmap():
        heads.extend(repo.branchheads(branch))
    return heads
452 452
def getstandinsstate(repo):
    '''Return a list of (lfile, standin hash) pairs for every standin
    tracked in the dirstate.'''
    snapshot = []
    matcher = getstandinmatcher(repo)
    for standinfile in dirstatewalk(repo.dirstate, matcher):
        lfile = splitstandin(standinfile)
        snapshot.append((lfile, readstandin(repo, lfile)))
    return snapshot
460 460
def getlfilestoupdate(oldstandins, newstandins):
    '''Return the distinct largefile names whose (lfile, hash) entries
    differ between the two standin snapshots.'''
    changed = set(oldstandins) ^ set(newstandins)
    filelist = []
    for entry in changed:
        if entry[0] not in filelist:
            filelist.append(entry[0])
    return filelist
@@ -1,82 +1,82
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''store class for local filesystem'''
10 10
11 11 import os
12 12
13 13 from mercurial import util
14 14 from mercurial.i18n import _
15 15
16 16 import lfutil
17 17 import basestore
18 18
class localstore(basestore.basestore):
    '''localstore first attempts to grab files out of the store in the remote
    Mercurial repository. Failing that, it attempts to grab the files from
    the user cache.'''

    def __init__(self, ui, repo, remote):
        # the remote store lives under <remote>/.hg/largefiles
        url = os.path.join(remote.path, '.hg', lfutil.longname)
        super(localstore, self).__init__(ui, repo, util.expandpath(url))
        self.remote = remote

    def put(self, source, hash):
        # Hardlink the local store's copy into the remote store; no-op
        # when the remote already has a file for this hash.
        util.makedirs(os.path.dirname(lfutil.storepath(self.remote, hash)))
        if lfutil.instore(self.remote, hash):
            return
        lfutil.link(lfutil.storepath(self.repo, hash),
               lfutil.storepath(self.remote, hash))

    def exists(self, hash):
        # a largefile "exists" when the remote store has it
        return lfutil.instore(self.remote, hash)

    def _getfile(self, tmpfile, filename, hash):
        # Prefer the remote store, then the user cache; anything else
        # is an error — the file is not reachable locally.
        if lfutil.instore(self.remote, hash):
            path = lfutil.storepath(self.remote, hash)
        elif lfutil.inusercache(self.ui, hash):
            path = lfutil.usercachepath(self.ui, hash)
        else:
            raise basestore.StoreError(filename, hash, '',
                _("can't get file locally"))
        fd = open(path, 'rb')
        try:
            return lfutil.copyandhash(fd, tmpfile)
        finally:
            fd.close()

    def _verifyfile(self, cctx, cset, contents, standin, verified):
        # Returns True when verification FAILED for this standin.
        filename = lfutil.splitstandin(standin)
        if not filename:
            return False
        fctx = cctx[standin]
        key = (filename, fctx.filenode())
        if key in verified:
            # already checked this (file, node) pair in this run
            return False

        # the standin's content is the 40-char hex hash of the largefile
        expecthash = fctx.data()[0:40]
        verified.add(key)
        if not lfutil.instore(self.remote, expecthash):
            self.ui.warn(
                _('changeset %s: %s missing\n'
                  '  (looked for hash %s)\n')
                % (cset, filename, expecthash))
            return True # failed

        if contents:
            # also re-hash the stored file and compare with the standin
            storepath = lfutil.storepath(self.remote, expecthash)
            actualhash = lfutil.hashfile(storepath)
            if actualhash != expecthash:
                self.ui.warn(
                    _('changeset %s: %s: contents differ\n'
                      '  (%s:\n'
                      '  expected hash %s,\n'
                      '  but got %s)\n')
                    % (cset, filename, storepath, expecthash, actualhash))
                return True # failed
        return False
@@ -1,123 +1,123
1 1 $ "$TESTDIR/hghave" unix-permissions || exit 80
2 2
3 3 Create user cache directory
4 4
5 5 $ USERCACHE=`pwd`/cache; export USERCACHE
6 6 $ cat <<EOF >> ${HGRCPATH}
7 7 > [extensions]
8 8 > hgext.largefiles=
9 9 > [largefiles]
10 10 > usercache=${USERCACHE}
11 11 > EOF
12 12 $ mkdir -p ${USERCACHE}
13 13
14 14 Create source repo, and commit adding largefile.
15 15
16 16 $ hg init src
17 17 $ cd src
18 18 $ echo large > large
19 19 $ hg add --large large
20 20 $ hg commit -m 'add largefile'
21 21 $ cd ..
22 22
23 23 Discard all cached largefiles in USERCACHE
24 24
25 25 $ rm -rf ${USERCACHE}
26 26
27 27 Create mirror repo, and pull from source without largefile:
28 28 "pull" is used instead of "clone" for suppression of (1) updating to
29 29 tip (= caching largefile from source repo), and (2) recording source
30 30 repo as "default" path in .hg/hgrc.
31 31
32 32 $ hg init mirror
33 33 $ cd mirror
34 34 $ hg pull ../src
35 35 pulling from ../src
36 36 requesting all changes
37 37 adding changesets
38 38 adding manifests
39 39 adding file changes
40 40 added 1 changesets with 1 changes to 1 files
41 41 (run 'hg update' to get a working copy)
42 42 caching new largefiles
43 43 0 largefiles cached
44 44
45 45 Update working directory to "tip", which requires largefile("large"),
46 46 but there is no cache file for it. So, hg must treat it as
47 47 "missing"(!) file.
48 48
49 49 $ hg update
50 50 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
51 51 getting changed largefiles
52 large: Can't get file locally
52 large: can't get file locally
53 53 (no default or default-push path set in hgrc)
54 54 0 largefiles updated, 0 removed
55 55 $ hg status
56 56 ! large
57 57
58 58 Update working directory to null: this cleans up .hg/largefiles/dirstate
59 59
60 60 $ hg update null
61 61 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
62 62 getting changed largefiles
63 63 0 largefiles updated, 0 removed
64 64
65 65 Update working directory to tip, again.
66 66
67 67 $ hg update
68 68 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
69 69 getting changed largefiles
70 large: Can't get file locally
70 large: can't get file locally
71 71 (no default or default-push path set in hgrc)
72 72 0 largefiles updated, 0 removed
73 73 $ hg status
74 74 ! large
75 75
76 76 Portable way to print file permissions:
77 77
78 78 $ cd ..
79 79 $ cat > ls-l.py <<EOF
80 80 > #!/usr/bin/env python
81 81 > import sys, os
82 82 > path = sys.argv[1]
83 83 > print '%03o' % (os.lstat(path).st_mode & 0777)
84 84 > EOF
85 85 $ chmod +x ls-l.py
86 86
87 87 Test that files in .hg/largefiles inherit mode from .hg/store, not
88 88 from file in working copy:
89 89
90 90 $ cd src
91 91 $ chmod 750 .hg/store
92 92 $ chmod 660 large
93 93 $ echo change >> large
94 94 $ hg commit -m change
95 95 $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
96 96 640
97 97
98 98 Test permission of files in .hg/largefiles created by update:
99 99
100 100 $ cd ../mirror
101 101 $ rm -r "$USERCACHE" .hg/largefiles # avoid links
102 102 $ chmod 750 .hg/store
103 103 $ hg pull ../src --update -q
104 104 $ ../ls-l.py .hg/largefiles/e151b474069de4ca6898f67ce2f2a7263adf8fea
105 105 640
106 106
107 107 Test permission of files created by push:
108 108
109 109 $ hg serve -R ../src -d -p $HGPORT --pid-file hg.pid \
110 110 > --config "web.allow_push=*" --config web.push_ssl=no
111 111 $ cat hg.pid >> $DAEMON_PIDS
112 112
113 113 $ echo change >> large
114 114 $ hg commit -m change
115 115
116 116 $ rm -r "$USERCACHE"
117 117
118 118 $ hg push -q http://localhost:$HGPORT/
119 119
120 120 $ ../ls-l.py ../src/.hg/largefiles/b734e14a0971e370408ab9bce8d56d8485e368a9
121 121 640
122 122
123 123 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now