##// END OF EJS Templates
largefiles: rename functions and methods to match desired behavior...
Benjamin Pollack -
r15316:c65f5b6e stable
parent child Browse files
Show More
@@ -1,202 +1,202 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''base class for store implementations and store-related utility code'''
9 '''base class for store implementations and store-related utility code'''
10
10
11 import os
11 import os
12 import tempfile
12 import tempfile
13 import binascii
13 import binascii
14 import re
14 import re
15
15
16 from mercurial import util, node, hg
16 from mercurial import util, node, hg
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18
18
19 import lfutil
19 import lfutil
20
20
21 class StoreError(Exception):
21 class StoreError(Exception):
22 '''Raised when there is a problem getting files from or putting
22 '''Raised when there is a problem getting files from or putting
23 files to a central store.'''
23 files to a central store.'''
24 def __init__(self, filename, hash, url, detail):
24 def __init__(self, filename, hash, url, detail):
25 self.filename = filename
25 self.filename = filename
26 self.hash = hash
26 self.hash = hash
27 self.url = url
27 self.url = url
28 self.detail = detail
28 self.detail = detail
29
29
30 def longmessage(self):
30 def longmessage(self):
31 if self.url:
31 if self.url:
32 return ('%s: %s\n'
32 return ('%s: %s\n'
33 '(failed URL: %s)\n'
33 '(failed URL: %s)\n'
34 % (self.filename, self.detail, self.url))
34 % (self.filename, self.detail, self.url))
35 else:
35 else:
36 return ('%s: %s\n'
36 return ('%s: %s\n'
37 '(no default or default-push path set in hgrc)\n'
37 '(no default or default-push path set in hgrc)\n'
38 % (self.filename, self.detail))
38 % (self.filename, self.detail))
39
39
40 def __str__(self):
40 def __str__(self):
41 return "%s: %s" % (self.url, self.detail)
41 return "%s: %s" % (self.url, self.detail)
42
42
43 class basestore(object):
43 class basestore(object):
44 def __init__(self, ui, repo, url):
44 def __init__(self, ui, repo, url):
45 self.ui = ui
45 self.ui = ui
46 self.repo = repo
46 self.repo = repo
47 self.url = url
47 self.url = url
48
48
49 def put(self, source, hash):
49 def put(self, source, hash):
50 '''Put source file into the store under <filename>/<hash>.'''
50 '''Put source file into the store under <filename>/<hash>.'''
51 raise NotImplementedError('abstract method')
51 raise NotImplementedError('abstract method')
52
52
53 def exists(self, hash):
53 def exists(self, hash):
54 '''Check to see if the store contains the given hash.'''
54 '''Check to see if the store contains the given hash.'''
55 raise NotImplementedError('abstract method')
55 raise NotImplementedError('abstract method')
56
56
57 def get(self, files):
57 def get(self, files):
58 '''Get the specified largefiles from the store and write to local
58 '''Get the specified largefiles from the store and write to local
59 files under repo.root. files is a list of (filename, hash)
59 files under repo.root. files is a list of (filename, hash)
60 tuples. Return (success, missing), lists of files successfuly
60 tuples. Return (success, missing), lists of files successfuly
61 downloaded and those not found in the store. success is a list
61 downloaded and those not found in the store. success is a list
62 of (filename, hash) tuples; missing is a list of filenames that
62 of (filename, hash) tuples; missing is a list of filenames that
63 we could not get. (The detailed error message will already have
63 we could not get. (The detailed error message will already have
64 been presented to the user, so missing is just supplied as a
64 been presented to the user, so missing is just supplied as a
65 summary.)'''
65 summary.)'''
66 success = []
66 success = []
67 missing = []
67 missing = []
68 ui = self.ui
68 ui = self.ui
69
69
70 at = 0
70 at = 0
71 for filename, hash in files:
71 for filename, hash in files:
72 ui.progress(_('getting largefiles'), at, unit='lfile',
72 ui.progress(_('getting largefiles'), at, unit='lfile',
73 total=len(files))
73 total=len(files))
74 at += 1
74 at += 1
75 ui.note(_('getting %s:%s\n') % (filename, hash))
75 ui.note(_('getting %s:%s\n') % (filename, hash))
76
76
77 cachefilename = lfutil.cachepath(self.repo, hash)
77 storefilename = lfutil.storepath(self.repo, hash)
78 cachedir = os.path.dirname(cachefilename)
78 storedir = os.path.dirname(storefilename)
79
79
80 # No need to pass mode='wb' to fdopen(), since mkstemp() already
80 # No need to pass mode='wb' to fdopen(), since mkstemp() already
81 # opened the file in binary mode.
81 # opened the file in binary mode.
82 (tmpfd, tmpfilename) = tempfile.mkstemp(
82 (tmpfd, tmpfilename) = tempfile.mkstemp(
83 dir=cachedir, prefix=os.path.basename(filename))
83 dir=storedir, prefix=os.path.basename(filename))
84 tmpfile = os.fdopen(tmpfd, 'w')
84 tmpfile = os.fdopen(tmpfd, 'w')
85
85
86 try:
86 try:
87 hhash = binascii.hexlify(self._getfile(tmpfile, filename, hash))
87 hhash = binascii.hexlify(self._getfile(tmpfile, filename, hash))
88 except StoreError, err:
88 except StoreError, err:
89 ui.warn(err.longmessage())
89 ui.warn(err.longmessage())
90 hhash = ""
90 hhash = ""
91
91
92 if hhash != hash:
92 if hhash != hash:
93 if hhash != "":
93 if hhash != "":
94 ui.warn(_('%s: data corruption (expected %s, got %s)\n')
94 ui.warn(_('%s: data corruption (expected %s, got %s)\n')
95 % (filename, hash, hhash))
95 % (filename, hash, hhash))
96 tmpfile.close() # no-op if it's already closed
96 tmpfile.close() # no-op if it's already closed
97 os.remove(tmpfilename)
97 os.remove(tmpfilename)
98 missing.append(filename)
98 missing.append(filename)
99 continue
99 continue
100
100
101 if os.path.exists(cachefilename): # Windows
101 if os.path.exists(storefilename): # Windows
102 os.remove(cachefilename)
102 os.remove(storefilename)
103 os.rename(tmpfilename, cachefilename)
103 os.rename(tmpfilename, storefilename)
104 lfutil.linktosystemcache(self.repo, hash)
104 lfutil.linktousercache(self.repo, hash)
105 success.append((filename, hhash))
105 success.append((filename, hhash))
106
106
107 ui.progress(_('getting largefiles'), None)
107 ui.progress(_('getting largefiles'), None)
108 return (success, missing)
108 return (success, missing)
109
109
110 def verify(self, revs, contents=False):
110 def verify(self, revs, contents=False):
111 '''Verify the existence (and, optionally, contents) of every big
111 '''Verify the existence (and, optionally, contents) of every big
112 file revision referenced by every changeset in revs.
112 file revision referenced by every changeset in revs.
113 Return 0 if all is well, non-zero on any errors.'''
113 Return 0 if all is well, non-zero on any errors.'''
114 write = self.ui.write
114 write = self.ui.write
115 failed = False
115 failed = False
116
116
117 write(_('searching %d changesets for largefiles\n') % len(revs))
117 write(_('searching %d changesets for largefiles\n') % len(revs))
118 verified = set() # set of (filename, filenode) tuples
118 verified = set() # set of (filename, filenode) tuples
119
119
120 for rev in revs:
120 for rev in revs:
121 cctx = self.repo[rev]
121 cctx = self.repo[rev]
122 cset = "%d:%s" % (cctx.rev(), node.short(cctx.node()))
122 cset = "%d:%s" % (cctx.rev(), node.short(cctx.node()))
123
123
124 failed = lfutil.any_(self._verifyfile(
124 failed = lfutil.any_(self._verifyfile(
125 cctx, cset, contents, standin, verified) for standin in cctx)
125 cctx, cset, contents, standin, verified) for standin in cctx)
126
126
127 num_revs = len(verified)
127 num_revs = len(verified)
128 num_lfiles = len(set([fname for (fname, fnode) in verified]))
128 num_lfiles = len(set([fname for (fname, fnode) in verified]))
129 if contents:
129 if contents:
130 write(_('verified contents of %d revisions of %d largefiles\n')
130 write(_('verified contents of %d revisions of %d largefiles\n')
131 % (num_revs, num_lfiles))
131 % (num_revs, num_lfiles))
132 else:
132 else:
133 write(_('verified existence of %d revisions of %d largefiles\n')
133 write(_('verified existence of %d revisions of %d largefiles\n')
134 % (num_revs, num_lfiles))
134 % (num_revs, num_lfiles))
135
135
136 return int(failed)
136 return int(failed)
137
137
138 def _getfile(self, tmpfile, filename, hash):
138 def _getfile(self, tmpfile, filename, hash):
139 '''Fetch one revision of one file from the store and write it
139 '''Fetch one revision of one file from the store and write it
140 to tmpfile. Compute the hash of the file on-the-fly as it
140 to tmpfile. Compute the hash of the file on-the-fly as it
141 downloads and return the binary hash. Close tmpfile. Raise
141 downloads and return the binary hash. Close tmpfile. Raise
142 StoreError if unable to download the file (e.g. it does not
142 StoreError if unable to download the file (e.g. it does not
143 exist in the store).'''
143 exist in the store).'''
144 raise NotImplementedError('abstract method')
144 raise NotImplementedError('abstract method')
145
145
146 def _verifyfile(self, cctx, cset, contents, standin, verified):
146 def _verifyfile(self, cctx, cset, contents, standin, verified):
147 '''Perform the actual verification of a file in the store.
147 '''Perform the actual verification of a file in the store.
148 '''
148 '''
149 raise NotImplementedError('abstract method')
149 raise NotImplementedError('abstract method')
150
150
151 import localstore, wirestore
151 import localstore, wirestore
152
152
153 _storeprovider = {
153 _storeprovider = {
154 'file': [localstore.localstore],
154 'file': [localstore.localstore],
155 'http': [wirestore.wirestore],
155 'http': [wirestore.wirestore],
156 'https': [wirestore.wirestore],
156 'https': [wirestore.wirestore],
157 'ssh': [wirestore.wirestore],
157 'ssh': [wirestore.wirestore],
158 }
158 }
159
159
160 _scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://')
160 _scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://')
161
161
162 # During clone this function is passed the src's ui object
162 # During clone this function is passed the src's ui object
163 # but it needs the dest's ui object so it can read out of
163 # but it needs the dest's ui object so it can read out of
164 # the config file. Use repo.ui instead.
164 # the config file. Use repo.ui instead.
165 def _openstore(repo, remote=None, put=False):
165 def _openstore(repo, remote=None, put=False):
166 ui = repo.ui
166 ui = repo.ui
167
167
168 if not remote:
168 if not remote:
169 path = (getattr(repo, 'lfpullsource', None) or
169 path = (getattr(repo, 'lfpullsource', None) or
170 ui.expandpath('default-push', 'default'))
170 ui.expandpath('default-push', 'default'))
171
171
172 # ui.expandpath() leaves 'default-push' and 'default' alone if
172 # ui.expandpath() leaves 'default-push' and 'default' alone if
173 # they cannot be expanded: fallback to the empty string,
173 # they cannot be expanded: fallback to the empty string,
174 # meaning the current directory.
174 # meaning the current directory.
175 if path == 'default-push' or path == 'default':
175 if path == 'default-push' or path == 'default':
176 path = ''
176 path = ''
177 remote = repo
177 remote = repo
178 else:
178 else:
179 remote = hg.peer(repo, {}, path)
179 remote = hg.peer(repo, {}, path)
180
180
181 # The path could be a scheme so use Mercurial's normal functionality
181 # The path could be a scheme so use Mercurial's normal functionality
182 # to resolve the scheme to a repository and use its path
182 # to resolve the scheme to a repository and use its path
183 path = util.safehasattr(remote, 'url') and remote.url() or remote.path
183 path = util.safehasattr(remote, 'url') and remote.url() or remote.path
184
184
185 match = _scheme_re.match(path)
185 match = _scheme_re.match(path)
186 if not match: # regular filesystem path
186 if not match: # regular filesystem path
187 scheme = 'file'
187 scheme = 'file'
188 else:
188 else:
189 scheme = match.group(1)
189 scheme = match.group(1)
190
190
191 try:
191 try:
192 storeproviders = _storeprovider[scheme]
192 storeproviders = _storeprovider[scheme]
193 except KeyError:
193 except KeyError:
194 raise util.Abort(_('unsupported URL scheme %r') % scheme)
194 raise util.Abort(_('unsupported URL scheme %r') % scheme)
195
195
196 for class_obj in storeproviders:
196 for class_obj in storeproviders:
197 try:
197 try:
198 return class_obj(ui, repo, remote)
198 return class_obj(ui, repo, remote)
199 except lfutil.storeprotonotcapable:
199 except lfutil.storeprotonotcapable:
200 pass
200 pass
201
201
202 raise util.Abort(_('%s does not appear to be a largefile store') % path)
202 raise util.Abort(_('%s does not appear to be a largefile store') % path)
@@ -1,448 +1,448 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''largefiles utility code: must not import other modules in this package.'''
9 '''largefiles utility code: must not import other modules in this package.'''
10
10
11 import os
11 import os
12 import errno
12 import errno
13 import shutil
13 import shutil
14 import stat
14 import stat
15 import hashlib
15 import hashlib
16
16
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
17 from mercurial import dirstate, httpconnection, match as match_, util, scmutil
18 from mercurial.i18n import _
18 from mercurial.i18n import _
19
19
20 shortname = '.hglf'
20 shortname = '.hglf'
21 longname = 'largefiles'
21 longname = 'largefiles'
22
22
23
23
24 # -- Portability wrappers ----------------------------------------------
24 # -- Portability wrappers ----------------------------------------------
25
25
26 def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
26 def dirstate_walk(dirstate, matcher, unknown=False, ignored=False):
27 return dirstate.walk(matcher, [], unknown, ignored)
27 return dirstate.walk(matcher, [], unknown, ignored)
28
28
29 def repo_add(repo, list):
29 def repo_add(repo, list):
30 add = repo[None].add
30 add = repo[None].add
31 return add(list)
31 return add(list)
32
32
33 def repo_remove(repo, list, unlink=False):
33 def repo_remove(repo, list, unlink=False):
34 def remove(list, unlink):
34 def remove(list, unlink):
35 wlock = repo.wlock()
35 wlock = repo.wlock()
36 try:
36 try:
37 if unlink:
37 if unlink:
38 for f in list:
38 for f in list:
39 try:
39 try:
40 util.unlinkpath(repo.wjoin(f))
40 util.unlinkpath(repo.wjoin(f))
41 except OSError, inst:
41 except OSError, inst:
42 if inst.errno != errno.ENOENT:
42 if inst.errno != errno.ENOENT:
43 raise
43 raise
44 repo[None].forget(list)
44 repo[None].forget(list)
45 finally:
45 finally:
46 wlock.release()
46 wlock.release()
47 return remove(list, unlink=unlink)
47 return remove(list, unlink=unlink)
48
48
49 def repo_forget(repo, list):
49 def repo_forget(repo, list):
50 forget = repo[None].forget
50 forget = repo[None].forget
51 return forget(list)
51 return forget(list)
52
52
53 def findoutgoing(repo, remote, force):
53 def findoutgoing(repo, remote, force):
54 from mercurial import discovery
54 from mercurial import discovery
55 common, _anyinc, _heads = discovery.findcommonincoming(repo,
55 common, _anyinc, _heads = discovery.findcommonincoming(repo,
56 remote, force=force)
56 remote, force=force)
57 return repo.changelog.findmissing(common)
57 return repo.changelog.findmissing(common)
58
58
59 # -- Private worker functions ------------------------------------------
59 # -- Private worker functions ------------------------------------------
60
60
61 def getminsize(ui, assumelfiles, opt, default=10):
61 def getminsize(ui, assumelfiles, opt, default=10):
62 lfsize = opt
62 lfsize = opt
63 if not lfsize and assumelfiles:
63 if not lfsize and assumelfiles:
64 lfsize = ui.config(longname, 'minsize', default=default)
64 lfsize = ui.config(longname, 'minsize', default=default)
65 if lfsize:
65 if lfsize:
66 try:
66 try:
67 lfsize = float(lfsize)
67 lfsize = float(lfsize)
68 except ValueError:
68 except ValueError:
69 raise util.Abort(_('largefiles: size must be number (not %s)\n')
69 raise util.Abort(_('largefiles: size must be number (not %s)\n')
70 % lfsize)
70 % lfsize)
71 if lfsize is None:
71 if lfsize is None:
72 raise util.Abort(_('minimum size for largefiles must be specified'))
72 raise util.Abort(_('minimum size for largefiles must be specified'))
73 return lfsize
73 return lfsize
74
74
75 def link(src, dest):
75 def link(src, dest):
76 try:
76 try:
77 util.oslink(src, dest)
77 util.oslink(src, dest)
78 except OSError:
78 except OSError:
79 # if hardlinks fail, fallback on copy
79 # if hardlinks fail, fallback on copy
80 shutil.copyfile(src, dest)
80 shutil.copyfile(src, dest)
81 os.chmod(dest, os.stat(src).st_mode)
81 os.chmod(dest, os.stat(src).st_mode)
82
82
83 def systemcachepath(ui, hash):
83 def usercachepath(ui, hash):
84 path = ui.config(longname, 'systemcache', None)
84 path = ui.config(longname, 'usercache', None)
85 if path:
85 if path:
86 path = os.path.join(path, hash)
86 path = os.path.join(path, hash)
87 else:
87 else:
88 if os.name == 'nt':
88 if os.name == 'nt':
89 appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
89 appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
90 path = os.path.join(appdata, longname, hash)
90 path = os.path.join(appdata, longname, hash)
91 elif os.name == 'posix':
91 elif os.name == 'posix':
92 path = os.path.join(os.getenv('HOME'), '.' + longname, hash)
92 path = os.path.join(os.getenv('HOME'), '.' + longname, hash)
93 else:
93 else:
94 raise util.Abort(_('unknown operating system: %s\n') % os.name)
94 raise util.Abort(_('unknown operating system: %s\n') % os.name)
95 return path
95 return path
96
96
97 def insystemcache(ui, hash):
97 def inusercache(ui, hash):
98 return os.path.exists(systemcachepath(ui, hash))
98 return os.path.exists(usercachepath(ui, hash))
99
99
100 def findfile(repo, hash):
100 def findfile(repo, hash):
101 if incache(repo, hash):
101 if instore(repo, hash):
102 repo.ui.note(_('Found %s in cache\n') % hash)
102 repo.ui.note(_('Found %s in store\n') % hash)
103 return cachepath(repo, hash)
103 return storepath(repo, hash)
104 if insystemcache(repo.ui, hash):
104 if inusercache(repo.ui, hash):
105 repo.ui.note(_('Found %s in system cache\n') % hash)
105 repo.ui.note(_('Found %s in system cache\n') % hash)
106 return systemcachepath(repo.ui, hash)
106 return usercachepath(repo.ui, hash)
107 return None
107 return None
108
108
109 class largefiles_dirstate(dirstate.dirstate):
109 class largefiles_dirstate(dirstate.dirstate):
110 def __getitem__(self, key):
110 def __getitem__(self, key):
111 return super(largefiles_dirstate, self).__getitem__(unixpath(key))
111 return super(largefiles_dirstate, self).__getitem__(unixpath(key))
112 def normal(self, f):
112 def normal(self, f):
113 return super(largefiles_dirstate, self).normal(unixpath(f))
113 return super(largefiles_dirstate, self).normal(unixpath(f))
114 def remove(self, f):
114 def remove(self, f):
115 return super(largefiles_dirstate, self).remove(unixpath(f))
115 return super(largefiles_dirstate, self).remove(unixpath(f))
116 def add(self, f):
116 def add(self, f):
117 return super(largefiles_dirstate, self).add(unixpath(f))
117 return super(largefiles_dirstate, self).add(unixpath(f))
118 def drop(self, f):
118 def drop(self, f):
119 return super(largefiles_dirstate, self).drop(unixpath(f))
119 return super(largefiles_dirstate, self).drop(unixpath(f))
120 def forget(self, f):
120 def forget(self, f):
121 return super(largefiles_dirstate, self).forget(unixpath(f))
121 return super(largefiles_dirstate, self).forget(unixpath(f))
122
122
123 def openlfdirstate(ui, repo):
123 def openlfdirstate(ui, repo):
124 '''
124 '''
125 Return a dirstate object that tracks largefiles: i.e. its root is
125 Return a dirstate object that tracks largefiles: i.e. its root is
126 the repo root, but it is saved in .hg/largefiles/dirstate.
126 the repo root, but it is saved in .hg/largefiles/dirstate.
127 '''
127 '''
128 admin = repo.join(longname)
128 admin = repo.join(longname)
129 opener = scmutil.opener(admin)
129 opener = scmutil.opener(admin)
130 if util.safehasattr(repo.dirstate, '_validate'):
130 if util.safehasattr(repo.dirstate, '_validate'):
131 lfdirstate = largefiles_dirstate(opener, ui, repo.root,
131 lfdirstate = largefiles_dirstate(opener, ui, repo.root,
132 repo.dirstate._validate)
132 repo.dirstate._validate)
133 else:
133 else:
134 lfdirstate = largefiles_dirstate(opener, ui, repo.root)
134 lfdirstate = largefiles_dirstate(opener, ui, repo.root)
135
135
136 # If the largefiles dirstate does not exist, populate and create
136 # If the largefiles dirstate does not exist, populate and create
137 # it. This ensures that we create it on the first meaningful
137 # it. This ensures that we create it on the first meaningful
138 # largefiles operation in a new clone. It also gives us an easy
138 # largefiles operation in a new clone. It also gives us an easy
139 # way to forcibly rebuild largefiles state:
139 # way to forcibly rebuild largefiles state:
140 # rm .hg/largefiles/dirstate && hg status
140 # rm .hg/largefiles/dirstate && hg status
141 # Or even, if things are really messed up:
141 # Or even, if things are really messed up:
142 # rm -rf .hg/largefiles && hg status
142 # rm -rf .hg/largefiles && hg status
143 if not os.path.exists(os.path.join(admin, 'dirstate')):
143 if not os.path.exists(os.path.join(admin, 'dirstate')):
144 util.makedirs(admin)
144 util.makedirs(admin)
145 matcher = getstandinmatcher(repo)
145 matcher = getstandinmatcher(repo)
146 for standin in dirstate_walk(repo.dirstate, matcher):
146 for standin in dirstate_walk(repo.dirstate, matcher):
147 lfile = splitstandin(standin)
147 lfile = splitstandin(standin)
148 hash = readstandin(repo, lfile)
148 hash = readstandin(repo, lfile)
149 lfdirstate.normallookup(lfile)
149 lfdirstate.normallookup(lfile)
150 try:
150 try:
151 if hash == hashfile(lfile):
151 if hash == hashfile(lfile):
152 lfdirstate.normal(lfile)
152 lfdirstate.normal(lfile)
153 except IOError, err:
153 except IOError, err:
154 if err.errno != errno.ENOENT:
154 if err.errno != errno.ENOENT:
155 raise
155 raise
156
156
157 lfdirstate.write()
157 lfdirstate.write()
158
158
159 return lfdirstate
159 return lfdirstate
160
160
161 def lfdirstate_status(lfdirstate, repo, rev):
161 def lfdirstate_status(lfdirstate, repo, rev):
162 wlock = repo.wlock()
162 wlock = repo.wlock()
163 try:
163 try:
164 match = match_.always(repo.root, repo.getcwd())
164 match = match_.always(repo.root, repo.getcwd())
165 s = lfdirstate.status(match, [], False, False, False)
165 s = lfdirstate.status(match, [], False, False, False)
166 unsure, modified, added, removed, missing, unknown, ignored, clean = s
166 unsure, modified, added, removed, missing, unknown, ignored, clean = s
167 for lfile in unsure:
167 for lfile in unsure:
168 if repo[rev][standin(lfile)].data().strip() != \
168 if repo[rev][standin(lfile)].data().strip() != \
169 hashfile(repo.wjoin(lfile)):
169 hashfile(repo.wjoin(lfile)):
170 modified.append(lfile)
170 modified.append(lfile)
171 else:
171 else:
172 clean.append(lfile)
172 clean.append(lfile)
173 lfdirstate.normal(lfile)
173 lfdirstate.normal(lfile)
174 lfdirstate.write()
174 lfdirstate.write()
175 finally:
175 finally:
176 wlock.release()
176 wlock.release()
177 return (modified, added, removed, missing, unknown, ignored, clean)
177 return (modified, added, removed, missing, unknown, ignored, clean)
178
178
179 def listlfiles(repo, rev=None, matcher=None):
179 def listlfiles(repo, rev=None, matcher=None):
180 '''return a list of largefiles in the working copy or the
180 '''return a list of largefiles in the working copy or the
181 specified changeset'''
181 specified changeset'''
182
182
183 if matcher is None:
183 if matcher is None:
184 matcher = getstandinmatcher(repo)
184 matcher = getstandinmatcher(repo)
185
185
186 # ignore unknown files in working directory
186 # ignore unknown files in working directory
187 return [splitstandin(f)
187 return [splitstandin(f)
188 for f in repo[rev].walk(matcher)
188 for f in repo[rev].walk(matcher)
189 if rev is not None or repo.dirstate[f] != '?']
189 if rev is not None or repo.dirstate[f] != '?']
190
190
191 def incache(repo, hash):
191 def instore(repo, hash):
192 return os.path.exists(cachepath(repo, hash))
192 return os.path.exists(storepath(repo, hash))
193
193
194 def createdir(dir):
194 def createdir(dir):
195 if not os.path.exists(dir):
195 if not os.path.exists(dir):
196 os.makedirs(dir)
196 os.makedirs(dir)
197
197
198 def cachepath(repo, hash):
198 def storepath(repo, hash):
199 return repo.join(os.path.join(longname, hash))
199 return repo.join(os.path.join(longname, hash))
200
200
201 def copyfromcache(repo, hash, filename):
201 def copyfromcache(repo, hash, filename):
202 '''Copy the specified largefile from the repo or system cache to
202 '''Copy the specified largefile from the repo or system cache to
203 filename in the repository. Return true on success or false if the
203 filename in the repository. Return true on success or false if the
204 file was not found in either cache (which should not happened:
204 file was not found in either cache (which should not happened:
205 this is meant to be called only after ensuring that the needed
205 this is meant to be called only after ensuring that the needed
206 largefile exists in the cache).'''
206 largefile exists in the cache).'''
207 path = findfile(repo, hash)
207 path = findfile(repo, hash)
208 if path is None:
208 if path is None:
209 return False
209 return False
210 util.makedirs(os.path.dirname(repo.wjoin(filename)))
210 util.makedirs(os.path.dirname(repo.wjoin(filename)))
211 shutil.copy(path, repo.wjoin(filename))
211 shutil.copy(path, repo.wjoin(filename))
212 return True
212 return True
213
213
214 def copytocache(repo, rev, file, uploaded=False):
214 def copytostore(repo, rev, file, uploaded=False):
215 hash = readstandin(repo, file)
215 hash = readstandin(repo, file)
216 if incache(repo, hash):
216 if instore(repo, hash):
217 return
217 return
218 copytocacheabsolute(repo, repo.wjoin(file), hash)
218 copytostoreabsolute(repo, repo.wjoin(file), hash)
219
219
220 def copytocacheabsolute(repo, file, hash):
220 def copytostoreabsolute(repo, file, hash):
221 createdir(os.path.dirname(cachepath(repo, hash)))
221 createdir(os.path.dirname(storepath(repo, hash)))
222 if insystemcache(repo.ui, hash):
222 if inusercache(repo.ui, hash):
223 link(systemcachepath(repo.ui, hash), cachepath(repo, hash))
223 link(usercachepath(repo.ui, hash), storepath(repo, hash))
224 else:
224 else:
225 shutil.copyfile(file, cachepath(repo, hash))
225 shutil.copyfile(file, storepath(repo, hash))
226 os.chmod(cachepath(repo, hash), os.stat(file).st_mode)
226 os.chmod(storepath(repo, hash), os.stat(file).st_mode)
227 linktosystemcache(repo, hash)
227 linktousercache(repo, hash)
228
228
229 def linktosystemcache(repo, hash):
229 def linktousercache(repo, hash):
230 createdir(os.path.dirname(systemcachepath(repo.ui, hash)))
230 createdir(os.path.dirname(usercachepath(repo.ui, hash)))
231 link(cachepath(repo, hash), systemcachepath(repo.ui, hash))
231 link(storepath(repo, hash), usercachepath(repo.ui, hash))
232
232
233 def getstandinmatcher(repo, pats=[], opts={}):
233 def getstandinmatcher(repo, pats=[], opts={}):
234 '''Return a match object that applies pats to the standin directory'''
234 '''Return a match object that applies pats to the standin directory'''
235 standindir = repo.pathto(shortname)
235 standindir = repo.pathto(shortname)
236 if pats:
236 if pats:
237 # patterns supplied: search standin directory relative to current dir
237 # patterns supplied: search standin directory relative to current dir
238 cwd = repo.getcwd()
238 cwd = repo.getcwd()
239 if os.path.isabs(cwd):
239 if os.path.isabs(cwd):
240 # cwd is an absolute path for hg -R <reponame>
240 # cwd is an absolute path for hg -R <reponame>
241 # work relative to the repository root in this case
241 # work relative to the repository root in this case
242 cwd = ''
242 cwd = ''
243 pats = [os.path.join(standindir, cwd, pat) for pat in pats]
243 pats = [os.path.join(standindir, cwd, pat) for pat in pats]
244 elif os.path.isdir(standindir):
244 elif os.path.isdir(standindir):
245 # no patterns: relative to repo root
245 # no patterns: relative to repo root
246 pats = [standindir]
246 pats = [standindir]
247 else:
247 else:
248 # no patterns and no standin dir: return matcher that matches nothing
248 # no patterns and no standin dir: return matcher that matches nothing
249 match = match_.match(repo.root, None, [], exact=True)
249 match = match_.match(repo.root, None, [], exact=True)
250 match.matchfn = lambda f: False
250 match.matchfn = lambda f: False
251 return match
251 return match
252 return getmatcher(repo, pats, opts, showbad=False)
252 return getmatcher(repo, pats, opts, showbad=False)
253
253
254 def getmatcher(repo, pats=[], opts={}, showbad=True):
254 def getmatcher(repo, pats=[], opts={}, showbad=True):
255 '''Wrapper around scmutil.match() that adds showbad: if false,
255 '''Wrapper around scmutil.match() that adds showbad: if false,
256 neuter the match object's bad() method so it does not print any
256 neuter the match object's bad() method so it does not print any
257 warnings about missing files or directories.'''
257 warnings about missing files or directories.'''
258 match = scmutil.match(repo[None], pats, opts)
258 match = scmutil.match(repo[None], pats, opts)
259
259
260 if not showbad:
260 if not showbad:
261 match.bad = lambda f, msg: None
261 match.bad = lambda f, msg: None
262 return match
262 return match
263
263
264 def composestandinmatcher(repo, rmatcher):
264 def composestandinmatcher(repo, rmatcher):
265 '''Return a matcher that accepts standins corresponding to the
265 '''Return a matcher that accepts standins corresponding to the
266 files accepted by rmatcher. Pass the list of files in the matcher
266 files accepted by rmatcher. Pass the list of files in the matcher
267 as the paths specified by the user.'''
267 as the paths specified by the user.'''
268 smatcher = getstandinmatcher(repo, rmatcher.files())
268 smatcher = getstandinmatcher(repo, rmatcher.files())
269 isstandin = smatcher.matchfn
269 isstandin = smatcher.matchfn
270 def composed_matchfn(f):
270 def composed_matchfn(f):
271 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
271 return isstandin(f) and rmatcher.matchfn(splitstandin(f))
272 smatcher.matchfn = composed_matchfn
272 smatcher.matchfn = composed_matchfn
273
273
274 return smatcher
274 return smatcher
275
275
def standin(filename):
    '''Return the repo-relative path to the standin for the specified
    big file.

    Notes:
    1) Most callers want an absolute path, but _create_standin() needs
       it repo-relative so lfadd() can pass it to repo_add().  So it is
       up to the caller to use repo.wjoin() to get an absolute path.
    2) The join uses '/' because that is what dirstate always uses,
       even on Windows; any native separator (e.g. from filenames on
       the command line) is converted first.'''
    normalized = filename.replace(os.sep, '/')
    return '%s/%s' % (shortname, normalized)
287
287
def isstandin(filename):
    '''Return True if filename is a big file standin.  filename must
    be in Mercurial's internal form (slash-separated).'''
    prefix = shortname + '/'
    return filename.startswith(prefix)
292
292
def splitstandin(filename):
    '''Strip the standin directory prefix from filename and return the
    big file name, or None if filename is not a standin.

    Splits on '/' because that is what dirstate always uses, even on
    Windows; any native separator (e.g. from filenames on the command
    line) is converted first.'''
    parts = filename.replace(os.sep, '/').split('/', 1)
    if len(parts) != 2 or parts[0] != shortname:
        return None
    return parts[1]
302
302
def updatestandin(repo, standin):
    '''Re-hash the big file corresponding to standin and rewrite the
    standin with the new hash and executable bit.  Does nothing when
    the big file is absent from the working directory.'''
    bigfile = repo.wjoin(splitstandin(standin))
    if not os.path.exists(bigfile):
        return
    writestandin(repo, standin, hashfile(bigfile), getexecutable(bigfile))
309
309
def readstandin(repo, filename, node=None):
    '''Read the hex hash from the standin for filename at the given
    node, or from the working directory if no node is given.'''
    ctx = repo[node]
    return ctx[standin(filename)].data().strip()
314
314
def writestandin(repo, standin, hash, executable):
    '''Write hash to <repo.root>/<standin>, honoring the executable
    bit.'''
    path = repo.wjoin(standin)
    writehash(hash, path, executable)
318
318
def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable of chunks) and write them to
    outfile, computing the SHA-1 hash of the data along the way.
    Close outfile when done and return the binary hash.'''
    hasher = util.sha1('')
    for chunk in instream:
        hasher.update(chunk)
        outfile.write(chunk)

    # Closing a file somebody else opened is rude and wrong — but every
    # caller opens outfile purely so it can be copied and hashed here,
    # so close it on their behalf.
    outfile.close()

    return hasher.digest()
334
334
def hashrepofile(repo, file):
    '''Return the hex SHA-1 hash of the working copy of file.'''
    path = repo.wjoin(file)
    return hashfile(path)
337
337
def hashfile(file):
    '''Return the hex SHA-1 hash of the contents of file, or the empty
    string if the file does not exist.'''
    if not os.path.exists(file):
        return ''
    hasher = util.sha1('')
    fd = open(file, 'rb')
    try:
        for data in blockstream(fd):
            hasher.update(data)
    finally:
        # blockstream() closes fd only when fully exhausted; close it
        # here too so the descriptor cannot leak if hashing raises.
        # A second close() on an already-closed file is a no-op.
        fd.close()
    return hasher.hexdigest()
347
347
class limitreader(object):
    '''File-like wrapper exposing at most `limit` bytes of f.'''

    def __init__(self, f, limit):
        self.f = f
        self.limit = limit

    def read(self, length):
        # Serve at most the remaining budget; once it is exhausted,
        # behave like EOF by returning the empty string.
        if not self.limit:
            return ''
        length = min(length, self.limit)
        self.limit -= length
        return self.f.read(length)

    def close(self):
        # The underlying file belongs to the caller; leave it open.
        pass
362
362
def blockstream(infile, blocksize=128 * 1024):
    """Generator yielding successive blocks of data from infile.

    Closes infile once it is exhausted — same blecch as copyandhash():
    callers open infile purely so it can be streamed here.
    """
    data = infile.read(blocksize)
    while data:
        yield data
        data = infile.read(blocksize)
    infile.close()
372
372
def readhash(filename):
    '''Return the 40-character hex hash stored at the start of
    filename.

    Raises util.Abort if the file holds fewer than 40 bytes.'''
    rfile = open(filename, 'rb')
    try:
        hash = rfile.read(40)
    finally:
        # Close even if the read raises, so the descriptor cannot leak
        # (the original only closed on the success path).
        rfile.close()
    if len(hash) < 40:
        raise util.Abort(_('bad hash in \'%s\' (only %d bytes long)')
                         % (filename, len(hash)))
    return hash
381
381
def writehash(hash, filename, executable):
    '''Write hash plus a trailing newline to filename, creating any
    missing parent directories and setting the file mode according to
    executable.'''
    util.makedirs(os.path.dirname(filename))
    if os.path.exists(filename):
        os.unlink(filename)
    wfile = open(filename, 'wb')

    try:
        wfile.write(hash + '\n')
    finally:
        wfile.close()
    if os.path.exists(filename):
        os.chmod(filename, getmode(executable))
395
395
def getexecutable(filename):
    '''Return a truthy value when filename is executable by user,
    group, AND other; a falsy value otherwise.'''
    mode = os.stat(filename).st_mode
    # Chained masks mirror the original `and` expression exactly,
    # including its int (not bool) result.
    result = mode & stat.S_IXUSR
    if result:
        result = mode & stat.S_IXGRP
    if result:
        result = mode & stat.S_IXOTH
    return result
401
401
def getmode(executable):
    '''Return the permission bits for a largefile: rwxr-xr-x (0755)
    when executable, rw-r--r-- (0644) otherwise.'''
    # int(s, 8) spells the octal values without relying on the
    # 0755-style literal syntax; the values are identical.
    if executable:
        return int('755', 8)
    return int('644', 8)
407
407
def urljoin(first, second, *arg):
    '''Join two or more URL components, ensuring exactly one '/' at
    each seam (a single trailing slash is added to the left side if
    missing; a single leading slash is stripped from the right side).'''
    def join(left, right):
        if not left.endswith('/'):
            left += '/'
        if right.startswith('/'):
            right = right[1:]
        return left + right

    url = first
    for piece in (second,) + arg:
        url = join(url, piece)
    return url
420
420
def hexsha1(data):
    """Return the hex-encoded SHA-1 sum of the contents of the
    file-like object data."""
    hasher = hashlib.sha1()
    for chunk in util.filechunkiter(data):
        hasher.update(chunk)
    return hasher.hexdigest()
428
428
def httpsendfile(ui, filename):
    '''Open filename for binary reading, wrapped for use as the body
    of an HTTP request (thin convenience wrapper around
    httpconnection.httpsendfile).'''
    return httpconnection.httpsendfile(ui, filename, 'rb')
431
431
def unixpath(path):
    '''Return a version of path normalized for use with the
    lfdirstate: redundant separators and up-level references collapsed
    and every native separator replaced with '/'.'''
    normalized = os.path.normpath(path)
    return normalized.replace(os.sep, '/')
435
435
def islfilesrepo(repo):
    '''Return True when repo has the largefiles requirement and its
    store actually contains largefile standins.'''
    if 'largefiles' not in repo.requirements:
        return False
    return any_(shortname + '/' in f[0] for f in repo.store.datafiles())
439
439
def any_(gen):
    '''Backport of the any() builtin (kept for old-Python compat):
    return True if any element of gen is truthy, else False.'''
    result = False
    for item in gen:
        if item:
            result = True
            break
    return result
445
445
class storeprotonotcapable(BaseException):
    '''Raised when no store supports any of the required store types.

    NOTE(review): derives from BaseException rather than Exception, so
    a plain "except Exception" will not catch it — presumably
    deliberate; confirm before changing the base class.'''
    def __init__(self, storetypes):
        # List of store types that were requested but unsupported.
        self.storetypes = storetypes
@@ -1,71 +1,71 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''store class for local filesystem'''
9 '''store class for local filesystem'''
10
10
11 import os
11 import os
12
12
13 from mercurial import util
13 from mercurial import util
14 from mercurial.i18n import _
14 from mercurial.i18n import _
15
15
16 import lfutil
16 import lfutil
17 import basestore
17 import basestore
18
18
class localstore(basestore.basestore):
    '''Because there is a system-wide cache, the local store always
    uses that cache. Since the cache is updated elsewhere, we can
    just read from it here as if it were the store.'''

    def __init__(self, ui, repo, remote):
        # The "store" is just the largefiles area inside the other
        # repository's .hg directory.
        url = os.path.join(remote.path, '.hg', lfutil.longname)
        super(localstore, self).__init__(ui, repo, util.expandpath(url))

    def put(self, source, filename, hash):
        '''Any file that is put must already be in the system-wide
        cache so do nothing.'''
        return

    def exists(self, hash):
        # Presence in the per-user cache is authoritative here.
        return lfutil.inusercache(self.repo.ui, hash)

    def _getfile(self, tmpfile, filename, hash):
        '''Return the user-cache path for hash (rather than copying
        into tmpfile), or raise StoreError when it is not cached.'''
        if lfutil.inusercache(self.ui, hash):
            return lfutil.usercachepath(self.ui, hash)
        raise basestore.StoreError(filename, hash, '',
            _("Can't get file locally"))

    def _verifyfile(self, cctx, cset, contents, standin, verified):
        '''Verify one standin of changectx cctx against the user
        cache.  Returns True when verification FAILED (missing or, if
        contents is set, mismatched data), False when it passed or the
        (filename, filenode) key was already verified.'''
        filename = lfutil.splitstandin(standin)
        if not filename:
            return False
        fctx = cctx[standin]
        key = (filename, fctx.filenode())
        if key in verified:
            return False

        # The standin's content is the 40-char hex hash of the big file.
        expecthash = fctx.data()[0:40]
        verified.add(key)
        if not lfutil.inusercache(self.ui, expecthash):
            self.ui.warn(
                _('changeset %s: %s missing\n'
                  ' (looked for hash %s)\n')
                % (cset, filename, expecthash))
            return True # failed

        if contents:
            storepath = lfutil.usercachepath(self.ui, expecthash)
            actualhash = lfutil.hashfile(storepath)
            if actualhash != expecthash:
                self.ui.warn(
                    _('changeset %s: %s: contents differ\n'
                      ' (%s:\n'
                      ' expected hash %s,\n'
                      ' but got %s)\n')
                    % (cset, filename, storepath, expecthash, actualhash))
                return True # failed
        return False
@@ -1,160 +1,160 b''
1 # Copyright 2011 Fog Creek Software
1 # Copyright 2011 Fog Creek Software
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 import os
6 import os
7 import tempfile
7 import tempfile
8 import urllib2
8 import urllib2
9
9
10 from mercurial import error, httprepo, util, wireproto
10 from mercurial import error, httprepo, util, wireproto
11 from mercurial.i18n import _
11 from mercurial.i18n import _
12
12
13 import lfutil
13 import lfutil
14
14
15 LARGEFILES_REQUIRED_MSG = ('\nThis repository uses the largefiles extension.'
15 LARGEFILES_REQUIRED_MSG = ('\nThis repository uses the largefiles extension.'
16 '\n\nPlease enable it in your Mercurial config '
16 '\n\nPlease enable it in your Mercurial config '
17 'file.\n')
17 'file.\n')
18
18
def putlfile(repo, proto, sha):
    '''Put a largefile into a repository's local cache and into the
    system cache.

    Streams the uploaded data into a temporary file, checks that its
    SHA-1 matches sha, and only then copies it into the store.
    Returns wireproto.pushres(0) on success, pushres(1) on checksum
    mismatch or I/O failure.'''
    f = None
    proto.redirect()
    try:
        try:
            f = tempfile.NamedTemporaryFile(mode='wb+', prefix='hg-putlfile-')
            proto.getfile(f)
            f.seek(0)
            if sha != lfutil.hexsha1(f):
                # Reject data whose checksum does not match the
                # advertised hash.
                return wireproto.pushres(1)
            lfutil.copytostoreabsolute(repo, f.name, sha)
        except IOError:
            repo.ui.warn(
                _('error: could not put received data into largefile store'))
            return wireproto.pushres(1)
    finally:
        # The NamedTemporaryFile is deleted on close.
        if f:
            f.close()

    return wireproto.pushres(0)
41
41
def getlfile(repo, proto, sha):
    '''Retrieve a largefile from the repository-local cache or system
    cache and stream it to the client, preceded by its length.'''
    path = lfutil.findfile(repo, sha)
    if not path:
        raise util.Abort(_('requested largefile %s not present in cache') % sha)
    f = open(path, 'rb')
    length = os.fstat(f.fileno()).st_size

    # Since we can't set an HTTP content-length header here, and
    # Mercurial core provides no way to give the length of a streamres
    # (and reading the entire file into RAM would be ill-advised), send
    # the length on the first line of the response, like the ssh proto
    # does for string responses.
    def generator():
        yield '%d\n' % length
        for chunk in f:
            yield chunk
    return wireproto.streamres(generator())
61
61
def statlfile(repo, proto, sha):
    '''Return '2\\n' if the largefile is missing, '1\\n' if it has a
    mismatched checksum, or '0\\n' if it is in good condition.'''
    filename = lfutil.findfile(repo, sha)
    if not filename:
        return '2\n'
    fd = open(filename, 'rb')
    try:
        if lfutil.hexsha1(fd) == sha:
            return '0\n'
        return '1\n'
    finally:
        fd.close()
75
75
def wirereposetup(ui, repo):
    '''Give a wire repository class the largefile transfer methods
    (putlfile/getlfile/statlfile) by swapping in a subclass.'''
    class lfileswirerepository(repo.__class__):
        def putlfile(self, sha, fd):
            # unfortunately, httprepository._callpush tries to convert its
            # input file-like into a bundle before sending it, so we can't use
            # it ...
            if issubclass(self.__class__, httprepo.httprepository):
                try:
                    return int(self._call('putlfile', data=fd, sha=sha,
                        headers={'content-type':'application/mercurial-0.1'}))
                except (ValueError, urllib2.HTTPError):
                    return 1
            # ... but we can't use sshrepository._call because the data=
            # argument won't get sent, and _callpush does exactly what we want
            # in this case: send the data straight through
            else:
                try:
                    ret, output = self._callpush("putlfile", fd, sha=sha)
                    if ret == "":
                        raise error.ResponseError(_('putlfile failed:'),
                                                  output)
                    return int(ret)
                except IOError:
                    return 1
                except ValueError:
                    raise error.ResponseError(
                        _('putlfile failed (unexpected response):'), ret)

        def getlfile(self, sha):
            # The server sends the payload length on the first line,
            # followed by the raw file data (see proto.getlfile).
            stream = self._callstream("getlfile", sha=sha)
            length = stream.readline()
            try:
                length = int(length)
            except ValueError:
                self._abort(error.ResponseError(_("unexpected response:"),
                                                length))
            return (length, stream)

        def statlfile(self, sha):
            try:
                return int(self._call("statlfile", sha=sha))
            except (ValueError, urllib2.HTTPError):
                # If the server returns anything but an integer followed by a
                # newline, newline, it's not speaking our language; if we get
                # an HTTP error, we can't be sure the largefile is present;
                # either way, consider it missing.
                return 2

    repo.__class__ = lfileswirerepository
125
125
# advertise the largefiles=serve capability
def capabilities(repo, proto):
    '''Wrapped capabilities command: append largefiles=serve to the
    original capability string.'''
    caps = capabilities_orig(repo, proto)
    return caps + ' largefiles=serve'
129
129
# duplicate what Mercurial's new out-of-band errors mechanism does, because
# clients old and new alike both handle it well
def webproto_refuseclient(self, message):
    '''Refuse an HTTP client: set the hg-error content type and return
    the message as the response body.'''
    self.req.header([('Content-Type', 'application/hg-error')])
    return message
135
135
def sshproto_refuseclient(self, message):
    '''Refuse an ssh client: write the message (terminated by a "-"
    line) to stderr, emit an empty response line on stdout, and flush
    so the client sees it immediately.'''
    self.ui.write_err('%s\n-\n' % message)
    self.fout.write('\n')
    self.fout.flush()

    return ''
142
142
def heads(repo, proto):
    '''Wrapped heads command: refuse clients without largefiles
    support when the repository requires the extension; otherwise
    defer to the stock implementation.'''
    if not lfutil.islfilesrepo(repo):
        return wireproto.heads(repo, proto)
    return wireproto.ooberror(LARGEFILES_REQUIRED_MSG)
147
147
def sshrepo_callstream(self, cmd, **args):
    '''Redirect heads requests to lheads on largefiles-aware ssh
    servers, including heads commands embedded in a batch.'''
    if self.capable('largefiles'):
        # cmd is either 'heads' or 'batch', never both, so elif is
        # equivalent to the two independent checks.
        if cmd == 'heads':
            cmd = 'lheads'
        elif cmd == 'batch':
            args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
    return ssh_oldcallstream(self, cmd, **args)
154
154
def httprepo_callstream(self, cmd, **args):
    '''Redirect heads requests to lheads on largefiles-aware http
    servers, including heads commands embedded in a batch.'''
    if self.capable('largefiles'):
        # cmd is either 'heads' or 'batch', never both, so elif is
        # equivalent to the two independent checks.
        if cmd == 'heads':
            cmd = 'lheads'
        elif cmd == 'batch':
            args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
    return http_oldcallstream(self, cmd, **args)
@@ -1,411 +1,411 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''setup for largefiles repositories: reposetup'''
9 '''setup for largefiles repositories: reposetup'''
10 import copy
10 import copy
11 import types
11 import types
12 import os
12 import os
13 import re
13 import re
14
14
15 from mercurial import context, error, manifest, match as match_, node, util
15 from mercurial import context, error, manifest, match as match_, node, util
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17
17
18 import lfcommands
18 import lfcommands
19 import proto
19 import proto
20 import lfutil
20 import lfutil
21
21
def reposetup(ui, repo):
    '''Install largefiles behavior on *repo*.

    Wire repositories only get the wireproto wrappers; local repositories
    get a subclass that rewrites status/commit/push (and __getitem__) to
    present largefiles instead of their standin files.

    Reconstructed from a garbled two-column diff dump; additionally the
    deprecated Python 2 ``<>`` operator is replaced by ``!=``.
    '''
    # wire repositories should be given new wireproto functions but not the
    # other largefiles modifications
    if not repo.local():
        return proto.wirereposetup(ui, repo)

    # Warn if another extension has already wrapped one of the methods we
    # are about to override via subclassing.
    for name in ('status', 'commitctx', 'commit', 'push'):
        method = getattr(repo, name)
        #if not (isinstance(method, types.MethodType) and
        #        method.im_func is repo.__class__.commitctx.im_func):
        if (isinstance(method, types.FunctionType) and
            method.func_name == 'wrap'):
            ui.warn(_('largefiles: repo method %r appears to have already been'
                      ' wrapped by another extension: '
                      'largefiles may behave incorrectly\n')
                    % name)

    class lfiles_repo(repo.__class__):
        # When False, all overridden methods fall straight through to the
        # base class (plain, standin-level behavior).
        lfstatus = False

        def status_nolfiles(self, *args, **kwargs):
            return super(lfiles_repo, self).status(*args, **kwargs)

        # When lfstatus is set, return a context that gives the names
        # of largefiles instead of their corresponding standins and
        # identifies the largefiles as always binary, regardless of
        # their actual contents.
        def __getitem__(self, changeid):
            ctx = super(lfiles_repo, self).__getitem__(changeid)
            if self.lfstatus:
                class lfiles_manifestdict(manifest.manifestdict):
                    def __contains__(self, filename):
                        if super(lfiles_manifestdict,
                                 self).__contains__(filename):
                            return True
                        # fall back to the standin name
                        return super(lfiles_manifestdict,
                            self).__contains__(lfutil.shortname+'/' + filename)
                class lfiles_ctx(ctx.__class__):
                    def files(self):
                        filenames = super(lfiles_ctx, self).files()
                        # strip the standin directory prefix
                        return [re.sub('^\\'+lfutil.shortname+'/', '',
                                       filename) for filename in filenames]
                    def manifest(self):
                        man1 = super(lfiles_ctx, self).manifest()
                        man1.__class__ = lfiles_manifestdict
                        return man1
                    def filectx(self, path, fileid=None, filelog=None):
                        try:
                            result = super(lfiles_ctx, self).filectx(path,
                                fileid, filelog)
                        except error.LookupError:
                            # Adding a null character will cause Mercurial to
                            # identify this as a binary file.
                            result = super(lfiles_ctx, self).filectx(
                                lfutil.shortname + '/' + path, fileid,
                                filelog)
                            olddata = result.data
                            result.data = lambda: olddata() + '\0'
                        return result
                ctx.__class__ = lfiles_ctx
            return ctx

        # Figure out the status of big files and insert them into the
        # appropriate list in the result. Also removes standin files
        # from the listing. Revert to the original status if
        # self.lfstatus is False.
        def status(self, node1='.', node2=None, match=None, ignored=False,
                   clean=False, unknown=False, listsubrepos=False):
            listignored, listclean, listunknown = ignored, clean, unknown
            if not self.lfstatus:
                # TypeError fallback: older Mercurial's status() does not
                # take listsubrepos.
                try:
                    return super(lfiles_repo, self).status(node1, node2, match,
                        listignored, listclean, listunknown, listsubrepos)
                except TypeError:
                    return super(lfiles_repo, self).status(node1, node2, match,
                        listignored, listclean, listunknown)
            else:
                # some calls in this function rely on the old version of status
                self.lfstatus = False
                if isinstance(node1, context.changectx):
                    ctx1 = node1
                else:
                    ctx1 = repo[node1]
                if isinstance(node2, context.changectx):
                    ctx2 = node2
                else:
                    ctx2 = repo[node2]
                working = ctx2.rev() is None
                parentworking = working and ctx1 == self['.']

                def inctx(file, ctx):
                    try:
                        if ctx.rev() is None:
                            return file in ctx.manifest()
                        ctx[file]
                        return True
                    except KeyError:
                        return False

                if match is None:
                    match = match_.always(self.root, self.getcwd())

                # Create a copy of match that matches standins instead
                # of largefiles.
                def tostandin(file):
                    if inctx(lfutil.standin(file), ctx2):
                        return lfutil.standin(file)
                    return file

                m = copy.copy(match)
                m._files = [tostandin(f) for f in m._files]

                # get ignored, clean, and unknown but remove them
                # later if they were not asked for
                try:
                    result = super(lfiles_repo, self).status(node1, node2, m,
                        True, True, True, listsubrepos)
                except TypeError:
                    result = super(lfiles_repo, self).status(node1, node2, m,
                        True, True, True)
                if working:
                    # hold the wlock while we read largefiles and
                    # update the lfdirstate
                    wlock = repo.wlock()
                    try:
                        # Any non-largefiles that were explicitly listed must be
                        # taken out or lfdirstate.status will report an error.
                        # The status of these files was already computed using
                        # super's status.
                        lfdirstate = lfutil.openlfdirstate(ui, self)
                        match._files = [f for f in match._files if f in
                                        lfdirstate]
                        s = lfdirstate.status(match, [], listignored,
                                              listclean, listunknown)
                        (unsure, modified, added, removed, missing, unknown,
                         ignored, clean) = s
                        if parentworking:
                            for lfile in unsure:
                                # compare recorded hash with the hash of the
                                # file currently in the working copy
                                if ctx1[lfutil.standin(lfile)].data().strip() \
                                        != lfutil.hashfile(self.wjoin(lfile)):
                                    modified.append(lfile)
                                else:
                                    clean.append(lfile)
                                    lfdirstate.normal(lfile)
                            lfdirstate.write()
                        else:
                            tocheck = unsure + modified + added + clean
                            modified, added, clean = [], [], []

                            for lfile in tocheck:
                                standin = lfutil.standin(lfile)
                                if inctx(standin, ctx1):
                                    if ctx1[standin].data().strip() != \
                                            lfutil.hashfile(self.wjoin(lfile)):
                                        modified.append(lfile)
                                    else:
                                        clean.append(lfile)
                                else:
                                    added.append(lfile)
                    finally:
                        wlock.release()

                    # largefiles tracked in ctx1 but gone from lfdirstate
                    # have been removed
                    for standin in ctx1.manifest():
                        if not lfutil.isstandin(standin):
                            continue
                        lfile = lfutil.splitstandin(standin)
                        if not match(lfile):
                            continue
                        if lfile not in lfdirstate:
                            removed.append(lfile)
                    # Handle unknown and ignored differently
                    lfiles = (modified, added, removed, missing, [], [], clean)
                    result = list(result)
                    # Unknown files
                    result[4] = [f for f in unknown
                                 if (repo.dirstate[f] == '?' and
                                     not lfutil.isstandin(f))]
                    # Ignored files must be ignored by both the dirstate and
                    # lfdirstate
                    result[5] = set(ignored).intersection(set(result[5]))
                    # combine normal files and largefiles
                    normals = [[fn for fn in filelist
                                if not lfutil.isstandin(fn)]
                               for filelist in result]
                    result = [sorted(list1 + list2)
                              for (list1, list2) in zip(normals, lfiles)]
                else:
                    def toname(f):
                        if lfutil.isstandin(f):
                            return lfutil.splitstandin(f)
                        return f
                    result = [[toname(f) for f in items] for items in result]

                if not listunknown:
                    result[4] = []
                if not listignored:
                    result[5] = []
                if not listclean:
                    result[6] = []
                self.lfstatus = True
                return result

        # As part of committing, copy all of the largefiles into the
        # cache.
        def commitctx(self, *args, **kwargs):
            node = super(lfiles_repo, self).commitctx(*args, **kwargs)
            ctx = self[node]
            for filename in ctx.files():
                if lfutil.isstandin(filename) and filename in ctx.manifest():
                    realfile = lfutil.splitstandin(filename)
                    lfutil.copytostore(self, ctx.node(), realfile)

            return node

        # Before commit, largefile standins have not had their
        # contents updated to reflect the hash of their largefile.
        # Do that here.
        def commit(self, text="", user=None, date=None, match=None,
                   force=False, editor=False, extra={}):
            orig = super(lfiles_repo, self).commit

            wlock = repo.wlock()
            try:
                if getattr(repo, "_isrebasing", False):
                    # We have to take the time to pull down the new
                    # largefiles now. Otherwise if we are rebasing,
                    # any largefiles that were modified in the
                    # destination changesets get overwritten, either
                    # by the rebase or in the first commit after the
                    # rebase.
                    lfcommands.updatelfiles(repo.ui, repo)
                # Case 1: user calls commit with no specific files or
                # include/exclude patterns: refresh and commit all files that
                # are "dirty".
                if ((match is None) or
                    (not match.anypats() and not match.files())):
                    # Spend a bit of time here to get a list of files we know
                    # are modified so we can compare only against those.
                    # It can cost a lot of time (several seconds)
                    # otherwise to update all standins if the largefiles are
                    # large.
                    lfdirstate = lfutil.openlfdirstate(ui, self)
                    dirtymatch = match_.always(repo.root, repo.getcwd())
                    s = lfdirstate.status(dirtymatch, [], False, False, False)
                    modifiedfiles = []
                    for i in s:
                        modifiedfiles.extend(i)
                    lfiles = lfutil.listlfiles(self)
                    # this only loops through largefiles that exist (not
                    # removed/renamed)
                    for lfile in lfiles:
                        if lfile in modifiedfiles:
                            if os.path.exists(self.wjoin(lfutil.standin(lfile))):
                                # this handles the case where a rebase is being
                                # performed and the working copy is not updated
                                # yet.
                                if os.path.exists(self.wjoin(lfile)):
                                    lfutil.updatestandin(self,
                                        lfutil.standin(lfile))
                                    lfdirstate.normal(lfile)
                    for lfile in lfdirstate:
                        if lfile in modifiedfiles:
                            if not os.path.exists(
                                    repo.wjoin(lfutil.standin(lfile))):
                                lfdirstate.drop(lfile)
                    lfdirstate.write()

                    return orig(text=text, user=user, date=date, match=match,
                                force=force, editor=editor, extra=extra)

                for f in match.files():
                    if lfutil.isstandin(f):
                        raise util.Abort(
                            _('file "%s" is a largefile standin') % f,
                            hint=('commit the largefile itself instead'))

                # Case 2: user calls commit with specified patterns: refresh
                # any matching big files.
                smatcher = lfutil.composestandinmatcher(self, match)
                standins = lfutil.dirstate_walk(self.dirstate, smatcher)

                # No matching big files: get out of the way and pass control to
                # the usual commit() method.
                if not standins:
                    return orig(text=text, user=user, date=date, match=match,
                                force=force, editor=editor, extra=extra)

                # Refresh all matching big files. It's possible that the
                # commit will end up failing, in which case the big files will
                # stay refreshed. No harm done: the user modified them and
                # asked to commit them, so sooner or later we're going to
                # refresh the standins. Might as well leave them refreshed.
                lfdirstate = lfutil.openlfdirstate(ui, self)
                for standin in standins:
                    lfile = lfutil.splitstandin(standin)
                    # '!=' instead of the deprecated Python 2 '<>' operator
                    if lfdirstate[lfile] != 'r':
                        lfutil.updatestandin(self, standin)
                        lfdirstate.normal(lfile)
                    else:
                        lfdirstate.drop(lfile)
                lfdirstate.write()

                # Cook up a new matcher that only matches regular files or
                # standins corresponding to the big files requested by the
                # user. Have to modify _files to prevent commit() from
                # complaining "not tracked" for big files.
                lfiles = lfutil.listlfiles(repo)
                match = copy.copy(match)
                orig_matchfn = match.matchfn

                # Check both the list of largefiles and the list of
                # standins because if a largefile was removed, it
                # won't be in the list of largefiles at this point
                match._files += sorted(standins)

                actualfiles = []
                for f in match._files:
                    fstandin = lfutil.standin(f)

                    # ignore known largefiles and standins
                    if f in lfiles or fstandin in standins:
                        continue

                    # append directory separator to avoid collisions
                    if not fstandin.endswith(os.sep):
                        fstandin += os.sep

                    # prevalidate matching standin directories
                    if lfutil.any_(st for st in match._files
                                   if st.startswith(fstandin)):
                        continue
                    actualfiles.append(f)
                match._files = actualfiles

                def matchfn(f):
                    if orig_matchfn(f):
                        return f not in lfiles
                    else:
                        return f in standins

                match.matchfn = matchfn
                return orig(text=text, user=user, date=date, match=match,
                            force=force, editor=editor, extra=extra)
            finally:
                wlock.release()

        def push(self, remote, force=False, revs=None, newbranch=False):
            # Before pushing changesets, upload every largefile they
            # reference to the remote store.
            o = lfutil.findoutgoing(repo, remote, force)
            if o:
                toupload = set()
                o = repo.changelog.nodesbetween(o, revs)[0]
                for n in o:
                    parents = [p for p in repo.changelog.parents(n)
                               if p != node.nullid]
                    ctx = repo[n]
                    files = set(ctx.files())
                    if len(parents) == 2:
                        # for merges, also consider files that differ from
                        # either parent
                        mc = ctx.manifest()
                        mp1 = ctx.parents()[0].manifest()
                        mp2 = ctx.parents()[1].manifest()
                        for f in mp1:
                            if f not in mc:
                                files.add(f)
                        for f in mp2:
                            if f not in mc:
                                files.add(f)
                        for f in mc:
                            if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f,
                                    None):
                                files.add(f)

                    # standin contents are the hashes of the largefiles
                    toupload = toupload.union(
                        set([ctx[f].data().strip()
                             for f in files
                             if lfutil.isstandin(f) and f in ctx]))
                lfcommands.uploadlfiles(ui, self, remote, toupload)
            return super(lfiles_repo, self).push(remote, force, revs,
                                                 newbranch)

    repo.__class__ = lfiles_repo

    def checkrequireslfiles(ui, repo, **kwargs):
        # Add the 'largefiles' requirement as soon as any standin file
        # appears in the store, so plain Mercurial refuses to touch it.
        if 'largefiles' not in repo.requirements and lfutil.any_(
                lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
            repo.requirements.add('largefiles')
            repo._writerequirements()

    checkrequireslfiles(ui, repo)

    ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles)
    ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles)
General Comments 0
You need to be logged in to leave comments. Login now