introduce localrepo.spath for the store path, sopener fixes
Benoit Boissinot
r3791:8643b9f9 default
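Note: this change splits the repository directory (self.path, i.e. .hg) from the store directory (self.spath, where the revlogs live). Store-level access now goes through spath, sopener and sjoin, while .hg-level files (hgrc, dirstate, localtags) keep using path and opener. In this revision the two directories still coincide (spath = path), so behaviour is unchanged; the indirection is what allows the store to be relocated later. A minimal sketch of the layout split, with a simplified stand-in for util.opener (illustrative only, not the actual localrepository code):

    import os

    def opener(base):
        # simplified stand-in for util.opener: open paths relative to base
        def o(path, mode='r'):
            return open(os.path.join(base, path), mode)
        return o

    class repolayout(object):
        """Sketch of the path/spath split introduced by this changeset."""
        def __init__(self, root):
            self.path = os.path.join(root, ".hg")  # repository metadata
            self.spath = self.path                 # store; same dir for now
            self.opener = opener(self.path)        # hgrc, dirstate, localtags
            self.sopener = opener(self.spath)      # revlogs, journal, undo

        def join(self, f):
            return os.path.join(self.path, f)

        def sjoin(self, f):
            # store-relative join: revlog-related paths must use this
            return os.path.join(self.spath, f)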
bundlerepo.py
@@ -1,256 +1,256 @@
"""
bundlerepo.py - repository class for viewing uncompressed bundles

This provides a read-only repository interface to bundles as if
they were part of the actual repository.

Copyright 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org>

This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
"""

from node import *
from i18n import gettext as _
from demandload import demandload
demandload(globals(), "changegroup util os struct bz2 tempfile")

import localrepo, changelog, manifest, filelog, revlog

class bundlerevlog(revlog.revlog):
    def __init__(self, opener, indexfile, datafile, bundlefile,
                 linkmapper=None):
        # How it works:
        # to retrieve a revision, we need to know the offset of
        # the revision in the bundlefile (an opened file).
        #
        # We store this offset in the index (start), to differentiate a
        # rev in the bundle and from a rev in the revlog, we check
        # len(index[r]). If the tuple is bigger than 7, it is a bundle
        # (it is bigger since we store the node to which the delta is)
        #
        revlog.revlog.__init__(self, opener, indexfile, datafile)
        self.bundlefile = bundlefile
        self.basemap = {}
        def chunkpositer():
            for chunk in changegroup.chunkiter(bundlefile):
                pos = bundlefile.tell()
                yield chunk, pos - len(chunk)
        n = self.count()
        prev = None
        for chunk, start in chunkpositer():
            size = len(chunk)
            if size < 80:
                raise util.Abort("invalid changegroup")
            start += 80
            size -= 80
            node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
            if node in self.nodemap:
                prev = node
                continue
            for p in (p1, p2):
                if not p in self.nodemap:
                    raise revlog.RevlogError(_("unknown parent %s") % short(p1))
            if linkmapper is None:
                link = n
            else:
                link = linkmapper(cs)

            if not prev:
                prev = p1
            # start, size, base is not used, link, p1, p2, delta ref
            if self.version == revlog.REVLOGV0:
                e = (start, size, None, link, p1, p2, node)
            else:
                e = (self.offset_type(start, 0), size, -1, None, link,
                     self.rev(p1), self.rev(p2), node)
            self.basemap[n] = prev
            self.index.append(e)
            self.nodemap[node] = n
            prev = node
            n += 1

    def bundle(self, rev):
        """is rev from the bundle"""
        if rev < 0:
            return False
        return rev in self.basemap
    def bundlebase(self, rev): return self.basemap[rev]
    def chunk(self, rev, df=None, cachelen=4096):
        # Warning: in case of bundle, the diff is against bundlebase,
        # not against rev - 1
        # XXX: could use some caching
        if not self.bundle(rev):
            return revlog.revlog.chunk(self, rev, df, cachelen)
        self.bundlefile.seek(self.start(rev))
        return self.bundlefile.read(self.length(rev))

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if self.bundle(rev1) and self.bundle(rev2):
            # hot path for bundle
            revb = self.rev(self.bundlebase(rev2))
            if revb == rev1:
                return self.chunk(rev2)
        elif not self.bundle(rev1) and not self.bundle(rev2):
            return revlog.revlog.chunk(self, rev1, rev2)

        return self.diff(self.revision(self.node(rev1)),
                         self.revision(self.node(rev2)))

    def revision(self, node):
        """return an uncompressed revision of a given"""
        if node == nullid: return ""

        text = None
        chain = []
        iter_node = node
        rev = self.rev(iter_node)
        # reconstruct the revision if it is from a changegroup
        while self.bundle(rev):
            if self.cache and self.cache[0] == iter_node:
                text = self.cache[2]
                break
            chain.append(rev)
            iter_node = self.bundlebase(rev)
            rev = self.rev(iter_node)
        if text is None:
            text = revlog.revlog.revision(self, iter_node)

        while chain:
            delta = self.chunk(chain.pop())
            text = self.patches(text, [delta])

        p1, p2 = self.parents(node)
        if node != revlog.hash(text, p1, p2):
            raise revlog.RevlogError(_("integrity check failed on %s:%d")
                                     % (self.datafile, self.rev(node)))

        self.cache = (node, self.rev(node), text)
        return text

    def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
        raise NotImplementedError
    def addgroup(self, revs, linkmapper, transaction, unique=0):
        raise NotImplementedError
    def strip(self, rev, minlink):
        raise NotImplementedError
    def checksize(self):
        raise NotImplementedError

class bundlechangelog(bundlerevlog, changelog.changelog):
    def __init__(self, opener, bundlefile):
        changelog.changelog.__init__(self, opener)
        bundlerevlog.__init__(self, opener, self.indexfile, self.datafile,
                              bundlefile)

class bundlemanifest(bundlerevlog, manifest.manifest):
    def __init__(self, opener, bundlefile, linkmapper):
        manifest.manifest.__init__(self, opener)
        bundlerevlog.__init__(self, opener, self.indexfile, self.datafile,
                              bundlefile, linkmapper)

class bundlefilelog(bundlerevlog, filelog.filelog):
    def __init__(self, opener, path, bundlefile, linkmapper):
        filelog.filelog.__init__(self, opener, path)
        bundlerevlog.__init__(self, opener, self.indexfile, self.datafile,
                              bundlefile, linkmapper)

class bundlerepository(localrepo.localrepository):
    def __init__(self, ui, path, bundlename):
        localrepo.localrepository.__init__(self, ui, path)

        self._url = 'bundle:' + bundlename
        if path: self._url += '+' + path

        self.tempfile = None
        self.bundlefile = open(bundlename, "rb")
        header = self.bundlefile.read(6)
        if not header.startswith("HG"):
            raise util.Abort(_("%s: not a Mercurial bundle file") % bundlename)
        elif not header.startswith("HG10"):
            raise util.Abort(_("%s: unknown bundle version") % bundlename)
        elif header == "HG10BZ":
            fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-",
                                            suffix=".hg10un", dir=self.path)
            self.tempfile = temp
            fptemp = os.fdopen(fdtemp, 'wb')
            def generator(f):
                zd = bz2.BZ2Decompressor()
                zd.decompress("BZ")
                for chunk in f:
                    yield zd.decompress(chunk)
            gen = generator(util.filechunkiter(self.bundlefile, 4096))

            try:
                fptemp.write("HG10UN")
                for chunk in gen:
                    fptemp.write(chunk)
            finally:
                fptemp.close()
                self.bundlefile.close()

            self.bundlefile = open(self.tempfile, "rb")
            # seek right after the header
            self.bundlefile.seek(6)
        elif header == "HG10UN":
            # nothing to do
            pass
        else:
            raise util.Abort(_("%s: unknown bundle compression type")
                             % bundlename)
-        self.changelog = bundlechangelog(self.opener, self.bundlefile)
-        self.manifest = bundlemanifest(self.opener, self.bundlefile,
+        self.changelog = bundlechangelog(self.sopener, self.bundlefile)
+        self.manifest = bundlemanifest(self.sopener, self.bundlefile,
                                        self.changelog.rev)
        # dict with the mapping 'filename' -> position in the bundle
        self.bundlefilespos = {}
        while 1:
            f = changegroup.getchunk(self.bundlefile)
            if not f:
                break
            self.bundlefilespos[f] = self.bundlefile.tell()
            for c in changegroup.chunkiter(self.bundlefile):
                pass

    def url(self):
        return self._url

    def dev(self):
        return -1

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        if f in self.bundlefilespos:
            self.bundlefile.seek(self.bundlefilespos[f])
-            return bundlefilelog(self.opener, f, self.bundlefile,
+            return bundlefilelog(self.sopener, f, self.bundlefile,
                                 self.changelog.rev)
        else:
-            return filelog.filelog(self.opener, f)
+            return filelog.filelog(self.sopener, f)

    def close(self):
        """Close assigned bundle file immediately."""
        self.bundlefile.close()

    def __del__(self):
        bundlefile = getattr(self, 'bundlefile', None)
        if bundlefile and not bundlefile.closed:
            bundlefile.close()
        tempfile = getattr(self, 'tempfile', None)
        if tempfile is not None:
            os.unlink(tempfile)

def instance(ui, path, create):
    if create:
        raise util.Abort(_('cannot create new bundle repository'))
    path = util.drop_scheme('file', path)
    if path.startswith('bundle:'):
        path = util.drop_scheme('bundle', path)
        s = path.split("+", 1)
        if len(s) == 1:
            repopath, bundlename = "", s[0]
        else:
            repopath, bundlename = s
    else:
        repopath, bundlename = '', path
    return bundlerepository(ui, repopath, bundlename)
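With this hunk, bundlerepository builds its changelog, manifest and filelogs on the store opener (sopener), so overlaying a bundle resolves revlogs against the same directory as the underlying repository's own store. A hedged usage sketch (Python 2 era API as shown in the diff; the ui construction and paths are illustrative placeholders):

    from mercurial import ui as _ui
    from mercurial import bundlerepo

    u = _ui.ui()
    # overlay an HG10BZ/HG10UN bundle on top of a local repository
    repo = bundlerepo.bundlerepository(u, "/path/to/repo", "/path/to/incoming.hg")
    print repo.url()             # bundle:/path/to/incoming.hg+/path/to/repo
    fl = repo.file("some/file")  # served from the store, or from the bundle
    repo.close()                 # close the bundle file; __del__ removes any temp file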
hg.py
@@ -1,256 +1,256 @@
# hg.py - repository classes for mercurial
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from node import *
from repo import *
from demandload import *
from i18n import gettext as _
demandload(globals(), "localrepo bundlerepo httprepo sshrepo statichttprepo")
demandload(globals(), "errno lock os shutil util merge@_merge verify@_verify")

def _local(path):
    return (os.path.isfile(util.drop_scheme('file', path)) and
            bundlerepo or localrepo)

schemes = {
    'bundle': bundlerepo,
    'file': _local,
    'hg': httprepo,
    'http': httprepo,
    'https': httprepo,
    'old-http': statichttprepo,
    'ssh': sshrepo,
    'static-http': statichttprepo,
    }

def _lookup(path):
    scheme = 'file'
    if path:
        c = path.find(':')
        if c > 0:
            scheme = path[:c]
    thing = schemes.get(scheme) or schemes['file']
    try:
        return thing(path)
    except TypeError:
        return thing

def islocal(repo):
    '''return true if repo or path is local'''
    if isinstance(repo, str):
        try:
            return _lookup(repo).islocal(repo)
        except AttributeError:
            return False
    return repo.local()

repo_setup_hooks = []

def repository(ui, path='', create=False):
    """return a repository object for the specified path"""
    repo = _lookup(path).instance(ui, path, create)
    for hook in repo_setup_hooks:
        hook(ui, repo)
    return repo

def defaultdest(source):
    '''return default destination of clone if none is given'''
    return os.path.basename(os.path.normpath(source))

def clone(ui, source, dest=None, pull=False, rev=None, update=True,
          stream=False):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository objects, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    rev: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository
    """
    if isinstance(source, str):
        src_repo = repository(ui, source)
    else:
        src_repo = source
        source = src_repo.url()

    if dest is None:
        dest = defaultdest(source)

    def localpath(path):
        if path.startswith('file://'):
            return path[7:]
        if path.startswith('file:'):
            return path[5:]
        return path

    dest = localpath(dest)
    source = localpath(source)

    if os.path.exists(dest):
        raise util.Abort(_("destination '%s' already exists") % dest)

    class DirCleanup(object):
        def __init__(self, dir_):
            self.rmtree = shutil.rmtree
            self.dir_ = dir_
        def close(self):
            self.dir_ = None
        def __del__(self):
            if self.dir_:
                self.rmtree(self.dir_, True)

    dest_repo = repository(ui, dest, create=True)

-    dest_path = None
    dir_cleanup = None
    if dest_repo.local():
-        dest_path = os.path.realpath(dest_repo.root)
-        dir_cleanup = DirCleanup(dest_path)
+        dir_cleanup = DirCleanup(os.path.realpath(dest_repo.root))

    abspath = source
    copy = False
    if src_repo.local() and dest_repo.local():
        abspath = os.path.abspath(source)
        copy = not pull and not rev

    src_lock, dest_lock = None, None
    if copy:
        try:
            # we use a lock here because if we race with commit, we
            # can end up with extra data in the cloned revlogs that's
            # not pointed to by changesets, thus causing verify to
            # fail
            src_lock = src_repo.lock()
        except lock.LockException:
            copy = False

    if copy:
        # we lock here to avoid premature writing to the target
-        dest_lock = lock.lock(os.path.join(dest_path, ".hg", "lock"))
+        src_store = os.path.realpath(src_repo.spath)
+        dest_store = os.path.realpath(dest_repo.spath)
+        dest_lock = lock.lock(os.path.join(dest_store, "lock"))

        files = ("data",
                 "00manifest.d", "00manifest.i",
                 "00changelog.d", "00changelog.i")
        for f in files:
-            src = os.path.join(source, ".hg", f)
-            dst = os.path.join(dest_path, ".hg", f)
+            src = os.path.join(src_store, f)
+            dst = os.path.join(dest_store, f)
            try:
                util.copyfiles(src, dst)
            except OSError, inst:
                if inst.errno != errno.ENOENT:
                    raise

        # we need to re-init the repo after manually copying the data
        # into it
        dest_repo = repository(ui, dest)

    else:
        revs = None
        if rev:
            if 'lookup' not in src_repo.capabilities:
                raise util.Abort(_("src repository does not support revision "
                                   "lookup and so doesn't support clone by "
                                   "revision"))
            revs = [src_repo.lookup(r) for r in rev]

        if dest_repo.local():
            dest_repo.clone(src_repo, heads=revs, stream=stream)
        elif src_repo.local():
            src_repo.push(dest_repo, revs=revs)
        else:
            raise util.Abort(_("clone from remote to remote not supported"))

    if src_lock:
        src_lock.release()

    if dest_repo.local():
        fp = dest_repo.opener("hgrc", "w", text=True)
        fp.write("[paths]\n")
        fp.write("default = %s\n" % abspath)
        fp.close()

    if dest_lock:
        dest_lock.release()

    if update:
        _update(dest_repo, dest_repo.changelog.tip())
    if dir_cleanup:
        dir_cleanup.close()

    return src_repo, dest_repo

def _showstats(repo, stats):
    stats = ((stats[0], _("updated")),
             (stats[1], _("merged")),
             (stats[2], _("removed")),
             (stats[3], _("unresolved")))
    note = ", ".join([_("%d files %s") % s for s in stats])
    repo.ui.status("%s\n" % note)

def _update(repo, node): return update(repo, node)

def update(repo, node):
    """update the working directory to node, merging linear changes"""
    stats = _merge.update(repo, node, False, False, None, None)
    _showstats(repo, stats)
    if stats[3]:
        repo.ui.status(_("There are unresolved merges with"
                         " locally modified files.\n"))
    return stats[3]

def clean(repo, node, wlock=None, show_stats=True):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = _merge.update(repo, node, False, True, None, wlock)
    if show_stats: _showstats(repo, stats)
    return stats[3]

def merge(repo, node, force=None, remind=True, wlock=None):
    """branch merge with node, resolving changes"""
    stats = _merge.update(repo, node, True, force, False, wlock)
    _showstats(repo, stats)
    if stats[3]:
        pl = repo.parents()
        repo.ui.status(_("There are unresolved merges,"
                         " you can redo the full merge using:\n"
                         "  hg update -C %s\n"
                         "  hg merge %s\n")
                       % (pl[0].rev(), pl[1].rev()))
    elif remind:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return stats[3]

def revert(repo, node, choose, wlock):
    """revert changes to revision in node without updating dirstate"""
    return _merge.update(repo, node, False, True, choose, wlock)[3]

def verify(repo):
    """verify the consistency of a repository"""
    return _verify.verify(repo)
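In the hardlink-copy branch of clone above, both endpoints now derive their store location from repo.spath instead of hardcoding ".hg", and the lock file is taken inside the store. A condensed sketch of that copy loop (simplified from the code above; util is Mercurial's util module, imported in-package as in these files):

    import errno, os
    import util  # Mercurial's util module

    def copystore(src_repo, dest_repo):
        # copy (hardlink when possible) the store files between repositories
        src_store = os.path.realpath(src_repo.spath)
        dest_store = os.path.realpath(dest_repo.spath)
        for f in ("data",
                  "00manifest.d", "00manifest.i",
                  "00changelog.d", "00changelog.i"):
            src = os.path.join(src_store, f)
            dst = os.path.join(dest_store, f)
            try:
                util.copyfiles(src, dst)
            except OSError, inst:
                # missing pieces (e.g. no data dir yet) are fine to skip
                if inst.errno != errno.ENOENT:
                    raise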
@@ -1,1913 +1,1916 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
17 class localrepository(repo.repository):
17 class localrepository(repo.repository):
18 capabilities = ('lookup', 'changegroupsubset')
18 capabilities = ('lookup', 'changegroupsubset')
19
19
20 def __del__(self):
20 def __del__(self):
21 self.transhandle = None
21 self.transhandle = None
22 def __init__(self, parentui, path=None, create=0):
22 def __init__(self, parentui, path=None, create=0):
23 repo.repository.__init__(self)
23 repo.repository.__init__(self)
24 if not path:
24 if not path:
25 p = os.getcwd()
25 p = os.getcwd()
26 while not os.path.isdir(os.path.join(p, ".hg")):
26 while not os.path.isdir(os.path.join(p, ".hg")):
27 oldp = p
27 oldp = p
28 p = os.path.dirname(p)
28 p = os.path.dirname(p)
29 if p == oldp:
29 if p == oldp:
30 raise repo.RepoError(_("There is no Mercurial repository"
30 raise repo.RepoError(_("There is no Mercurial repository"
31 " here (.hg not found)"))
31 " here (.hg not found)"))
32 path = p
32 path = p
33 self.path = os.path.join(path, ".hg")
33 self.path = os.path.join(path, ".hg")
34 self.spath = self.path
34
35
35 if not os.path.isdir(self.path):
36 if not os.path.isdir(self.path):
36 if create:
37 if create:
37 if not os.path.exists(path):
38 if not os.path.exists(path):
38 os.mkdir(path)
39 os.mkdir(path)
39 os.mkdir(self.path)
40 os.mkdir(self.path)
41 if self.spath != self.path:
42 os.mkdir(self.spath)
40 else:
43 else:
41 raise repo.RepoError(_("repository %s not found") % path)
44 raise repo.RepoError(_("repository %s not found") % path)
42 elif create:
45 elif create:
43 raise repo.RepoError(_("repository %s already exists") % path)
46 raise repo.RepoError(_("repository %s already exists") % path)
44
47
45 self.root = os.path.realpath(path)
48 self.root = os.path.realpath(path)
46 self.origroot = path
49 self.origroot = path
47 self.ui = ui.ui(parentui=parentui)
50 self.ui = ui.ui(parentui=parentui)
48 self.opener = util.opener(self.path)
51 self.opener = util.opener(self.path)
49 self.sopener = util.opener(self.path)
52 self.sopener = util.opener(self.spath)
50 self.wopener = util.opener(self.root)
53 self.wopener = util.opener(self.root)
51
54
52 try:
55 try:
53 self.ui.readconfig(self.join("hgrc"), self.root)
56 self.ui.readconfig(self.join("hgrc"), self.root)
54 except IOError:
57 except IOError:
55 pass
58 pass
56
59
57 v = self.ui.configrevlog()
60 v = self.ui.configrevlog()
58 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
61 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
59 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
62 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
60 fl = v.get('flags', None)
63 fl = v.get('flags', None)
61 flags = 0
64 flags = 0
62 if fl != None:
65 if fl != None:
63 for x in fl.split():
66 for x in fl.split():
64 flags |= revlog.flagstr(x)
67 flags |= revlog.flagstr(x)
65 elif self.revlogv1:
68 elif self.revlogv1:
66 flags = revlog.REVLOG_DEFAULT_FLAGS
69 flags = revlog.REVLOG_DEFAULT_FLAGS
67
70
68 v = self.revlogversion | flags
71 v = self.revlogversion | flags
69 self.manifest = manifest.manifest(self.sopener, v)
72 self.manifest = manifest.manifest(self.sopener, v)
70 self.changelog = changelog.changelog(self.sopener, v)
73 self.changelog = changelog.changelog(self.sopener, v)
71
74
72 # the changelog might not have the inline index flag
75 # the changelog might not have the inline index flag
73 # on. If the format of the changelog is the same as found in
76 # on. If the format of the changelog is the same as found in
74 # .hgrc, apply any flags found in the .hgrc as well.
77 # .hgrc, apply any flags found in the .hgrc as well.
75 # Otherwise, just version from the changelog
78 # Otherwise, just version from the changelog
76 v = self.changelog.version
79 v = self.changelog.version
77 if v == self.revlogversion:
80 if v == self.revlogversion:
78 v |= flags
81 v |= flags
79 self.revlogversion = v
82 self.revlogversion = v
80
83
81 self.tagscache = None
84 self.tagscache = None
82 self.branchcache = None
85 self.branchcache = None
83 self.nodetagscache = None
86 self.nodetagscache = None
84 self.encodepats = None
87 self.encodepats = None
85 self.decodepats = None
88 self.decodepats = None
86 self.transhandle = None
89 self.transhandle = None
87
90
88 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
91 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
89
92
90 def url(self):
93 def url(self):
91 return 'file:' + self.root
94 return 'file:' + self.root
92
95
93 def hook(self, name, throw=False, **args):
96 def hook(self, name, throw=False, **args):
94 def callhook(hname, funcname):
97 def callhook(hname, funcname):
95 '''call python hook. hook is callable object, looked up as
98 '''call python hook. hook is callable object, looked up as
96 name in python module. if callable returns "true", hook
99 name in python module. if callable returns "true", hook
97 fails, else passes. if hook raises exception, treated as
100 fails, else passes. if hook raises exception, treated as
98 hook failure. exception propagates if throw is "true".
101 hook failure. exception propagates if throw is "true".
99
102
100 reason for "true" meaning "hook failed" is so that
103 reason for "true" meaning "hook failed" is so that
101 unmodified commands (e.g. mercurial.commands.update) can
104 unmodified commands (e.g. mercurial.commands.update) can
102 be run as hooks without wrappers to convert return values.'''
105 be run as hooks without wrappers to convert return values.'''
103
106
104 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
107 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
105 d = funcname.rfind('.')
108 d = funcname.rfind('.')
106 if d == -1:
109 if d == -1:
107 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
110 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
108 % (hname, funcname))
111 % (hname, funcname))
109 modname = funcname[:d]
112 modname = funcname[:d]
110 try:
113 try:
111 obj = __import__(modname)
114 obj = __import__(modname)
112 except ImportError:
115 except ImportError:
113 try:
116 try:
114 # extensions are loaded with hgext_ prefix
117 # extensions are loaded with hgext_ prefix
115 obj = __import__("hgext_%s" % modname)
118 obj = __import__("hgext_%s" % modname)
116 except ImportError:
119 except ImportError:
117 raise util.Abort(_('%s hook is invalid '
120 raise util.Abort(_('%s hook is invalid '
118 '(import of "%s" failed)') %
121 '(import of "%s" failed)') %
119 (hname, modname))
122 (hname, modname))
120 try:
123 try:
121 for p in funcname.split('.')[1:]:
124 for p in funcname.split('.')[1:]:
122 obj = getattr(obj, p)
125 obj = getattr(obj, p)
123 except AttributeError, err:
126 except AttributeError, err:
124 raise util.Abort(_('%s hook is invalid '
127 raise util.Abort(_('%s hook is invalid '
125 '("%s" is not defined)') %
128 '("%s" is not defined)') %
126 (hname, funcname))
129 (hname, funcname))
127 if not callable(obj):
130 if not callable(obj):
128 raise util.Abort(_('%s hook is invalid '
131 raise util.Abort(_('%s hook is invalid '
129 '("%s" is not callable)') %
132 '("%s" is not callable)') %
130 (hname, funcname))
133 (hname, funcname))
131 try:
134 try:
132 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
135 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
133 except (KeyboardInterrupt, util.SignalInterrupt):
136 except (KeyboardInterrupt, util.SignalInterrupt):
134 raise
137 raise
135 except Exception, exc:
138 except Exception, exc:
136 if isinstance(exc, util.Abort):
139 if isinstance(exc, util.Abort):
137 self.ui.warn(_('error: %s hook failed: %s\n') %
140 self.ui.warn(_('error: %s hook failed: %s\n') %
138 (hname, exc.args[0]))
141 (hname, exc.args[0]))
139 else:
142 else:
140 self.ui.warn(_('error: %s hook raised an exception: '
143 self.ui.warn(_('error: %s hook raised an exception: '
141 '%s\n') % (hname, exc))
144 '%s\n') % (hname, exc))
142 if throw:
145 if throw:
143 raise
146 raise
144 self.ui.print_exc()
147 self.ui.print_exc()
145 return True
148 return True
146 if r:
149 if r:
147 if throw:
150 if throw:
148 raise util.Abort(_('%s hook failed') % hname)
151 raise util.Abort(_('%s hook failed') % hname)
149 self.ui.warn(_('warning: %s hook failed\n') % hname)
152 self.ui.warn(_('warning: %s hook failed\n') % hname)
150 return r
153 return r
151
154
152 def runhook(name, cmd):
155 def runhook(name, cmd):
153 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
156 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
154 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
157 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
155 r = util.system(cmd, environ=env, cwd=self.root)
158 r = util.system(cmd, environ=env, cwd=self.root)
156 if r:
159 if r:
157 desc, r = util.explain_exit(r)
160 desc, r = util.explain_exit(r)
158 if throw:
161 if throw:
159 raise util.Abort(_('%s hook %s') % (name, desc))
162 raise util.Abort(_('%s hook %s') % (name, desc))
160 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
163 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
161 return r
164 return r
162
165
163 r = False
166 r = False
164 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
167 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
165 if hname.split(".", 1)[0] == name and cmd]
168 if hname.split(".", 1)[0] == name and cmd]
166 hooks.sort()
169 hooks.sort()
167 for hname, cmd in hooks:
170 for hname, cmd in hooks:
168 if cmd.startswith('python:'):
171 if cmd.startswith('python:'):
169 r = callhook(hname, cmd[7:].strip()) or r
172 r = callhook(hname, cmd[7:].strip()) or r
170 else:
173 else:
171 r = runhook(hname, cmd) or r
174 r = runhook(hname, cmd) or r
172 return r
175 return r
173
176
174 tag_disallowed = ':\r\n'
177 tag_disallowed = ':\r\n'
175
178
176 def tag(self, name, node, message, local, user, date):
179 def tag(self, name, node, message, local, user, date):
177 '''tag a revision with a symbolic name.
180 '''tag a revision with a symbolic name.
178
181
179 if local is True, the tag is stored in a per-repository file.
182 if local is True, the tag is stored in a per-repository file.
180 otherwise, it is stored in the .hgtags file, and a new
183 otherwise, it is stored in the .hgtags file, and a new
181 changeset is committed with the change.
184 changeset is committed with the change.
182
185
183 keyword arguments:
186 keyword arguments:
184
187
185 local: whether to store tag in non-version-controlled file
188 local: whether to store tag in non-version-controlled file
186 (default False)
189 (default False)
187
190
188 message: commit message to use if committing
191 message: commit message to use if committing
189
192
190 user: name of user to use if committing
193 user: name of user to use if committing
191
194
192 date: date tuple to use if committing'''
195 date: date tuple to use if committing'''
193
196
194 for c in self.tag_disallowed:
197 for c in self.tag_disallowed:
195 if c in name:
198 if c in name:
196 raise util.Abort(_('%r cannot be used in a tag name') % c)
199 raise util.Abort(_('%r cannot be used in a tag name') % c)
197
200
198 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
201 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
199
202
200 if local:
203 if local:
201 # local tags are stored in the current charset
204 # local tags are stored in the current charset
202 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
205 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
203 self.hook('tag', node=hex(node), tag=name, local=local)
206 self.hook('tag', node=hex(node), tag=name, local=local)
204 return
207 return
205
208
206 for x in self.status()[:5]:
209 for x in self.status()[:5]:
207 if '.hgtags' in x:
210 if '.hgtags' in x:
208 raise util.Abort(_('working copy of .hgtags is changed '
211 raise util.Abort(_('working copy of .hgtags is changed '
209 '(please commit .hgtags manually)'))
212 '(please commit .hgtags manually)'))
210
213
211 # committed tags are stored in UTF-8
214 # committed tags are stored in UTF-8
212 line = '%s %s\n' % (hex(node), util.fromlocal(name))
215 line = '%s %s\n' % (hex(node), util.fromlocal(name))
213 self.wfile('.hgtags', 'ab').write(line)
216 self.wfile('.hgtags', 'ab').write(line)
214 if self.dirstate.state('.hgtags') == '?':
217 if self.dirstate.state('.hgtags') == '?':
215 self.add(['.hgtags'])
218 self.add(['.hgtags'])
216
219
217 self.commit(['.hgtags'], message, user, date)
220 self.commit(['.hgtags'], message, user, date)
218 self.hook('tag', node=hex(node), tag=name, local=local)
221 self.hook('tag', node=hex(node), tag=name, local=local)
219
222
220 def tags(self):
223 def tags(self):
221 '''return a mapping of tag to node'''
224 '''return a mapping of tag to node'''
222 if not self.tagscache:
225 if not self.tagscache:
223 self.tagscache = {}
226 self.tagscache = {}
224
227
225 def parsetag(line, context):
228 def parsetag(line, context):
226 if not line:
229 if not line:
227 return
230 return
228 s = l.split(" ", 1)
231 s = l.split(" ", 1)
229 if len(s) != 2:
232 if len(s) != 2:
230 self.ui.warn(_("%s: cannot parse entry\n") % context)
233 self.ui.warn(_("%s: cannot parse entry\n") % context)
231 return
234 return
232 node, key = s
235 node, key = s
233 key = util.tolocal(key.strip()) # stored in UTF-8
236 key = util.tolocal(key.strip()) # stored in UTF-8
234 try:
237 try:
235 bin_n = bin(node)
238 bin_n = bin(node)
236 except TypeError:
239 except TypeError:
237 self.ui.warn(_("%s: node '%s' is not well formed\n") %
240 self.ui.warn(_("%s: node '%s' is not well formed\n") %
238 (context, node))
241 (context, node))
239 return
242 return
240 if bin_n not in self.changelog.nodemap:
243 if bin_n not in self.changelog.nodemap:
241 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
244 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
242 (context, key))
245 (context, key))
243 return
246 return
244 self.tagscache[key] = bin_n
247 self.tagscache[key] = bin_n
245
248
246 # read the tags file from each head, ending with the tip,
249 # read the tags file from each head, ending with the tip,
247 # and add each tag found to the map, with "newer" ones
250 # and add each tag found to the map, with "newer" ones
248 # taking precedence
251 # taking precedence
249 f = None
252 f = None
250 for rev, node, fnode in self._hgtagsnodes():
253 for rev, node, fnode in self._hgtagsnodes():
251 f = (f and f.filectx(fnode) or
254 f = (f and f.filectx(fnode) or
252 self.filectx('.hgtags', fileid=fnode))
255 self.filectx('.hgtags', fileid=fnode))
253 count = 0
256 count = 0
254 for l in f.data().splitlines():
257 for l in f.data().splitlines():
255 count += 1
258 count += 1
256 parsetag(l, _("%s, line %d") % (str(f), count))
259 parsetag(l, _("%s, line %d") % (str(f), count))
257
260
258 try:
261 try:
259 f = self.opener("localtags")
262 f = self.opener("localtags")
260 count = 0
263 count = 0
261 for l in f:
264 for l in f:
262 # localtags are stored in the local character set
265 # localtags are stored in the local character set
263 # while the internal tag table is stored in UTF-8
266 # while the internal tag table is stored in UTF-8
264 l = util.fromlocal(l)
267 l = util.fromlocal(l)
265 count += 1
268 count += 1
266 parsetag(l, _("localtags, line %d") % count)
269 parsetag(l, _("localtags, line %d") % count)
267 except IOError:
270 except IOError:
268 pass
271 pass
269
272
270 self.tagscache['tip'] = self.changelog.tip()
273 self.tagscache['tip'] = self.changelog.tip()
271
274
272 return self.tagscache
275 return self.tagscache
273
276
274 def _hgtagsnodes(self):
277 def _hgtagsnodes(self):
275 heads = self.heads()
278 heads = self.heads()
276 heads.reverse()
279 heads.reverse()
277 last = {}
280 last = {}
278 ret = []
281 ret = []
279 for node in heads:
282 for node in heads:
280 c = self.changectx(node)
283 c = self.changectx(node)
281 rev = c.rev()
284 rev = c.rev()
282 try:
285 try:
283 fnode = c.filenode('.hgtags')
286 fnode = c.filenode('.hgtags')
284 except repo.LookupError:
287 except repo.LookupError:
285 continue
288 continue
286 ret.append((rev, node, fnode))
289 ret.append((rev, node, fnode))
287 if fnode in last:
290 if fnode in last:
288 ret[last[fnode]] = None
291 ret[last[fnode]] = None
289 last[fnode] = len(ret) - 1
292 last[fnode] = len(ret) - 1
290 return [item for item in ret if item]
293 return [item for item in ret if item]
291
294
292 def tagslist(self):
295 def tagslist(self):
293 '''return a list of tags ordered by revision'''
296 '''return a list of tags ordered by revision'''
294 l = []
297 l = []
295 for t, n in self.tags().items():
298 for t, n in self.tags().items():
296 try:
299 try:
297 r = self.changelog.rev(n)
300 r = self.changelog.rev(n)
298 except:
301 except:
299 r = -2 # sort to the beginning of the list if unknown
302 r = -2 # sort to the beginning of the list if unknown
300 l.append((r, t, n))
303 l.append((r, t, n))
301 l.sort()
304 l.sort()
302 return [(t, n) for r, t, n in l]
305 return [(t, n) for r, t, n in l]
303
306
304 def nodetags(self, node):
307 def nodetags(self, node):
305 '''return the tags associated with a node'''
308 '''return the tags associated with a node'''
306 if not self.nodetagscache:
309 if not self.nodetagscache:
307 self.nodetagscache = {}
310 self.nodetagscache = {}
308 for t, n in self.tags().items():
311 for t, n in self.tags().items():
309 self.nodetagscache.setdefault(n, []).append(t)
312 self.nodetagscache.setdefault(n, []).append(t)
310 return self.nodetagscache.get(node, [])
313 return self.nodetagscache.get(node, [])
311
314
312 def branchtags(self):
315 def branchtags(self):
313 if self.branchcache != None:
316 if self.branchcache != None:
314 return self.branchcache
317 return self.branchcache
315
318
316 self.branchcache = {} # avoid recursion in changectx
319 self.branchcache = {} # avoid recursion in changectx
317
320
318 partial, last, lrev = self._readbranchcache()
321 partial, last, lrev = self._readbranchcache()
319
322
320 tiprev = self.changelog.count() - 1
323 tiprev = self.changelog.count() - 1
321 if lrev != tiprev:
324 if lrev != tiprev:
322 self._updatebranchcache(partial, lrev+1, tiprev+1)
325 self._updatebranchcache(partial, lrev+1, tiprev+1)
323 self._writebranchcache(partial, self.changelog.tip(), tiprev)
326 self._writebranchcache(partial, self.changelog.tip(), tiprev)
324
327
325 # the branch cache is stored on disk as UTF-8, but in the local
328 # the branch cache is stored on disk as UTF-8, but in the local
326 # charset internally
329 # charset internally
327 for k, v in partial.items():
330 for k, v in partial.items():
328 self.branchcache[util.tolocal(k)] = v
331 self.branchcache[util.tolocal(k)] = v
329 return self.branchcache
332 return self.branchcache
330
333
331 def _readbranchcache(self):
334 def _readbranchcache(self):
332 partial = {}
335 partial = {}
333 try:
336 try:
334 f = self.opener("branches.cache")
337 f = self.opener("branches.cache")
335 lines = f.read().split('\n')
338 lines = f.read().split('\n')
336 f.close()
339 f.close()
337 last, lrev = lines.pop(0).rstrip().split(" ", 1)
340 last, lrev = lines.pop(0).rstrip().split(" ", 1)
338 last, lrev = bin(last), int(lrev)
341 last, lrev = bin(last), int(lrev)
339 if not (lrev < self.changelog.count() and
342 if not (lrev < self.changelog.count() and
340 self.changelog.node(lrev) == last): # sanity check
343 self.changelog.node(lrev) == last): # sanity check
341 # invalidate the cache
344 # invalidate the cache
342 raise ValueError('Invalid branch cache: unknown tip')
345 raise ValueError('Invalid branch cache: unknown tip')
343 for l in lines:
346 for l in lines:
344 if not l: continue
347 if not l: continue
345 node, label = l.rstrip().split(" ", 1)
348 node, label = l.rstrip().split(" ", 1)
346 partial[label] = bin(node)
349 partial[label] = bin(node)
347 except (KeyboardInterrupt, util.SignalInterrupt):
350 except (KeyboardInterrupt, util.SignalInterrupt):
348 raise
351 raise
349 except Exception, inst:
352 except Exception, inst:
350 if self.ui.debugflag:
353 if self.ui.debugflag:
351 self.ui.warn(str(inst), '\n')
354 self.ui.warn(str(inst), '\n')
352 partial, last, lrev = {}, nullid, nullrev
355 partial, last, lrev = {}, nullid, nullrev
353 return partial, last, lrev
356 return partial, last, lrev
354
357
355 def _writebranchcache(self, branches, tip, tiprev):
358 def _writebranchcache(self, branches, tip, tiprev):
356 try:
359 try:
357 f = self.opener("branches.cache", "w")
360 f = self.opener("branches.cache", "w")
358 f.write("%s %s\n" % (hex(tip), tiprev))
361 f.write("%s %s\n" % (hex(tip), tiprev))
359 for label, node in branches.iteritems():
362 for label, node in branches.iteritems():
360 f.write("%s %s\n" % (hex(node), label))
363 f.write("%s %s\n" % (hex(node), label))
361 except IOError:
364 except IOError:
362 pass
365 pass
363
366
364 def _updatebranchcache(self, partial, start, end):
367 def _updatebranchcache(self, partial, start, end):
365 for r in xrange(start, end):
368 for r in xrange(start, end):
366 c = self.changectx(r)
369 c = self.changectx(r)
367 b = c.branch()
370 b = c.branch()
368 if b:
371 if b:
369 partial[b] = c.node()
372 partial[b] = c.node()
370
373
371 def lookup(self, key):
374 def lookup(self, key):
372 if key == '.':
375 if key == '.':
373 key = self.dirstate.parents()[0]
376 key = self.dirstate.parents()[0]
374 if key == nullid:
377 if key == nullid:
375 raise repo.RepoError(_("no revision checked out"))
378 raise repo.RepoError(_("no revision checked out"))
376 n = self.changelog._match(key)
379 n = self.changelog._match(key)
377 if n:
380 if n:
378 return n
381 return n
379 if key in self.tags():
382 if key in self.tags():
380 return self.tags()[key]
383 return self.tags()[key]
381 if key in self.branchtags():
384 if key in self.branchtags():
382 return self.branchtags()[key]
385 return self.branchtags()[key]
383 n = self.changelog._partialmatch(key)
386 n = self.changelog._partialmatch(key)
384 if n:
387 if n:
385 return n
388 return n
386 raise repo.RepoError(_("unknown revision '%s'") % key)
389 raise repo.RepoError(_("unknown revision '%s'") % key)
387
390
388 def dev(self):
391 def dev(self):
389 return os.lstat(self.path).st_dev
392 return os.lstat(self.path).st_dev
390
393
391 def local(self):
394 def local(self):
392 return True
395 return True
393
396
394 def join(self, f):
397 def join(self, f):
395 return os.path.join(self.path, f)
398 return os.path.join(self.path, f)
396
399
397 def sjoin(self, f):
400 def sjoin(self, f):
398 return os.path.join(self.path, f)
401 return os.path.join(self.spath, f)
399
402
    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f, self.revlogversion)

    def changectx(self, changeid=None):
        return context.changectx(self, changeid)

    def workingctx(self):
        return context.workingctx(self)

    def parents(self, changeid=None):
        '''
        get list of changectxs for parents of changeid or working directory
        '''
        if changeid is None:
            pl = self.dirstate.parents()
        else:
            n = self.changelog.lookup(changeid)
            pl = self.changelog.parents(n)
        if pl[1] == nullid:
            return [self.changectx(pl[0])]
        return [self.changectx(pl[0]), self.changectx(pl[1])]

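    # Usage sketch (illustrative only): parents() always returns a list of
    # one or two changectx objects, the second parent being omitted when it
    # is nullid, e.g.
    #
    #     for p in repo.parents():
    #         print hex(p.node())
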
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def wread(self, filename):
        if self.encodepats == None:
            l = []
            for pat, cmd in self.ui.configitems("encode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.encodepats = l

        data = self.wopener(filename, 'r').read()

        for mf, cmd in self.encodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        return data

    def wwrite(self, filename, data, fd=None):
        if self.decodepats == None:
            l = []
            for pat, cmd in self.ui.configitems("decode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.decodepats = l

        for mf, cmd in self.decodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        if fd:
            return fd.write(data)
        return self.wopener(filename, 'w').write(data)

    def transaction(self):
        tr = self.transhandle
        if tr != None and tr.running():
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        self.transhandle = tr
        return tr

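    # A minimal usage sketch, mirroring what commit() does below: nested
    # calls share one journal, and aftertrans() moves the journal files to
    # undo files on close so rollback() can find them.
    #
    #     tr = repo.transaction()
    #     try:
    #         ...  # write to the revlogs via self.sopener
    #         tr.close()
    #     except:
    #         tr.abort()
    #         raise
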
    def recover(self):
        l = self.lock()
        if os.path.exists(self.sjoin("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("journal"))
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False

    def rollback(self, wlock=None):
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.sjoin("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no rollback information available\n"))

    def wreload(self):
        self.dirstate.read()

    def reload(self):
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None

    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=1):
        return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)

    def wlock(self, wait=1):
        return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)

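    # Lock-ordering sketch: callers that need both locks take the working
    # directory lock first, then the store lock, as commit() does below.
    #
    #     wlock = repo.wlock()
    #     lock = repo.lock()   # uses sjoin("lock"), i.e. lives in the store
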
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = nullid
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)

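    # Illustrative example of the copy metadata built above: if "b" was
    # copied from "a" in the working directory, a non-merge commit records
    # roughly
    #
    #     meta = {"copy": "a", "copyrev": hex(manifest1.get("a", nullid))}
    #
    # and the new filelog entry for "b" is parented on nullid, so the copy
    # source is tracked through the metadata rather than the filelog graph.
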
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, wlock=wlock)

    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):

        commit = []
        remove = []
        changed = []
        use_dirstate = (p1 is None) # not rawcommit
        extra = extra.copy()

        if use_dirstate:
            if files:
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = util.fromlocal(self.workingctx().branch())
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch", "") # stored in UTF-8
            if not commit and not remove and not force and p2 == nullid and \
                   branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
            except IOError:
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()

        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
            if use_dirstate:
                self.dirstate.update(new, "n")
                self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n

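    # Hedged usage sketch: with no explicit file list, commit() consults
    # the dirstate, so a bare call commits all outstanding changes.
    #
    #     n = repo.commit(text="fix frobnication", user="me@example.com")
    #     if n is None:
    #         ...  # nothing changed, or the commit message was empty
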
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn

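    # Usage sketch: walk() is a generator of (src, filename) pairs, e.g.
    #
    #     for src, fn in repo.walk():
    #         if src == 'f':
    #             print fn    # found in the working directory tree
    #         elif src == 'b':
    #             ...         # missing file that matched badmatch
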
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def fcmp(fn, mf):
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        def mfmatches(node):
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)

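    # Usage sketch: status() always returns a 7-tuple of sorted lists; the
    # ignored and clean lists are only populated when requested.
    #
    #     (modified, added, removed, deleted,
    #      unknown, ignored, clean) = repo.status(list_clean=True)
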
    def add(self, list, wlock=None):
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if not os.path.exists(p):
                self.ui.warn(_("%s does not exist!\n") % f)
            elif not os.path.isfile(p):
                self.ui.warn(_("%s not added: only files supported currently\n")
                             % f)
            elif self.dirstate.state(f) in 'an':
                self.ui.warn(_("%s already tracked!\n") % f)
            else:
                self.dirstate.update([f], "a")

    def forget(self, list, wlock=None):
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in 'ai':
                self.ui.warn(_("%s not added!\n") % f)
            else:
                self.dirstate.forget([f])

    def remove(self, list, unlink=False, wlock=None):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")

    def undelete(self, list, wlock=None):
        p = self.dirstate.parents()[0]
        mn = self.changelog.read(p)[0]
        m = self.manifest.read(mn)
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in "r":
                self.ui.warn("%s not removed!\n" % f)
            else:
                t = self.file(f).read(m[f])
                self.wwrite(f, t)
                util.set_exec(self.wjoin(f), m.execf(f))
                self.dirstate.update([f], "n")

    def copy(self, source, dest, wlock=None):
        p = self.wjoin(dest)
        if not os.path.exists(p):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not os.path.isfile(p):
            self.ui.warn(_("copy failed: %s is not a file\n") % dest)
        else:
            if not wlock:
                wlock = self.wlock()
            if self.dirstate.state(dest) == '?':
                self.dirstate.update([dest], "a")
            self.dirstate.copy(source, dest)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]

    # branchlookup returns a dict giving a list of branches for
    # each head.  A branch is defined as the tag of a node or
    # the branch of the node's parents.  If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph:  a->b->c->d->e
    #                       \         /
    #                        aa -----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13.  Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (i.e. checkout of a specific
    # branch).
    #
    # passing in a specific branch will limit the depth of the search
    # through the parents.  It won't limit the branches returned in the
    # result though.
    def branchlookup(self, heads=None, branch=None):
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head.  The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited.  This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

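    # between() samples each top->bottom range at exponentially growing
    # distances from top: i counts the steps taken and f doubles after each
    # recorded node, so the nodes 1, 2, 4, 8, ... steps below top are kept.
    # An illustrative example with hypothetical nodes n0..n10, where n10 is
    # top and n0 is bottom:
    #
    #     repo.between([(n10, n0)])   # -> [[n9, n8, n6, n2]]
    #
    # This spacing is what the binary search in findincoming() below
    # narrows on.
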
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist in
        both self and remote but whose children do not exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

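    # Usage sketch, mirroring what pull() does below: the returned roots
    # feed directly into the changegroup request.
    #
    #     base = {}
    #     fetch = repo.findincoming(remote, base)
    #     if fetch:
    #         cg = remote.changegroup(fetch, 'pull')
    #         repo.addchangegroup(cg, 'pull', remote.url())
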
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

    def pull(self, remote, heads=None, force=False, lock=None):
        mylock = False
        if not lock:
            lock = self.lock()
            mylock = True

        try:
            fetch = self.findincoming(remote, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            if mylock:
                lock.release()

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
            elif inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            return remote.addchangegroup(cg, 'push', self.url())
        return ret[1]

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push.  once revs are transferred, if the server finds it has
        # different heads (someone else won the commit/push race), the
        # server aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force: remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes):
        self.ui.note(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

1399 def changegroupsubset(self, bases, heads, source):
1402 def changegroupsubset(self, bases, heads, source):
1400 """This function generates a changegroup consisting of all the nodes
1403 """This function generates a changegroup consisting of all the nodes
1401 that are descendants of any of the bases, and ancestors of any of
1404 that are descendants of any of the bases, and ancestors of any of
1402 the heads.
1405 the heads.
1403
1406
1404 It is fairly complex as determining which filenodes and which
1407 It is fairly complex as determining which filenodes and which
1405 manifest nodes need to be included for the changeset to be complete
1408 manifest nodes need to be included for the changeset to be complete
1406 is non-trivial.
1409 is non-trivial.
1407
1410
1408 Another wrinkle is doing the reverse, figuring out which changeset in
1411 Another wrinkle is doing the reverse, figuring out which changeset in
1409 the changegroup a particular filenode or manifestnode belongs to."""
1412 the changegroup a particular filenode or manifestnode belongs to."""
1410
1413
1411 self.hook('preoutgoing', throw=True, source=source)
1414 self.hook('preoutgoing', throw=True, source=source)
1412
1415
1413 # Set up some initial variables
1416 # Set up some initial variables
1414 # Make it easy to refer to self.changelog
1417 # Make it easy to refer to self.changelog
1415 cl = self.changelog
1418 cl = self.changelog
1416 # msng is short for missing - compute the list of changesets in this
1419 # msng is short for missing - compute the list of changesets in this
1417 # changegroup.
1420 # changegroup.
1418 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1421 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1419 self.changegroupinfo(msng_cl_lst)
1422 self.changegroupinfo(msng_cl_lst)
1420 # Some bases may turn out to be superfluous, and some heads may be
1423 # Some bases may turn out to be superfluous, and some heads may be
1421 # too. nodesbetween will return the minimal set of bases and heads
1424 # too. nodesbetween will return the minimal set of bases and heads
1422 # necessary to re-create the changegroup.
1425 # necessary to re-create the changegroup.
1423
1426
1424 # Known heads are the list of heads that it is assumed the recipient
1427 # Known heads are the list of heads that it is assumed the recipient
1425 # of this changegroup will know about.
1428 # of this changegroup will know about.
1426 knownheads = {}
1429 knownheads = {}
1427 # We assume that all parents of bases are known heads.
1430 # We assume that all parents of bases are known heads.
1428 for n in bases:
1431 for n in bases:
1429 for p in cl.parents(n):
1432 for p in cl.parents(n):
1430 if p != nullid:
1433 if p != nullid:
1431 knownheads[p] = 1
1434 knownheads[p] = 1
1432 knownheads = knownheads.keys()
1435 knownheads = knownheads.keys()
1433 if knownheads:
1436 if knownheads:
1434 # Now that we know what heads are known, we can compute which
1437 # Now that we know what heads are known, we can compute which
1435 # changesets are known. The recipient must know about all
1438 # changesets are known. The recipient must know about all
1436 # changesets required to reach the known heads from the null
1439 # changesets required to reach the known heads from the null
1437 # changeset.
1440 # changeset.
1438 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1441 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1439 junk = None
1442 junk = None
1440 # Transform the list into an ersatz set.
1443 # Transform the list into an ersatz set.
1441 has_cl_set = dict.fromkeys(has_cl_set)
1444 has_cl_set = dict.fromkeys(has_cl_set)
1442 else:
1445 else:
1443 # If there were no known heads, the recipient cannot be assumed to
1446 # If there were no known heads, the recipient cannot be assumed to
1444 # know about any changesets.
1447 # know about any changesets.
1445 has_cl_set = {}
1448 has_cl_set = {}
1446
1449
1447 # Make it easy to refer to self.manifest
1450 # Make it easy to refer to self.manifest
1448 mnfst = self.manifest
1451 mnfst = self.manifest
1449 # We don't know which manifests are missing yet
1452 # We don't know which manifests are missing yet
1450 msng_mnfst_set = {}
1453 msng_mnfst_set = {}
1451 # Nor do we know which filenodes are missing.
1454 # Nor do we know which filenodes are missing.
1452 msng_filenode_set = {}
1455 msng_filenode_set = {}
1453
1456
1454 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1457 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1455 junk = None
1458 junk = None
1456
1459
1457 # A changeset always belongs to itself, so the changenode lookup
1460 # A changeset always belongs to itself, so the changenode lookup
1458 # function for a changenode is identity.
1461 # function for a changenode is identity.
1459 def identity(x):
1462 def identity(x):
1460 return x
1463 return x
1461
1464
1462 # A function generating function. Sets up an environment for the
1465 # A function generating function. Sets up an environment for the
1463 # inner function.
1466 # inner function.
1464 def cmp_by_rev_func(revlog):
1467 def cmp_by_rev_func(revlog):
1465 # Compare two nodes by their revision number in the environment's
1468 # Compare two nodes by their revision number in the environment's
1466 # revision history. Since the revision number both represents the
1469 # revision history. Since the revision number both represents the
1467 # most efficient order to read the nodes in, and represents a
1470 # most efficient order to read the nodes in, and represents a
1468 # topological sorting of the nodes, this function is often useful.
1471 # topological sorting of the nodes, this function is often useful.
1469 def cmp_by_rev(a, b):
1472 def cmp_by_rev(a, b):
1470 return cmp(revlog.rev(a), revlog.rev(b))
1473 return cmp(revlog.rev(a), revlog.rev(b))
1471 return cmp_by_rev
1474 return cmp_by_rev
1472
1475
1473 # If we determine that a particular file or manifest node must be a
1476 # If we determine that a particular file or manifest node must be a
1474 # node that the recipient of the changegroup will already have, we can
1477 # node that the recipient of the changegroup will already have, we can
1475 # also assume the recipient will have all the parents. This function
1478 # also assume the recipient will have all the parents. This function
1476 # prunes them from the set of missing nodes.
1479 # prunes them from the set of missing nodes.
1477 def prune_parents(revlog, hasset, msngset):
1480 def prune_parents(revlog, hasset, msngset):
1478 haslst = hasset.keys()
1481 haslst = hasset.keys()
1479 haslst.sort(cmp_by_rev_func(revlog))
1482 haslst.sort(cmp_by_rev_func(revlog))
1480 for node in haslst:
1483 for node in haslst:
1481 parentlst = [p for p in revlog.parents(node) if p != nullid]
1484 parentlst = [p for p in revlog.parents(node) if p != nullid]
1482 while parentlst:
1485 while parentlst:
1483 n = parentlst.pop()
1486 n = parentlst.pop()
1484 if n not in hasset:
1487 if n not in hasset:
1485 hasset[n] = 1
1488 hasset[n] = 1
1486 p = [p for p in revlog.parents(n) if p != nullid]
1489 p = [p for p in revlog.parents(n) if p != nullid]
1487 parentlst.extend(p)
1490 parentlst.extend(p)
1488 for n in hasset:
1491 for n in hasset:
1489 msngset.pop(n, None)
1492 msngset.pop(n, None)
1490
1493
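A toy run of prune_parents on a dict-based DAG makes the effect concrete. Here parents maps a node to its parent tuple and None plays the role of nullid; the sort-by-revision in the real code is only an ordering optimization and is omitted:

def prune(parents, hasset, msngset):
    for node in list(hasset):
        stack = [p for p in parents[node] if p is not None]
        while stack:
            n = stack.pop()
            if n not in hasset:
                hasset[n] = 1
                stack.extend(p for p in parents[n] if p is not None)
    for n in hasset:
        msngset.pop(n, None)

parents = {'a': (None,), 'b': ('a',), 'c': ('b',)}
hasset = {'c': 1}
msngset = {'a': 1, 'b': 1, 'c': 1}
prune(parents, hasset, msngset)
# msngset is now empty: the known node and all of its ancestors
# stopped being 'missing'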
1491 # This is a function generating function used to set up an environment
1494 # This is a function generating function used to set up an environment
1492 # for the inner function to execute in.
1495 # for the inner function to execute in.
1493 def manifest_and_file_collector(changedfileset):
1496 def manifest_and_file_collector(changedfileset):
1494 # This is an information gathering function that gathers
1497 # This is an information gathering function that gathers
1495 # information from each changeset node that goes out as part of
1498 # information from each changeset node that goes out as part of
1496 # the changegroup. The information gathered is a list of which
1499 # the changegroup. The information gathered is a list of which
1497 # manifest nodes are potentially required (the recipient may
1500 # manifest nodes are potentially required (the recipient may
1498 # already have them) and total list of all files which were
1501 # already have them) and total list of all files which were
1499 # changed in any changeset in the changegroup.
1502 # changed in any changeset in the changegroup.
1500 #
1503 #
1501 # We also remember the first changenode each manifest was
1504 # We also remember the first changenode each manifest was
1502 # referenced by, so we can later determine which changenode 'owns'
1505 # referenced by, so we can later determine which changenode 'owns'
1503 # the manifest.
1506 # the manifest.
1504 def collect_manifests_and_files(clnode):
1507 def collect_manifests_and_files(clnode):
1505 c = cl.read(clnode)
1508 c = cl.read(clnode)
1506 for f in c[3]:
1509 for f in c[3]:
1507 # This is to make sure we only have one instance of each
1510 # This is to make sure we only have one instance of each
1508 # filename string for each filename.
1511 # filename string for each filename.
1509 changedfileset.setdefault(f, f)
1512 changedfileset.setdefault(f, f)
1510 msng_mnfst_set.setdefault(c[0], clnode)
1513 msng_mnfst_set.setdefault(c[0], clnode)
1511 return collect_manifests_and_files
1514 return collect_manifests_and_files
1512
1515
1513 # Figure out which manifest nodes (of the ones we think might be part
1516 # Figure out which manifest nodes (of the ones we think might be part
1514 # of the changegroup) the recipient must know about and remove them
1517 # of the changegroup) the recipient must know about and remove them
1515 # from the changegroup.
1518 # from the changegroup.
1516 def prune_manifests():
1519 def prune_manifests():
1517 has_mnfst_set = {}
1520 has_mnfst_set = {}
1518 for n in msng_mnfst_set:
1521 for n in msng_mnfst_set:
1519 # If a 'missing' manifest thinks it belongs to a changenode
1522 # If a 'missing' manifest thinks it belongs to a changenode
1520 # the recipient is assumed to have, obviously the recipient
1523 # the recipient is assumed to have, obviously the recipient
1521 # must have that manifest.
1524 # must have that manifest.
1522 linknode = cl.node(mnfst.linkrev(n))
1525 linknode = cl.node(mnfst.linkrev(n))
1523 if linknode in has_cl_set:
1526 if linknode in has_cl_set:
1524 has_mnfst_set[n] = 1
1527 has_mnfst_set[n] = 1
1525 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1528 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1526
1529
1527 # Use the information collected in collect_manifests_and_files to say
1530 # Use the information collected in collect_manifests_and_files to say
1528 # which changenode any manifestnode belongs to.
1531 # which changenode any manifestnode belongs to.
1529 def lookup_manifest_link(mnfstnode):
1532 def lookup_manifest_link(mnfstnode):
1530 return msng_mnfst_set[mnfstnode]
1533 return msng_mnfst_set[mnfstnode]
1531
1534
1532 # A function generating function that sets up the initial environment
1535 # A function generating function that sets up the initial environment
1533 # for the inner function.
1536 # for the inner function.
1534 def filenode_collector(changedfiles):
1537 def filenode_collector(changedfiles):
1535 next_rev = [0]
1538 next_rev = [0]
1536 # This gathers information from each manifestnode included in the
1539 # This gathers information from each manifestnode included in the
1537 # changegroup about which filenodes the manifest node references
1540 # changegroup about which filenodes the manifest node references
1538 # so we can include those in the changegroup too.
1541 # so we can include those in the changegroup too.
1539 #
1542 #
1540 # It also remembers which changenode each filenode belongs to. It
1543 # It also remembers which changenode each filenode belongs to. It
1541 # does this by assuming that a filenode belongs to the changenode
1544 # does this by assuming that a filenode belongs to the changenode
1542 # the first manifest that references it belongs to.
1545 # the first manifest that references it belongs to.
1543 def collect_msng_filenodes(mnfstnode):
1546 def collect_msng_filenodes(mnfstnode):
1544 r = mnfst.rev(mnfstnode)
1547 r = mnfst.rev(mnfstnode)
1545 if r == next_rev[0]:
1548 if r == next_rev[0]:
1546 # If the last rev we looked at was the one just previous,
1549 # If the last rev we looked at was the one just previous,
1547 # we only need to see a diff.
1550 # we only need to see a diff.
1548 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1551 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1549 # For each line in the delta
1552 # For each line in the delta
1550 for dline in delta.splitlines():
1553 for dline in delta.splitlines():
1551 # get the filename and filenode for that line
1554 # get the filename and filenode for that line
1552 f, fnode = dline.split('\0')
1555 f, fnode = dline.split('\0')
1553 fnode = bin(fnode[:40])
1556 fnode = bin(fnode[:40])
1554 f = changedfiles.get(f, None)
1557 f = changedfiles.get(f, None)
1555 # And if the file is in the list of files we care
1558 # And if the file is in the list of files we care
1556 # about.
1559 # about.
1557 if f is not None:
1560 if f is not None:
1558 # Get the changenode this manifest belongs to
1561 # Get the changenode this manifest belongs to
1559 clnode = msng_mnfst_set[mnfstnode]
1562 clnode = msng_mnfst_set[mnfstnode]
1560 # Create the set of filenodes for the file if
1563 # Create the set of filenodes for the file if
1561 # there isn't one already.
1564 # there isn't one already.
1562 ndset = msng_filenode_set.setdefault(f, {})
1565 ndset = msng_filenode_set.setdefault(f, {})
1563 # And set the filenode's changelog node to the
1566 # And set the filenode's changelog node to the
1564 # manifest's if it hasn't been set already.
1567 # manifest's if it hasn't been set already.
1565 ndset.setdefault(fnode, clnode)
1568 ndset.setdefault(fnode, clnode)
1566 else:
1569 else:
1567 # Otherwise we need a full manifest.
1570 # Otherwise we need a full manifest.
1568 m = mnfst.read(mnfstnode)
1571 m = mnfst.read(mnfstnode)
1569 # For every file we care about.
1572 # For every file we care about.
1570 for f in changedfiles:
1573 for f in changedfiles:
1571 fnode = m.get(f, None)
1574 fnode = m.get(f, None)
1572 # If it's in the manifest
1575 # If it's in the manifest
1573 if fnode is not None:
1576 if fnode is not None:
1574 # See comments above.
1577 # See comments above.
1575 clnode = msng_mnfst_set[mnfstnode]
1578 clnode = msng_mnfst_set[mnfstnode]
1576 ndset = msng_filenode_set.setdefault(f, {})
1579 ndset = msng_filenode_set.setdefault(f, {})
1577 ndset.setdefault(fnode, clnode)
1580 ndset.setdefault(fnode, clnode)
1578 # Remember the revision we hope to see next.
1581 # Remember the revision we hope to see next.
1579 next_rev[0] = r + 1
1582 next_rev[0] = r + 1
1580 return collect_msng_filenodes
1583 return collect_msng_filenodes
1581
1584
1582 # We have a list of filenodes we think we need for a file, let's remove
1585 # We have a list of filenodes we think we need for a file, let's remove
1583 # all those we know the recipient must have.
1586 # all those we know the recipient must have.
1584 def prune_filenodes(f, filerevlog):
1587 def prune_filenodes(f, filerevlog):
1585 msngset = msng_filenode_set[f]
1588 msngset = msng_filenode_set[f]
1586 hasset = {}
1589 hasset = {}
1587 # If a 'missing' filenode thinks it belongs to a changenode we
1590 # If a 'missing' filenode thinks it belongs to a changenode we
1588 # assume the recipient must have, then the recipient must have
1591 # assume the recipient must have, then the recipient must have
1589 # that filenode.
1592 # that filenode.
1590 for n in msngset:
1593 for n in msngset:
1591 clnode = cl.node(filerevlog.linkrev(n))
1594 clnode = cl.node(filerevlog.linkrev(n))
1592 if clnode in has_cl_set:
1595 if clnode in has_cl_set:
1593 hasset[n] = 1
1596 hasset[n] = 1
1594 prune_parents(filerevlog, hasset, msngset)
1597 prune_parents(filerevlog, hasset, msngset)
1595
1598
1596 # A function generator function that sets up a context for the
1599 # A function generator function that sets up a context for the
1597 # inner function.
1600 # inner function.
1598 def lookup_filenode_link_func(fname):
1601 def lookup_filenode_link_func(fname):
1599 msngset = msng_filenode_set[fname]
1602 msngset = msng_filenode_set[fname]
1600 # Lookup the changenode the filenode belongs to.
1603 # Lookup the changenode the filenode belongs to.
1601 def lookup_filenode_link(fnode):
1604 def lookup_filenode_link(fnode):
1602 return msngset[fnode]
1605 return msngset[fnode]
1603 return lookup_filenode_link
1606 return lookup_filenode_link
1604
1607
1605 # Now that we have all these utility functions to help out and
1608 # Now that we have all these utility functions to help out and
1606 # logically divide up the task, generate the group.
1609 # logically divide up the task, generate the group.
1607 def gengroup():
1610 def gengroup():
1608 # The set of changed files starts empty.
1611 # The set of changed files starts empty.
1609 changedfiles = {}
1612 changedfiles = {}
1610 # Create a changenode group generator that will call our functions
1613 # Create a changenode group generator that will call our functions
1611 # back to lookup the owning changenode and collect information.
1614 # back to lookup the owning changenode and collect information.
1612 group = cl.group(msng_cl_lst, identity,
1615 group = cl.group(msng_cl_lst, identity,
1613 manifest_and_file_collector(changedfiles))
1616 manifest_and_file_collector(changedfiles))
1614 for chnk in group:
1617 for chnk in group:
1615 yield chnk
1618 yield chnk
1616
1619
1617 # The list of manifests has been collected by the generator
1620 # The list of manifests has been collected by the generator
1618 # calling our functions back.
1621 # calling our functions back.
1619 prune_manifests()
1622 prune_manifests()
1620 msng_mnfst_lst = msng_mnfst_set.keys()
1623 msng_mnfst_lst = msng_mnfst_set.keys()
1621 # Sort the manifestnodes by revision number.
1624 # Sort the manifestnodes by revision number.
1622 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1625 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1623 # Create a generator for the manifestnodes that calls our lookup
1626 # Create a generator for the manifestnodes that calls our lookup
1624 # and data collection functions back.
1627 # and data collection functions back.
1625 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1628 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1626 filenode_collector(changedfiles))
1629 filenode_collector(changedfiles))
1627 for chnk in group:
1630 for chnk in group:
1628 yield chnk
1631 yield chnk
1629
1632
1630 # These are no longer needed, dereference and toss the memory for
1633 # These are no longer needed, dereference and toss the memory for
1631 # them.
1634 # them.
1632 msng_mnfst_lst = None
1635 msng_mnfst_lst = None
1633 msng_mnfst_set.clear()
1636 msng_mnfst_set.clear()
1634
1637
1635 changedfiles = changedfiles.keys()
1638 changedfiles = changedfiles.keys()
1636 changedfiles.sort()
1639 changedfiles.sort()
1637 # Go through all our files in order sorted by name.
1640 # Go through all our files in order sorted by name.
1638 for fname in changedfiles:
1641 for fname in changedfiles:
1639 filerevlog = self.file(fname)
1642 filerevlog = self.file(fname)
1640 # Toss out the filenodes that the recipient isn't really
1643 # Toss out the filenodes that the recipient isn't really
1641 # missing.
1644 # missing.
1642 if msng_filenode_set.has_key(fname):
1645 if msng_filenode_set.has_key(fname):
1643 prune_filenodes(fname, filerevlog)
1646 prune_filenodes(fname, filerevlog)
1644 msng_filenode_lst = msng_filenode_set[fname].keys()
1647 msng_filenode_lst = msng_filenode_set[fname].keys()
1645 else:
1648 else:
1646 msng_filenode_lst = []
1649 msng_filenode_lst = []
1647 # If any filenodes are left, generate the group for them,
1650 # If any filenodes are left, generate the group for them,
1648 # otherwise don't bother.
1651 # otherwise don't bother.
1649 if len(msng_filenode_lst) > 0:
1652 if len(msng_filenode_lst) > 0:
1650 yield changegroup.genchunk(fname)
1653 yield changegroup.genchunk(fname)
1651 # Sort the filenodes by their revision #
1654 # Sort the filenodes by their revision #
1652 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1655 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1653 # Create a group generator and only pass in a changenode
1656 # Create a group generator and only pass in a changenode
1654 # lookup function as we need to collect no information
1657 # lookup function as we need to collect no information
1655 # from filenodes.
1658 # from filenodes.
1656 group = filerevlog.group(msng_filenode_lst,
1659 group = filerevlog.group(msng_filenode_lst,
1657 lookup_filenode_link_func(fname))
1660 lookup_filenode_link_func(fname))
1658 for chnk in group:
1661 for chnk in group:
1659 yield chnk
1662 yield chnk
1660 if msng_filenode_set.has_key(fname):
1663 if msng_filenode_set.has_key(fname):
1661 # Don't need this anymore, toss it to free memory.
1664 # Don't need this anymore, toss it to free memory.
1662 del msng_filenode_set[fname]
1665 del msng_filenode_set[fname]
1663 # Signal that no more groups are left.
1666 # Signal that no more groups are left.
1664 yield changegroup.closechunk()
1667 yield changegroup.closechunk()
1665
1668
1666 if msng_cl_lst:
1669 if msng_cl_lst:
1667 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1670 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1668
1671
1669 return util.chunkbuffer(gengroup())
1672 return util.chunkbuffer(gengroup())
1670
1673
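On a linear history the node selection above has a compact set reading: the outgoing changesets are the ancestors of the requested heads minus the ancestors of the bases. A toy model with plain dicts rather than revlogs:

def ancestors(parents, nodes):
    seen, stack = set(), list(nodes)
    while stack:
        n = stack.pop()
        if n not in seen:
            seen.add(n)
            stack.extend(parents.get(n, ()))
    return seen

parents = {'a': (), 'b': ('a',), 'c': ('b',), 'd': ('c',)}
missing = ancestors(parents, ['d']) - ancestors(parents, ['b'])
# {'c', 'd'}: what a recipient who already has 'b' still needs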
1671 def changegroup(self, basenodes, source):
1674 def changegroup(self, basenodes, source):
1672 """Generate a changegroup of all nodes that we have that a recipient
1675 """Generate a changegroup of all nodes that we have that a recipient
1673 doesn't.
1676 doesn't.
1674
1677
1675 This is much easier than the previous function as we can assume that
1678 This is much easier than the previous function as we can assume that
1676 the recipient has any changenode we aren't sending them."""
1679 the recipient has any changenode we aren't sending them."""
1677
1680
1678 self.hook('preoutgoing', throw=True, source=source)
1681 self.hook('preoutgoing', throw=True, source=source)
1679
1682
1680 cl = self.changelog
1683 cl = self.changelog
1681 nodes = cl.nodesbetween(basenodes, None)[0]
1684 nodes = cl.nodesbetween(basenodes, None)[0]
1682 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1685 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1683 self.changegroupinfo(nodes)
1686 self.changegroupinfo(nodes)
1684
1687
1685 def identity(x):
1688 def identity(x):
1686 return x
1689 return x
1687
1690
1688 def gennodelst(revlog):
1691 def gennodelst(revlog):
1689 for r in xrange(0, revlog.count()):
1692 for r in xrange(0, revlog.count()):
1690 n = revlog.node(r)
1693 n = revlog.node(r)
1691 if revlog.linkrev(n) in revset:
1694 if revlog.linkrev(n) in revset:
1692 yield n
1695 yield n
1693
1696
1694 def changed_file_collector(changedfileset):
1697 def changed_file_collector(changedfileset):
1695 def collect_changed_files(clnode):
1698 def collect_changed_files(clnode):
1696 c = cl.read(clnode)
1699 c = cl.read(clnode)
1697 for fname in c[3]:
1700 for fname in c[3]:
1698 changedfileset[fname] = 1
1701 changedfileset[fname] = 1
1699 return collect_changed_files
1702 return collect_changed_files
1700
1703
1701 def lookuprevlink_func(revlog):
1704 def lookuprevlink_func(revlog):
1702 def lookuprevlink(n):
1705 def lookuprevlink(n):
1703 return cl.node(revlog.linkrev(n))
1706 return cl.node(revlog.linkrev(n))
1704 return lookuprevlink
1707 return lookuprevlink
1705
1708
1706 def gengroup():
1709 def gengroup():
1707 # construct a list of all changed files
1710 # construct a list of all changed files
1708 changedfiles = {}
1711 changedfiles = {}
1709
1712
1710 for chnk in cl.group(nodes, identity,
1713 for chnk in cl.group(nodes, identity,
1711 changed_file_collector(changedfiles)):
1714 changed_file_collector(changedfiles)):
1712 yield chnk
1715 yield chnk
1713 changedfiles = changedfiles.keys()
1716 changedfiles = changedfiles.keys()
1714 changedfiles.sort()
1717 changedfiles.sort()
1715
1718
1716 mnfst = self.manifest
1719 mnfst = self.manifest
1717 nodeiter = gennodelst(mnfst)
1720 nodeiter = gennodelst(mnfst)
1718 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1721 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1719 yield chnk
1722 yield chnk
1720
1723
1721 for fname in changedfiles:
1724 for fname in changedfiles:
1722 filerevlog = self.file(fname)
1725 filerevlog = self.file(fname)
1723 nodeiter = gennodelst(filerevlog)
1726 nodeiter = gennodelst(filerevlog)
1724 nodeiter = list(nodeiter)
1727 nodeiter = list(nodeiter)
1725 if nodeiter:
1728 if nodeiter:
1726 yield changegroup.genchunk(fname)
1729 yield changegroup.genchunk(fname)
1727 lookup = lookuprevlink_func(filerevlog)
1730 lookup = lookuprevlink_func(filerevlog)
1728 for chnk in filerevlog.group(nodeiter, lookup):
1731 for chnk in filerevlog.group(nodeiter, lookup):
1729 yield chnk
1732 yield chnk
1730
1733
1731 yield changegroup.closechunk()
1734 yield changegroup.closechunk()
1732
1735
1733 if nodes:
1736 if nodes:
1734 self.hook('outgoing', node=hex(nodes[0]), source=source)
1737 self.hook('outgoing', node=hex(nodes[0]), source=source)
1735
1738
1736 return util.chunkbuffer(gengroup())
1739 return util.chunkbuffer(gengroup())
1737
1740
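gengroup() fixes the wire layout every consumer relies on: one chunk stream for the changelog, one for the manifest, then a filename chunk plus a chunk stream per changed file, and a closing chunk ending the group. A skeleton reader, assuming the chunkiter and getchunk helpers from the changegroup module used throughout this file:

def read_changegroup(source):
    for chunk in changegroup.chunkiter(source):      # changelog group
        pass
    for chunk in changegroup.chunkiter(source):      # manifest group
        pass
    while 1:
        fname = changegroup.getchunk(source)         # empty chunk = end
        if not fname:
            break
        for chunk in changegroup.chunkiter(source):  # this file's group
            pass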
1738 def addchangegroup(self, source, srctype, url):
1741 def addchangegroup(self, source, srctype, url):
1739 """add changegroup to repo.
1742 """add changegroup to repo.
1740 returns number of heads modified or added + 1."""
1743 returns number of heads modified or added + 1."""
1741
1744
1742 def csmap(x):
1745 def csmap(x):
1743 self.ui.debug(_("add changeset %s\n") % short(x))
1746 self.ui.debug(_("add changeset %s\n") % short(x))
1744 return cl.count()
1747 return cl.count()
1745
1748
1746 def revmap(x):
1749 def revmap(x):
1747 return cl.rev(x)
1750 return cl.rev(x)
1748
1751
1749 if not source:
1752 if not source:
1750 return 0
1753 return 0
1751
1754
1752 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1755 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1753
1756
1754 changesets = files = revisions = 0
1757 changesets = files = revisions = 0
1755
1758
1756 tr = self.transaction()
1759 tr = self.transaction()
1757
1760
1758 # write changelog data to temp files so concurrent readers will not see
1761 # write changelog data to temp files so concurrent readers will not see
1759 # inconsistent view
1762 # inconsistent view
1760 cl = None
1763 cl = None
1761 try:
1764 try:
1762 cl = appendfile.appendchangelog(self.sopener,
1765 cl = appendfile.appendchangelog(self.sopener,
1763 self.changelog.version)
1766 self.changelog.version)
1764
1767
1765 oldheads = len(cl.heads())
1768 oldheads = len(cl.heads())
1766
1769
1767 # pull off the changeset group
1770 # pull off the changeset group
1768 self.ui.status(_("adding changesets\n"))
1771 self.ui.status(_("adding changesets\n"))
1769 cor = cl.count() - 1
1772 cor = cl.count() - 1
1770 chunkiter = changegroup.chunkiter(source)
1773 chunkiter = changegroup.chunkiter(source)
1771 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1774 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1772 raise util.Abort(_("received changelog group is empty"))
1775 raise util.Abort(_("received changelog group is empty"))
1773 cnr = cl.count() - 1
1776 cnr = cl.count() - 1
1774 changesets = cnr - cor
1777 changesets = cnr - cor
1775
1778
1776 # pull off the manifest group
1779 # pull off the manifest group
1777 self.ui.status(_("adding manifests\n"))
1780 self.ui.status(_("adding manifests\n"))
1778 chunkiter = changegroup.chunkiter(source)
1781 chunkiter = changegroup.chunkiter(source)
1779 # no need to check for empty manifest group here:
1782 # no need to check for empty manifest group here:
1780 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1783 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1781 # no new manifest will be created and the manifest group will
1784 # no new manifest will be created and the manifest group will
1782 # be empty during the pull
1785 # be empty during the pull
1783 self.manifest.addgroup(chunkiter, revmap, tr)
1786 self.manifest.addgroup(chunkiter, revmap, tr)
1784
1787
1785 # process the files
1788 # process the files
1786 self.ui.status(_("adding file changes\n"))
1789 self.ui.status(_("adding file changes\n"))
1787 while 1:
1790 while 1:
1788 f = changegroup.getchunk(source)
1791 f = changegroup.getchunk(source)
1789 if not f:
1792 if not f:
1790 break
1793 break
1791 self.ui.debug(_("adding %s revisions\n") % f)
1794 self.ui.debug(_("adding %s revisions\n") % f)
1792 fl = self.file(f)
1795 fl = self.file(f)
1793 o = fl.count()
1796 o = fl.count()
1794 chunkiter = changegroup.chunkiter(source)
1797 chunkiter = changegroup.chunkiter(source)
1795 if fl.addgroup(chunkiter, revmap, tr) is None:
1798 if fl.addgroup(chunkiter, revmap, tr) is None:
1796 raise util.Abort(_("received file revlog group is empty"))
1799 raise util.Abort(_("received file revlog group is empty"))
1797 revisions += fl.count() - o
1800 revisions += fl.count() - o
1798 files += 1
1801 files += 1
1799
1802
1800 cl.writedata()
1803 cl.writedata()
1801 finally:
1804 finally:
1802 if cl:
1805 if cl:
1803 cl.cleanup()
1806 cl.cleanup()
1804
1807
1805 # make changelog see real files again
1808 # make changelog see real files again
1806 self.changelog = changelog.changelog(self.sopener,
1809 self.changelog = changelog.changelog(self.sopener,
1807 self.changelog.version)
1810 self.changelog.version)
1808 self.changelog.checkinlinesize(tr)
1811 self.changelog.checkinlinesize(tr)
1809
1812
1810 newheads = len(self.changelog.heads())
1813 newheads = len(self.changelog.heads())
1811 heads = ""
1814 heads = ""
1812 if oldheads and newheads != oldheads:
1815 if oldheads and newheads != oldheads:
1813 heads = _(" (%+d heads)") % (newheads - oldheads)
1816 heads = _(" (%+d heads)") % (newheads - oldheads)
1814
1817
1815 self.ui.status(_("added %d changesets"
1818 self.ui.status(_("added %d changesets"
1816 " with %d changes to %d files%s\n")
1819 " with %d changes to %d files%s\n")
1817 % (changesets, revisions, files, heads))
1820 % (changesets, revisions, files, heads))
1818
1821
1819 if changesets > 0:
1822 if changesets > 0:
1820 self.hook('pretxnchangegroup', throw=True,
1823 self.hook('pretxnchangegroup', throw=True,
1821 node=hex(self.changelog.node(cor+1)), source=srctype,
1824 node=hex(self.changelog.node(cor+1)), source=srctype,
1822 url=url)
1825 url=url)
1823
1826
1824 tr.close()
1827 tr.close()
1825
1828
1826 if changesets > 0:
1829 if changesets > 0:
1827 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1830 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1828 source=srctype, url=url)
1831 source=srctype, url=url)
1829
1832
1830 for i in xrange(cor + 1, cnr + 1):
1833 for i in xrange(cor + 1, cnr + 1):
1831 self.hook("incoming", node=hex(self.changelog.node(i)),
1834 self.hook("incoming", node=hex(self.changelog.node(i)),
1832 source=srctype, url=url)
1835 source=srctype, url=url)
1833
1836
1834 return newheads - oldheads + 1
1837 return newheads - oldheads + 1
1835
1838
1836
1839
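The return convention above is coarse but cheap to test: 0 for an empty source, 1 when changesets arrived without changing the head count, otherwise 1 plus the head delta, letting callers warn when new heads appear. A hedged decoder (note the delta can be negative when incoming changesets merge existing heads, collapsing the value toward 0):

def describe(ret):
    if ret == 0:
        return 'empty changegroup'
    if ret == 1:
        return 'changesets added, head count unchanged'
    return 'head count changed by %+d' % (ret - 1)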
1837 def stream_in(self, remote):
1840 def stream_in(self, remote):
1838 fp = remote.stream_out()
1841 fp = remote.stream_out()
1839 l = fp.readline()
1842 l = fp.readline()
1840 try:
1843 try:
1841 resp = int(l)
1844 resp = int(l)
1842 except ValueError:
1845 except ValueError:
1843 raise util.UnexpectedOutput(
1846 raise util.UnexpectedOutput(
1844 _('Unexpected response from remote server:'), l)
1847 _('Unexpected response from remote server:'), l)
1845 if resp == 1:
1848 if resp == 1:
1846 raise util.Abort(_('operation forbidden by server'))
1849 raise util.Abort(_('operation forbidden by server'))
1847 elif resp == 2:
1850 elif resp == 2:
1848 raise util.Abort(_('locking the remote repository failed'))
1851 raise util.Abort(_('locking the remote repository failed'))
1849 elif resp != 0:
1852 elif resp != 0:
1850 raise util.Abort(_('the server sent an unknown error code'))
1853 raise util.Abort(_('the server sent an unknown error code'))
1851 self.ui.status(_('streaming all changes\n'))
1854 self.ui.status(_('streaming all changes\n'))
1852 l = fp.readline()
1855 l = fp.readline()
1853 try:
1856 try:
1854 total_files, total_bytes = map(int, l.split(' ', 1))
1857 total_files, total_bytes = map(int, l.split(' ', 1))
1855 except (ValueError, TypeError):
1858 except (ValueError, TypeError):
1856 raise util.UnexpectedOutput(
1859 raise util.UnexpectedOutput(
1857 _('Unexpected response from remote server:'), l)
1860 _('Unexpected response from remote server:'), l)
1858 self.ui.status(_('%d files to transfer, %s of data\n') %
1861 self.ui.status(_('%d files to transfer, %s of data\n') %
1859 (total_files, util.bytecount(total_bytes)))
1862 (total_files, util.bytecount(total_bytes)))
1860 start = time.time()
1863 start = time.time()
1861 for i in xrange(total_files):
1864 for i in xrange(total_files):
1862 # XXX doesn't support '\n' or '\r' in filenames
1865 # XXX doesn't support '\n' or '\r' in filenames
1863 l = fp.readline()
1866 l = fp.readline()
1864 try:
1867 try:
1865 name, size = l.split('\0', 1)
1868 name, size = l.split('\0', 1)
1866 size = int(size)
1869 size = int(size)
1867 except (ValueError, TypeError):
1870 except (ValueError, TypeError):
1868 raise util.UnexpectedOutput(
1871 raise util.UnexpectedOutput(
1869 _('Unexpected response from remote server:'), l)
1872 _('Unexpected response from remote server:'), l)
1870 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1873 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1871 ofp = self.sopener(name, 'w')
1874 ofp = self.sopener(name, 'w')
1872 for chunk in util.filechunkiter(fp, limit=size):
1875 for chunk in util.filechunkiter(fp, limit=size):
1873 ofp.write(chunk)
1876 ofp.write(chunk)
1874 ofp.close()
1877 ofp.close()
1875 elapsed = time.time() - start
1878 elapsed = time.time() - start
1876 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1879 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1877 (util.bytecount(total_bytes), elapsed,
1880 (util.bytecount(total_bytes), elapsed,
1878 util.bytecount(total_bytes / elapsed)))
1881 util.bytecount(total_bytes / elapsed)))
1879 self.reload()
1882 self.reload()
1880 return len(self.heads()) + 1
1883 return len(self.heads()) + 1
1881
1884
1882 def clone(self, remote, heads=[], stream=False):
1885 def clone(self, remote, heads=[], stream=False):
1883 '''clone remote repository.
1886 '''clone remote repository.
1884
1887
1885 keyword arguments:
1888 keyword arguments:
1886 heads: list of revs to clone (forces use of pull)
1889 heads: list of revs to clone (forces use of pull)
1887 stream: use streaming clone if possible'''
1890 stream: use streaming clone if possible'''
1888
1891
1889 # now, all clients that can request uncompressed clones can
1892 # now, all clients that can request uncompressed clones can
1890 # read repo formats supported by all servers that can serve
1893 # read repo formats supported by all servers that can serve
1891 # them.
1894 # them.
1892
1895
1893 # if revlog format changes, client will have to check version
1896 # if revlog format changes, client will have to check version
1894 # and format flags on "stream" capability, and use
1897 # and format flags on "stream" capability, and use
1895 # uncompressed only if compatible.
1898 # uncompressed only if compatible.
1896
1899
1897 if stream and not heads and remote.capable('stream'):
1900 if stream and not heads and remote.capable('stream'):
1898 return self.stream_in(remote)
1901 return self.stream_in(remote)
1899 return self.pull(remote, heads)
1902 return self.pull(remote, heads)
1900
1903
1901 # used to avoid circular references so destructors work
1904 # used to avoid circular references so destructors work
1902 def aftertrans(files):
1905 def aftertrans(files):
1903 renamefiles = [tuple(t) for t in files]
1906 renamefiles = [tuple(t) for t in files]
1904 def a():
1907 def a():
1905 for src, dest in renamefiles:
1908 for src, dest in renamefiles:
1906 util.rename(src, dest)
1909 util.rename(src, dest)
1907 return a
1910 return a
1908
1911
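aftertrans() deliberately closes over a plain list rather than over the repository or transaction: a bound method stored on the transaction would form a reference cycle, and in the CPython of this era cycles involving objects with __del__ were never collected. A toy version with print standing in for util.rename:

def make_after(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            print 'rename %s -> %s' % (src, dest)
    return a

cb = make_after([('journal', 'undo')])
cb()   # the callback holds no reference back to repo or transaction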
1909 def instance(ui, path, create):
1912 def instance(ui, path, create):
1910 return localrepository(ui, util.drop_scheme('file', path), create)
1913 return localrepository(ui, util.drop_scheme('file', path), create)
1911
1914
1912 def islocal(path):
1915 def islocal(path):
1913 return True
1916 return True
@@ -1,65 +1,66 b''
1 # statichttprepo.py - simple http repository class for mercurial
1 # statichttprepo.py - simple http repository class for mercurial
2 #
2 #
3 # This provides read-only repo access to repositories exported via static http
3 # This provides read-only repo access to repositories exported via static http
4 #
4 #
5 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
6 #
6 #
7 # This software may be used and distributed according to the terms
7 # This software may be used and distributed according to the terms
8 # of the GNU General Public License, incorporated herein by reference.
8 # of the GNU General Public License, incorporated herein by reference.
9
9
10 from demandload import *
10 from demandload import *
11 from i18n import gettext as _
11 from i18n import gettext as _
12 demandload(globals(), "changelog filelog httprangereader")
12 demandload(globals(), "changelog filelog httprangereader")
13 demandload(globals(), "localrepo manifest os urllib urllib2 util")
13 demandload(globals(), "localrepo manifest os urllib urllib2 util")
14
14
15 class rangereader(httprangereader.httprangereader):
15 class rangereader(httprangereader.httprangereader):
16 def read(self, size=None):
16 def read(self, size=None):
17 try:
17 try:
18 return httprangereader.httprangereader.read(self, size)
18 return httprangereader.httprangereader.read(self, size)
19 except urllib2.HTTPError, inst:
19 except urllib2.HTTPError, inst:
20 raise IOError(None, inst)
20 raise IOError(None, inst)
21 except urllib2.URLError, inst:
21 except urllib2.URLError, inst:
22 raise IOError(None, inst.reason[1])
22 raise IOError(None, inst.reason[1])
23
23
24 def opener(base):
24 def opener(base):
25 """return a function that opens files over http"""
25 """return a function that opens files over http"""
26 p = base
26 p = base
27 def o(path, mode="r"):
27 def o(path, mode="r"):
28 f = os.path.join(p, urllib.quote(path))
28 f = os.path.join(p, urllib.quote(path))
29 return rangereader(f)
29 return rangereader(f)
30 return o
30 return o
31
31
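This opener is the storage abstraction the repository classes program against: callers use self.opener(name) or self.sopener(name) and never learn whether the bytes come from local files or HTTP range requests. A local-file counterpart with the same shape (a sketch; the real opener in util also handles things like directory creation on write):

import os

def fileopener(base):
    def o(path, mode='r'):
        return open(os.path.join(base, path), mode)
    return o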
32 class statichttprepository(localrepo.localrepository):
32 class statichttprepository(localrepo.localrepository):
33 def __init__(self, ui, path):
33 def __init__(self, ui, path):
34 self._url = path
34 self._url = path
35 self.path = (path + "/.hg")
35 self.path = (path + "/.hg")
36 self.spath = self.path
36 self.ui = ui
37 self.ui = ui
37 self.revlogversion = 0
38 self.revlogversion = 0
38 self.opener = opener(self.path)
39 self.opener = opener(self.path)
39 self.sopener = opener(self.path)
40 self.sopener = opener(self.spath)
40 self.manifest = manifest.manifest(self.opener)
41 self.manifest = manifest.manifest(self.sopener)
41 self.changelog = changelog.changelog(self.opener)
42 self.changelog = changelog.changelog(self.sopener)
42 self.tagscache = None
43 self.tagscache = None
43 self.nodetagscache = None
44 self.nodetagscache = None
44 self.encodepats = None
45 self.encodepats = None
45 self.decodepats = None
46 self.decodepats = None
46
47
47 def url(self):
48 def url(self):
48 return 'static-' + self._url
49 return 'static-' + self._url
49
50
50 def dev(self):
51 def dev(self):
51 return -1
52 return -1
52
53
53 def local(self):
54 def local(self):
54 return False
55 return False
55
56
56 def instance(ui, path, create):
57 def instance(ui, path, create):
57 if create:
58 if create:
58 raise util.Abort(_('cannot create new static-http repository'))
59 raise util.Abort(_('cannot create new static-http repository'))
59 if path.startswith('old-http:'):
60 if path.startswith('old-http:'):
60 ui.warn(_("old-http:// syntax is deprecated, "
61 ui.warn(_("old-http:// syntax is deprecated, "
61 "please use static-http:// instead\n"))
62 "please use static-http:// instead\n"))
62 path = path[4:]
63 path = path[4:]
63 else:
64 else:
64 path = path[7:]
65 path = path[7:]
65 return statichttprepository(ui, path)
66 return statichttprepository(ui, path)
@@ -1,95 +1,95 b''
1 # streamclone.py - streaming clone server support for mercurial
1 # streamclone.py - streaming clone server support for mercurial
2 #
2 #
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from demandload import demandload
8 from demandload import demandload
9 from i18n import gettext as _
9 from i18n import gettext as _
10 demandload(globals(), "os stat util lock")
10 demandload(globals(), "os stat util lock")
11
11
12 # if server supports streaming clone, it advertises "stream"
12 # if server supports streaming clone, it advertises "stream"
13 # capability with value that is version+flags of repo it is serving.
13 # capability with value that is version+flags of repo it is serving.
14 # client only streams if it can read that repo format.
14 # client only streams if it can read that repo format.
15
15
16 def walkrepo(root):
16 def walkrepo(root):
17 '''iterate over metadata files in repository.
17 '''iterate over metadata files in repository.
18 walk in natural (sorted) order.
18 walk in natural (sorted) order.
19 yields 2-tuples: name of .d or .i file, size of file.'''
19 yields 2-tuples: name of .d or .i file, size of file.'''
20
20
21 strip_count = len(root) + len(os.sep)
21 strip_count = len(root) + len(os.sep)
22 def walk(path, recurse):
22 def walk(path, recurse):
23 ents = os.listdir(path)
23 ents = os.listdir(path)
24 ents.sort()
24 ents.sort()
25 for e in ents:
25 for e in ents:
26 pe = os.path.join(path, e)
26 pe = os.path.join(path, e)
27 st = os.lstat(pe)
27 st = os.lstat(pe)
28 if stat.S_ISDIR(st.st_mode):
28 if stat.S_ISDIR(st.st_mode):
29 if recurse:
29 if recurse:
30 for x in walk(pe, True):
30 for x in walk(pe, True):
31 yield x
31 yield x
32 else:
32 else:
33 if not stat.S_ISREG(st.st_mode) or len(e) < 2:
33 if not stat.S_ISREG(st.st_mode) or len(e) < 2:
34 continue
34 continue
35 sfx = e[-2:]
35 sfx = e[-2:]
36 if sfx in ('.d', '.i'):
36 if sfx in ('.d', '.i'):
37 yield pe[strip_count:], st.st_size
37 yield pe[strip_count:], st.st_size
38 # write file data first
38 # write file data first
39 for x in walk(os.path.join(root, 'data'), True):
39 for x in walk(os.path.join(root, 'data'), True):
40 yield x
40 yield x
41 # write manifest before changelog
41 # write manifest before changelog
42 meta = list(walk(root, False))
42 meta = list(walk(root, False))
43 meta.sort()
43 meta.sort()
44 meta.reverse()
44 meta.reverse()
45 for x in meta:
45 for x in meta:
46 yield x
46 yield x
47
47
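The sort-and-reverse above is what yields "manifest before changelog": the two metadata revlogs compare lexicographically as 00changelog.* < 00manifest.*, so reversing the sorted list emits the manifest files first:

meta = ['00changelog.i', '00changelog.d', '00manifest.i', '00manifest.d']
meta.sort()
meta.reverse()
# ['00manifest.i', '00manifest.d', '00changelog.i', '00changelog.d']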
48 # stream file format is simple.
48 # stream file format is simple.
49 #
49 #
50 # server writes out line that says how many files, how many total
50 # server writes out line that says how many files, how many total
51 # bytes. separator is ascii space, byte counts are strings.
51 # bytes. separator is ascii space, byte counts are strings.
52 #
52 #
53 # then for each file:
53 # then for each file:
54 #
54 #
55 # server writes out line that says file name, how many bytes in
55 # server writes out line that says file name, how many bytes in
56 # file. separator is ascii nul, byte count is string.
56 # file. separator is ascii nul, byte count is string.
57 #
57 #
58 # server writes out raw file data.
58 # server writes out raw file data.
59
59
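A hedged client-side sketch of this format; fp is any file-like object positioned just past the status line, and real code would read the payload in bounded chunks rather than one read():

def read_stream(fp, writefile):
    total_files, total_bytes = map(int, fp.readline().split(' ', 1))
    for i in xrange(total_files):
        name, size = fp.readline().split('\0', 1)
        writefile(name, fp.read(int(size)))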
60 def stream_out(repo, fileobj):
60 def stream_out(repo, fileobj):
61 '''stream out all metadata files in repository.
61 '''stream out all metadata files in repository.
62 writes to file-like object, must support write() and optional flush().'''
62 writes to file-like object, must support write() and optional flush().'''
63
63
64 if not repo.ui.configbool('server', 'uncompressed'):
64 if not repo.ui.configbool('server', 'uncompressed'):
65 fileobj.write('1\n')
65 fileobj.write('1\n')
66 return
66 return
67
67
68 # get consistent snapshot of repo. lock during scan so lock not
68 # get consistent snapshot of repo. lock during scan so lock not
69 # needed while we stream, and commits can happen.
69 # needed while we stream, and commits can happen.
70 try:
70 try:
71 repolock = repo.lock()
71 repolock = repo.lock()
72 except (lock.LockHeld, lock.LockUnavailable), inst:
72 except (lock.LockHeld, lock.LockUnavailable), inst:
73 repo.ui.warn('locking the repository failed: %s\n' % (inst,))
73 repo.ui.warn('locking the repository failed: %s\n' % (inst,))
74 fileobj.write('2\n')
74 fileobj.write('2\n')
75 return
75 return
76
76
77 fileobj.write('0\n')
77 fileobj.write('0\n')
78 repo.ui.debug('scanning\n')
78 repo.ui.debug('scanning\n')
79 entries = []
79 entries = []
80 total_bytes = 0
80 total_bytes = 0
81 for name, size in walkrepo(repo.path):
81 for name, size in walkrepo(repo.spath):
82 entries.append((name, size))
82 entries.append((name, size))
83 total_bytes += size
83 total_bytes += size
84 repolock.release()
84 repolock.release()
85
85
86 repo.ui.debug('%d files, %d bytes to transfer\n' %
86 repo.ui.debug('%d files, %d bytes to transfer\n' %
87 (len(entries), total_bytes))
87 (len(entries), total_bytes))
88 fileobj.write('%d %d\n' % (len(entries), total_bytes))
88 fileobj.write('%d %d\n' % (len(entries), total_bytes))
89 for name, size in entries:
89 for name, size in entries:
90 repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
90 repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
91 fileobj.write('%s\0%d\n' % (name, size))
91 fileobj.write('%s\0%d\n' % (name, size))
92 for chunk in util.filechunkiter(repo.opener(name), limit=size):
92 for chunk in util.filechunkiter(repo.sopener(name), limit=size):
93 fileobj.write(chunk)
93 fileobj.write(chunk)
94 flush = getattr(fileobj, 'flush', None)
94 flush = getattr(fileobj, 'flush', None)
95 if flush: flush()
95 if flush: flush()