merge with crew
Benoit Boissinot
r3793:f3fbf76d merge default
--- bundlerepo.py
+++ bundlerepo.py
@@ -1,72 +1,72 @@
 """
 bundlerepo.py - repository class for viewing uncompressed bundles
 
 This provides a read-only repository interface to bundles as if
 they were part of the actual repository.
 
 Copyright 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org>
 
 This software may be used and distributed according to the terms
 of the GNU General Public License, incorporated herein by reference.
 """
 
 from node import *
 from i18n import gettext as _
 from demandload import demandload
 demandload(globals(), "changegroup util os struct bz2 tempfile")
 
 import localrepo, changelog, manifest, filelog, revlog
 
 class bundlerevlog(revlog.revlog):
     def __init__(self, opener, indexfile, datafile, bundlefile,
                  linkmapper=None):
         # How it works:
         # to retrieve a revision, we need to know the offset of
         # the revision in the bundlefile (an opened file).
         #
         # We store this offset in the index (start). To differentiate a
         # rev in the bundle from a rev in the revlog, we check
         # len(index[r]). If the tuple is bigger than 7, it is a bundle
         # (it is bigger since we store the node to which the delta is)
         #
         revlog.revlog.__init__(self, opener, indexfile, datafile)
         self.bundlefile = bundlefile
         self.basemap = {}
         def chunkpositer():
             for chunk in changegroup.chunkiter(bundlefile):
                 pos = bundlefile.tell()
                 yield chunk, pos - len(chunk)
         n = self.count()
         prev = None
         for chunk, start in chunkpositer():
             size = len(chunk)
             if size < 80:
                 raise util.Abort("invalid changegroup")
             start += 80
             size -= 80
             node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
             if node in self.nodemap:
                 prev = node
                 continue
             for p in (p1, p2):
                 if not p in self.nodemap:
                     raise revlog.RevlogError(_("unknown parent %s") % short(p1))
             if linkmapper is None:
                 link = n
             else:
                 link = linkmapper(cs)
 
             if not prev:
                 prev = p1
             # start, size, base is not used, link, p1, p2, delta ref
             if self.version == revlog.REVLOGV0:
                 e = (start, size, None, link, p1, p2, node)
             else:
                 e = (self.offset_type(start, 0), size, -1, None, link,
                      self.rev(p1), self.rev(p2), node)
             self.basemap[n] = prev
             self.index.append(e)
             self.nodemap[node] = n
             prev = node
             n += 1
 
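Each changegroup chunk that the loop above indexes carries an 80-byte header of four 20-byte nodes, followed by the delta payload. A standalone sketch of the same parse (the function name is illustrative, not part of the source):

    import struct

    def parse_chunk(chunk):
        # Mirrors the checks above: four 20-byte nodes, then the delta.
        if len(chunk) < 80:
            raise ValueError("invalid changegroup")
        node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
        return node, p1, p2, cs, chunk[80:]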
@@ -73,59 +73,59 @@
     def bundle(self, rev):
         """is rev from the bundle"""
         if rev < 0:
             return False
         return rev in self.basemap
     def bundlebase(self, rev): return self.basemap[rev]
     def chunk(self, rev, df=None, cachelen=4096):
         # Warning: in case of bundle, the diff is against bundlebase,
         # not against rev - 1
         # XXX: could use some caching
         if not self.bundle(rev):
             return revlog.revlog.chunk(self, rev, df, cachelen)
         self.bundlefile.seek(self.start(rev))
         return self.bundlefile.read(self.length(rev))
 
     def revdiff(self, rev1, rev2):
         """return or calculate a delta between two revisions"""
         if self.bundle(rev1) and self.bundle(rev2):
             # hot path for bundle
             revb = self.rev(self.bundlebase(rev2))
             if revb == rev1:
                 return self.chunk(rev2)
         elif not self.bundle(rev1) and not self.bundle(rev2):
             return revlog.revlog.chunk(self, rev1, rev2)
 
         return self.diff(self.revision(self.node(rev1)),
                          self.revision(self.node(rev2)))
 
     def revision(self, node):
         """return an uncompressed revision of a given node"""
         if node == nullid: return ""
 
         text = None
         chain = []
         iter_node = node
         rev = self.rev(iter_node)
         # reconstruct the revision if it is from a changegroup
         while self.bundle(rev):
             if self.cache and self.cache[0] == iter_node:
                 text = self.cache[2]
                 break
             chain.append(rev)
             iter_node = self.bundlebase(rev)
             rev = self.rev(iter_node)
         if text is None:
             text = revlog.revlog.revision(self, iter_node)
 
         while chain:
             delta = self.chunk(chain.pop())
             text = self.patches(text, [delta])
 
         p1, p2 = self.parents(node)
         if node != revlog.hash(text, p1, p2):
             raise revlog.RevlogError(_("integrity check failed on %s:%d")
                                      % (self.datafile, self.rev(node)))
 
         self.cache = (node, self.rev(node), text)
         return text
 
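revision() above walks basemap back until it reaches a revision stored in the underlying revlog, then replays the bundle deltas forward. The walk, reduced to its skeleton (the helper callables are stand-ins for bundle()/bundlebase()/chunk()/patches(), assumed for illustration):

    def reconstruct(rev, in_bundle, base_of, base_text, delta_of, apply_delta):
        chain = []
        while in_bundle(rev):          # collect bundle revs, newest first
            chain.append(rev)
            rev = base_of(rev)
        text = base_text(rev)          # full text from the real revlog
        while chain:                   # replay deltas, oldest first
            text = apply_delta(text, delta_of(chain.pop()))
        return text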
@@ -132,83 +132,83 @@
     def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
         raise NotImplementedError
     def addgroup(self, revs, linkmapper, transaction, unique=0):
         raise NotImplementedError
     def strip(self, rev, minlink):
         raise NotImplementedError
     def checksize(self):
         raise NotImplementedError
 
 class bundlechangelog(bundlerevlog, changelog.changelog):
     def __init__(self, opener, bundlefile):
         changelog.changelog.__init__(self, opener)
         bundlerevlog.__init__(self, opener, self.indexfile, self.datafile,
                               bundlefile)
 
 class bundlemanifest(bundlerevlog, manifest.manifest):
     def __init__(self, opener, bundlefile, linkmapper):
         manifest.manifest.__init__(self, opener)
         bundlerevlog.__init__(self, opener, self.indexfile, self.datafile,
                               bundlefile, linkmapper)
 
 class bundlefilelog(bundlerevlog, filelog.filelog):
     def __init__(self, opener, path, bundlefile, linkmapper):
         filelog.filelog.__init__(self, opener, path)
         bundlerevlog.__init__(self, opener, self.indexfile, self.datafile,
                               bundlefile, linkmapper)
 
 class bundlerepository(localrepo.localrepository):
     def __init__(self, ui, path, bundlename):
         localrepo.localrepository.__init__(self, ui, path)
 
         self._url = 'bundle:' + bundlename
         if path: self._url += '+' + path
 
         self.tempfile = None
         self.bundlefile = open(bundlename, "rb")
         header = self.bundlefile.read(6)
         if not header.startswith("HG"):
             raise util.Abort(_("%s: not a Mercurial bundle file") % bundlename)
         elif not header.startswith("HG10"):
             raise util.Abort(_("%s: unknown bundle version") % bundlename)
         elif header == "HG10BZ":
             fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-",
                                             suffix=".hg10un", dir=self.path)
             self.tempfile = temp
             fptemp = os.fdopen(fdtemp, 'wb')
             def generator(f):
                 zd = bz2.BZ2Decompressor()
                 zd.decompress("BZ")
                 for chunk in f:
                     yield zd.decompress(chunk)
             gen = generator(util.filechunkiter(self.bundlefile, 4096))
 
             try:
                 fptemp.write("HG10UN")
                 for chunk in gen:
                     fptemp.write(chunk)
             finally:
                 fptemp.close()
                 self.bundlefile.close()
 
             self.bundlefile = open(self.tempfile, "rb")
             # seek right after the header
             self.bundlefile.seek(6)
         elif header == "HG10UN":
             # nothing to do
             pass
         else:
             raise util.Abort(_("%s: unknown bundle compression type")
                              % bundlename)
-        self.changelog = bundlechangelog(self.opener, self.bundlefile)
-        self.manifest = bundlemanifest(self.opener, self.bundlefile,
+        self.changelog = bundlechangelog(self.sopener, self.bundlefile)
+        self.manifest = bundlemanifest(self.sopener, self.bundlefile,
                                        self.changelog.rev)
         # dict with the mapping 'filename' -> position in the bundle
         self.bundlefilespos = {}
         while 1:
             f = changegroup.getchunk(self.bundlefile)
             if not f:
                 break
             self.bundlefilespos[f] = self.bundlefile.tell()
             for c in changegroup.chunkiter(self.bundlefile):
                 pass
 
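The constructor recognizes two HG10 variants: HG10UN is used in place, while HG10BZ is decompressed through bz2 into a temporary HG10UN file first. The dispatch on the six-byte header, restated in isolation (hypothetical helper, stdlib only):

    def bundle_compression(header):
        if not header.startswith("HG"):
            raise ValueError("not a Mercurial bundle file")
        if not header.startswith("HG10"):
            raise ValueError("unknown bundle version")
        if header == "HG10BZ":
            return "bzip2"   # decompress to a temp file, then reopen
        if header == "HG10UN":
            return "none"    # already uncompressed, nothing to do
        raise ValueError("unknown bundle compression type")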
@@ -215,42 +215,42 @@
     def url(self):
         return self._url
 
     def dev(self):
         return -1
 
     def file(self, f):
         if f[0] == '/':
             f = f[1:]
         if f in self.bundlefilespos:
             self.bundlefile.seek(self.bundlefilespos[f])
-            return bundlefilelog(self.opener, f, self.bundlefile,
+            return bundlefilelog(self.sopener, f, self.bundlefile,
                                  self.changelog.rev)
         else:
-            return filelog.filelog(self.opener, f)
+            return filelog.filelog(self.sopener, f)
 
     def close(self):
         """Close assigned bundle file immediately."""
         self.bundlefile.close()
 
     def __del__(self):
         bundlefile = getattr(self, 'bundlefile', None)
         if bundlefile and not bundlefile.closed:
             bundlefile.close()
         tempfile = getattr(self, 'tempfile', None)
         if tempfile is not None:
             os.unlink(tempfile)
 
 def instance(ui, path, create):
     if create:
         raise util.Abort(_('cannot create new bundle repository'))
     path = util.drop_scheme('file', path)
     if path.startswith('bundle:'):
         path = util.drop_scheme('bundle', path)
         s = path.split("+", 1)
         if len(s) == 1:
             repopath, bundlename = "", s[0]
         else:
             repopath, bundlename = s
     else:
         repopath, bundlename = '', path
     return bundlerepository(ui, repopath, bundlename)
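instance() accepts either a bare bundle filename or a 'bundle:' URL; a '+' inside the URL separates the underlying repository path from the bundle file. Worked examples of the split above (paths hypothetical):

    'bundle:/repo+/tmp/changes.hg'  ->  repopath='/repo', bundlename='/tmp/changes.hg'
    'bundle:/tmp/changes.hg'        ->  repopath='',      bundlename='/tmp/changes.hg'
    '/tmp/changes.hg'               ->  repopath='',      bundlename='/tmp/changes.hg'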
--- hg.py
+++ hg.py
@@ -1,30 +1,30 @@
 # hg.py - repository classes for mercurial
 #
 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
 #
 # This software may be used and distributed according to the terms
 # of the GNU General Public License, incorporated herein by reference.
 
 from node import *
 from repo import *
 from demandload import *
 from i18n import gettext as _
 demandload(globals(), "localrepo bundlerepo httprepo sshrepo statichttprepo")
 demandload(globals(), "errno lock os shutil util merge@_merge verify@_verify")
 
 def _local(path):
     return (os.path.isfile(util.drop_scheme('file', path)) and
             bundlerepo or localrepo)
 
 schemes = {
     'bundle': bundlerepo,
     'file': _local,
     'hg': httprepo,
     'http': httprepo,
     'https': httprepo,
     'old-http': statichttprepo,
     'ssh': sshrepo,
     'static-http': statichttprepo,
     }
 
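_lookup() (defined next) selects an entry from this table by the text before the first ':', falling back to 'file'. For example (paths hypothetical):

    'http://host/repo'  ->  httprepo
    'ssh://user@host/r' ->  sshrepo
    'bundle:changes.hg' ->  bundlerepo
    '/some/local/path'  ->  _local, i.e. bundlerepo if the path is a
                            plain file, localrepo if it is a directory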
@@ -31,34 +31,34 @@
 def _lookup(path):
     scheme = 'file'
     if path:
         c = path.find(':')
         if c > 0:
             scheme = path[:c]
     thing = schemes.get(scheme) or schemes['file']
     try:
         return thing(path)
     except TypeError:
         return thing
 
 def islocal(repo):
     '''return true if repo or path is local'''
     if isinstance(repo, str):
         try:
             return _lookup(repo).islocal(repo)
         except AttributeError:
             return False
     return repo.local()
 
 repo_setup_hooks = []
 
 def repository(ui, path='', create=False):
     """return a repository object for the specified path"""
     repo = _lookup(path).instance(ui, path, create)
     for hook in repo_setup_hooks:
         hook(ui, repo)
     return repo
 
 def defaultdest(source):
     '''return default destination of clone if none is given'''
     return os.path.basename(os.path.normpath(source))
 
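defaultdest() is just the last path component after normalization; worked examples (sources hypothetical):

    defaultdest('/home/user/repo/')         ->  'repo'
    defaultdest('http://host/projects/hg')  ->  'hg'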
@@ -65,145 +65,145 @@
 def clone(ui, source, dest=None, pull=False, rev=None, update=True,
           stream=False):
     """Make a copy of an existing repository.
 
     Create a copy of an existing repository in a new directory. The
     source and destination are URLs, as passed to the repository
     function. Returns a pair of repository objects, the source and
     newly created destination.
 
     The location of the source is added to the new repository's
     .hg/hgrc file, as the default to be used for future pulls and
     pushes.
 
     If an exception is raised, the partly cloned/updated destination
     repository will be deleted.
 
     Arguments:
 
     source: repository object or URL
 
     dest: URL of destination repository to create (defaults to base
     name of source repository)
 
     pull: always pull from source repository, even in local case
 
     stream: stream raw data uncompressed from repository (fast over
     LAN, slow over WAN)
 
     rev: revision to clone up to (implies pull=True)
 
     update: update working directory after clone completes, if
     destination is local repository
     """
     if isinstance(source, str):
         src_repo = repository(ui, source)
     else:
         src_repo = source
         source = src_repo.url()
 
     if dest is None:
         dest = defaultdest(source)
 
     def localpath(path):
         if path.startswith('file://'):
             return path[7:]
         if path.startswith('file:'):
             return path[5:]
         return path
 
     dest = localpath(dest)
     source = localpath(source)
 
     if os.path.exists(dest):
         raise util.Abort(_("destination '%s' already exists") % dest)
 
     class DirCleanup(object):
         def __init__(self, dir_):
             self.rmtree = shutil.rmtree
             self.dir_ = dir_
         def close(self):
             self.dir_ = None
         def __del__(self):
             if self.dir_:
                 self.rmtree(self.dir_, True)
 
     dest_repo = repository(ui, dest, create=True)
 
-    dest_path = None
     dir_cleanup = None
     if dest_repo.local():
-        dest_path = os.path.realpath(dest_repo.root)
-        dir_cleanup = DirCleanup(dest_path)
+        dir_cleanup = DirCleanup(os.path.realpath(dest_repo.root))
 
     abspath = source
     copy = False
     if src_repo.local() and dest_repo.local():
         abspath = os.path.abspath(source)
         copy = not pull and not rev
 
     src_lock, dest_lock = None, None
     if copy:
         try:
             # we use a lock here because if we race with commit, we
             # can end up with extra data in the cloned revlogs that's
             # not pointed to by changesets, thus causing verify to
             # fail
             src_lock = src_repo.lock()
         except lock.LockException:
             copy = False
 
     if copy:
         # we lock here to avoid premature writing to the target
-        dest_lock = lock.lock(os.path.join(dest_path, ".hg", "lock"))
+        src_store = os.path.realpath(src_repo.spath)
+        dest_store = os.path.realpath(dest_repo.spath)
+        dest_lock = lock.lock(os.path.join(dest_store, "lock"))
 
         files = ("data",
                  "00manifest.d", "00manifest.i",
                  "00changelog.d", "00changelog.i")
         for f in files:
-            src = os.path.join(source, ".hg", f)
-            dst = os.path.join(dest_path, ".hg", f)
+            src = os.path.join(src_store, f)
+            dst = os.path.join(dest_store, f)
             try:
                 util.copyfiles(src, dst)
             except OSError, inst:
                 if inst.errno != errno.ENOENT:
                     raise
 
         # we need to re-init the repo after manually copying the data
         # into it
         dest_repo = repository(ui, dest)
 
     else:
         revs = None
         if rev:
             if 'lookup' not in src_repo.capabilities:
                 raise util.Abort(_("src repository does not support revision "
                                    "lookup and so doesn't support clone by "
                                    "revision"))
             revs = [src_repo.lookup(r) for r in rev]
 
         if dest_repo.local():
             dest_repo.clone(src_repo, heads=revs, stream=stream)
         elif src_repo.local():
             src_repo.push(dest_repo, revs=revs)
         else:
             raise util.Abort(_("clone from remote to remote not supported"))
 
     if src_lock:
         src_lock.release()
 
     if dest_repo.local():
         fp = dest_repo.opener("hgrc", "w", text=True)
         fp.write("[paths]\n")
         fp.write("default = %s\n" % abspath)
         fp.close()
 
     if dest_lock:
         dest_lock.release()
 
     if update:
         _update(dest_repo, dest_repo.changelog.tip())
     if dir_cleanup:
         dir_cleanup.close()
 
     return src_repo, dest_repo
 
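A hedged usage sketch of clone() (the ui object and paths are assumptions): it returns the source and destination repository objects, and DirCleanup removes a partly created destination if an exception escapes before dir_cleanup.close() runs.

    src_repo, dest_repo = clone(ui, '/existing/repo', dest='copy')    # local: store files copied
    src_repo, dest_repo = clone(ui, 'http://host/repo', rev=['tip'])  # rev implies a pull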
@@ -210,47 +210,47 @@
 def _showstats(repo, stats):
     stats = ((stats[0], _("updated")),
              (stats[1], _("merged")),
              (stats[2], _("removed")),
              (stats[3], _("unresolved")))
     note = ", ".join([_("%d files %s") % s for s in stats])
     repo.ui.status("%s\n" % note)
 
 def _update(repo, node): return update(repo, node)
 
 def update(repo, node):
     """update the working directory to node, merging linear changes"""
     stats = _merge.update(repo, node, False, False, None, None)
     _showstats(repo, stats)
     if stats[3]:
         repo.ui.status(_("There are unresolved merges with"
                          " locally modified files.\n"))
     return stats[3]
 
 def clean(repo, node, wlock=None, show_stats=True):
     """forcibly switch the working directory to node, clobbering changes"""
     stats = _merge.update(repo, node, False, True, None, wlock)
     if show_stats: _showstats(repo, stats)
     return stats[3]
 
 def merge(repo, node, force=None, remind=True, wlock=None):
     """branch merge with node, resolving changes"""
     stats = _merge.update(repo, node, True, force, False, wlock)
     _showstats(repo, stats)
     if stats[3]:
         pl = repo.parents()
         repo.ui.status(_("There are unresolved merges,"
                          " you can redo the full merge using:\n"
                          " hg update -C %s\n"
                          " hg merge %s\n")
                        % (pl[0].rev(), pl[1].rev()))
     elif remind:
         repo.ui.status(_("(branch merge, don't forget to commit)\n"))
     return stats[3]
 
 def revert(repo, node, choose, wlock):
     """revert changes to revision in node without updating dirstate"""
     return _merge.update(repo, node, False, True, choose, wlock)[3]
 
 def verify(repo):
     """verify the consistency of a repository"""
     return _verify.verify(repo)
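For reference, _showstats() renders the four counters as one status line, e.g. with hypothetical counts:

    3 files updated, 1 files merged, 0 files removed, 0 files unresolved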
--- localrepo.py
+++ localrepo.py
@@ -1,51 +1,54 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms
 # of the GNU General Public License, incorporated herein by reference.
 
 from node import *
 from i18n import gettext as _
 from demandload import *
 import repo
 demandload(globals(), "appendfile changegroup")
 demandload(globals(), "changelog dirstate filelog manifest context")
 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
 demandload(globals(), "os revlog time util")
 
 class localrepository(repo.repository):
     capabilities = ('lookup', 'changegroupsubset')
 
     def __del__(self):
         self.transhandle = None
     def __init__(self, parentui, path=None, create=0):
         repo.repository.__init__(self)
         if not path:
             p = os.getcwd()
             while not os.path.isdir(os.path.join(p, ".hg")):
                 oldp = p
                 p = os.path.dirname(p)
                 if p == oldp:
                     raise repo.RepoError(_("There is no Mercurial repository"
                                            " here (.hg not found)"))
             path = p
         self.path = os.path.join(path, ".hg")
+        self.spath = self.path
 
         if not os.path.isdir(self.path):
             if create:
                 if not os.path.exists(path):
                     os.mkdir(path)
                 os.mkdir(self.path)
+                if self.spath != self.path:
+                    os.mkdir(self.spath)
             else:
                 raise repo.RepoError(_("repository %s not found") % path)
         elif create:
             raise repo.RepoError(_("repository %s already exists") % path)
 
         self.root = os.path.realpath(path)
         self.origroot = path
         self.ui = ui.ui(parentui=parentui)
         self.opener = util.opener(self.path)
-        self.sopener = util.opener(self.path)
+        self.sopener = util.opener(self.spath)
         self.wopener = util.opener(self.root)
 
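The new self.spath ('store path') still equals self.path, but from here on everything that touches revlog data goes through sopener/sjoin, so a later repository layout could move the store without touching callers. The indirection in miniature (standalone sketch, names illustrative):

    import os

    class repopaths(object):
        def __init__(self, root):
            self.path = os.path.join(root, ".hg")  # config, dirstate, hgrc
            self.spath = self.path                 # store; may diverge later
        def join(self, f):
            return os.path.join(self.path, f)
        def sjoin(self, f):                        # store-relative join
            return os.path.join(self.spath, f)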
@@ -52,122 +55,122 @@
         try:
             self.ui.readconfig(self.join("hgrc"), self.root)
         except IOError:
             pass
 
         v = self.ui.configrevlog()
         self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
         self.revlogv1 = self.revlogversion != revlog.REVLOGV0
         fl = v.get('flags', None)
         flags = 0
         if fl != None:
             for x in fl.split():
                 flags |= revlog.flagstr(x)
         elif self.revlogv1:
             flags = revlog.REVLOG_DEFAULT_FLAGS
 
         v = self.revlogversion | flags
         self.manifest = manifest.manifest(self.sopener, v)
         self.changelog = changelog.changelog(self.sopener, v)
 
         # the changelog might not have the inline index flag
         # on. If the format of the changelog is the same as found in
         # .hgrc, apply any flags found in the .hgrc as well.
         # Otherwise, just version from the changelog
         v = self.changelog.version
         if v == self.revlogversion:
             v |= flags
         self.revlogversion = v
 
         self.tagscache = None
         self.branchcache = None
         self.nodetagscache = None
         self.encodepats = None
         self.decodepats = None
         self.transhandle = None
 
         self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
 
     def url(self):
         return 'file:' + self.root
 
     def hook(self, name, throw=False, **args):
         def callhook(hname, funcname):
             '''call python hook. hook is callable object, looked up as
             name in python module. if callable returns "true", hook
             fails, else passes. if hook raises exception, treated as
             hook failure. exception propagates if throw is "true".
 
             reason for "true" meaning "hook failed" is so that
             unmodified commands (e.g. mercurial.commands.update) can
             be run as hooks without wrappers to convert return values.'''
 
             self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
             d = funcname.rfind('.')
             if d == -1:
                 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                  % (hname, funcname))
             modname = funcname[:d]
             try:
                 obj = __import__(modname)
             except ImportError:
                 try:
                     # extensions are loaded with hgext_ prefix
                     obj = __import__("hgext_%s" % modname)
                 except ImportError:
                     raise util.Abort(_('%s hook is invalid '
                                        '(import of "%s" failed)') %
                                      (hname, modname))
             try:
                 for p in funcname.split('.')[1:]:
                     obj = getattr(obj, p)
             except AttributeError, err:
                 raise util.Abort(_('%s hook is invalid '
                                    '("%s" is not defined)') %
                                  (hname, funcname))
             if not callable(obj):
                 raise util.Abort(_('%s hook is invalid '
                                    '("%s" is not callable)') %
                                  (hname, funcname))
             try:
                 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
             except (KeyboardInterrupt, util.SignalInterrupt):
                 raise
             except Exception, exc:
                 if isinstance(exc, util.Abort):
                     self.ui.warn(_('error: %s hook failed: %s\n') %
                                  (hname, exc.args[0]))
                 else:
                     self.ui.warn(_('error: %s hook raised an exception: '
                                    '%s\n') % (hname, exc))
                 if throw:
                     raise
                 self.ui.print_exc()
                 return True
             if r:
                 if throw:
                     raise util.Abort(_('%s hook failed') % hname)
                 self.ui.warn(_('warning: %s hook failed\n') % hname)
             return r
 
         def runhook(name, cmd):
             self.ui.note(_("running hook %s: %s\n") % (name, cmd))
             env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
             r = util.system(cmd, environ=env, cwd=self.root)
             if r:
                 desc, r = util.explain_exit(r)
                 if throw:
                     raise util.Abort(_('%s hook %s') % (name, desc))
                 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
             return r
 
         r = False
         hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                  if hname.split(".", 1)[0] == name and cmd]
         hooks.sort()
         for hname, cmd in hooks:
             if cmd.startswith('python:'):
                 r = callhook(hname, cmd[7:].strip()) or r
             else:
                 r = runhook(hname, cmd) or r
         return r
 
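Hooks come from the [hooks] config section: entries whose command starts with 'python:' run through callhook(), anything else through runhook() as a shell command with HG_* variables in its environment. A hypothetical configuration and matching hook (module and names are assumptions):

    [hooks]
    pretag.check = python:checks.verify_tag
    commit = echo committed

    # checks.py -- a truthy return value makes the hook, and the
    # operation, fail; falsy means the hook passed.
    def verify_tag(ui, repo, hooktype, node=None, tag=None, local=None, **kwargs):
        ui.note("tagging %s as %s\n" % (node, tag))
        return False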
@@ -174,118 +177,118 @@
     tag_disallowed = ':\r\n'
 
     def tag(self, name, node, message, local, user, date):
         '''tag a revision with a symbolic name.
 
         if local is True, the tag is stored in a per-repository file.
         otherwise, it is stored in the .hgtags file, and a new
         changeset is committed with the change.
 
         keyword arguments:
 
         local: whether to store tag in non-version-controlled file
         (default False)
 
         message: commit message to use if committing
 
         user: name of user to use if committing
 
         date: date tuple to use if committing'''
 
         for c in self.tag_disallowed:
             if c in name:
                 raise util.Abort(_('%r cannot be used in a tag name') % c)
 
         self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
 
         if local:
             # local tags are stored in the current charset
             self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
             self.hook('tag', node=hex(node), tag=name, local=local)
             return
 
         for x in self.status()[:5]:
             if '.hgtags' in x:
                 raise util.Abort(_('working copy of .hgtags is changed '
                                    '(please commit .hgtags manually)'))
 
         # committed tags are stored in UTF-8
         line = '%s %s\n' % (hex(node), util.fromlocal(name))
         self.wfile('.hgtags', 'ab').write(line)
         if self.dirstate.state('.hgtags') == '?':
             self.add(['.hgtags'])
 
         self.commit(['.hgtags'], message, user, date)
         self.hook('tag', node=hex(node), tag=name, local=local)
 
     def tags(self):
         '''return a mapping of tag to node'''
         if not self.tagscache:
             self.tagscache = {}
 
             def parsetag(line, context):
                 if not line:
                     return
                 s = l.split(" ", 1)
                 if len(s) != 2:
                     self.ui.warn(_("%s: cannot parse entry\n") % context)
                     return
                 node, key = s
                 key = util.tolocal(key.strip()) # stored in UTF-8
                 try:
                     bin_n = bin(node)
                 except TypeError:
                     self.ui.warn(_("%s: node '%s' is not well formed\n") %
                                  (context, node))
                     return
                 if bin_n not in self.changelog.nodemap:
                     self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
                                  (context, key))
                     return
                 self.tagscache[key] = bin_n
 
             # read the tags file from each head, ending with the tip,
             # and add each tag found to the map, with "newer" ones
             # taking precedence
             f = None
             for rev, node, fnode in self._hgtagsnodes():
                 f = (f and f.filectx(fnode) or
                      self.filectx('.hgtags', fileid=fnode))
                 count = 0
                 for l in f.data().splitlines():
                     count += 1
                     parsetag(l, _("%s, line %d") % (str(f), count))
 
             try:
                 f = self.opener("localtags")
                 count = 0
                 for l in f:
                     # localtags are stored in the local character set
                     # while the internal tag table is stored in UTF-8
                     l = util.fromlocal(l)
                     count += 1
                     parsetag(l, _("localtags, line %d") % count)
             except IOError:
                 pass
 
             self.tagscache['tip'] = self.changelog.tip()
 
         return self.tagscache
 
     def _hgtagsnodes(self):
         heads = self.heads()
         heads.reverse()
         last = {}
         ret = []
         for node in heads:
             c = self.changectx(node)
             rev = c.rev()
             try:
                 fnode = c.filenode('.hgtags')
             except repo.LookupError:
                 continue
             ret.append((rev, node, fnode))
             if fnode in last:
                 ret[last[fnode]] = None
             last[fnode] = len(ret) - 1
         return [item for item in ret if item]
 
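Both .hgtags and localtags hold one '<hex node> <tag name>' pair per line, which is exactly what parsetag() splits; a line might look like (node hypothetical):

    1111111111111111111111111111111111111111 release-1.0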
@@ -292,72 +295,72 @@
     def tagslist(self):
         '''return a list of tags ordered by revision'''
         l = []
         for t, n in self.tags().items():
             try:
                 r = self.changelog.rev(n)
             except:
                 r = -2 # sort to the beginning of the list if unknown
             l.append((r, t, n))
         l.sort()
         return [(t, n) for r, t, n in l]
 
     def nodetags(self, node):
         '''return the tags associated with a node'''
         if not self.nodetagscache:
             self.nodetagscache = {}
             for t, n in self.tags().items():
                 self.nodetagscache.setdefault(n, []).append(t)
         return self.nodetagscache.get(node, [])
 
     def branchtags(self):
         if self.branchcache != None:
             return self.branchcache
 
         self.branchcache = {} # avoid recursion in changectx
 
         partial, last, lrev = self._readbranchcache()
 
         tiprev = self.changelog.count() - 1
         if lrev != tiprev:
             self._updatebranchcache(partial, lrev+1, tiprev+1)
             self._writebranchcache(partial, self.changelog.tip(), tiprev)
 
         # the branch cache is stored on disk as UTF-8, but in the local
         # charset internally
         for k, v in partial.items():
             self.branchcache[util.tolocal(k)] = v
         return self.branchcache
 
     def _readbranchcache(self):
         partial = {}
         try:
             f = self.opener("branches.cache")
             lines = f.read().split('\n')
             f.close()
             last, lrev = lines.pop(0).rstrip().split(" ", 1)
             last, lrev = bin(last), int(lrev)
             if not (lrev < self.changelog.count() and
                     self.changelog.node(lrev) == last): # sanity check
                 # invalidate the cache
                 raise ValueError('Invalid branch cache: unknown tip')
             for l in lines:
                 if not l: continue
                 node, label = l.rstrip().split(" ", 1)
                 partial[label] = bin(node)
         except (KeyboardInterrupt, util.SignalInterrupt):
             raise
         except Exception, inst:
             if self.ui.debugflag:
                 self.ui.warn(str(inst), '\n')
             partial, last, lrev = {}, nullid, nullrev
         return partial, last, lrev
 
     def _writebranchcache(self, branches, tip, tiprev):
         try:
             f = self.opener("branches.cache", "w")
             f.write("%s %s\n" % (hex(tip), tiprev))
             for label, node in branches.iteritems():
                 f.write("%s %s\n" % (hex(node), label))
         except IOError:
             pass
 
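The branches.cache file written above starts with a '<hex tip> <tip rev>' line, used by the sanity check on read, followed by one '<hex node> <branch label>' entry per branch (values hypothetical):

    1111111111111111111111111111111111111111 1260
    2222222222222222222222222222222222222222 stable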
364 def _updatebranchcache(self, partial, start, end):
367 def _updatebranchcache(self, partial, start, end):
365 for r in xrange(start, end):
368 for r in xrange(start, end):
366 c = self.changectx(r)
369 c = self.changectx(r)
367 b = c.branch()
370 b = c.branch()
368 if b:
371 if b:
369 partial[b] = c.node()
372 partial[b] = c.node()
370
373
371 def lookup(self, key):
374 def lookup(self, key):
372 if key == '.':
375 if key == '.':
373 key = self.dirstate.parents()[0]
376 key = self.dirstate.parents()[0]
374 if key == nullid:
377 if key == nullid:
375 raise repo.RepoError(_("no revision checked out"))
378 raise repo.RepoError(_("no revision checked out"))
376 n = self.changelog._match(key)
379 n = self.changelog._match(key)
377 if n:
380 if n:
378 return n
381 return n
379 if key in self.tags():
382 if key in self.tags():
380 return self.tags()[key]
383 return self.tags()[key]
381 if key in self.branchtags():
384 if key in self.branchtags():
382 return self.branchtags()[key]
385 return self.branchtags()[key]
383 n = self.changelog._partialmatch(key)
386 n = self.changelog._partialmatch(key)
384 if n:
387 if n:
385 return n
388 return n
386 raise repo.RepoError(_("unknown revision '%s'") % key)
389 raise repo.RepoError(_("unknown revision '%s'") % key)
387
390
388 def dev(self):
391 def dev(self):
389 return os.lstat(self.path).st_dev
392 return os.lstat(self.path).st_dev
390
393
391 def local(self):
394 def local(self):
392 return True
395 return True
393
396
394 def join(self, f):
397 def join(self, f):
395 return os.path.join(self.path, f)
398 return os.path.join(self.path, f)
396
399
397 def sjoin(self, f):
400 def sjoin(self, f):
398 return os.path.join(self.path, f)
401 return os.path.join(self.spath, f)
399
402
400 def wjoin(self, f):
403 def wjoin(self, f):
401 return os.path.join(self.root, f)
404 return os.path.join(self.root, f)
402
405

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f, self.revlogversion)

    def changectx(self, changeid=None):
        return context.changectx(self, changeid)

    def workingctx(self):
        return context.workingctx(self)

    def parents(self, changeid=None):
        '''
        get list of changectxs for parents of changeid or working directory
        '''
        if changeid is None:
            pl = self.dirstate.parents()
        else:
            n = self.changelog.lookup(changeid)
            pl = self.changelog.parents(n)
        if pl[1] == nullid:
            return [self.changectx(pl[0])]
        return [self.changectx(pl[0]), self.changectx(pl[1])]

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)
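
    # wread()/wwrite() run working-directory file contents through the
    # [encode]/[decode] filters from the configuration: each section maps
    # a file pattern to a shell command applied via util.filter().  The
    # compiled (matcher, command) lists are cached on first use.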
    def wread(self, filename):
        if self.encodepats == None:
            l = []
            for pat, cmd in self.ui.configitems("encode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.encodepats = l

        data = self.wopener(filename, 'r').read()

        for mf, cmd in self.encodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        return data

    def wwrite(self, filename, data, fd=None):
        if self.decodepats == None:
            l = []
            for pat, cmd in self.ui.configitems("decode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.decodepats = l

        for mf, cmd in self.decodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        if fd:
            return fd.write(data)
        return self.wopener(filename, 'w').write(data)
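
    # transaction() nests into an already-running transaction when one
    # exists.  Otherwise it snapshots the dirstate to journal.dirstate and
    # opens a journal in the store; aftertrans() arranges for the journal
    # files to be renamed to undo/undo.dirstate when the transaction
    # completes, which is what rollback() replays later.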
    def transaction(self):
        tr = self.transhandle
        if tr != None and tr.running():
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        self.transhandle = tr
        return tr

    def recover(self):
        l = self.lock()
        if os.path.exists(self.sjoin("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("journal"))
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False

    def rollback(self, wlock=None):
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.sjoin("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no rollback information available\n"))

    def wreload(self):
        self.dirstate.read()

    def reload(self):
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None
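
    # Lock discipline: callers that need both locks take the working
    # directory lock (wlock) before the store lock (lock); see commit()
    # and rollback() above/below.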
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=1):
        return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)

    def wlock(self, wait=1):
        return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
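
    # filecommit() returns the filenode to record in the manifest: a newly
    # added revision, or the unchanged parent filenode when the working
    # copy is identical to it (in which case fn is not appended to
    # changelist).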
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = nullid
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)

    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, wlock=wlock)
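
    # commit() proceeds in stages, all under one transaction: pick the
    # files to commit (from the dirstate or an explicit list), write file
    # revisions via filecommit(), write the new manifest, then the
    # changelog entry, firing the precommit/pretxncommit/commit hooks
    # along the way.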
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):

        commit = []
        remove = []
        changed = []
        use_dirstate = (p1 is None) # not rawcommit
        extra = extra.copy()

        if use_dirstate:
            if files:
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = util.fromlocal(self.workingctx().branch())
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch", "") # stored in UTF-8
            if not commit and not remove and not force and p2 == nullid and \
               branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
            except IOError:
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()

        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
            if use_dirstate:
                self.dirstate.update(new, "n")
                self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n

    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn

    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """
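
        # fcmp() compares the working copy of fn against the filenode
        # recorded in manifest mf; it returns a true value when the
        # contents differ.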
        def fcmp(fn, mf):
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        def mfmatches(node):
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
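
    # The dirstate state letters used below are, as far as this file shows:
    # 'n' normal (tracked), 'a' added, 'r' removed, '?' untracked; 'm' and
    # 'i' also appear in the 'nmai' check in commit() (merged and ignored
    # entries, by Mercurial convention elsewhere -- an assumption here).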
    def add(self, list, wlock=None):
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if not os.path.exists(p):
                self.ui.warn(_("%s does not exist!\n") % f)
            elif not os.path.isfile(p):
                self.ui.warn(_("%s not added: only files supported currently\n")
                             % f)
            elif self.dirstate.state(f) in 'an':
                self.ui.warn(_("%s already tracked!\n") % f)
            else:
                self.dirstate.update([f], "a")

    def forget(self, list, wlock=None):
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in 'ai':
                self.ui.warn(_("%s not added!\n") % f)
            else:
                self.dirstate.forget([f])

    def remove(self, list, unlink=False, wlock=None):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")

    def undelete(self, list, wlock=None):
        p = self.dirstate.parents()[0]
        mn = self.changelog.read(p)[0]
        m = self.manifest.read(mn)
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in "r":
                self.ui.warn("%s not removed!\n" % f)
            else:
                t = self.file(f).read(m[f])
                self.wwrite(f, t)
                util.set_exec(self.wjoin(f), m.execf(f))
                self.dirstate.update([f], "n")

    def copy(self, source, dest, wlock=None):
        p = self.wjoin(dest)
        if not os.path.exists(p):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not os.path.isfile(p):
            self.ui.warn(_("copy failed: %s is not a file\n") % dest)
        else:
            if not wlock:
                wlock = self.wlock()
            if self.dirstate.state(dest) == '?':
                self.dirstate.update([dest], "a")
            self.dirstate.copy(source, dest)
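
    # heads(start) asks the changelog for head nodes; when start is given,
    # only heads that are descendants of start are returned (this is how
    # prepush() below checks which remote heads gain children).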
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]

    # branchlookup returns a dict giving a list of branches for
    # each head.  A branch is defined as the tag of a node or
    # the branch of the node's parents.  If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph:  a->b->c->d->e
    #                       \         /
    #                        aa -----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13.  Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # Callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (i.e. checkout of a
    # specific branch).
    #
    # Passing in a specific branch will limit the depth of the search
    # through the parents.  It won't limit the branches returned in the
    # result though.
    def branchlookup(self, heads=None, branch=None):
        if not heads:
            heads = self.heads()
        headt = [h for h in heads]
        chlog = self.changelog
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head.  The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited.  This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
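
    # between() walks first parents from top towards bottom and samples
    # nodes at exponentially growing distances (1, 2, 4, 8, ...).  The
    # remote side returns these samples so findincoming() below can
    # binary-search for the boundary of the common history.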
    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but have no child that exists in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            for p in n[2:4]:
                                if p in m:
                                    base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

    def pull(self, remote, heads=None, force=False, lock=None):
        mylock = False
        if not lock:
            lock = self.lock()
            mylock = True

        try:
            fetch = self.findincoming(remote, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            if mylock:
                lock.release()

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)
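
    # prepush() returns (None, 1) when there is nothing to push or the
    # push would create new remote heads without force; otherwise it
    # returns (changegroup, remote_heads) for the caller to send.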
    def prepush(self, remote, force, revs):
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            return remote.addchangegroup(cg, 'push', self.url())
        return ret[1]

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push.  once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force: remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes):
        self.ui.note(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev
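
        # Illustrative sketch (not in the original file): the comparator is
        # used to put a list of revlog nodes into topological order;
        # 'somelog' and 'somenodes' are hypothetical stand-ins.
        #
        #   somenodes.sort(cmp_by_rev_func(somelog))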

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and the total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]
1530 # A function generating function that sets up the initial environment
1535 # A function generating function that sets up the initial environment
1531 # the inner function.
1536 # the inner function.
1532 def filenode_collector(changedfiles):
1537 def filenode_collector(changedfiles):
1533 next_rev = [0]
1538 next_rev = [0]
1534 # This gathers information from each manifestnode included in the
1539 # This gathers information from each manifestnode included in the
1535 # changegroup about which filenodes the manifest node references
1540 # changegroup about which filenodes the manifest node references
1536 # so we can include those in the changegroup too.
1541 # so we can include those in the changegroup too.
1537 #
1542 #
1538 # It also remembers which changenode each filenode belongs to. It
1543 # It also remembers which changenode each filenode belongs to. It
1539 # does this by assuming the a filenode belongs to the changenode
1544 # does this by assuming the a filenode belongs to the changenode
1540 # the first manifest that references it belongs to.
1545 # the first manifest that references it belongs to.
1541 def collect_msng_filenodes(mnfstnode):
1546 def collect_msng_filenodes(mnfstnode):
1542 r = mnfst.rev(mnfstnode)
1547 r = mnfst.rev(mnfstnode)
1543 if r == next_rev[0]:
1548 if r == next_rev[0]:
1544 # If the last rev we looked at was the one just previous,
1549 # If the last rev we looked at was the one just previous,
1545 # we only need to see a diff.
1550 # we only need to see a diff.
1546 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1551 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1547 # For each line in the delta
1552 # For each line in the delta
1548 for dline in delta.splitlines():
1553 for dline in delta.splitlines():
1549 # get the filename and filenode for that line
1554 # get the filename and filenode for that line
1550 f, fnode = dline.split('\0')
1555 f, fnode = dline.split('\0')
1551 fnode = bin(fnode[:40])
1556 fnode = bin(fnode[:40])
1552 f = changedfiles.get(f, None)
1557 f = changedfiles.get(f, None)
1553 # And if the file is in the list of files we care
1558 # And if the file is in the list of files we care
1554 # about.
1559 # about.
1555 if f is not None:
1560 if f is not None:
1556 # Get the changenode this manifest belongs to
1561 # Get the changenode this manifest belongs to
1557 clnode = msng_mnfst_set[mnfstnode]
1562 clnode = msng_mnfst_set[mnfstnode]
1558 # Create the set of filenodes for the file if
1563 # Create the set of filenodes for the file if
1559 # there isn't one already.
1564 # there isn't one already.
1560 ndset = msng_filenode_set.setdefault(f, {})
1565 ndset = msng_filenode_set.setdefault(f, {})
1561 # And set the filenode's changelog node to the
1566 # And set the filenode's changelog node to the
1562 # manifest's if it hasn't been set already.
1567 # manifest's if it hasn't been set already.
1563 ndset.setdefault(fnode, clnode)
1568 ndset.setdefault(fnode, clnode)
1564 else:
1569 else:
1565 # Otherwise we need a full manifest.
1570 # Otherwise we need a full manifest.
1566 m = mnfst.read(mnfstnode)
1571 m = mnfst.read(mnfstnode)
1567 # For every file in we care about.
1572 # For every file in we care about.
1568 for f in changedfiles:
1573 for f in changedfiles:
1569 fnode = m.get(f, None)
1574 fnode = m.get(f, None)
1570 # If it's in the manifest
1575 # If it's in the manifest
1571 if fnode is not None:
1576 if fnode is not None:
1572 # See comments above.
1577 # See comments above.
1573 clnode = msng_mnfst_set[mnfstnode]
1578 clnode = msng_mnfst_set[mnfstnode]
1574 ndset = msng_filenode_set.setdefault(f, {})
1579 ndset = msng_filenode_set.setdefault(f, {})
1575 ndset.setdefault(fnode, clnode)
1580 ndset.setdefault(fnode, clnode)
1576 # Remember the revision we hope to see next.
1581 # Remember the revision we hope to see next.
1577 next_rev[0] = r + 1
1582 next_rev[0] = r + 1
1578 return collect_msng_filenodes
1583 return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
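
    # Illustrative sketch (not in the original file): a caller that wants
    # the changegroup as a raw byte stream can read the returned
    # chunkbuffer like an ordinary file; 'basenodes', 'headnodes' and
    # 'outfile' are hypothetical.
    #
    #   f = repo.changegroupsubset(basenodes, headnodes, 'bundle')
    #   while 1:
    #       chunk = f.read(4096)
    #       if not chunk:
    #           break
    #       outfile.write(chunk)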

    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        def identity(x):
            return x

        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
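
    # Illustrative sketch (not in the original file): this pairing is
    # essentially what a pull does - ask the source repository for
    # everything past the common bases, then apply the stream locally;
    # 'other' and 'commonbases' are hypothetical.
    #
    #   cg = other.changegroup(commonbases, 'pull')
    #   repo.addchangegroup(cg, 'pull', other.url())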

    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.
        returns number of heads modified or added + 1."""

        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.sopener,
                                            self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            cl.writedata()
        finally:
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.sopener,
                                             self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        return newheads - oldheads + 1
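
    # Note on the return convention above (clarification, not original
    # text): the "+ 1" shifts the result so a successful call never
    # returns 0 - adding no new heads returns 1, adding two new heads
    # returns 3, while the empty-source path above can still return 0.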

    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise util.UnexpectedOutput(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.reload()
        return len(self.heads()) + 1

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)
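
    # Illustrative sketch (not in the original file): a clone preferring
    # the streaming protocol, falling back to pull when the server does
    # not advertise the "stream" capability; 'dest' and 'remote' are
    # hypothetical.
    #
    #   dest.clone(remote, stream=True)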

# used to avoid circular references so destructors work
-def aftertrans(base):
-    p = base
-    def a():
-        util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
-        util.rename(os.path.join(p, "journal.dirstate"),
-                    os.path.join(p, "undo.dirstate"))
+def aftertrans(files):
+    renamefiles = [tuple(t) for t in files]
+    def a():
+        for src, dest in renamefiles:
+            util.rename(src, dest)
     return a
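
# Illustrative sketch (not in the original file; paths hypothetical):
# the transaction code hands aftertrans() a list of (src, dest) pairs
# and calls the returned closure once the transaction is done, so the
# journal files become the "undo" files:
#
#   renames = [(os.path.join(repopath, "journal"),
#               os.path.join(repopath, "undo")),
#              (os.path.join(repopath, "journal.dirstate"),
#               os.path.join(repopath, "undo.dirstate"))]
#   onclose = aftertrans(renames)
#   ...commit the transaction...
#   onclose()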

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True
@@ -1,65 +1,66 @@
# statichttprepo.py - simple http repository class for mercurial
#
# This provides read-only repo access to repositories exported via static http
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from demandload import *
from i18n import gettext as _
demandload(globals(), "changelog filelog httprangereader")
demandload(globals(), "localrepo manifest os urllib urllib2 util")

class rangereader(httprangereader.httprangereader):
    def read(self, size=None):
        try:
            return httprangereader.httprangereader.read(self, size)
        except urllib2.HTTPError, inst:
            raise IOError(None, inst)
        except urllib2.URLError, inst:
            raise IOError(None, inst.reason[1])

def opener(base):
    """return a function that opens files over http"""
    p = base
    def o(path, mode="r"):
        f = os.path.join(p, urllib.quote(path))
        return rangereader(f)
    return o
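
# Illustrative sketch (not in the original file): the returned opener is
# used like a local file opener, except reads are served by HTTP range
# requests; the URL is hypothetical.
#
#   o = opener('http://example.com/repo/.hg')
#   data = o('00changelog.i').read()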

class statichttprepository(localrepo.localrepository):
    def __init__(self, ui, path):
        self._url = path
        self.path = (path + "/.hg")
+        self.spath = self.path
        self.ui = ui
        self.revlogversion = 0
        self.opener = opener(self.path)
-        self.sopener = opener(self.path)
-        self.manifest = manifest.manifest(self.opener)
-        self.changelog = changelog.changelog(self.opener)
+        self.sopener = opener(self.spath)
+        self.manifest = manifest.manifest(self.sopener)
+        self.changelog = changelog.changelog(self.sopener)
        self.tagscache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None

    def url(self):
        return 'static-' + self._url

    def dev(self):
        return -1

    def local(self):
        return False

def instance(ui, path, create):
    if create:
        raise util.Abort(_('cannot create new static-http repository'))
    if path.startswith('old-http:'):
        ui.warn(_("old-http:// syntax is deprecated, "
                  "please use static-http:// instead\n"))
        path = path[4:]
    else:
        path = path[7:]
    return statichttprepository(ui, path)
@@ -1,95 +1,95 @@
# streamclone.py - streaming clone server support for mercurial
#
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from demandload import demandload
from i18n import gettext as _
demandload(globals(), "os stat util lock")

# if server supports streaming clone, it advertises "stream"
# capability with value that is version+flags of repo it is serving.
# client only streams if it can read that repo format.

def walkrepo(root):
    '''iterate over metadata files in repository.
    walk in natural (sorted) order.
    yields 2-tuples: name of .d or .i file, size of file.'''

    strip_count = len(root) + len(os.sep)
    def walk(path, recurse):
        ents = os.listdir(path)
        ents.sort()
        for e in ents:
            pe = os.path.join(path, e)
            st = os.lstat(pe)
            if stat.S_ISDIR(st.st_mode):
                if recurse:
                    for x in walk(pe, True):
                        yield x
            else:
                if not stat.S_ISREG(st.st_mode) or len(e) < 2:
                    continue
                sfx = e[-2:]
                if sfx in ('.d', '.i'):
                    yield pe[strip_count:], st.st_size
    # write file data first
    for x in walk(os.path.join(root, 'data'), True):
        yield x
    # write manifest before changelog
    meta = list(walk(root, False))
    meta.sort()
    meta.reverse()
    for x in meta:
        yield x

# stream file format is simple.
#
# server writes out line that says how many files, how many total
# bytes.  separator is ascii space, byte counts are strings.
#
# then for each file:
#
# server writes out line that says file name, how many bytes in
# file.  separator is ascii nul, byte count is string.
#
# server writes out raw file data.
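
# Illustrative example of the framing described above (byte counts are
# hypothetical): a stream carrying two files totalling 153 bytes would
# look like
#
#   "2 153\n"
#   "data/foo.i\x00120\n"  + 120 bytes of raw data
#   "00manifest.i\x0033\n" + 33 bytes of raw data
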
def stream_out(repo, fileobj):
    '''stream out all metadata files in repository.
    writes to file-like object, must support write() and optional flush().'''

    if not repo.ui.configbool('server', 'uncompressed'):
        fileobj.write('1\n')
        return

    # get consistent snapshot of repo: lock only during the scan, so the
    # lock is not held while we stream and commits can happen meanwhile.
    try:
        repolock = repo.lock()
    except (lock.LockHeld, lock.LockUnavailable), inst:
        repo.ui.warn('locking the repository failed: %s\n' % (inst,))
        fileobj.write('2\n')
        return

    fileobj.write('0\n')
    repo.ui.debug('scanning\n')
    entries = []
    total_bytes = 0
-    for name, size in walkrepo(repo.path):
+    for name, size in walkrepo(repo.spath):
        entries.append((name, size))
        total_bytes += size
    repolock.release()

    repo.ui.debug('%d files, %d bytes to transfer\n' %
                  (len(entries), total_bytes))
    fileobj.write('%d %d\n' % (len(entries), total_bytes))
    for name, size in entries:
        repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
        fileobj.write('%s\0%d\n' % (name, size))
-        for chunk in util.filechunkiter(repo.opener(name), limit=size):
+        for chunk in util.filechunkiter(repo.sopener(name), limit=size):
            fileobj.write(chunk)
    flush = getattr(fileobj, 'flush', None)
    if flush:
        flush()