introduce localrepo.spath for the store path, sopener fixes
Benoit Boissinot
r3791:8643b9f9 default
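This changeset adds a spath attribute to localrepository for the store path (still equal to .hg here) and routes store access through sjoin/sopener instead of join/opener, so a later change can move the revlog store out of .hg without touching callers. A minimal sketch of the resulting path split (illustrative only, not the patched code itself):

    import os

    class layout(object):
        """illustrative split between working metadata and store paths"""
        def __init__(self, root):
            self.path = os.path.join(root, ".hg")  # dirstate, hgrc, wlock, ...
            self.spath = self.path                 # revlog store; identical for now
        def join(self, f):
            return os.path.join(self.path, f)      # e.g. join("hgrc")
        def sjoin(self, f):
            return os.path.join(self.spath, f)     # e.g. sjoin("00changelog.i")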
bundlerepo.py
@@ -1,256 +1,256 @@
1 1 """
2 2 bundlerepo.py - repository class for viewing uncompressed bundles
3 3
4 4 This provides a read-only repository interface to bundles as if
5 5 they were part of the actual repository.
6 6
7 7 Copyright 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org>
8 8
9 9 This software may be used and distributed according to the terms
10 10 of the GNU General Public License, incorporated herein by reference.
11 11 """
12 12
13 13 from node import *
14 14 from i18n import gettext as _
15 15 from demandload import demandload
16 16 demandload(globals(), "changegroup util os struct bz2 tempfile")
17 17
18 18 import localrepo, changelog, manifest, filelog, revlog
19 19
20 20 class bundlerevlog(revlog.revlog):
21 21 def __init__(self, opener, indexfile, datafile, bundlefile,
22 22 linkmapper=None):
23 23 # How it works:
24 24 # to retrieve a revision, we need to know the offset of
25 25 # the revision in the bundlefile (an opened file).
26 26 #
27 27 # We store this offset in the index (start). To differentiate a
28 28 # rev in the bundle from a rev in the revlog, we check
29 29 # len(index[r]). If the tuple is bigger than 7, it is a bundle
30 30 # (it is bigger since we also store the node the delta is against)
31 31 #
32 32 revlog.revlog.__init__(self, opener, indexfile, datafile)
33 33 self.bundlefile = bundlefile
34 34 self.basemap = {}
35 35 def chunkpositer():
36 36 for chunk in changegroup.chunkiter(bundlefile):
37 37 pos = bundlefile.tell()
38 38 yield chunk, pos - len(chunk)
39 39 n = self.count()
40 40 prev = None
41 41 for chunk, start in chunkpositer():
42 42 size = len(chunk)
43 43 if size < 80:
44 44 raise util.Abort("invalid changegroup")
45 45 start += 80
46 46 size -= 80
47 47 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
48 48 if node in self.nodemap:
49 49 prev = node
50 50 continue
51 51 for p in (p1, p2):
52 52 if not p in self.nodemap:
53 53 raise revlog.RevlogError(_("unknown parent %s") % short(p))
54 54 if linkmapper is None:
55 55 link = n
56 56 else:
57 57 link = linkmapper(cs)
58 58
59 59 if not prev:
60 60 prev = p1
61 61 # index entry: start, size, base (unused), link, p1, p2, delta ref
62 62 if self.version == revlog.REVLOGV0:
63 63 e = (start, size, None, link, p1, p2, node)
64 64 else:
65 65 e = (self.offset_type(start, 0), size, -1, None, link,
66 66 self.rev(p1), self.rev(p2), node)
67 67 self.basemap[n] = prev
68 68 self.index.append(e)
69 69 self.nodemap[node] = n
70 70 prev = node
71 71 n += 1
72 72
73 73 def bundle(self, rev):
74 74 """is rev from the bundle"""
75 75 if rev < 0:
76 76 return False
77 77 return rev in self.basemap
78 78 def bundlebase(self, rev): return self.basemap[rev]
79 79 def chunk(self, rev, df=None, cachelen=4096):
80 80 # Warning: in case of bundle, the diff is against bundlebase,
81 81 # not against rev - 1
82 82 # XXX: could use some caching
83 83 if not self.bundle(rev):
84 84 return revlog.revlog.chunk(self, rev, df, cachelen)
85 85 self.bundlefile.seek(self.start(rev))
86 86 return self.bundlefile.read(self.length(rev))
87 87
88 88 def revdiff(self, rev1, rev2):
89 89 """return or calculate a delta between two revisions"""
90 90 if self.bundle(rev1) and self.bundle(rev2):
91 91 # hot path for bundle
92 92 revb = self.rev(self.bundlebase(rev2))
93 93 if revb == rev1:
94 94 return self.chunk(rev2)
95 95 elif not self.bundle(rev1) and not self.bundle(rev2):
96 96 return revlog.revlog.revdiff(self, rev1, rev2)
97 97
98 98 return self.diff(self.revision(self.node(rev1)),
99 99 self.revision(self.node(rev2)))
100 100
101 101 def revision(self, node):
102 102 """return an uncompressed revision of a given node"""
103 103 if node == nullid: return ""
104 104
105 105 text = None
106 106 chain = []
107 107 iter_node = node
108 108 rev = self.rev(iter_node)
109 109 # reconstruct the revision if it is from a changegroup
110 110 while self.bundle(rev):
111 111 if self.cache and self.cache[0] == iter_node:
112 112 text = self.cache[2]
113 113 break
114 114 chain.append(rev)
115 115 iter_node = self.bundlebase(rev)
116 116 rev = self.rev(iter_node)
117 117 if text is None:
118 118 text = revlog.revlog.revision(self, iter_node)
119 119
120 120 while chain:
121 121 delta = self.chunk(chain.pop())
122 122 text = self.patches(text, [delta])
123 123
124 124 p1, p2 = self.parents(node)
125 125 if node != revlog.hash(text, p1, p2):
126 126 raise revlog.RevlogError(_("integrity check failed on %s:%d")
127 127 % (self.datafile, self.rev(node)))
128 128
129 129 self.cache = (node, self.rev(node), text)
130 130 return text
131 131
132 132 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
133 133 raise NotImplementedError
134 134 def addgroup(self, revs, linkmapper, transaction, unique=0):
135 135 raise NotImplementedError
136 136 def strip(self, rev, minlink):
137 137 raise NotImplementedError
138 138 def checksize(self):
139 139 raise NotImplementedError
140 140
141 141 class bundlechangelog(bundlerevlog, changelog.changelog):
142 142 def __init__(self, opener, bundlefile):
143 143 changelog.changelog.__init__(self, opener)
144 144 bundlerevlog.__init__(self, opener, self.indexfile, self.datafile,
145 145 bundlefile)
146 146
147 147 class bundlemanifest(bundlerevlog, manifest.manifest):
148 148 def __init__(self, opener, bundlefile, linkmapper):
149 149 manifest.manifest.__init__(self, opener)
150 150 bundlerevlog.__init__(self, opener, self.indexfile, self.datafile,
151 151 bundlefile, linkmapper)
152 152
153 153 class bundlefilelog(bundlerevlog, filelog.filelog):
154 154 def __init__(self, opener, path, bundlefile, linkmapper):
155 155 filelog.filelog.__init__(self, opener, path)
156 156 bundlerevlog.__init__(self, opener, self.indexfile, self.datafile,
157 157 bundlefile, linkmapper)
158 158
159 159 class bundlerepository(localrepo.localrepository):
160 160 def __init__(self, ui, path, bundlename):
161 161 localrepo.localrepository.__init__(self, ui, path)
162 162
163 163 self._url = 'bundle:' + bundlename
164 164 if path: self._url += '+' + path
165 165
166 166 self.tempfile = None
167 167 self.bundlefile = open(bundlename, "rb")
168 168 header = self.bundlefile.read(6)
169 169 if not header.startswith("HG"):
170 170 raise util.Abort(_("%s: not a Mercurial bundle file") % bundlename)
171 171 elif not header.startswith("HG10"):
172 172 raise util.Abort(_("%s: unknown bundle version") % bundlename)
173 173 elif header == "HG10BZ":
174 174 fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-",
175 175 suffix=".hg10un", dir=self.path)
176 176 self.tempfile = temp
177 177 fptemp = os.fdopen(fdtemp, 'wb')
178 178 def generator(f):
179 179 zd = bz2.BZ2Decompressor()
180 180 zd.decompress("BZ")
181 181 for chunk in f:
182 182 yield zd.decompress(chunk)
183 183 gen = generator(util.filechunkiter(self.bundlefile, 4096))
184 184
185 185 try:
186 186 fptemp.write("HG10UN")
187 187 for chunk in gen:
188 188 fptemp.write(chunk)
189 189 finally:
190 190 fptemp.close()
191 191 self.bundlefile.close()
192 192
193 193 self.bundlefile = open(self.tempfile, "rb")
194 194 # seek right after the header
195 195 self.bundlefile.seek(6)
196 196 elif header == "HG10UN":
197 197 # nothing to do
198 198 pass
199 199 else:
200 200 raise util.Abort(_("%s: unknown bundle compression type")
201 201 % bundlename)
202 self.changelog = bundlechangelog(self.opener, self.bundlefile)
203 self.manifest = bundlemanifest(self.opener, self.bundlefile,
202 self.changelog = bundlechangelog(self.sopener, self.bundlefile)
203 self.manifest = bundlemanifest(self.sopener, self.bundlefile,
204 204 self.changelog.rev)
205 205 # dict with the mapping 'filename' -> position in the bundle
206 206 self.bundlefilespos = {}
207 207 while 1:
208 208 f = changegroup.getchunk(self.bundlefile)
209 209 if not f:
210 210 break
211 211 self.bundlefilespos[f] = self.bundlefile.tell()
212 212 for c in changegroup.chunkiter(self.bundlefile):
213 213 pass
214 214
215 215 def url(self):
216 216 return self._url
217 217
218 218 def dev(self):
219 219 return -1
220 220
221 221 def file(self, f):
222 222 if f[0] == '/':
223 223 f = f[1:]
224 224 if f in self.bundlefilespos:
225 225 self.bundlefile.seek(self.bundlefilespos[f])
226 return bundlefilelog(self.opener, f, self.bundlefile,
226 return bundlefilelog(self.sopener, f, self.bundlefile,
227 227 self.changelog.rev)
228 228 else:
229 return filelog.filelog(self.opener, f)
229 return filelog.filelog(self.sopener, f)
230 230
231 231 def close(self):
232 232 """Close assigned bundle file immediately."""
233 233 self.bundlefile.close()
234 234
235 235 def __del__(self):
236 236 bundlefile = getattr(self, 'bundlefile', None)
237 237 if bundlefile and not bundlefile.closed:
238 238 bundlefile.close()
239 239 tempfile = getattr(self, 'tempfile', None)
240 240 if tempfile is not None:
241 241 os.unlink(tempfile)
242 242
243 243 def instance(ui, path, create):
244 244 if create:
245 245 raise util.Abort(_('cannot create new bundle repository'))
246 246 path = util.drop_scheme('file', path)
247 247 if path.startswith('bundle:'):
248 248 path = util.drop_scheme('bundle', path)
249 249 s = path.split("+", 1)
250 250 if len(s) == 1:
251 251 repopath, bundlename = "", s[0]
252 252 else:
253 253 repopath, bundlename = s
254 254 else:
255 255 repopath, bundlename = '', path
256 256 return bundlerepository(ui, repopath, bundlename)
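A note on the HG10BZ branch above: a compressed bundle is a bz2 stream whose two-byte "BZ" magic has been overwritten by the six-byte "HG10BZ" header, so the decompressor must be primed with "BZ" before the payload is fed in. A standalone sketch of the same trick (hypothetical helper name, mirroring the generator in __init__):

    import bz2

    def iter_hg10bz(f, chunksize=4096):
        # f must be positioned just past the 6-byte "HG10BZ" header
        zd = bz2.BZ2Decompressor()
        zd.decompress("BZ")          # re-supply the stripped bz2 magic
        while True:
            data = f.read(chunksize)
            if not data:
                break
            yield zd.decompress(data)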
hg.py
@@ -1,256 +1,256 @@
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms
7 7 # of the GNU General Public License, incorporated herein by reference.
8 8
9 9 from node import *
10 10 from repo import *
11 11 from demandload import *
12 12 from i18n import gettext as _
13 13 demandload(globals(), "localrepo bundlerepo httprepo sshrepo statichttprepo")
14 14 demandload(globals(), "errno lock os shutil util merge@_merge verify@_verify")
15 15
16 16 def _local(path):
17 17 return (os.path.isfile(util.drop_scheme('file', path)) and
18 18 bundlerepo or localrepo)
19 19
20 20 schemes = {
21 21 'bundle': bundlerepo,
22 22 'file': _local,
23 23 'hg': httprepo,
24 24 'http': httprepo,
25 25 'https': httprepo,
26 26 'old-http': statichttprepo,
27 27 'ssh': sshrepo,
28 28 'static-http': statichttprepo,
29 29 }
30 30
31 31 def _lookup(path):
32 32 scheme = 'file'
33 33 if path:
34 34 c = path.find(':')
35 35 if c > 0:
36 36 scheme = path[:c]
37 37 thing = schemes.get(scheme) or schemes['file']
38 38 try:
39 39 return thing(path)
40 40 except TypeError:
41 41 return thing
42 42
43 43 def islocal(repo):
44 44 '''return true if repo or path is local'''
45 45 if isinstance(repo, str):
46 46 try:
47 47 return _lookup(repo).islocal(repo)
48 48 except AttributeError:
49 49 return False
50 50 return repo.local()
51 51
52 52 repo_setup_hooks = []
53 53
54 54 def repository(ui, path='', create=False):
55 55 """return a repository object for the specified path"""
56 56 repo = _lookup(path).instance(ui, path, create)
57 57 for hook in repo_setup_hooks:
58 58 hook(ui, repo)
59 59 return repo
60 60
61 61 def defaultdest(source):
62 62 '''return default destination of clone if none is given'''
63 63 return os.path.basename(os.path.normpath(source))
64 64
65 65 def clone(ui, source, dest=None, pull=False, rev=None, update=True,
66 66 stream=False):
67 67 """Make a copy of an existing repository.
68 68
69 69 Create a copy of an existing repository in a new directory. The
70 70 source and destination are URLs, as passed to the repository
71 71 function. Returns a pair of repository objects, the source and
72 72 newly created destination.
73 73
74 74 The location of the source is added to the new repository's
75 75 .hg/hgrc file, as the default to be used for future pulls and
76 76 pushes.
77 77
78 78 If an exception is raised, the partly cloned/updated destination
79 79 repository will be deleted.
80 80
81 81 Arguments:
82 82
83 83 source: repository object or URL
84 84
85 85 dest: URL of destination repository to create (defaults to base
86 86 name of source repository)
87 87
88 88 pull: always pull from source repository, even in local case
89 89
90 90 stream: stream raw data uncompressed from repository (fast over
91 91 LAN, slow over WAN)
92 92
93 93 rev: revision to clone up to (implies pull=True)
94 94
95 95 update: update working directory after clone completes, if
96 96 destination is local repository
97 97 """
98 98 if isinstance(source, str):
99 99 src_repo = repository(ui, source)
100 100 else:
101 101 src_repo = source
102 102 source = src_repo.url()
103 103
104 104 if dest is None:
105 105 dest = defaultdest(source)
106 106
107 107 def localpath(path):
108 108 if path.startswith('file://'):
109 109 return path[7:]
110 110 if path.startswith('file:'):
111 111 return path[5:]
112 112 return path
113 113
114 114 dest = localpath(dest)
115 115 source = localpath(source)
116 116
117 117 if os.path.exists(dest):
118 118 raise util.Abort(_("destination '%s' already exists") % dest)
119 119
120 120 class DirCleanup(object):
121 121 def __init__(self, dir_):
122 122 self.rmtree = shutil.rmtree
123 123 self.dir_ = dir_
124 124 def close(self):
125 125 self.dir_ = None
126 126 def __del__(self):
127 127 if self.dir_:
128 128 self.rmtree(self.dir_, True)
129 129
130 130 dest_repo = repository(ui, dest, create=True)
131 131
132 dest_path = None
133 132 dir_cleanup = None
134 133 if dest_repo.local():
135 dest_path = os.path.realpath(dest_repo.root)
136 dir_cleanup = DirCleanup(dest_path)
134 dir_cleanup = DirCleanup(os.path.realpath(dest_repo.root))
137 135
138 136 abspath = source
139 137 copy = False
140 138 if src_repo.local() and dest_repo.local():
141 139 abspath = os.path.abspath(source)
142 140 copy = not pull and not rev
143 141
144 142 src_lock, dest_lock = None, None
145 143 if copy:
146 144 try:
147 145 # we use a lock here because if we race with commit, we
148 146 # can end up with extra data in the cloned revlogs that's
149 147 # not pointed to by changesets, thus causing verify to
150 148 # fail
151 149 src_lock = src_repo.lock()
152 150 except lock.LockException:
153 151 copy = False
154 152
155 153 if copy:
156 154 # we lock here to avoid premature writing to the target
157 dest_lock = lock.lock(os.path.join(dest_path, ".hg", "lock"))
155 src_store = os.path.realpath(src_repo.spath)
156 dest_store = os.path.realpath(dest_repo.spath)
157 dest_lock = lock.lock(os.path.join(dest_store, "lock"))
158 158
159 159 files = ("data",
160 160 "00manifest.d", "00manifest.i",
161 161 "00changelog.d", "00changelog.i")
162 162 for f in files:
163 src = os.path.join(source, ".hg", f)
164 dst = os.path.join(dest_path, ".hg", f)
163 src = os.path.join(src_store, f)
164 dst = os.path.join(dest_store, f)
165 165 try:
166 166 util.copyfiles(src, dst)
167 167 except OSError, inst:
168 168 if inst.errno != errno.ENOENT:
169 169 raise
170 170
171 171 # we need to re-init the repo after manually copying the data
172 172 # into it
173 173 dest_repo = repository(ui, dest)
174 174
175 175 else:
176 176 revs = None
177 177 if rev:
178 178 if 'lookup' not in src_repo.capabilities:
179 179 raise util.Abort(_("src repository does not support revision "
180 180 "lookup and so doesn't support clone by "
181 181 "revision"))
182 182 revs = [src_repo.lookup(r) for r in rev]
183 183
184 184 if dest_repo.local():
185 185 dest_repo.clone(src_repo, heads=revs, stream=stream)
186 186 elif src_repo.local():
187 187 src_repo.push(dest_repo, revs=revs)
188 188 else:
189 189 raise util.Abort(_("clone from remote to remote not supported"))
190 190
191 191 if src_lock:
192 192 src_lock.release()
193 193
194 194 if dest_repo.local():
195 195 fp = dest_repo.opener("hgrc", "w", text=True)
196 196 fp.write("[paths]\n")
197 197 fp.write("default = %s\n" % abspath)
198 198 fp.close()
199 199
200 200 if dest_lock:
201 201 dest_lock.release()
202 202
203 203 if update:
204 204 _update(dest_repo, dest_repo.changelog.tip())
205 205 if dir_cleanup:
206 206 dir_cleanup.close()
207 207
208 208 return src_repo, dest_repo
209 209
210 210 def _showstats(repo, stats):
211 211 stats = ((stats[0], _("updated")),
212 212 (stats[1], _("merged")),
213 213 (stats[2], _("removed")),
214 214 (stats[3], _("unresolved")))
215 215 note = ", ".join([_("%d files %s") % s for s in stats])
216 216 repo.ui.status("%s\n" % note)
217 217
218 218 def _update(repo, node): return update(repo, node)
219 219
220 220 def update(repo, node):
221 221 """update the working directory to node, merging linear changes"""
222 222 stats = _merge.update(repo, node, False, False, None, None)
223 223 _showstats(repo, stats)
224 224 if stats[3]:
225 225 repo.ui.status(_("There are unresolved merges with"
226 226 " locally modified files.\n"))
227 227 return stats[3]
228 228
229 229 def clean(repo, node, wlock=None, show_stats=True):
230 230 """forcibly switch the working directory to node, clobbering changes"""
231 231 stats = _merge.update(repo, node, False, True, None, wlock)
232 232 if show_stats: _showstats(repo, stats)
233 233 return stats[3]
234 234
235 235 def merge(repo, node, force=None, remind=True, wlock=None):
236 236 """branch merge with node, resolving changes"""
237 237 stats = _merge.update(repo, node, True, force, False, wlock)
238 238 _showstats(repo, stats)
239 239 if stats[3]:
240 240 pl = repo.parents()
241 241 repo.ui.status(_("There are unresolved merges,"
242 242 " you can redo the full merge using:\n"
243 243 " hg update -C %s\n"
244 244 " hg merge %s\n")
245 245 % (pl[0].rev(), pl[1].rev()))
246 246 elif remind:
247 247 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
248 248 return stats[3]
249 249
250 250 def revert(repo, node, choose, wlock):
251 251 """revert changes to revision in node without updating dirstate"""
252 252 return _merge.update(repo, node, False, True, choose, wlock)[3]
253 253
254 254 def verify(repo):
255 255 """verify the consistency of a repository"""
256 256 return _verify.verify(repo)
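For reference, the entry points patched above are the public ones: repository() picks a repo class from the schemes table by the path's scheme prefix, and clone() returns the (source, destination) pair. A hypothetical driver script (paths purely illustrative) could use them like this:

    from mercurial import ui as uimod, hg

    u = uimod.ui()
    # 'bundle:' dispatches to bundlerepo, a read-only view of a bundle file
    repo = hg.repository(u, 'bundle:/tmp/incoming.hg')
    # local-to-local clone; exercises the store-copy fast path changed above
    src_repo, dest_repo = hg.clone(u, '/tmp/orig', dest='/tmp/copy')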
localrepo.py
@@ -1,1913 +1,1916 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 import repo
12 12 demandload(globals(), "appendfile changegroup")
13 13 demandload(globals(), "changelog dirstate filelog manifest context")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "os revlog time util")
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = ('lookup', 'changegroupsubset')
19 19
20 20 def __del__(self):
21 21 self.transhandle = None
22 22 def __init__(self, parentui, path=None, create=0):
23 23 repo.repository.__init__(self)
24 24 if not path:
25 25 p = os.getcwd()
26 26 while not os.path.isdir(os.path.join(p, ".hg")):
27 27 oldp = p
28 28 p = os.path.dirname(p)
29 29 if p == oldp:
30 30 raise repo.RepoError(_("There is no Mercurial repository"
31 31 " here (.hg not found)"))
32 32 path = p
33 33 self.path = os.path.join(path, ".hg")
34 self.spath = self.path
34 35
35 36 if not os.path.isdir(self.path):
36 37 if create:
37 38 if not os.path.exists(path):
38 39 os.mkdir(path)
39 40 os.mkdir(self.path)
41 if self.spath != self.path:
42 os.mkdir(self.spath)
40 43 else:
41 44 raise repo.RepoError(_("repository %s not found") % path)
42 45 elif create:
43 46 raise repo.RepoError(_("repository %s already exists") % path)
44 47
45 48 self.root = os.path.realpath(path)
46 49 self.origroot = path
47 50 self.ui = ui.ui(parentui=parentui)
48 51 self.opener = util.opener(self.path)
49 self.sopener = util.opener(self.path)
52 self.sopener = util.opener(self.spath)
50 53 self.wopener = util.opener(self.root)
51 54
52 55 try:
53 56 self.ui.readconfig(self.join("hgrc"), self.root)
54 57 except IOError:
55 58 pass
56 59
57 60 v = self.ui.configrevlog()
58 61 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
59 62 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
60 63 fl = v.get('flags', None)
61 64 flags = 0
62 65 if fl != None:
63 66 for x in fl.split():
64 67 flags |= revlog.flagstr(x)
65 68 elif self.revlogv1:
66 69 flags = revlog.REVLOG_DEFAULT_FLAGS
67 70
68 71 v = self.revlogversion | flags
69 72 self.manifest = manifest.manifest(self.sopener, v)
70 73 self.changelog = changelog.changelog(self.sopener, v)
71 74
72 75 # the changelog might not have the inline index flag
73 76 # on. If the format of the changelog is the same as found in
74 77 # .hgrc, apply any flags found in the .hgrc as well.
75 78 # Otherwise, just version from the changelog
76 79 v = self.changelog.version
77 80 if v == self.revlogversion:
78 81 v |= flags
79 82 self.revlogversion = v
80 83
81 84 self.tagscache = None
82 85 self.branchcache = None
83 86 self.nodetagscache = None
84 87 self.encodepats = None
85 88 self.decodepats = None
86 89 self.transhandle = None
87 90
88 91 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
89 92
90 93 def url(self):
91 94 return 'file:' + self.root
92 95
93 96 def hook(self, name, throw=False, **args):
94 97 def callhook(hname, funcname):
95 98 '''call python hook. hook is callable object, looked up as
96 99 name in python module. if callable returns "true", hook
97 100 fails, else passes. if hook raises exception, treated as
98 101 hook failure. exception propagates if throw is "true".
99 102
100 103 reason for "true" meaning "hook failed" is so that
101 104 unmodified commands (e.g. mercurial.commands.update) can
102 105 be run as hooks without wrappers to convert return values.'''
103 106
104 107 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
105 108 d = funcname.rfind('.')
106 109 if d == -1:
107 110 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
108 111 % (hname, funcname))
109 112 modname = funcname[:d]
110 113 try:
111 114 obj = __import__(modname)
112 115 except ImportError:
113 116 try:
114 117 # extensions are loaded with hgext_ prefix
115 118 obj = __import__("hgext_%s" % modname)
116 119 except ImportError:
117 120 raise util.Abort(_('%s hook is invalid '
118 121 '(import of "%s" failed)') %
119 122 (hname, modname))
120 123 try:
121 124 for p in funcname.split('.')[1:]:
122 125 obj = getattr(obj, p)
123 126 except AttributeError, err:
124 127 raise util.Abort(_('%s hook is invalid '
125 128 '("%s" is not defined)') %
126 129 (hname, funcname))
127 130 if not callable(obj):
128 131 raise util.Abort(_('%s hook is invalid '
129 132 '("%s" is not callable)') %
130 133 (hname, funcname))
131 134 try:
132 135 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
133 136 except (KeyboardInterrupt, util.SignalInterrupt):
134 137 raise
135 138 except Exception, exc:
136 139 if isinstance(exc, util.Abort):
137 140 self.ui.warn(_('error: %s hook failed: %s\n') %
138 141 (hname, exc.args[0]))
139 142 else:
140 143 self.ui.warn(_('error: %s hook raised an exception: '
141 144 '%s\n') % (hname, exc))
142 145 if throw:
143 146 raise
144 147 self.ui.print_exc()
145 148 return True
146 149 if r:
147 150 if throw:
148 151 raise util.Abort(_('%s hook failed') % hname)
149 152 self.ui.warn(_('warning: %s hook failed\n') % hname)
150 153 return r
151 154
152 155 def runhook(name, cmd):
153 156 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
154 157 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
155 158 r = util.system(cmd, environ=env, cwd=self.root)
156 159 if r:
157 160 desc, r = util.explain_exit(r)
158 161 if throw:
159 162 raise util.Abort(_('%s hook %s') % (name, desc))
160 163 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
161 164 return r
162 165
163 166 r = False
164 167 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
165 168 if hname.split(".", 1)[0] == name and cmd]
166 169 hooks.sort()
167 170 for hname, cmd in hooks:
168 171 if cmd.startswith('python:'):
169 172 r = callhook(hname, cmd[7:].strip()) or r
170 173 else:
171 174 r = runhook(hname, cmd) or r
172 175 return r
173 176
174 177 tag_disallowed = ':\r\n'
175 178
176 179 def tag(self, name, node, message, local, user, date):
177 180 '''tag a revision with a symbolic name.
178 181
179 182 if local is True, the tag is stored in a per-repository file.
180 183 otherwise, it is stored in the .hgtags file, and a new
181 184 changeset is committed with the change.
182 185
183 186 keyword arguments:
184 187
185 188 local: whether to store tag in non-version-controlled file
186 189 (default False)
187 190
188 191 message: commit message to use if committing
189 192
190 193 user: name of user to use if committing
191 194
192 195 date: date tuple to use if committing'''
193 196
194 197 for c in self.tag_disallowed:
195 198 if c in name:
196 199 raise util.Abort(_('%r cannot be used in a tag name') % c)
197 200
198 201 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
199 202
200 203 if local:
201 204 # local tags are stored in the current charset
202 205 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
203 206 self.hook('tag', node=hex(node), tag=name, local=local)
204 207 return
205 208
206 209 for x in self.status()[:5]:
207 210 if '.hgtags' in x:
208 211 raise util.Abort(_('working copy of .hgtags is changed '
209 212 '(please commit .hgtags manually)'))
210 213
211 214 # committed tags are stored in UTF-8
212 215 line = '%s %s\n' % (hex(node), util.fromlocal(name))
213 216 self.wfile('.hgtags', 'ab').write(line)
214 217 if self.dirstate.state('.hgtags') == '?':
215 218 self.add(['.hgtags'])
216 219
217 220 self.commit(['.hgtags'], message, user, date)
218 221 self.hook('tag', node=hex(node), tag=name, local=local)
219 222
220 223 def tags(self):
221 224 '''return a mapping of tag to node'''
222 225 if not self.tagscache:
223 226 self.tagscache = {}
224 227
225 228 def parsetag(line, context):
226 229 if not line:
227 230 return
228 231 s = line.split(" ", 1)
229 232 if len(s) != 2:
230 233 self.ui.warn(_("%s: cannot parse entry\n") % context)
231 234 return
232 235 node, key = s
233 236 key = util.tolocal(key.strip()) # stored in UTF-8
234 237 try:
235 238 bin_n = bin(node)
236 239 except TypeError:
237 240 self.ui.warn(_("%s: node '%s' is not well formed\n") %
238 241 (context, node))
239 242 return
240 243 if bin_n not in self.changelog.nodemap:
241 244 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
242 245 (context, key))
243 246 return
244 247 self.tagscache[key] = bin_n
245 248
246 249 # read the tags file from each head, ending with the tip,
247 250 # and add each tag found to the map, with "newer" ones
248 251 # taking precedence
249 252 f = None
250 253 for rev, node, fnode in self._hgtagsnodes():
251 254 f = (f and f.filectx(fnode) or
252 255 self.filectx('.hgtags', fileid=fnode))
253 256 count = 0
254 257 for l in f.data().splitlines():
255 258 count += 1
256 259 parsetag(l, _("%s, line %d") % (str(f), count))
257 260
258 261 try:
259 262 f = self.opener("localtags")
260 263 count = 0
261 264 for l in f:
262 265 # localtags are stored in the local character set
263 266 # while the internal tag table is stored in UTF-8
264 267 l = util.fromlocal(l)
265 268 count += 1
266 269 parsetag(l, _("localtags, line %d") % count)
267 270 except IOError:
268 271 pass
269 272
270 273 self.tagscache['tip'] = self.changelog.tip()
271 274
272 275 return self.tagscache
273 276
274 277 def _hgtagsnodes(self):
275 278 heads = self.heads()
276 279 heads.reverse()
277 280 last = {}
278 281 ret = []
279 282 for node in heads:
280 283 c = self.changectx(node)
281 284 rev = c.rev()
282 285 try:
283 286 fnode = c.filenode('.hgtags')
284 287 except repo.LookupError:
285 288 continue
286 289 ret.append((rev, node, fnode))
287 290 if fnode in last:
288 291 ret[last[fnode]] = None
289 292 last[fnode] = len(ret) - 1
290 293 return [item for item in ret if item]
291 294
292 295 def tagslist(self):
293 296 '''return a list of tags ordered by revision'''
294 297 l = []
295 298 for t, n in self.tags().items():
296 299 try:
297 300 r = self.changelog.rev(n)
298 301 except:
299 302 r = -2 # sort to the beginning of the list if unknown
300 303 l.append((r, t, n))
301 304 l.sort()
302 305 return [(t, n) for r, t, n in l]
303 306
304 307 def nodetags(self, node):
305 308 '''return the tags associated with a node'''
306 309 if not self.nodetagscache:
307 310 self.nodetagscache = {}
308 311 for t, n in self.tags().items():
309 312 self.nodetagscache.setdefault(n, []).append(t)
310 313 return self.nodetagscache.get(node, [])
311 314
312 315 def branchtags(self):
313 316 if self.branchcache != None:
314 317 return self.branchcache
315 318
316 319 self.branchcache = {} # avoid recursion in changectx
317 320
318 321 partial, last, lrev = self._readbranchcache()
319 322
320 323 tiprev = self.changelog.count() - 1
321 324 if lrev != tiprev:
322 325 self._updatebranchcache(partial, lrev+1, tiprev+1)
323 326 self._writebranchcache(partial, self.changelog.tip(), tiprev)
324 327
325 328 # the branch cache is stored on disk as UTF-8, but in the local
326 329 # charset internally
327 330 for k, v in partial.items():
328 331 self.branchcache[util.tolocal(k)] = v
329 332 return self.branchcache
330 333
331 334 def _readbranchcache(self):
332 335 partial = {}
333 336 try:
334 337 f = self.opener("branches.cache")
335 338 lines = f.read().split('\n')
336 339 f.close()
337 340 last, lrev = lines.pop(0).rstrip().split(" ", 1)
338 341 last, lrev = bin(last), int(lrev)
339 342 if not (lrev < self.changelog.count() and
340 343 self.changelog.node(lrev) == last): # sanity check
341 344 # invalidate the cache
342 345 raise ValueError('Invalid branch cache: unknown tip')
343 346 for l in lines:
344 347 if not l: continue
345 348 node, label = l.rstrip().split(" ", 1)
346 349 partial[label] = bin(node)
347 350 except (KeyboardInterrupt, util.SignalInterrupt):
348 351 raise
349 352 except Exception, inst:
350 353 if self.ui.debugflag:
351 354 self.ui.warn(str(inst), '\n')
352 355 partial, last, lrev = {}, nullid, nullrev
353 356 return partial, last, lrev
354 357
355 358 def _writebranchcache(self, branches, tip, tiprev):
356 359 try:
357 360 f = self.opener("branches.cache", "w")
358 361 f.write("%s %s\n" % (hex(tip), tiprev))
359 362 for label, node in branches.iteritems():
360 363 f.write("%s %s\n" % (hex(node), label))
361 364 except IOError:
362 365 pass
363 366
364 367 def _updatebranchcache(self, partial, start, end):
365 368 for r in xrange(start, end):
366 369 c = self.changectx(r)
367 370 b = c.branch()
368 371 if b:
369 372 partial[b] = c.node()
370 373
371 374 def lookup(self, key):
372 375 if key == '.':
373 376 key = self.dirstate.parents()[0]
374 377 if key == nullid:
375 378 raise repo.RepoError(_("no revision checked out"))
376 379 n = self.changelog._match(key)
377 380 if n:
378 381 return n
379 382 if key in self.tags():
380 383 return self.tags()[key]
381 384 if key in self.branchtags():
382 385 return self.branchtags()[key]
383 386 n = self.changelog._partialmatch(key)
384 387 if n:
385 388 return n
386 389 raise repo.RepoError(_("unknown revision '%s'") % key)
387 390
388 391 def dev(self):
389 392 return os.lstat(self.path).st_dev
390 393
391 394 def local(self):
392 395 return True
393 396
394 397 def join(self, f):
395 398 return os.path.join(self.path, f)
396 399
397 400 def sjoin(self, f):
398 return os.path.join(self.path, f)
401 return os.path.join(self.spath, f)
399 402
400 403 def wjoin(self, f):
401 404 return os.path.join(self.root, f)
402 405
403 406 def file(self, f):
404 407 if f[0] == '/':
405 408 f = f[1:]
406 409 return filelog.filelog(self.sopener, f, self.revlogversion)
407 410
408 411 def changectx(self, changeid=None):
409 412 return context.changectx(self, changeid)
410 413
411 414 def workingctx(self):
412 415 return context.workingctx(self)
413 416
414 417 def parents(self, changeid=None):
415 418 '''
416 419 get list of changectxs for parents of changeid or working directory
417 420 '''
418 421 if changeid is None:
419 422 pl = self.dirstate.parents()
420 423 else:
421 424 n = self.changelog.lookup(changeid)
422 425 pl = self.changelog.parents(n)
423 426 if pl[1] == nullid:
424 427 return [self.changectx(pl[0])]
425 428 return [self.changectx(pl[0]), self.changectx(pl[1])]
426 429
427 430 def filectx(self, path, changeid=None, fileid=None):
428 431 """changeid can be a changeset revision, node, or tag.
429 432 fileid can be a file revision or node."""
430 433 return context.filectx(self, path, changeid, fileid)
431 434
432 435 def getcwd(self):
433 436 return self.dirstate.getcwd()
434 437
435 438 def wfile(self, f, mode='r'):
436 439 return self.wopener(f, mode)
437 440
438 441 def wread(self, filename):
439 442 if self.encodepats == None:
440 443 l = []
441 444 for pat, cmd in self.ui.configitems("encode"):
442 445 mf = util.matcher(self.root, "", [pat], [], [])[1]
443 446 l.append((mf, cmd))
444 447 self.encodepats = l
445 448
446 449 data = self.wopener(filename, 'r').read()
447 450
448 451 for mf, cmd in self.encodepats:
449 452 if mf(filename):
450 453 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
451 454 data = util.filter(data, cmd)
452 455 break
453 456
454 457 return data
455 458
456 459 def wwrite(self, filename, data, fd=None):
457 460 if self.decodepats == None:
458 461 l = []
459 462 for pat, cmd in self.ui.configitems("decode"):
460 463 mf = util.matcher(self.root, "", [pat], [], [])[1]
461 464 l.append((mf, cmd))
462 465 self.decodepats = l
463 466
464 467 for mf, cmd in self.decodepats:
465 468 if mf(filename):
466 469 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
467 470 data = util.filter(data, cmd)
468 471 break
469 472
470 473 if fd:
471 474 return fd.write(data)
472 475 return self.wopener(filename, 'w').write(data)
473 476
474 477 def transaction(self):
475 478 tr = self.transhandle
476 479 if tr != None and tr.running():
477 480 return tr.nest()
478 481
479 482 # save dirstate for rollback
480 483 try:
481 484 ds = self.opener("dirstate").read()
482 485 except IOError:
483 486 ds = ""
484 487 self.opener("journal.dirstate", "w").write(ds)
485 488
486 489 renames = [(self.sjoin("journal"), self.sjoin("undo")),
487 490 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
488 491 tr = transaction.transaction(self.ui.warn, self.sopener,
489 492 self.sjoin("journal"),
490 493 aftertrans(renames))
491 494 self.transhandle = tr
492 495 return tr
493 496
494 497 def recover(self):
495 498 l = self.lock()
496 499 if os.path.exists(self.sjoin("journal")):
497 500 self.ui.status(_("rolling back interrupted transaction\n"))
498 501 transaction.rollback(self.sopener, self.sjoin("journal"))
499 502 self.reload()
500 503 return True
501 504 else:
502 505 self.ui.warn(_("no interrupted transaction available\n"))
503 506 return False
504 507
505 508 def rollback(self, wlock=None):
506 509 if not wlock:
507 510 wlock = self.wlock()
508 511 l = self.lock()
509 512 if os.path.exists(self.sjoin("undo")):
510 513 self.ui.status(_("rolling back last transaction\n"))
511 514 transaction.rollback(self.sopener, self.sjoin("undo"))
512 515 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
513 516 self.reload()
514 517 self.wreload()
515 518 else:
516 519 self.ui.warn(_("no rollback information available\n"))
517 520
518 521 def wreload(self):
519 522 self.dirstate.read()
520 523
521 524 def reload(self):
522 525 self.changelog.load()
523 526 self.manifest.load()
524 527 self.tagscache = None
525 528 self.nodetagscache = None
526 529
527 530 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
528 531 desc=None):
529 532 try:
530 533 l = lock.lock(lockname, 0, releasefn, desc=desc)
531 534 except lock.LockHeld, inst:
532 535 if not wait:
533 536 raise
534 537 self.ui.warn(_("waiting for lock on %s held by %r\n") %
535 538 (desc, inst.locker))
536 539 # default to 600 seconds timeout
537 540 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
538 541 releasefn, desc=desc)
539 542 if acquirefn:
540 543 acquirefn()
541 544 return l
542 545
543 546 def lock(self, wait=1):
544 547 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
545 548 desc=_('repository %s') % self.origroot)
546 549
547 550 def wlock(self, wait=1):
548 551 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
549 552 self.wreload,
550 553 desc=_('working directory of %s') % self.origroot)
551 554
552 555 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
553 556 """
554 557 commit an individual file as part of a larger transaction
555 558 """
556 559
557 560 t = self.wread(fn)
558 561 fl = self.file(fn)
559 562 fp1 = manifest1.get(fn, nullid)
560 563 fp2 = manifest2.get(fn, nullid)
561 564
562 565 meta = {}
563 566 cp = self.dirstate.copied(fn)
564 567 if cp:
565 568 meta["copy"] = cp
566 569 if not manifest2: # not a branch merge
567 570 meta["copyrev"] = hex(manifest1.get(cp, nullid))
568 571 fp2 = nullid
569 572 elif fp2 != nullid: # copied on remote side
570 573 meta["copyrev"] = hex(manifest1.get(cp, nullid))
571 574 elif fp1 != nullid: # copied on local side, reversed
572 575 meta["copyrev"] = hex(manifest2.get(cp))
573 576 fp2 = nullid
574 577 else: # directory rename
575 578 meta["copyrev"] = hex(manifest1.get(cp, nullid))
576 579 self.ui.debug(_(" %s: copy %s:%s\n") %
577 580 (fn, cp, meta["copyrev"]))
578 581 fp1 = nullid
579 582 elif fp2 != nullid:
580 583 # is one parent an ancestor of the other?
581 584 fpa = fl.ancestor(fp1, fp2)
582 585 if fpa == fp1:
583 586 fp1, fp2 = fp2, nullid
584 587 elif fpa == fp2:
585 588 fp2 = nullid
586 589
587 590 # is the file unmodified from the parent? report existing entry
588 591 if fp2 == nullid and not fl.cmp(fp1, t):
589 592 return fp1
590 593
591 594 changelist.append(fn)
592 595 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
593 596
594 597 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
595 598 if p1 is None:
596 599 p1, p2 = self.dirstate.parents()
597 600 return self.commit(files=files, text=text, user=user, date=date,
598 601 p1=p1, p2=p2, wlock=wlock)
599 602
600 603 def commit(self, files=None, text="", user=None, date=None,
601 604 match=util.always, force=False, lock=None, wlock=None,
602 605 force_editor=False, p1=None, p2=None, extra={}):
603 606
604 607 commit = []
605 608 remove = []
606 609 changed = []
607 610 use_dirstate = (p1 is None) # not rawcommit
608 611 extra = extra.copy()
609 612
610 613 if use_dirstate:
611 614 if files:
612 615 for f in files:
613 616 s = self.dirstate.state(f)
614 617 if s in 'nmai':
615 618 commit.append(f)
616 619 elif s == 'r':
617 620 remove.append(f)
618 621 else:
619 622 self.ui.warn(_("%s not tracked!\n") % f)
620 623 else:
621 624 changes = self.status(match=match)[:5]
622 625 modified, added, removed, deleted, unknown = changes
623 626 commit = modified + added
624 627 remove = removed
625 628 else:
626 629 commit = files
627 630
628 631 if use_dirstate:
629 632 p1, p2 = self.dirstate.parents()
630 633 update_dirstate = True
631 634 else:
632 635 p1, p2 = p1, p2 or nullid
633 636 update_dirstate = (self.dirstate.parents()[0] == p1)
634 637
635 638 c1 = self.changelog.read(p1)
636 639 c2 = self.changelog.read(p2)
637 640 m1 = self.manifest.read(c1[0]).copy()
638 641 m2 = self.manifest.read(c2[0])
639 642
640 643 if use_dirstate:
641 644 branchname = util.fromlocal(self.workingctx().branch())
642 645 else:
643 646 branchname = ""
644 647
645 648 if use_dirstate:
646 649 oldname = c1[5].get("branch", "") # stored in UTF-8
647 650 if not commit and not remove and not force and p2 == nullid and \
648 651 branchname == oldname:
649 652 self.ui.status(_("nothing changed\n"))
650 653 return None
651 654
652 655 xp1 = hex(p1)
653 656 if p2 == nullid: xp2 = ''
654 657 else: xp2 = hex(p2)
655 658
656 659 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
657 660
658 661 if not wlock:
659 662 wlock = self.wlock()
660 663 if not lock:
661 664 lock = self.lock()
662 665 tr = self.transaction()
663 666
664 667 # check in files
665 668 new = {}
666 669 linkrev = self.changelog.count()
667 670 commit.sort()
668 671 for f in commit:
669 672 self.ui.note(f + "\n")
670 673 try:
671 674 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
672 675 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
673 676 except IOError:
674 677 if use_dirstate:
675 678 self.ui.warn(_("trouble committing %s!\n") % f)
676 679 raise
677 680 else:
678 681 remove.append(f)
679 682
680 683 # update manifest
681 684 m1.update(new)
682 685 remove.sort()
683 686
684 687 for f in remove:
685 688 if f in m1:
686 689 del m1[f]
687 690 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
688 691
689 692 # add changeset
690 693 new = new.keys()
691 694 new.sort()
692 695
693 696 user = user or self.ui.username()
694 697 if not text or force_editor:
695 698 edittext = []
696 699 if text:
697 700 edittext.append(text)
698 701 edittext.append("")
699 702 edittext.append("HG: user: %s" % user)
700 703 if p2 != nullid:
701 704 edittext.append("HG: branch merge")
702 705 edittext.extend(["HG: changed %s" % f for f in changed])
703 706 edittext.extend(["HG: removed %s" % f for f in remove])
704 707 if not changed and not remove:
705 708 edittext.append("HG: no files changed")
706 709 edittext.append("")
707 710 # run editor in the repository root
708 711 olddir = os.getcwd()
709 712 os.chdir(self.root)
710 713 text = self.ui.edit("\n".join(edittext), user)
711 714 os.chdir(olddir)
712 715
713 716 lines = [line.rstrip() for line in text.rstrip().splitlines()]
714 717 while lines and not lines[0]:
715 718 del lines[0]
716 719 if not lines:
717 720 return None
718 721 text = '\n'.join(lines)
719 722 if branchname:
720 723 extra["branch"] = branchname
721 724 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
722 725 user, date, extra)
723 726 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
724 727 parent2=xp2)
725 728 tr.close()
726 729
727 730 if use_dirstate or update_dirstate:
728 731 self.dirstate.setparents(n)
729 732 if use_dirstate:
730 733 self.dirstate.update(new, "n")
731 734 self.dirstate.forget(remove)
732 735
733 736 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
734 737 return n
735 738
736 739 def walk(self, node=None, files=[], match=util.always, badmatch=None):
737 740 '''
738 741 walk recursively through the directory tree or a given
739 742 changeset, finding all files matched by the match
740 743 function
741 744
742 745 results are yielded in a tuple (src, filename), where src
743 746 is one of:
744 747 'f' the file was found in the directory tree
745 748 'm' the file was only in the dirstate and not in the tree
746 749 'b' file was not found and matched badmatch
747 750 '''
748 751
749 752 if node:
750 753 fdict = dict.fromkeys(files)
751 754 for fn in self.manifest.read(self.changelog.read(node)[0]):
752 755 for ffn in fdict:
753 756 # match if the file is the exact name or a directory
754 757 if ffn == fn or fn.startswith("%s/" % ffn):
755 758 del fdict[ffn]
756 759 break
757 760 if match(fn):
758 761 yield 'm', fn
759 762 for fn in fdict:
760 763 if badmatch and badmatch(fn):
761 764 if match(fn):
762 765 yield 'b', fn
763 766 else:
764 767 self.ui.warn(_('%s: No such file in rev %s\n') % (
765 768 util.pathto(self.getcwd(), fn), short(node)))
766 769 else:
767 770 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
768 771 yield src, fn
769 772
770 773 def status(self, node1=None, node2=None, files=[], match=util.always,
771 774 wlock=None, list_ignored=False, list_clean=False):
772 775 """return status of files between two nodes or node and working directory
773 776
774 777 If node1 is None, use the first dirstate parent instead.
775 778 If node2 is None, compare node1 with working directory.
776 779 """
777 780
778 781 def fcmp(fn, mf):
779 782 t1 = self.wread(fn)
780 783 return self.file(fn).cmp(mf.get(fn, nullid), t1)
781 784
782 785 def mfmatches(node):
783 786 change = self.changelog.read(node)
784 787 mf = self.manifest.read(change[0]).copy()
785 788 for fn in mf.keys():
786 789 if not match(fn):
787 790 del mf[fn]
788 791 return mf
789 792
790 793 modified, added, removed, deleted, unknown = [], [], [], [], []
791 794 ignored, clean = [], []
792 795
793 796 compareworking = False
794 797 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
795 798 compareworking = True
796 799
797 800 if not compareworking:
798 801 # read the manifest from node1 before the manifest from node2,
799 802 # so that we'll hit the manifest cache if we're going through
800 803 # all the revisions in parent->child order.
801 804 mf1 = mfmatches(node1)
802 805
803 806 # are we comparing the working directory?
804 807 if not node2:
805 808 if not wlock:
806 809 try:
807 810 wlock = self.wlock(wait=0)
808 811 except lock.LockException:
809 812 wlock = None
810 813 (lookup, modified, added, removed, deleted, unknown,
811 814 ignored, clean) = self.dirstate.status(files, match,
812 815 list_ignored, list_clean)
813 816
814 817 # are we comparing working dir against its parent?
815 818 if compareworking:
816 819 if lookup:
817 820 # do a full compare of any files that might have changed
818 821 mf2 = mfmatches(self.dirstate.parents()[0])
819 822 for f in lookup:
820 823 if fcmp(f, mf2):
821 824 modified.append(f)
822 825 else:
823 826 clean.append(f)
824 827 if wlock is not None:
825 828 self.dirstate.update([f], "n")
826 829 else:
827 830 # we are comparing working dir against non-parent
828 831 # generate a pseudo-manifest for the working dir
829 832 # XXX: create it in dirstate.py ?
830 833 mf2 = mfmatches(self.dirstate.parents()[0])
831 834 for f in lookup + modified + added:
832 835 mf2[f] = ""
833 836 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
834 837 for f in removed:
835 838 if f in mf2:
836 839 del mf2[f]
837 840 else:
838 841 # we are comparing two revisions
839 842 mf2 = mfmatches(node2)
840 843
841 844 if not compareworking:
842 845 # flush lists from dirstate before comparing manifests
843 846 modified, added, clean = [], [], []
844 847
845 848 # make sure to sort the files so we talk to the disk in a
846 849 # reasonable order
847 850 mf2keys = mf2.keys()
848 851 mf2keys.sort()
849 852 for fn in mf2keys:
850 853 if mf1.has_key(fn):
851 854 if mf1.flags(fn) != mf2.flags(fn) or \
852 855 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
853 856 modified.append(fn)
854 857 elif list_clean:
855 858 clean.append(fn)
856 859 del mf1[fn]
857 860 else:
858 861 added.append(fn)
859 862
860 863 removed = mf1.keys()
861 864
862 865 # sort and return results:
863 866 for l in modified, added, removed, deleted, unknown, ignored, clean:
864 867 l.sort()
865 868 return (modified, added, removed, deleted, unknown, ignored, clean)
866 869
867 870 def add(self, list, wlock=None):
868 871 if not wlock:
869 872 wlock = self.wlock()
870 873 for f in list:
871 874 p = self.wjoin(f)
872 875 if not os.path.exists(p):
873 876 self.ui.warn(_("%s does not exist!\n") % f)
874 877 elif not os.path.isfile(p):
875 878 self.ui.warn(_("%s not added: only files supported currently\n")
876 879 % f)
877 880 elif self.dirstate.state(f) in 'an':
878 881 self.ui.warn(_("%s already tracked!\n") % f)
879 882 else:
880 883 self.dirstate.update([f], "a")
881 884
882 885 def forget(self, list, wlock=None):
883 886 if not wlock:
884 887 wlock = self.wlock()
885 888 for f in list:
886 889 if self.dirstate.state(f) not in 'ai':
887 890 self.ui.warn(_("%s not added!\n") % f)
888 891 else:
889 892 self.dirstate.forget([f])
890 893
891 894 def remove(self, list, unlink=False, wlock=None):
892 895 if unlink:
893 896 for f in list:
894 897 try:
895 898 util.unlink(self.wjoin(f))
896 899 except OSError, inst:
897 900 if inst.errno != errno.ENOENT:
898 901 raise
899 902 if not wlock:
900 903 wlock = self.wlock()
901 904 for f in list:
902 905 p = self.wjoin(f)
903 906 if os.path.exists(p):
904 907 self.ui.warn(_("%s still exists!\n") % f)
905 908 elif self.dirstate.state(f) == 'a':
906 909 self.dirstate.forget([f])
907 910 elif f not in self.dirstate:
908 911 self.ui.warn(_("%s not tracked!\n") % f)
909 912 else:
910 913 self.dirstate.update([f], "r")
911 914
912 915 def undelete(self, list, wlock=None):
913 916 p = self.dirstate.parents()[0]
914 917 mn = self.changelog.read(p)[0]
915 918 m = self.manifest.read(mn)
916 919 if not wlock:
917 920 wlock = self.wlock()
918 921 for f in list:
919 922 if self.dirstate.state(f) not in "r":
920 923 self.ui.warn(_("%s not removed!\n") % f)
921 924 else:
922 925 t = self.file(f).read(m[f])
923 926 self.wwrite(f, t)
924 927 util.set_exec(self.wjoin(f), m.execf(f))
925 928 self.dirstate.update([f], "n")
926 929
927 930 def copy(self, source, dest, wlock=None):
928 931 p = self.wjoin(dest)
929 932 if not os.path.exists(p):
930 933 self.ui.warn(_("%s does not exist!\n") % dest)
931 934 elif not os.path.isfile(p):
932 935 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
933 936 else:
934 937 if not wlock:
935 938 wlock = self.wlock()
936 939 if self.dirstate.state(dest) == '?':
937 940 self.dirstate.update([dest], "a")
938 941 self.dirstate.copy(source, dest)
939 942
940 943 def heads(self, start=None):
941 944 heads = self.changelog.heads(start)
942 945 # sort the output in rev descending order
943 946 heads = [(-self.changelog.rev(h), h) for h in heads]
944 947 heads.sort()
945 948 return [n for (r, n) in heads]
946 949
947 950 # branchlookup returns a dict giving a list of branches for
948 951 # each head. A branch is defined as the tag of a node or
949 952 # the branch of the node's parents. If a node has multiple
950 953 # branch tags, tags are eliminated if they are visible from other
951 954 # branch tags.
952 955 #
953 956 # So, for this graph: a->b->c->d->e
954 957 # \ /
955 958 # aa -----/
956 959 # a has tag 2.6.12
957 960 # d has tag 2.6.13
958 961 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
959 962 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
960 963 # from the list.
961 964 #
962 965 # It is possible that more than one head will have the same branch tag.
963 966 # callers need to check the result for multiple heads under the same
964 967 # branch tag if that is a problem for them (ie checkout of a specific
965 968 # branch).
966 969 #
967 970 # passing in a specific branch will limit the depth of the search
968 971 # through the parents. It won't limit the branches returned in the
969 972 # result though.
970 973 def branchlookup(self, heads=None, branch=None):
971 974 if not heads:
972 975 heads = self.heads()
973 976 headt = list(heads)
974 977 chlog = self.changelog
975 978 branches = {}
976 979 merges = []
977 980 seenmerge = {}
978 981
979 982 # traverse the tree once for each head, recording in the branches
980 983 # dict which tags are visible from this head. The branches
981 984 # dict also records which tags are visible from each tag
982 985 # while we traverse.
983 986 while headt or merges:
984 987 if merges:
985 988 n, found = merges.pop()
986 989 visit = [n]
987 990 else:
988 991 h = headt.pop()
989 992 visit = [h]
990 993 found = [h]
991 994 seen = {}
992 995 while visit:
993 996 n = visit.pop()
994 997 if n in seen:
995 998 continue
996 999 pp = chlog.parents(n)
997 1000 tags = self.nodetags(n)
998 1001 if tags:
999 1002 for x in tags:
1000 1003 if x == 'tip':
1001 1004 continue
1002 1005 for f in found:
1003 1006 branches.setdefault(f, {})[n] = 1
1004 1007 branches.setdefault(n, {})[n] = 1
1005 1008 break
1006 1009 if n not in found:
1007 1010 found.append(n)
1008 1011 if branch in tags:
1009 1012 continue
1010 1013 seen[n] = 1
1011 1014 if pp[1] != nullid and n not in seenmerge:
1012 1015 merges.append((pp[1], [x for x in found]))
1013 1016 seenmerge[n] = 1
1014 1017 if pp[0] != nullid:
1015 1018 visit.append(pp[0])
1016 1019 # traverse the branches dict, eliminating branch tags from each
1017 1020 # head that are visible from another branch tag for that head.
1018 1021 out = {}
1019 1022 viscache = {}
1020 1023 for h in heads:
1021 1024 def visible(node):
1022 1025 if node in viscache:
1023 1026 return viscache[node]
1024 1027 ret = {}
1025 1028 visit = [node]
1026 1029 while visit:
1027 1030 x = visit.pop()
1028 1031 if x in viscache:
1029 1032 ret.update(viscache[x])
1030 1033 elif x not in ret:
1031 1034 ret[x] = 1
1032 1035 if x in branches:
1033 1036 visit[len(visit):] = branches[x].keys()
1034 1037 viscache[node] = ret
1035 1038 return ret
1036 1039 if h not in branches:
1037 1040 continue
1038 1041 # O(n^2), but somewhat limited. This only searches the
1039 1042 # tags visible from a specific head, not all the tags in the
1040 1043 # whole repo.
1041 1044 for b in branches[h]:
1042 1045 vis = False
1043 1046 for bb in branches[h].keys():
1044 1047 if b != bb:
1045 1048 if b in visible(bb):
1046 1049 vis = True
1047 1050 break
1048 1051 if not vis:
1049 1052 l = out.setdefault(h, [])
1050 1053 l[len(l):] = self.nodetags(b)
1051 1054 return out
1052 1055
1053 1056 def branches(self, nodes):
1054 1057 if not nodes:
1055 1058 nodes = [self.changelog.tip()]
1056 1059 b = []
1057 1060 for n in nodes:
1058 1061 t = n
1059 1062 while 1:
1060 1063 p = self.changelog.parents(n)
1061 1064 if p[1] != nullid or p[0] == nullid:
1062 1065 b.append((t, n, p[0], p[1]))
1063 1066 break
1064 1067 n = p[0]
1065 1068 return b
1066 1069
1067 1070 def between(self, pairs):
1068 1071 r = []
1069 1072
1070 1073 for top, bottom in pairs:
1071 1074 n, l, i = top, [], 0
1072 1075 f = 1
1073 1076
1074 1077 while n != bottom:
1075 1078 p = self.changelog.parents(n)[0]
1076 1079 if i == f:
1077 1080 l.append(n)
1078 1081 f = f * 2
1079 1082 n = p
1080 1083 i += 1
1081 1084
1082 1085 r.append(l)
1083 1086
1084 1087 return r
1085 1088
1086 1089 def findincoming(self, remote, base=None, heads=None, force=False):
1087 1090 """Return list of roots of the subsets of missing nodes from remote
1088 1091
1089 1092 If base dict is specified, assume that these nodes and their parents
1090 1093 exist on the remote side and that no child of a node of base exists
1091 1094 in both remote and self.
1092 1095 Furthermore base will be updated to include the nodes that exists
1093 1096 in self and remote but no children exists in self and remote.
1094 1097 If a list of heads is specified, return only nodes which are heads
1095 1098 or ancestors of these heads.
1096 1099
1097 1100 All the ancestors of base are in self and in remote.
1098 1101 All the descendants of the list returned are missing in self.
1099 1102 (and so we know that the rest of the nodes are missing in remote, see
1100 1103 outgoing)
1101 1104 """
1102 1105 m = self.changelog.nodemap
1103 1106 search = []
1104 1107 fetch = {}
1105 1108 seen = {}
1106 1109 seenbranch = {}
1107 1110 if base == None:
1108 1111 base = {}
1109 1112
1110 1113 if not heads:
1111 1114 heads = remote.heads()
1112 1115
1113 1116 if self.changelog.tip() == nullid:
1114 1117 base[nullid] = 1
1115 1118 if heads != [nullid]:
1116 1119 return [nullid]
1117 1120 return []
1118 1121
1119 1122 # assume we're closer to the tip than the root
1120 1123 # and start by examining the heads
1121 1124 self.ui.status(_("searching for changes\n"))
1122 1125
1123 1126 unknown = []
1124 1127 for h in heads:
1125 1128 if h not in m:
1126 1129 unknown.append(h)
1127 1130 else:
1128 1131 base[h] = 1
1129 1132
1130 1133 if not unknown:
1131 1134 return []
1132 1135
1133 1136 req = dict.fromkeys(unknown)
1134 1137 reqcnt = 0
1135 1138
1136 1139 # search through remote branches
1137 1140 # a 'branch' here is a linear segment of history, with four parts:
1138 1141 # head, root, first parent, second parent
1139 1142 # (a branch always has two parents (or none) by definition)
1140 1143 unknown = remote.branches(unknown)
1141 1144 while unknown:
1142 1145 r = []
1143 1146 while unknown:
1144 1147 n = unknown.pop(0)
1145 1148 if n[0] in seen:
1146 1149 continue
1147 1150
1148 1151 self.ui.debug(_("examining %s:%s\n")
1149 1152 % (short(n[0]), short(n[1])))
1150 1153 if n[0] == nullid: # found the end of the branch
1151 1154 pass
1152 1155 elif n in seenbranch:
1153 1156 self.ui.debug(_("branch already found\n"))
1154 1157 continue
1155 1158 elif n[1] and n[1] in m: # do we know the base?
1156 1159 self.ui.debug(_("found incomplete branch %s:%s\n")
1157 1160 % (short(n[0]), short(n[1])))
1158 1161 search.append(n) # schedule branch range for scanning
1159 1162 seenbranch[n] = 1
1160 1163 else:
1161 1164 if n[1] not in seen and n[1] not in fetch:
1162 1165 if n[2] in m and n[3] in m:
1163 1166 self.ui.debug(_("found new changeset %s\n") %
1164 1167 short(n[1]))
1165 1168 fetch[n[1]] = 1 # earliest unknown
1166 1169 for p in n[2:4]:
1167 1170 if p in m:
1168 1171 base[p] = 1 # latest known
1169 1172
1170 1173 for p in n[2:4]:
1171 1174 if p not in req and p not in m:
1172 1175 r.append(p)
1173 1176 req[p] = 1
1174 1177 seen[n[0]] = 1
1175 1178
1176 1179 if r:
1177 1180 reqcnt += 1
1178 1181 self.ui.debug(_("request %d: %s\n") %
1179 1182 (reqcnt, " ".join(map(short, r))))
1180 1183 for p in xrange(0, len(r), 10):
1181 1184 for b in remote.branches(r[p:p+10]):
1182 1185 self.ui.debug(_("received %s:%s\n") %
1183 1186 (short(b[0]), short(b[1])))
1184 1187 unknown.append(b)
1185 1188
1186 1189 # do binary search on the branches we found
1187 1190 while search:
1188 1191 n = search.pop(0)
1189 1192 reqcnt += 1
1190 1193 l = remote.between([(n[0], n[1])])[0]
1191 1194 l.append(n[1])
1192 1195 p = n[0]
1193 1196 f = 1
1194 1197 for i in l:
1195 1198 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1196 1199 if i in m:
1197 1200 if f <= 2:
1198 1201 self.ui.debug(_("found new branch changeset %s\n") %
1199 1202 short(p))
1200 1203 fetch[p] = 1
1201 1204 base[i] = 1
1202 1205 else:
1203 1206 self.ui.debug(_("narrowed branch search to %s:%s\n")
1204 1207 % (short(p), short(i)))
1205 1208 search.append((p, i))
1206 1209 break
1207 1210 p, f = i, f * 2
1208 1211
1209 1212 # sanity check our fetch list
1210 1213 for f in fetch.keys():
1211 1214 if f in m:
1212 1215 raise repo.RepoError(_("already have changeset ") + short(f))
1213 1216
1214 1217 if base.keys() == [nullid]:
1215 1218 if force:
1216 1219 self.ui.warn(_("warning: repository is unrelated\n"))
1217 1220 else:
1218 1221 raise util.Abort(_("repository is unrelated"))
1219 1222
1220 1223 self.ui.debug(_("found new changesets starting at ") +
1221 1224 " ".join([short(f) for f in fetch]) + "\n")
1222 1225
1223 1226 self.ui.debug(_("%d total queries\n") % reqcnt)
1224 1227
1225 1228 return fetch.keys()
1226 1229
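To see how the `while search:` loop above consumes those samples, here is a self-contained client-side sketch: the first sampled node already known locally bounds the unknown region, and the `(p, i)` pair is re-queued until the gap shrinks to one or two nodes. This is a simplification (it skips the `base` bookkeeping and request batching), with illustrative names throughout:

```python
# Self-contained sketch of the client-side narrowing; 'known' stands
# in for the local nodemap.
def toy_between(parents, top, bottom):
    n, l, i, f = top, [], 0, 1
    while n != bottom:
        if i == f:
            l.append(n)
            f *= 2
        n, i = parents[n][0], i + 1
    return l

def narrow(parents, known, head, base):
    fetch, search = set(), [(head, base)]
    while search:
        top, bottom = search.pop(0)
        p, f = top, 1
        for i in toy_between(parents, top, bottom) + [bottom]:
            if i in known:
                if f <= 2:                 # gap of 1-2 nodes: found it
                    fetch.add(p)           # earliest node missing locally
                else:
                    search.append((p, i))  # keep bisecting this gap
                break
            p, f = i, f * 2
    return fetch

chain = {chr(c): (chr(c - 1), 'null') for c in range(ord('b'), ord('k'))}
print(narrow(chain, {'a', 'b', 'c', 'd'}, 'j', 'a'))   # {'e'}
```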
1227 1230 def findoutgoing(self, remote, base=None, heads=None, force=False):
1228 1231 """Return list of nodes that are roots of subsets not in remote
1229 1232
1230 1233 If base dict is specified, assume that these nodes and their parents
1231 1234 exist on the remote side.
1232 1235 If a list of heads is specified, return only nodes which are heads
1233 1236 or ancestors of these heads, and return a second element which
1234 1237 contains all remote heads which get new children.
1235 1238 """
1236 1239 if base is None:
1237 1240 base = {}
1238 1241 self.findincoming(remote, base, heads, force=force)
1239 1242
1240 1243 self.ui.debug(_("common changesets up to ")
1241 1244 + " ".join(map(short, base.keys())) + "\n")
1242 1245
1243 1246 remain = dict.fromkeys(self.changelog.nodemap)
1244 1247
1245 1248 # prune everything remote has from the tree
1246 1249 del remain[nullid]
1247 1250 remove = base.keys()
1248 1251 while remove:
1249 1252 n = remove.pop(0)
1250 1253 if n in remain:
1251 1254 del remain[n]
1252 1255 for p in self.changelog.parents(n):
1253 1256 remove.append(p)
1254 1257
1255 1258 # find every node whose parents have been pruned
1256 1259 subset = []
1257 1260 # find every remote head that will get new children
1258 1261 updated_heads = {}
1259 1262 for n in remain:
1260 1263 p1, p2 = self.changelog.parents(n)
1261 1264 if p1 not in remain and p2 not in remain:
1262 1265 subset.append(n)
1263 1266 if heads:
1264 1267 if p1 in heads:
1265 1268 updated_heads[p1] = True
1266 1269 if p2 in heads:
1267 1270 updated_heads[p2] = True
1268 1271
1269 1272 # this is the set of all roots we have to push
1270 1273 if heads:
1271 1274 return subset, updated_heads.keys()
1272 1275 else:
1273 1276 return subset
1274 1277
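`findoutgoing()` works by elimination: take every local node, prune everything reachable from the common `base` by walking parents, and what survives is the outgoing set; its roots are the nodes with no surviving parent. A compact sketch of that pruning on the toy `{node: (p1, p2)}` representation:

```python
# Sketch of the prune-from-base elimination in findoutgoing(), on a
# toy {node: (p1, p2)} history; names are illustrative.
def toy_outgoing(parents, base):
    remain = set(parents)             # every local node
    remove = list(base)
    while remove:                     # prune base plus all its ancestors
        n = remove.pop()
        if n in remain:
            remain.discard(n)
            remove.extend(p for p in parents[n] if p != 'null')
    # roots of what is left: nodes with no surviving parent
    return [n for n in remain
            if all(p not in remain for p in parents[n])]

hist = {'a': ('null', 'null'), 'b': ('a', 'null'),
        'c': ('b', 'null'), 'd': ('c', 'null')}
print(toy_outgoing(hist, ['b']))      # ['c']
```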
1275 1278 def pull(self, remote, heads=None, force=False, lock=None):
1276 1279 mylock = False
1277 1280 if not lock:
1278 1281 lock = self.lock()
1279 1282 mylock = True
1280 1283
1281 1284 try:
1282 1285 fetch = self.findincoming(remote, force=force)
1283 1286 if fetch == [nullid]:
1284 1287 self.ui.status(_("requesting all changes\n"))
1285 1288
1286 1289 if not fetch:
1287 1290 self.ui.status(_("no changes found\n"))
1288 1291 return 0
1289 1292
1290 1293 if heads is None:
1291 1294 cg = remote.changegroup(fetch, 'pull')
1292 1295 else:
1293 1296 if 'changegroupsubset' not in remote.capabilities:
1294 1297 raise util.Abort(_("Partial pull cannot be done because the other repository doesn't support changegroupsubset."))
1295 1298 cg = remote.changegroupsubset(fetch, heads, 'pull')
1296 1299 return self.addchangegroup(cg, 'pull', remote.url())
1297 1300 finally:
1298 1301 if mylock:
1299 1302 lock.release()
1300 1303
1301 1304 def push(self, remote, force=False, revs=None):
1302 1305 # there are two ways to push to remote repo:
1303 1306 #
1304 1307 # addchangegroup assumes local user can lock remote
1305 1308 # repo (local filesystem, old ssh servers).
1306 1309 #
1307 1310 # unbundle assumes local user cannot lock remote repo (new ssh
1308 1311 # servers, http servers).
1309 1312
1310 1313 if remote.capable('unbundle'):
1311 1314 return self.push_unbundle(remote, force, revs)
1312 1315 return self.push_addchangegroup(remote, force, revs)
1313 1316
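The comment above is the whole contract: `unbundle` shifts the locking burden to the server, so it is preferred whenever the peer advertises it. A trivial sketch of the dispatch (`remote` is a hypothetical peer object exposing `capable()`):

```python
# Sketch of push()'s strategy selection; the returned names mirror
# the two methods below. Purely illustrative.
def pick_push(remote):
    if remote.capable('unbundle'):
        # server applies the bundle under its own lock: works for http
        # and for new ssh servers where the client cannot lock remotely
        return 'push_unbundle'
    # fall back: client takes the remote lock and adds the changegroup
    return 'push_addchangegroup'
```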
1314 1317 def prepush(self, remote, force, revs):
1315 1318 base = {}
1316 1319 remote_heads = remote.heads()
1317 1320 inc = self.findincoming(remote, base, remote_heads, force=force)
1318 1321
1319 1322 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1320 1323 if revs is not None:
1321 1324 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1322 1325 else:
1323 1326 bases, heads = update, self.changelog.heads()
1324 1327
1325 1328 if not bases:
1326 1329 self.ui.status(_("no changes found\n"))
1327 1330 return None, 1
1328 1331 elif not force:
1329 1332 # check if we're creating new remote heads
1330 1333 # to be a remote head after push, node must be either
1331 1334 # - unknown locally
1332 1335 # - a local outgoing head descended from update
1333 1336 # - a remote head that's known locally and not
1334 1337 # ancestral to an outgoing head
1335 1338
1336 1339 warn = 0
1337 1340
1338 1341 if remote_heads == [nullid]:
1339 1342 warn = 0
1340 1343 elif not revs and len(heads) > len(remote_heads):
1341 1344 warn = 1
1342 1345 else:
1343 1346 newheads = list(heads)
1344 1347 for r in remote_heads:
1345 1348 if r in self.changelog.nodemap:
1346 1349 desc = self.changelog.heads(r)
1347 1350 l = [h for h in heads if h in desc]
1348 1351 if not l:
1349 1352 newheads.append(r)
1350 1353 else:
1351 1354 newheads.append(r)
1352 1355 if len(newheads) > len(remote_heads):
1353 1356 warn = 1
1354 1357
1355 1358 if warn:
1356 1359 self.ui.warn(_("abort: push creates new remote branches!\n"))
1357 1360 self.ui.status(_("(did you forget to merge?"
1358 1361 " use push -f to force)\n"))
1359 1362 return None, 1
1360 1363 elif inc:
1361 1364 self.ui.warn(_("note: unsynced remote changes!\n"))
1362 1365
1363 1366
1364 1367 if revs is None:
1365 1368 cg = self.changegroup(update, 'push')
1366 1369 else:
1367 1370 cg = self.changegroupsubset(update, revs, 'push')
1368 1371 return cg, remote_heads
1369 1372
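The force check in `prepush()` boils down to one question: would the remote end up with more heads than it started with? A remote head survives only if some outgoing head descends from it. A set-based sketch of that test (simplified: it assumes all remote heads are known locally, and `descendants` is a hypothetical helper):

```python
# Set-based sketch of the new-head test in prepush(); descendants()
# is assumed to return the full descendant set of a node.
def creates_new_heads(local_heads, remote_heads, descendants):
    newheads = list(local_heads)
    for r in remote_heads:
        # r stays a head after the push only if no outgoing head
        # descends from it
        if not any(h in descendants(r) for h in local_heads):
            newheads.append(r)
    return len(newheads) > len(remote_heads)

desc = {'r': {'r', 'x', 'y'}}          # toy descendant table
print(creates_new_heads(['x', 'y'], ['r'], lambda n: desc[n]))  # True
```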
1370 1373 def push_addchangegroup(self, remote, force, revs):
1371 1374 lock = remote.lock()
1372 1375
1373 1376 ret = self.prepush(remote, force, revs)
1374 1377 if ret[0] is not None:
1375 1378 cg, remote_heads = ret
1376 1379 return remote.addchangegroup(cg, 'push', self.url())
1377 1380 return ret[1]
1378 1381
1379 1382 def push_unbundle(self, remote, force, revs):
1380 1383 # local repo finds heads on server, finds out what revs it
1381 1384 # must push. once revs transferred, if server finds it has
1382 1385 # different heads (someone else won commit/push race), server
1383 1386 # aborts.
1384 1387
1385 1388 ret = self.prepush(remote, force, revs)
1386 1389 if ret[0] is not None:
1387 1390 cg, remote_heads = ret
1388 1391 if force: remote_heads = ['force']
1389 1392 return remote.unbundle(cg, remote_heads, 'push')
1390 1393 return ret[1]
1391 1394
1392 1395 def changegroupinfo(self, nodes):
1393 1396 self.ui.note(_("%d changesets found\n") % len(nodes))
1394 1397 if self.ui.debugflag:
1395 1398 self.ui.debug(_("List of changesets:\n"))
1396 1399 for node in nodes:
1397 1400 self.ui.debug("%s\n" % hex(node))
1398 1401
1399 1402 def changegroupsubset(self, bases, heads, source):
1400 1403 """This function generates a changegroup consisting of all the nodes
1401 1404 that are descendants of any of the bases, and ancestors of any of
1402 1405 the heads.
1403 1406
1404 1407 It is fairly complex as determining which filenodes and which
1405 1408 manifest nodes need to be included for the changeset to be complete
1406 1409 is non-trivial.
1407 1410
1408 1411 Another wrinkle is doing the reverse, figuring out which changeset in
1409 1412 the changegroup a particular filenode or manifestnode belongs to."""
1410 1413
1411 1414 self.hook('preoutgoing', throw=True, source=source)
1412 1415
1413 1416 # Set up some initial variables
1414 1417 # Make it easy to refer to self.changelog
1415 1418 cl = self.changelog
1416 1419 # msng is short for missing - compute the list of changesets in this
1417 1420 # changegroup.
1418 1421 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1419 1422 self.changegroupinfo(msng_cl_lst)
1420 1423 # Some bases may turn out to be superfluous, and some heads may be
1421 1424 # too. nodesbetween will return the minimal set of bases and heads
1422 1425 # necessary to re-create the changegroup.
1423 1426
1424 1427 # Known heads are the list of heads that it is assumed the recipient
1425 1428 # of this changegroup will know about.
1426 1429 knownheads = {}
1427 1430 # We assume that all parents of bases are known heads.
1428 1431 for n in bases:
1429 1432 for p in cl.parents(n):
1430 1433 if p != nullid:
1431 1434 knownheads[p] = 1
1432 1435 knownheads = knownheads.keys()
1433 1436 if knownheads:
1434 1437 # Now that we know what heads are known, we can compute which
1435 1438 # changesets are known. The recipient must know about all
1436 1439 # changesets required to reach the known heads from the null
1437 1440 # changeset.
1438 1441 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1439 1442 junk = None
1440 1443 # Transform the list into an ersatz set.
1441 1444 has_cl_set = dict.fromkeys(has_cl_set)
1442 1445 else:
1443 1446 # If there were no known heads, the recipient cannot be assumed to
1444 1447 # know about any changesets.
1445 1448 has_cl_set = {}
1446 1449
1447 1450 # Make it easy to refer to self.manifest
1448 1451 mnfst = self.manifest
1449 1452 # We don't know which manifests are missing yet
1450 1453 msng_mnfst_set = {}
1451 1454 # Nor do we know which filenodes are missing.
1452 1455 msng_filenode_set = {}
1453 1456
1454 1457 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1455 1458 junk = None
1456 1459
1457 1460 # A changeset always belongs to itself, so the changenode lookup
1458 1461 # function for a changenode is identity.
1459 1462 def identity(x):
1460 1463 return x
1461 1464
1462 1465 # A function generating function. Sets up an environment for the
1463 1466 # inner function.
1464 1467 def cmp_by_rev_func(revlog):
1465 1468 # Compare two nodes by their revision number in the environment's
1466 1469 # revision history. Since the revision number both represents the
1467 1470 # most efficient order to read the nodes in, and represents a
1468 1471 # topological sorting of the nodes, this function is often useful.
1469 1472 def cmp_by_rev(a, b):
1470 1473 return cmp(revlog.rev(a), revlog.rev(b))
1471 1474 return cmp_by_rev
1472 1475
1473 1476 # If we determine that a particular file or manifest node must be a
1474 1477 # node that the recipient of the changegroup will already have, we can
1475 1478 # also assume the recipient will have all the parents. This function
1476 1479 # prunes them from the set of missing nodes.
1477 1480 def prune_parents(revlog, hasset, msngset):
1478 1481 haslst = hasset.keys()
1479 1482 haslst.sort(cmp_by_rev_func(revlog))
1480 1483 for node in haslst:
1481 1484 parentlst = [p for p in revlog.parents(node) if p != nullid]
1482 1485 while parentlst:
1483 1486 n = parentlst.pop()
1484 1487 if n not in hasset:
1485 1488 hasset[n] = 1
1486 1489 p = [p for p in revlog.parents(n) if p != nullid]
1487 1490 parentlst.extend(p)
1488 1491 for n in hasset:
1489 1492 msngset.pop(n, None)
1490 1493
1491 1494 # This is a function generating function used to set up an environment
1492 1495 # for the inner function to execute in.
1493 1496 def manifest_and_file_collector(changedfileset):
1494 1497 # This is an information gathering function run on each changeset
1495 1498 # node that goes out as part of the changegroup. The information
1496 1499 # gathered is a list of which manifest nodes are potentially
1497 1500 # required (the recipient may already have them) and the full
1498 1501 # list of all files which were changed in any changeset in the
1499 1502 # changegroup.
1500 1503 #
1501 1504 # We also remember, for each manifest, the first changenode that
1502 1505 # referenced it, so we can later determine which changenode 'owns'
1503 1506 # the manifest.
1504 1507 def collect_manifests_and_files(clnode):
1505 1508 c = cl.read(clnode)
1506 1509 for f in c[3]:
1507 1510 # This is to make sure we only have one instance of each
1508 1511 # filename string for each filename.
1509 1512 changedfileset.setdefault(f, f)
1510 1513 msng_mnfst_set.setdefault(c[0], clnode)
1511 1514 return collect_manifests_and_files
1512 1515
1513 1516 # Figure out which manifest nodes (of the ones we think might be part
1514 1517 # of the changegroup) the recipient must know about and remove them
1515 1518 # from the changegroup.
1516 1519 def prune_manifests():
1517 1520 has_mnfst_set = {}
1518 1521 for n in msng_mnfst_set:
1519 1522 # If a 'missing' manifest thinks it belongs to a changenode
1520 1523 # the recipient is assumed to have, obviously the recipient
1521 1524 # must have that manifest.
1522 1525 linknode = cl.node(mnfst.linkrev(n))
1523 1526 if linknode in has_cl_set:
1524 1527 has_mnfst_set[n] = 1
1525 1528 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1526 1529
1527 1530 # Use the information collected in collect_manifests_and_files to say
1528 1531 # which changenode any manifestnode belongs to.
1529 1532 def lookup_manifest_link(mnfstnode):
1530 1533 return msng_mnfst_set[mnfstnode]
1531 1534
1532 1535 # A function generating function that sets up the initial environment
1533 1536 # for the inner function.
1534 1537 def filenode_collector(changedfiles):
1535 1538 next_rev = [0]
1536 1539 # This gathers information from each manifestnode included in the
1537 1540 # changegroup about which filenodes the manifest node references
1538 1541 # so we can include those in the changegroup too.
1539 1542 #
1540 1543 # It also remembers which changenode each filenode belongs to. It
1541 1544 # does this by assuming that a filenode belongs to the changenode
1542 1545 # that owns the first manifest referencing it.
1543 1546 def collect_msng_filenodes(mnfstnode):
1544 1547 r = mnfst.rev(mnfstnode)
1545 1548 if r == next_rev[0]:
1546 1549 # If the last rev we looked at was the one just previous,
1547 1550 # we only need to see a diff.
1548 1551 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1549 1552 # For each line in the delta
1550 1553 for dline in delta.splitlines():
1551 1554 # get the filename and filenode for that line
1552 1555 f, fnode = dline.split('\0')
1553 1556 fnode = bin(fnode[:40])
1554 1557 f = changedfiles.get(f, None)
1555 1558 # And if the file is in the list of files we care
1556 1559 # about.
1557 1560 if f is not None:
1558 1561 # Get the changenode this manifest belongs to
1559 1562 clnode = msng_mnfst_set[mnfstnode]
1560 1563 # Create the set of filenodes for the file if
1561 1564 # there isn't one already.
1562 1565 ndset = msng_filenode_set.setdefault(f, {})
1563 1566 # And set the filenode's changelog node to the
1564 1567 # manifest's if it hasn't been set already.
1565 1568 ndset.setdefault(fnode, clnode)
1566 1569 else:
1567 1570 # Otherwise we need a full manifest.
1568 1571 m = mnfst.read(mnfstnode)
1569 1572 # For every file we care about.
1570 1573 for f in changedfiles:
1571 1574 fnode = m.get(f, None)
1572 1575 # If it's in the manifest
1573 1576 if fnode is not None:
1574 1577 # See comments above.
1575 1578 clnode = msng_mnfst_set[mnfstnode]
1576 1579 ndset = msng_filenode_set.setdefault(f, {})
1577 1580 ndset.setdefault(fnode, clnode)
1578 1581 # Remember the revision we hope to see next.
1579 1582 next_rev[0] = r + 1
1580 1583 return collect_msng_filenodes
1581 1584
1582 1585 # We have a list of filenodes we think we need for a file, let's remove
1583 1586 # all those we know the recipient must have.
1584 1587 def prune_filenodes(f, filerevlog):
1585 1588 msngset = msng_filenode_set[f]
1586 1589 hasset = {}
1587 1590 # If a 'missing' filenode thinks it belongs to a changenode we
1588 1591 # assume the recipient must have, then the recipient must have
1589 1592 # that filenode.
1590 1593 for n in msngset:
1591 1594 clnode = cl.node(filerevlog.linkrev(n))
1592 1595 if clnode in has_cl_set:
1593 1596 hasset[n] = 1
1594 1597 prune_parents(filerevlog, hasset, msngset)
1595 1598
1596 1599 # A function generating function that sets up a context for the
1597 1600 # inner function.
1598 1601 def lookup_filenode_link_func(fname):
1599 1602 msngset = msng_filenode_set[fname]
1600 1603 # Lookup the changenode the filenode belongs to.
1601 1604 def lookup_filenode_link(fnode):
1602 1605 return msngset[fnode]
1603 1606 return lookup_filenode_link
1604 1607
1605 1608 # Now that we have all these utility functions to help out and
1606 1609 # logically divide up the task, generate the group.
1607 1610 def gengroup():
1608 1611 # The set of changed files starts empty.
1609 1612 changedfiles = {}
1610 1613 # Create a changenode group generator that will call our functions
1611 1614 # back to lookup the owning changenode and collect information.
1612 1615 group = cl.group(msng_cl_lst, identity,
1613 1616 manifest_and_file_collector(changedfiles))
1614 1617 for chnk in group:
1615 1618 yield chnk
1616 1619
1617 1620 # The list of manifests has been collected by the generator
1618 1621 # calling our functions back.
1619 1622 prune_manifests()
1620 1623 msng_mnfst_lst = msng_mnfst_set.keys()
1621 1624 # Sort the manifestnodes by revision number.
1622 1625 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1623 1626 # Create a generator for the manifestnodes that calls our lookup
1624 1627 # and data collection functions back.
1625 1628 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1626 1629 filenode_collector(changedfiles))
1627 1630 for chnk in group:
1628 1631 yield chnk
1629 1632
1630 1633 # These are no longer needed, dereference and toss the memory for
1631 1634 # them.
1632 1635 msng_mnfst_lst = None
1633 1636 msng_mnfst_set.clear()
1634 1637
1635 1638 changedfiles = changedfiles.keys()
1636 1639 changedfiles.sort()
1637 1640 # Go through all our files in order sorted by name.
1638 1641 for fname in changedfiles:
1639 1642 filerevlog = self.file(fname)
1640 1643 # Toss out the filenodes that the recipient isn't really
1641 1644 # missing.
1642 1645 if fname in msng_filenode_set:
1643 1646 prune_filenodes(fname, filerevlog)
1644 1647 msng_filenode_lst = msng_filenode_set[fname].keys()
1645 1648 else:
1646 1649 msng_filenode_lst = []
1647 1650 # If any filenodes are left, generate the group for them,
1648 1651 # otherwise don't bother.
1649 1652 if msng_filenode_lst:
1650 1653 yield changegroup.genchunk(fname)
1651 1654 # Sort the filenodes by their revision #
1652 1655 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1653 1656 # Create a group generator and only pass in a changenode
1654 1657 # lookup function as we need to collect no information
1655 1658 # from filenodes.
1656 1659 group = filerevlog.group(msng_filenode_lst,
1657 1660 lookup_filenode_link_func(fname))
1658 1661 for chnk in group:
1659 1662 yield chnk
1660 1663 if fname in msng_filenode_set:
1661 1664 # Don't need this anymore, toss it to free memory.
1662 1665 del msng_filenode_set[fname]
1663 1666 # Signal that no more groups are left.
1664 1667 yield changegroup.closechunk()
1665 1668
1666 1669 if msng_cl_lst:
1667 1670 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1668 1671
1669 1672 return util.chunkbuffer(gengroup())
1670 1673
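Everything `gengroup()` yields above is framed as length-prefixed chunks. Assuming the `changegroup.py` convention (a 4-byte big-endian length that counts its own header, with a zero length closing each group), the framing round-trips like this:

```python
# Round-trip sketch of the chunk framing, under the assumed
# changegroup.py convention described above.
import struct

def genchunk(data):
    return struct.pack('>l', len(data) + 4) + data

def closechunk():
    return struct.pack('>l', 0)

def iterchunks(buf):
    """yield chunk payloads from buf until a terminating zero header"""
    pos = 0
    while pos < len(buf):
        (l,) = struct.unpack('>l', buf[pos:pos + 4])
        if l == 0:                    # end of this group
            return
        yield buf[pos + 4:pos + l]
        pos += l

stream = genchunk(b'rev1') + genchunk(b'rev2') + closechunk()
print(list(iterchunks(stream)))       # [b'rev1', b'rev2']
```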
1671 1674 def changegroup(self, basenodes, source):
1672 1675 """Generate a changegroup of all nodes that we have that a recipient
1673 1676 doesn't.
1674 1677
1675 1678 This is much easier than the previous function as we can assume that
1676 1679 the recipient has any changenode we aren't sending them."""
1677 1680
1678 1681 self.hook('preoutgoing', throw=True, source=source)
1679 1682
1680 1683 cl = self.changelog
1681 1684 nodes = cl.nodesbetween(basenodes, None)[0]
1682 1685 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1683 1686 self.changegroupinfo(nodes)
1684 1687
1685 1688 def identity(x):
1686 1689 return x
1687 1690
1688 1691 def gennodelst(revlog):
1689 1692 for r in xrange(0, revlog.count()):
1690 1693 n = revlog.node(r)
1691 1694 if revlog.linkrev(n) in revset:
1692 1695 yield n
1693 1696
1694 1697 def changed_file_collector(changedfileset):
1695 1698 def collect_changed_files(clnode):
1696 1699 c = cl.read(clnode)
1697 1700 for fname in c[3]:
1698 1701 changedfileset[fname] = 1
1699 1702 return collect_changed_files
1700 1703
1701 1704 def lookuprevlink_func(revlog):
1702 1705 def lookuprevlink(n):
1703 1706 return cl.node(revlog.linkrev(n))
1704 1707 return lookuprevlink
1705 1708
1706 1709 def gengroup():
1707 1710 # construct a list of all changed files
1708 1711 changedfiles = {}
1709 1712
1710 1713 for chnk in cl.group(nodes, identity,
1711 1714 changed_file_collector(changedfiles)):
1712 1715 yield chnk
1713 1716 changedfiles = changedfiles.keys()
1714 1717 changedfiles.sort()
1715 1718
1716 1719 mnfst = self.manifest
1717 1720 nodeiter = gennodelst(mnfst)
1718 1721 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1719 1722 yield chnk
1720 1723
1721 1724 for fname in changedfiles:
1722 1725 filerevlog = self.file(fname)
1723 1726 nodeiter = gennodelst(filerevlog)
1724 1727 nodeiter = list(nodeiter)
1725 1728 if nodeiter:
1726 1729 yield changegroup.genchunk(fname)
1727 1730 lookup = lookuprevlink_func(filerevlog)
1728 1731 for chnk in filerevlog.group(nodeiter, lookup):
1729 1732 yield chnk
1730 1733
1731 1734 yield changegroup.closechunk()
1732 1735
1733 1736 if nodes:
1734 1737 self.hook('outgoing', node=hex(nodes[0]), source=source)
1735 1738
1736 1739 return util.chunkbuffer(gengroup())
1737 1740
1738 1741 def addchangegroup(self, source, srctype, url):
1739 1742 """add changegroup to repo.
1740 1743 returns number of heads modified or added + 1."""
1741 1744
1742 1745 def csmap(x):
1743 1746 self.ui.debug(_("add changeset %s\n") % short(x))
1744 1747 return cl.count()
1745 1748
1746 1749 def revmap(x):
1747 1750 return cl.rev(x)
1748 1751
1749 1752 if not source:
1750 1753 return 0
1751 1754
1752 1755 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1753 1756
1754 1757 changesets = files = revisions = 0
1755 1758
1756 1759 tr = self.transaction()
1757 1760
1758 1761 # write changelog data to temp files so concurrent readers will not see
1759 1762 # an inconsistent view
1760 1763 cl = None
1761 1764 try:
1762 1765 cl = appendfile.appendchangelog(self.sopener,
1763 1766 self.changelog.version)
1764 1767
1765 1768 oldheads = len(cl.heads())
1766 1769
1767 1770 # pull off the changeset group
1768 1771 self.ui.status(_("adding changesets\n"))
1769 1772 cor = cl.count() - 1
1770 1773 chunkiter = changegroup.chunkiter(source)
1771 1774 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1772 1775 raise util.Abort(_("received changelog group is empty"))
1773 1776 cnr = cl.count() - 1
1774 1777 changesets = cnr - cor
1775 1778
1776 1779 # pull off the manifest group
1777 1780 self.ui.status(_("adding manifests\n"))
1778 1781 chunkiter = changegroup.chunkiter(source)
1779 1782 # no need to check for empty manifest group here:
1780 1783 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1781 1784 # no new manifest will be created and the manifest group will
1782 1785 # be empty during the pull
1783 1786 self.manifest.addgroup(chunkiter, revmap, tr)
1784 1787
1785 1788 # process the files
1786 1789 self.ui.status(_("adding file changes\n"))
1787 1790 while 1:
1788 1791 f = changegroup.getchunk(source)
1789 1792 if not f:
1790 1793 break
1791 1794 self.ui.debug(_("adding %s revisions\n") % f)
1792 1795 fl = self.file(f)
1793 1796 o = fl.count()
1794 1797 chunkiter = changegroup.chunkiter(source)
1795 1798 if fl.addgroup(chunkiter, revmap, tr) is None:
1796 1799 raise util.Abort(_("received file revlog group is empty"))
1797 1800 revisions += fl.count() - o
1798 1801 files += 1
1799 1802
1800 1803 cl.writedata()
1801 1804 finally:
1802 1805 if cl:
1803 1806 cl.cleanup()
1804 1807
1805 1808 # make changelog see real files again
1806 1809 self.changelog = changelog.changelog(self.sopener,
1807 1810 self.changelog.version)
1808 1811 self.changelog.checkinlinesize(tr)
1809 1812
1810 1813 newheads = len(self.changelog.heads())
1811 1814 heads = ""
1812 1815 if oldheads and newheads != oldheads:
1813 1816 heads = _(" (%+d heads)") % (newheads - oldheads)
1814 1817
1815 1818 self.ui.status(_("added %d changesets"
1816 1819 " with %d changes to %d files%s\n")
1817 1820 % (changesets, revisions, files, heads))
1818 1821
1819 1822 if changesets > 0:
1820 1823 self.hook('pretxnchangegroup', throw=True,
1821 1824 node=hex(self.changelog.node(cor+1)), source=srctype,
1822 1825 url=url)
1823 1826
1824 1827 tr.close()
1825 1828
1826 1829 if changesets > 0:
1827 1830 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1828 1831 source=srctype, url=url)
1829 1832
1830 1833 for i in xrange(cor + 1, cnr + 1):
1831 1834 self.hook("incoming", node=hex(self.changelog.node(i)),
1832 1835 source=srctype, url=url)
1833 1836
1834 1837 return newheads - oldheads + 1
1835 1838
1836 1839
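The appendfile dance in `addchangegroup()` exists for the reason the comment near the top of the function gives: readers must never see a half-written changelog. A generic sketch of the underlying idea, write new data out of readers' sight and publish it in one step; this illustrates the concept only, not the exact appendfile mechanics, and the paths are hypothetical:

```python
# Generic sketch of the publish-atomically idea behind appendfile and
# aftertrans(); concept only, not the exact mechanics.
import os, shutil

def publish_append(path, newdata):
    tmp = path + '.tmp'               # hypothetical temp name
    shutil.copyfile(path, tmp)        # start from the current contents
    with open(tmp, 'ab') as f:
        f.write(newdata)              # append while readers still see 'path'
    os.rename(tmp, path)              # one-step switch: readers observe
                                      # either the old or the new file
```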
1837 1840 def stream_in(self, remote):
1838 1841 fp = remote.stream_out()
1839 1842 l = fp.readline()
1840 1843 try:
1841 1844 resp = int(l)
1842 1845 except ValueError:
1843 1846 raise util.UnexpectedOutput(
1844 1847 _('Unexpected response from remote server:'), l)
1845 1848 if resp == 1:
1846 1849 raise util.Abort(_('operation forbidden by server'))
1847 1850 elif resp == 2:
1848 1851 raise util.Abort(_('locking the remote repository failed'))
1849 1852 elif resp != 0:
1850 1853 raise util.Abort(_('the server sent an unknown error code'))
1851 1854 self.ui.status(_('streaming all changes\n'))
1852 1855 l = fp.readline()
1853 1856 try:
1854 1857 total_files, total_bytes = map(int, l.split(' ', 1))
1855 1858 except (ValueError, TypeError):
1856 1859 raise util.UnexpectedOutput(
1857 1860 _('Unexpected response from remote server:'), l)
1858 1861 self.ui.status(_('%d files to transfer, %s of data\n') %
1859 1862 (total_files, util.bytecount(total_bytes)))
1860 1863 start = time.time()
1861 1864 for i in xrange(total_files):
1862 1865 # XXX doesn't support '\n' or '\r' in filenames
1863 1866 l = fp.readline()
1864 1867 try:
1865 1868 name, size = l.split('\0', 1)
1866 1869 size = int(size)
1867 1870 except (ValueError, TypeError):
1868 1871 raise util.UnexpectedOutput(
1869 1872 _('Unexpected response from remote server:'), l)
1870 1873 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1871 1874 ofp = self.sopener(name, 'w')
1872 1875 for chunk in util.filechunkiter(fp, limit=size):
1873 1876 ofp.write(chunk)
1874 1877 ofp.close()
1875 1878 elapsed = time.time() - start
1876 1879 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1877 1880 (util.bytecount(total_bytes), elapsed,
1878 1881 util.bytecount(total_bytes / elapsed)))
1879 1882 self.reload()
1880 1883 return len(self.heads()) + 1
1881 1884
1882 1885 def clone(self, remote, heads=None, stream=False):
1883 1886 '''clone remote repository.
1884 1887
1885 1888 keyword arguments:
1886 1889 heads: list of revs to clone (forces use of pull)
1887 1890 stream: use streaming clone if possible'''
1888 1891
1889 1892 # now, all clients that can request uncompressed clones can
1890 1893 # read repo formats supported by all servers that can serve
1891 1894 # them.
1892 1895
1893 1896 # if revlog format changes, client will have to check version
1894 1897 # and format flags on "stream" capability, and use
1895 1898 # uncompressed only if compatible.
1896 1899
1897 1900 if stream and not heads and remote.capable('stream'):
1898 1901 return self.stream_in(remote)
1899 1902 return self.pull(remote, heads)
1900 1903
1901 1904 # used to avoid circular references so destructors work
1902 1905 def aftertrans(files):
1903 1906 renamefiles = [tuple(t) for t in files]
1904 1907 def a():
1905 1908 for src, dest in renamefiles:
1906 1909 util.rename(src, dest)
1907 1910 return a
1908 1911
1909 1912 def instance(ui, path, create):
1910 1913 return localrepository(ui, util.drop_scheme('file', path), create)
1911 1914
1912 1915 def islocal(path):
1913 1916 return True
@@ -1,65 +1,66 b''
1 1 # statichttprepo.py - simple http repository class for mercurial
2 2 #
3 3 # This provides read-only repo access to repositories exported via static http
4 4 #
5 5 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms
8 8 # of the GNU General Public License, incorporated herein by reference.
9 9
10 10 from demandload import *
11 11 from i18n import gettext as _
12 12 demandload(globals(), "changelog filelog httprangereader")
13 13 demandload(globals(), "localrepo manifest os urllib urllib2 util")
14 14
15 15 class rangereader(httprangereader.httprangereader):
16 16 def read(self, size=None):
17 17 try:
18 18 return httprangereader.httprangereader.read(self, size)
19 19 except urllib2.HTTPError, inst:
20 20 raise IOError(None, inst)
21 21 except urllib2.URLError, inst:
22 22 raise IOError(None, inst.reason[1])
23 23
24 24 def opener(base):
25 25 """return a function that opens files over http"""
26 26 p = base
27 27 def o(path, mode="r"):
28 28 f = os.path.join(p, urllib.quote(path))
29 29 return rangereader(f)
30 30 return o
31 31
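`opener(base)` returns a closure so the rest of `localrepository` stays oblivious to the transport: the same call shape serves files from disk or over HTTP. A local-filesystem analogue of the same pattern (purely illustrative):

```python
# Filesystem analogue of the HTTP opener above: same closure shape,
# different transport.
import os

def fsopener(base):
    """return a function that opens files relative to base"""
    def o(path, mode='r'):
        return open(os.path.join(base, path), mode)
    return o

# any code written against the opener() call shape works unchanged:
# opener = fsopener('/tmp/repo/.hg')   # hypothetical path
# f = opener('00changelog.i', 'rb')
```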
32 32 class statichttprepository(localrepo.localrepository):
33 33 def __init__(self, ui, path):
34 34 self._url = path
35 35 self.path = (path + "/.hg")
36 self.spath = self.path
36 37 self.ui = ui
37 38 self.revlogversion = 0
38 39 self.opener = opener(self.path)
39 self.sopener = opener(self.path)
40 self.manifest = manifest.manifest(self.opener)
41 self.changelog = changelog.changelog(self.opener)
40 self.sopener = opener(self.spath)
41 self.manifest = manifest.manifest(self.sopener)
42 self.changelog = changelog.changelog(self.sopener)
42 43 self.tagscache = None
43 44 self.nodetagscache = None
44 45 self.encodepats = None
45 46 self.decodepats = None
46 47
47 48 def url(self):
48 49 return 'static-' + self._url
49 50
50 51 def dev(self):
51 52 return -1
52 53
53 54 def local(self):
54 55 return False
55 56
56 57 def instance(ui, path, create):
57 58 if create:
58 59 raise util.Abort(_('cannot create new static-http repository'))
59 60 if path.startswith('old-http:'):
60 61 ui.warn(_("old-http:// syntax is deprecated, "
61 62 "please use static-http:// instead\n"))
62 63 path = path[4:]
63 64 else:
64 65 path = path[7:]
65 66 return statichttprepository(ui, path)
@@ -1,95 +1,95 b''
1 1 # streamclone.py - streaming clone server support for mercurial
2 2 #
3 3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from demandload import demandload
9 9 from i18n import gettext as _
10 10 demandload(globals(), "os stat util lock")
11 11
12 12 # if server supports streaming clone, it advertises "stream"
13 13 # capability with value that is version+flags of repo it is serving.
14 14 # client only streams if it can read that repo format.
15 15
16 16 def walkrepo(root):
17 17 '''iterate over metadata files in repository.
18 18 walk in natural (sorted) order.
19 19 yields 2-tuples: name of .d or .i file, size of file.'''
20 20
21 21 strip_count = len(root) + len(os.sep)
22 22 def walk(path, recurse):
23 23 ents = os.listdir(path)
24 24 ents.sort()
25 25 for e in ents:
26 26 pe = os.path.join(path, e)
27 27 st = os.lstat(pe)
28 28 if stat.S_ISDIR(st.st_mode):
29 29 if recurse:
30 30 for x in walk(pe, True):
31 31 yield x
32 32 else:
33 33 if not stat.S_ISREG(st.st_mode) or len(e) < 2:
34 34 continue
35 35 sfx = e[-2:]
36 36 if sfx in ('.d', '.i'):
37 37 yield pe[strip_count:], st.st_size
38 38 # write file data first
39 39 for x in walk(os.path.join(root, 'data'), True):
40 40 yield x
41 41 # write manifest before changelog
42 42 meta = list(walk(root, False))
43 43 meta.sort()
44 44 meta.reverse()
45 45 for x in meta:
46 46 yield x
47 47
48 48 # stream file format is simple.
49 49 #
50 50 # server writes out line that says how many files, how many total
51 51 # bytes. separator is ascii space, byte counts are strings.
52 52 #
53 53 # then for each file:
54 54 #
55 55 # server writes out line that says file name, how many bytes in
56 56 # file. separator is ascii nul, byte count is string.
57 57 #
58 58 # server writes out raw file data.
59 59
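The format described above is simple enough to parse with nothing but line reads: one status line, one `files bytes` count line, then per file a `name\0size` line followed by exactly `size` raw bytes. A minimal client-side sketch mirroring `stream_in()` from localrepo.py (illustrative only):

```python
# Minimal client-side parser for the stream format described above;
# 'fp' is any binary file-like object with readline() and read().
import io

def parse_stream(fp):
    resp = int(fp.readline().decode())
    if resp != 0:                     # 1: forbidden, 2: lock failed
        raise IOError('server refused streaming (code %d)' % resp)
    counts = fp.readline().decode().split(' ', 1)
    total_files = int(counts[0])      # counts[1] is the total byte count
    for _ in range(total_files):
        line = fp.readline().rstrip(b'\n')
        name, size = line.split(b'\0', 1)
        yield name.decode(), fp.read(int(size.decode()))

blob = b'0\n1 4\nfoo.i\x004\nDATA'
for name, data in parse_stream(io.BytesIO(blob)):
    print(name, data)                 # foo.i b'DATA'
```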
60 60 def stream_out(repo, fileobj):
61 61 '''stream out all metadata files in repository.
62 62 writes to file-like object, must support write() and optional flush().'''
63 63
64 64 if not repo.ui.configbool('server', 'uncompressed'):
65 65 fileobj.write('1\n')
66 66 return
67 67
68 68 # get a consistent snapshot of the repo. lock only during the scan
69 69 # so the lock is not needed while we stream and commits can happen.
70 70 try:
71 71 repolock = repo.lock()
72 72 except (lock.LockHeld, lock.LockUnavailable), inst:
73 73 repo.ui.warn('locking the repository failed: %s\n' % (inst,))
74 74 fileobj.write('2\n')
75 75 return
76 76
77 77 fileobj.write('0\n')
78 78 repo.ui.debug('scanning\n')
79 79 entries = []
80 80 total_bytes = 0
81 for name, size in walkrepo(repo.path):
81 for name, size in walkrepo(repo.spath):
82 82 entries.append((name, size))
83 83 total_bytes += size
84 84 repolock.release()
85 85
86 86 repo.ui.debug('%d files, %d bytes to transfer\n' %
87 87 (len(entries), total_bytes))
88 88 fileobj.write('%d %d\n' % (len(entries), total_bytes))
89 89 for name, size in entries:
90 90 repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
91 91 fileobj.write('%s\0%d\n' % (name, size))
92 for chunk in util.filechunkiter(repo.opener(name), limit=size):
92 for chunk in util.filechunkiter(repo.sopener(name), limit=size):
93 93 fileobj.write(chunk)
94 94 flush = getattr(fileobj, 'flush', None)
95 95 if flush: flush()