fix file handling bugs on windows....
Vadim Gelfer
r2176:9b42304d default
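Editorial note: the pattern running through this changeset is to route file access through util.posixfile() and util.fstat() instead of calling open(), os.fdopen(), and os.fstat(fp.fileno()) directly, so that Windows gets POSIX-like file semantics and file-like wrappers without a fileno() can still be stat'ed. A minimal sketch of what such helpers could look like (an assumption for illustration; the real mercurial/util.py versions differ, and a Windows build backs posixfile with the win32 API):

    import os

    def fstat(fp):
        # stat a file object; fall back to stat-by-name for wrappers
        # (such as appendfile below) that have a .name but no .fileno()
        try:
            return os.fstat(fp.fileno())
        except AttributeError:
            return os.stat(fp.name)

    # on POSIX the built-in file type already lets open files be
    # renamed or unlinked; a Windows build would swap in a class with
    # equivalent semantics
    posixfile = file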
--- a/mercurial/appendfile.py
+++ b/mercurial/appendfile.py
@@ -1,154 +1,156 @@
 # appendfile.py - special classes to make repo updates atomic
 #
 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
 #
 # This software may be used and distributed according to the terms
 # of the GNU General Public License, incorporated herein by reference.
 
 from demandload import *
-demandload(globals(), "cStringIO changelog errno manifest os tempfile")
+demandload(globals(), "cStringIO changelog errno manifest os tempfile util")
 
 # writes to metadata files are ordered. reads: changelog, manifest,
 # normal files. writes: normal files, manifest, changelog.
 
 # manifest contains pointers to offsets in normal files. changelog
 # contains pointers to offsets in manifest. if reader reads old
 # changelog while manifest or normal files are written, it has no
 # pointers into new parts of those files that are maybe not consistent
 # yet, so will not read them.
 
 # localrepo.addchangegroup thinks it writes changelog first, then
 # manifest, then normal files (this is order they are available, and
 # needed for computing linkrev fields), but uses appendfile to hide
 # updates from readers. data not written to manifest or changelog
 # until all normal files updated. write manifest first, then
 # changelog.
 
 # with this write ordering, readers cannot see inconsistent view of
 # repo during update.
 
 class appendfile(object):
     '''implement enough of file protocol to append to revlog file.
     appended data is written to temp file. reads and seeks span real
     file and temp file. readers cannot see appended data until
     writedata called.'''
 
     def __init__(self, fp, tmpname):
         if tmpname:
             self.tmpname = tmpname
-            self.tmpfp = open(self.tmpname, 'ab+')
+            self.tmpfp = util.posixfile(self.tmpname, 'ab+')
         else:
             fd, self.tmpname = tempfile.mkstemp()
-            self.tmpfp = os.fdopen(fd, 'ab+')
+            os.close(fd)
+            self.tmpfp = util.posixfile(self.tmpname, 'ab+')
         self.realfp = fp
         self.offset = fp.tell()
         # real file is not written by anyone else. cache its size so
         # seek and read can be fast.
-        self.realsize = os.fstat(fp.fileno()).st_size
+        self.realsize = util.fstat(fp).st_size
+        self.name = fp.name
 
     def end(self):
         self.tmpfp.flush() # make sure the stat is correct
-        return self.realsize + os.fstat(self.tmpfp.fileno()).st_size
+        return self.realsize + util.fstat(self.tmpfp).st_size
 
     def tell(self):
         return self.offset
 
     def flush(self):
         self.tmpfp.flush()
 
     def close(self):
         self.realfp.close()
         self.tmpfp.close()
 
     def seek(self, offset, whence=0):
         '''virtual file offset spans real file and temp file.'''
         if whence == 0:
             self.offset = offset
         elif whence == 1:
             self.offset += offset
         elif whence == 2:
             self.offset = self.end() + offset
 
         if self.offset < self.realsize:
             self.realfp.seek(self.offset)
         else:
             self.tmpfp.seek(self.offset - self.realsize)
 
     def read(self, count=-1):
         '''only trick here is reads that span real file and temp file.'''
         fp = cStringIO.StringIO()
         old_offset = self.offset
         if self.offset < self.realsize:
             s = self.realfp.read(count)
             fp.write(s)
             self.offset += len(s)
             if count > 0:
                 count -= len(s)
         if count != 0:
             if old_offset != self.offset:
                 self.tmpfp.seek(self.offset - self.realsize)
             s = self.tmpfp.read(count)
             fp.write(s)
             self.offset += len(s)
         return fp.getvalue()
 
     def write(self, s):
         '''append to temp file.'''
         self.tmpfp.seek(0, 2)
         self.tmpfp.write(s)
         # all writes are appends, so offset must go to end of file.
         self.offset = self.realsize + self.tmpfp.tell()
 
 class appendopener(object):
     '''special opener for files that only read or append.'''
 
     def __init__(self, opener):
         self.realopener = opener
         # key: file name, value: appendfile name
         self.tmpnames = {}
 
     def __call__(self, name, mode='r'):
         '''open file.'''
 
         assert mode in 'ra+'
         try:
             realfp = self.realopener(name, 'r')
         except IOError, err:
             if err.errno != errno.ENOENT: raise
             realfp = self.realopener(name, 'w+')
         tmpname = self.tmpnames.get(name)
         fp = appendfile(realfp, tmpname)
         if tmpname is None:
             self.tmpnames[name] = fp.tmpname
         return fp
 
     def writedata(self):
         '''copy data from temp files to real files.'''
         # write .d file before .i file.
         tmpnames = self.tmpnames.items()
         tmpnames.sort()
         for name, tmpname in tmpnames:
             fp = open(tmpname, 'rb')
             s = fp.read()
             fp.close()
             os.unlink(tmpname)
             fp = self.realopener(name, 'a')
             fp.write(s)
             fp.close()
 
 # files for changelog and manifest are in different appendopeners, so
 # not mixed up together.
 
 class appendchangelog(changelog.changelog, appendopener):
     def __init__(self, opener, version):
         appendopener.__init__(self, opener)
         changelog.changelog.__init__(self, self, version)
     def checkinlinesize(self, fp, tr):
         return
 
 class appendmanifest(manifest.manifest, appendopener):
     def __init__(self, opener, version):
         appendopener.__init__(self, opener)
         manifest.manifest.__init__(self, self, version)
     def checkinlinesize(self, fp, tr):
         return
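Two of the appendfile hunks above deserve a note beyond the mechanical substitution. First, the raw descriptor from tempfile.mkstemp() is now closed and the temp file reopened by name through util.posixfile, so the handle is created with the same semantics as every other handle Mercurial opens. Second, the new self.name = fp.name line gives the wrapper a name attribute, which is what lets a fstat-style helper (see the sketch above) fall back to stat-by-path when handed an appendfile that has no fileno(). A hedged usage sketch, reusing the hypothetical fstat from above:

    # a consumer such as revlog.load() can stat whatever its opener
    # returned, a plain file or an appendfile, through one helper
    fp = opener('00changelog.i')  # hypothetical opener; may return either
    st = fstat(fp)                # real file: os.fstat(fp.fileno());
                                  # appendfile: os.stat(fp.name) fallback
    print st.st_size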
--- a/mercurial/bundlerepo.py
+++ b/mercurial/bundlerepo.py
@@ -1,207 +1,207 @@
 """
 bundlerepo.py - repository class for viewing uncompressed bundles
 
 This provides a read-only repository interface to bundles as if
 they were part of the actual repository.
 
 Copyright 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org>
 
 This software may be used and distributed according to the terms
 of the GNU General Public License, incorporated herein by reference.
 """
 
 from node import *
 from i18n import gettext as _
 from demandload import demandload
 demandload(globals(), "changegroup util os struct")
 
 import localrepo, changelog, manifest, filelog, revlog
 
 class bundlerevlog(revlog.revlog):
     def __init__(self, opener, indexfile, datafile, bundlefile,
                  linkmapper=None):
         # How it works:
         # to retrieve a revision, we need to know the offset of
         # the revision in the bundlefile (an opened file).
         #
         # We store this offset in the index (start), to differentiate a
         # rev in the bundle and from a rev in the revlog, we check
         # len(index[r]). If the tuple is bigger than 7, it is a bundle
         # (it is bigger since we store the node to which the delta is)
         #
         revlog.revlog.__init__(self, opener, indexfile, datafile)
         self.bundlefile = bundlefile
         self.basemap = {}
         def chunkpositer():
             for chunk in changegroup.chunkiter(bundlefile):
                 pos = bundlefile.tell()
                 yield chunk, pos - len(chunk)
         n = self.count()
         prev = None
         for chunk, start in chunkpositer():
             size = len(chunk)
             if size < 80:
                 raise util.Abort("invalid changegroup")
             start += 80
             size -= 80
             node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
             if node in self.nodemap:
                 prev = node
                 continue
             for p in (p1, p2):
                 if not p in self.nodemap:
                     raise RevlogError(_("unknown parent %s") % short(p1))
             if linkmapper is None:
                 link = n
             else:
                 link = linkmapper(cs)
 
             if not prev:
                 prev = p1
             # start, size, base is not used, link, p1, p2, delta ref
             if self.version == 0:
                 e = (start, size, None, link, p1, p2, node)
             else:
                 e = (self.offset_type(start, 0), size, -1, None, link,
                      self.rev(p1), self.rev(p2), node)
             self.basemap[n] = prev
             self.index.append(e)
             self.nodemap[node] = n
             prev = node
             n += 1
 
     def bundle(self, rev):
         """is rev from the bundle"""
         if rev < 0:
             return False
         return rev in self.basemap
     def bundlebase(self, rev): return self.basemap[rev]
     def chunk(self, rev, df=None):
         # Warning: in case of bundle, the diff is against bundlebase,
         # not against rev - 1
         # XXX: could use some caching
         if not self.bundle(rev):
             return revlog.revlog.chunk(self, rev)
         self.bundlefile.seek(self.start(rev))
         return self.bundlefile.read(self.length(rev))
 
     def revdiff(self, rev1, rev2):
         """return or calculate a delta between two revisions"""
         if self.bundle(rev1) and self.bundle(rev2):
             # hot path for bundle
             revb = self.rev(self.bundlebase(rev2))
             if revb == rev1:
                 return self.chunk(rev2)
         elif not self.bundle(rev1) and not self.bundle(rev2):
             return revlog.revlog.chunk(self, rev1, rev2)
 
         return self.diff(self.revision(self.node(rev1)),
                          self.revision(self.node(rev2)))
 
     def revision(self, node):
         """return an uncompressed revision of a given"""
         if node == nullid: return ""
 
         text = None
         chain = []
         iter_node = node
         rev = self.rev(iter_node)
         # reconstruct the revision if it is from a changegroup
         while self.bundle(rev):
             if self.cache and self.cache[0] == iter_node:
                 text = self.cache[2]
                 break
             chain.append(rev)
             iter_node = self.bundlebase(rev)
             rev = self.rev(iter_node)
         if text is None:
             text = revlog.revlog.revision(self, iter_node)
 
         while chain:
             delta = self.chunk(chain.pop())
             text = self.patches(text, [delta])
 
         p1, p2 = self.parents(node)
         if node != revlog.hash(text, p1, p2):
             raise RevlogError(_("integrity check failed on %s:%d")
                               % (self.datafile, self.rev(node)))
 
         self.cache = (node, self.rev(node), text)
         return text
 
     def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
         raise NotImplementedError
     def addgroup(self, revs, linkmapper, transaction, unique=0):
         raise NotImplementedError
     def strip(self, rev, minlink):
         raise NotImplementedError
     def checksize(self):
         raise NotImplementedError
 
 class bundlechangelog(bundlerevlog, changelog.changelog):
     def __init__(self, opener, bundlefile):
         changelog.changelog.__init__(self, opener)
         bundlerevlog.__init__(self, opener, "00changelog.i", "00changelog.d",
                               bundlefile)
 
 class bundlemanifest(bundlerevlog, manifest.manifest):
     def __init__(self, opener, bundlefile, linkmapper):
         manifest.manifest.__init__(self, opener)
         bundlerevlog.__init__(self, opener, self.indexfile, self.datafile,
                               bundlefile, linkmapper)
 
 class bundlefilelog(bundlerevlog, filelog.filelog):
     def __init__(self, opener, path, bundlefile, linkmapper):
         filelog.filelog.__init__(self, opener, path)
         bundlerevlog.__init__(self, opener, self.indexfile, self.datafile,
                               bundlefile, linkmapper)
 
 class bundlerepository(localrepo.localrepository):
     def __init__(self, ui, path, bundlename):
         localrepo.localrepository.__init__(self, ui, path)
         f = open(bundlename, "rb")
-        s = os.fstat(f.fileno())
+        s = util.fstat(f)
         self.bundlefile = f
         header = self.bundlefile.read(6)
         if not header.startswith("HG"):
             raise util.Abort(_("%s: not a Mercurial bundle file") % bundlename)
         elif not header.startswith("HG10"):
             raise util.Abort(_("%s: unknown bundle version") % bundlename)
         elif header == "HG10BZ":
             raise util.Abort(_("%s: compressed bundle not supported")
                              % bundlename)
         elif header == "HG10UN":
             # uncompressed bundle supported
             pass
         else:
             raise util.Abort(_("%s: unknown bundle compression type")
                              % bundlename)
         self.changelog = bundlechangelog(self.opener, self.bundlefile)
         self.manifest = bundlemanifest(self.opener, self.bundlefile,
                                        self.changelog.rev)
         # dict with the mapping 'filename' -> position in the bundle
         self.bundlefilespos = {}
         while 1:
             f = changegroup.getchunk(self.bundlefile)
             if not f:
                 break
             self.bundlefilespos[f] = self.bundlefile.tell()
             for c in changegroup.chunkiter(self.bundlefile):
                 pass
 
     def dev(self):
         return -1
 
     def file(self, f):
         if f[0] == '/':
             f = f[1:]
         if f in self.bundlefilespos:
             self.bundlefile.seek(self.bundlefilespos[f])
             return bundlefilelog(self.opener, f, self.bundlefile,
                                  self.changelog.rev)
         else:
             return filelog.filelog(self.opener, f)
 
     def close(self):
         """Close assigned bundle file immediately."""
         self.bundlefile.close()
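The single bundlerepo.py change is the same substitution: f here is a real file from open(bundlename, "rb"), so util.fstat(f) is equivalent to os.fstat(f.fileno()) and the rewrite simply keeps every stat in the tree going through one helper. Separately, for orientation on the constructor's final loop: after the changelog and manifest groups, the changegroup stream carries each file as a name chunk followed by that file's delta chunks, and bundlefilespos records where each file's deltas begin. Every chunk is framed by a 4-byte big-endian length that counts the length field itself, with a zero length closing a group; a sketch of that framing, reconstructed from the wire format rather than copied from changegroup.py:

    import struct

    def getchunk(f):
        # read one length-prefixed chunk; '' marks the end of a group
        d = f.read(4)
        if not d:
            return ""
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            return ""
        return f.read(l - 4)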
--- a/mercurial/revlog.py
+++ b/mercurial/revlog.py
@@ -1,1241 +1,1241 @@
 """
 revlog.py - storage back-end for mercurial
 
 This provides efficient delta storage with O(1) retrieve and append
 and O(changes) merge between branches
 
 Copyright 2005 Matt Mackall <mpm@selenic.com>
 
 This software may be used and distributed according to the terms
 of the GNU General Public License, incorporated herein by reference.
 """
 
 from node import *
 from i18n import gettext as _
 from demandload import demandload
 demandload(globals(), "binascii changegroup errno heapq mdiff os")
-demandload(globals(), "sha struct zlib")
+demandload(globals(), "sha struct util zlib")
 
 # revlog version strings
 REVLOGV0 = 0
 REVLOGNG = 1
 
 # revlog flags
 REVLOGNGINLINEDATA = (1 << 16)
 
 def flagstr(flag):
     if flag == "inline":
         return REVLOGNGINLINEDATA
     raise RevlogError(_("unknown revlog flag %s" % flag))
 
 def hash(text, p1, p2):
     """generate a hash from the given text and its parent hashes
 
     This hash combines both the current file contents and its history
     in a manner that makes it easy to distinguish nodes with the same
     content in the revision graph.
     """
     l = [p1, p2]
     l.sort()
     s = sha.new(l[0])
     s.update(l[1])
     s.update(text)
     return s.digest()
 
 def compress(text):
     """ generate a possibly-compressed representation of text """
     if not text: return ("", text)
     if len(text) < 44:
         if text[0] == '\0': return ("", text)
         return ('u', text)
     bin = zlib.compress(text)
     if len(bin) > len(text):
         if text[0] == '\0': return ("", text)
         return ('u', text)
     return ("", bin)
 
 def decompress(bin):
     """ decompress the given input """
     if not bin: return bin
     t = bin[0]
     if t == '\0': return bin
     if t == 'x': return zlib.decompress(bin)
     if t == 'u': return bin[1:]
     raise RevlogError(_("unknown compression type %r") % t)
 
 indexformatv0 = ">4l20s20s20s"
 v0shaoffset = 56
 # index ng:
 # 6 bytes offset
 # 2 bytes flags
 # 4 bytes compressed length
 # 4 bytes uncompressed length
 # 4 bytes: base rev
 # 4 bytes link rev
 # 4 bytes parent 1 rev
 # 4 bytes parent 2 rev
 # 32 bytes: nodeid
 indexformatng = ">Qiiiiii20s12x"
 ngshaoffset = 32
 versionformat = ">i"
 
 class lazyparser(object):
     """
     this class avoids the need to parse the entirety of large indices
     """
     def __init__(self, dataf, size, indexformat, shaoffset):
         self.dataf = dataf
         self.format = indexformat
         self.s = struct.calcsize(indexformat)
         self.indexformat = indexformat
         self.datasize = size
         self.l = size/self.s
         self.index = [None] * self.l
         self.map = {nullid: -1}
         self.allmap = 0
         self.all = 0
         self.mapfind_count = 0
         self.shaoffset = shaoffset
 
     def loadmap(self):
         """
         during a commit, we need to make sure the rev being added is
         not a duplicate. This requires loading the entire index,
         which is fairly slow. loadmap can load up just the node map,
         which takes much less time.
         """
         if self.allmap: return
         start = 0
         end = self.datasize
         self.allmap = 1
         cur = 0
         count = 0
         blocksize = self.s * 256
         self.dataf.seek(0)
         while cur < end:
             data = self.dataf.read(blocksize)
             off = 0
             for x in xrange(256):
                 n = data[off + self.shaoffset:off + self.shaoffset + 20]
                 self.map[n] = count
                 count += 1
                 if count >= self.l:
                     break
                 off += self.s
             cur += blocksize
 
     def loadblock(self, blockstart, blocksize, data=None):
         if self.all: return
         if data is None:
             self.dataf.seek(blockstart)
             data = self.dataf.read(blocksize)
         lend = len(data) / self.s
         i = blockstart / self.s
         off = 0
         for x in xrange(lend):
             if self.index[i + x] == None:
                 b = data[off : off + self.s]
                 self.index[i + x] = b
                 n = b[self.shaoffset:self.shaoffset + 20]
                 self.map[n] = i + x
             off += self.s
 
     def findnode(self, node):
         """search backwards through the index file for a specific node"""
         if self.allmap: return None
 
         # hg log will cause many many searches for the manifest
         # nodes. After we get called a few times, just load the whole
         # thing.
         if self.mapfind_count > 8:
             self.loadmap()
             if node in self.map:
                 return node
             return None
         self.mapfind_count += 1
         last = self.l - 1
         while self.index[last] != None:
             if last == 0:
                 self.all = 1
                 self.allmap = 1
                 return None
             last -= 1
         end = (last + 1) * self.s
         blocksize = self.s * 256
         while end >= 0:
             start = max(end - blocksize, 0)
             self.dataf.seek(start)
             data = self.dataf.read(end - start)
             findend = end - start
             while True:
                 # we're searching backwards, so we have to make sure
                 # we don't find a changeset where this node is a parent
                 off = data.rfind(node, 0, findend)
                 findend = off
                 if off >= 0:
                     i = off / self.s
                     off = i * self.s
                     n = data[off + self.shaoffset:off + self.shaoffset + 20]
                     if n == node:
                         self.map[n] = i + start / self.s
                         return node
                 else:
                     break
             end -= blocksize
         return None
 
     def loadindex(self, i=None, end=None):
         if self.all: return
         all = False
         if i == None:
             blockstart = 0
             blocksize = (512 / self.s) * self.s
             end = self.datasize
             all = True
         else:
             if end:
                 blockstart = i * self.s
                 end = end * self.s
                 blocksize = end - blockstart
             else:
                 blockstart = (i & ~(32)) * self.s
                 blocksize = self.s * 64
                 end = blockstart + blocksize
         while blockstart < end:
             self.loadblock(blockstart, blocksize)
             blockstart += blocksize
         if all: self.all = True
 
 class lazyindex(object):
     """a lazy version of the index array"""
     def __init__(self, parser):
         self.p = parser
     def __len__(self):
         return len(self.p.index)
     def load(self, pos):
         if pos < 0:
             pos += len(self.p.index)
         self.p.loadindex(pos)
         return self.p.index[pos]
     def __getitem__(self, pos):
         ret = self.p.index[pos] or self.load(pos)
         if isinstance(ret, str):
             ret = struct.unpack(self.p.indexformat, ret)
         return ret
     def __setitem__(self, pos, item):
         self.p.index[pos] = item
     def __delitem__(self, pos):
         del self.p.index[pos]
     def append(self, e):
         self.p.index.append(e)
 
 class lazymap(object):
     """a lazy version of the node map"""
     def __init__(self, parser):
         self.p = parser
     def load(self, key):
         n = self.p.findnode(key)
         if n == None:
             raise KeyError(key)
     def __contains__(self, key):
         if key in self.p.map:
             return True
         self.p.loadmap()
         return key in self.p.map
     def __iter__(self):
         yield nullid
         for i in xrange(self.p.l):
             ret = self.p.index[i]
             if not ret:
                 self.p.loadindex(i)
                 ret = self.p.index[i]
             if isinstance(ret, str):
                 ret = struct.unpack(self.p.indexformat, ret)
             yield ret[-1]
     def __getitem__(self, key):
         try:
             return self.p.map[key]
         except KeyError:
             try:
                 self.load(key)
                 return self.p.map[key]
             except KeyError:
                 raise KeyError("node " + hex(key))
     def __setitem__(self, key, val):
         self.p.map[key] = val
     def __delitem__(self, key):
         del self.p.map[key]
 
 class RevlogError(Exception): pass
 
 class revlog(object):
     """
     the underlying revision storage object
 
     A revlog consists of two parts, an index and the revision data.
 
     The index is a file with a fixed record size containing
     information on each revision, including its nodeid (hash), the
     nodeids of its parents, the position and offset of its data within
     the data file, and the revision it's based on. Finally, each entry
     contains a linkrev entry that can serve as a pointer to external
     data.
 
     The revision data itself is a linear collection of data chunks.
     Each chunk represents a revision and is usually represented as a
     delta against the previous chunk. To bound lookup time, runs of
     deltas are limited to about 2 times the length of the original
     version data. This makes retrieval of a version proportional to
     its size, or O(1) relative to the number of revisions.
 
     Both pieces of the revlog are written to in an append-only
     fashion, which means we never need to rewrite a file to insert or
     remove data, and can use some simple techniques to avoid the need
     for locking while reading.
     """
     def __init__(self, opener, indexfile, datafile, defversion=0):
         """
         create a revlog object
 
         opener is a function that abstracts the file opening operation
         and can be used to implement COW semantics or the like.
         """
         self.indexfile = indexfile
         self.datafile = datafile
         self.opener = opener
 
         self.indexstat = None
         self.cache = None
         self.chunkcache = None
         self.defversion = defversion
         self.load()
 
     def load(self):
         v = self.defversion
         try:
             f = self.opener(self.indexfile)
             i = f.read(4)
             f.seek(0)
         except IOError, inst:
             if inst.errno != errno.ENOENT:
                 raise
             i = ""
         else:
             try:
-                st = os.fstat(f.fileno())
+                st = util.fstat(f)
             except AttributeError, inst:
                 st = None
             else:
                 oldst = self.indexstat
                 if (oldst and st.st_dev == oldst.st_dev
                     and st.st_ino == oldst.st_ino
                     and st.st_mtime == oldst.st_mtime
                     and st.st_ctime == oldst.st_ctime):
                     return
                 self.indexstat = st
         if len(i) > 0:
             v = struct.unpack(versionformat, i)[0]
         flags = v & ~0xFFFF
         fmt = v & 0xFFFF
         if fmt == 0:
             if flags:
                 raise RevlogError(_("index %s invalid flags %x for format v0" %
                                     (self.indexfile, flags)))
         elif fmt == REVLOGNG:
             if flags & ~REVLOGNGINLINEDATA:
                 raise RevlogError(_("index %s invalid flags %x for revlogng" %
                                     (self.indexfile, flags)))
         else:
             raise RevlogError(_("index %s invalid format %d" %
                                 (self.indexfile, fmt)))
         self.version = v
         if v == 0:
             self.indexformat = indexformatv0
             shaoffset = v0shaoffset
         else:
             self.indexformat = indexformatng
             shaoffset = ngshaoffset
 
         if i:
             if not self.inlinedata() and st and st.st_size > 10000:
                 # big index, let's parse it on demand
                 parser = lazyparser(f, st.st_size, self.indexformat, shaoffset)
                 self.index = lazyindex(parser)
                 self.nodemap = lazymap(parser)
             else:
                 i = f.read()
                 self.parseindex(i)
                 if self.inlinedata():
                     # we've already got the entire data file read in, save it
                     # in the chunk data
                     self.chunkcache = (0, i)
                 if self.version != 0:
                     e = list(self.index[0])
                     type = self.ngtype(e[0])
                     e[0] = self.offset_type(0, type)
                     self.index[0] = e
         else:
             self.nodemap = { nullid: -1}
             self.index = []
 
 
     def parseindex(self, data):
         s = struct.calcsize(self.indexformat)
         l = len(data)
         self.index = []
         self.nodemap = {nullid: -1}
         inline = self.inlinedata()
         off = 0
         n = 0
         while off < l:
             e = struct.unpack(self.indexformat, data[off:off + s])
             self.index.append(e)
             self.nodemap[e[-1]] = n
             n += 1
             off += s
             if inline:
                 off += e[1]
 
     def ngoffset(self, q):
         if q & 0xFFFF:
             raise RevlogError(_('%s: incompatible revision flag %x') %
                               (self.indexfile, type))
         return long(q >> 16)
 
     def ngtype(self, q):
         return int(q & 0xFFFF)
 
     def offset_type(self, offset, type):
         return long(long(offset) << 16 | type)
 
     def loadindex(self, start, end):
         """load a block of indexes all at once from the lazy parser"""
         if isinstance(self.index, lazyindex):
             self.index.p.loadindex(start, end)
 
     def loadindexmap(self):
         """loads both the map and the index from the lazy parser"""
         if isinstance(self.index, lazyindex):
             p = self.index.p
             p.loadindex()
             self.nodemap = p.map
 
     def loadmap(self):
         """loads the map from the lazy parser"""
         if isinstance(self.nodemap, lazymap):
             self.nodemap.p.loadmap()
             self.nodemap = self.nodemap.p.map
 
     def inlinedata(self): return self.version & REVLOGNGINLINEDATA
     def tip(self): return self.node(len(self.index) - 1)
     def count(self): return len(self.index)
     def node(self, rev):
         return (rev < 0) and nullid or self.index[rev][-1]
     def rev(self, node):
         try:
             return self.nodemap[node]
         except KeyError:
             raise RevlogError(_('%s: no node %s') % (self.indexfile, hex(node)))
     def linkrev(self, node): return self.index[self.rev(node)][-4]
     def parents(self, node):
         if node == nullid: return (nullid, nullid)
         r = self.rev(node)
         d = self.index[r][-3:-1]
         if self.version == 0:
             return d
         return [ self.node(x) for x in d ]
     def start(self, rev):
         if rev < 0:
             return -1
         if self.version != 0:
             return self.ngoffset(self.index[rev][0])
         return self.index[rev][0]
 
     def end(self, rev): return self.start(rev) + self.length(rev)
 
     def size(self, rev):
         """return the length of the uncompressed text for a given revision"""
         l = -1
         if self.version != 0:
             l = self.index[rev][2]
             if l >= 0:
                 return l
 
         t = self.revision(self.node(rev))
         return len(t)
 
         # alternate implementation, The advantage to this code is it
         # will be faster for a single revision. But, the results are not
         # cached, so finding the size of every revision will be slower.
         """
         if self.cache and self.cache[1] == rev:
             return len(self.cache[2])
 
         base = self.base(rev)
         if self.cache and self.cache[1] >= base and self.cache[1] < rev:
             base = self.cache[1]
             text = self.cache[2]
         else:
             text = self.revision(self.node(base))
 
         l = len(text)
         for x in xrange(base + 1, rev + 1):
             l = mdiff.patchedsize(l, self.chunk(x))
         return l
         """
 
     def length(self, rev):
         if rev < 0:
             return 0
         else:
             return self.index[rev][1]
     def base(self, rev): return (rev < 0) and rev or self.index[rev][-5]
 
     def reachable(self, rev, stop=None):
         reachable = {}
         visit = [rev]
         reachable[rev] = 1
         if stop:
             stopn = self.rev(stop)
         else:
             stopn = 0
         while visit:
             n = visit.pop(0)
             if n == stop:
                 continue
             if n == nullid:
                 continue
             for p in self.parents(n):
                 if self.rev(p) < stopn:
                     continue
                 if p not in reachable:
                     reachable[p] = 1
                     visit.append(p)
         return reachable
 
     def nodesbetween(self, roots=None, heads=None):
         """Return a tuple containing three elements. Elements 1 and 2 contain
         a final list bases and heads after all the unreachable ones have been
         pruned. Element 0 contains a topologically sorted list of all
 
         nodes that satisfy these constraints:
         1. All nodes must be descended from a node in roots (the nodes on
            roots are considered descended from themselves).
         2. All nodes must also be ancestors of a node in heads (the nodes in
            heads are considered to be their own ancestors).
 
         If roots is unspecified, nullid is assumed as the only root.
         If heads is unspecified, it is taken to be the output of the
         heads method (i.e. a list of all nodes in the repository that
         have no children)."""
         nonodes = ([], [], [])
         if roots is not None:
             roots = list(roots)
             if not roots:
                 return nonodes
             lowestrev = min([self.rev(n) for n in roots])
         else:
             roots = [nullid] # Everybody's a descendent of nullid
             lowestrev = -1
         if (lowestrev == -1) and (heads is None):
             # We want _all_ the nodes!
             return ([self.node(r) for r in xrange(0, self.count())],
                     [nullid], list(self.heads()))
         if heads is None:
             # All nodes are ancestors, so the latest ancestor is the last
             # node.
             highestrev = self.count() - 1
             # Set ancestors to None to signal that every node is an ancestor.
             ancestors = None
             # Set heads to an empty dictionary for later discovery of heads
             heads = {}
         else:
             heads = list(heads)
             if not heads:
                 return nonodes
             ancestors = {}
             # Start at the top and keep marking parents until we're done.
             nodestotag = heads[:]
             # Turn heads into a dictionary so we can remove 'fake' heads.
             # Also, later we will be using it to filter out the heads we can't
             # find from roots.
             heads = dict.fromkeys(heads, 0)
             # Remember where the top was so we can use it as a limit later.
             highestrev = max([self.rev(n) for n in nodestotag])
             while nodestotag:
                 # grab a node to tag
                 n = nodestotag.pop()
                 # Never tag nullid
                 if n == nullid:
                     continue
                 # A node's revision number represents its place in a
                 # topologically sorted list of nodes.
                 r = self.rev(n)
                 if r >= lowestrev:
                     if n not in ancestors:
                         # If we are possibly a descendent of one of the roots
                         # and we haven't already been marked as an ancestor
                         ancestors[n] = 1 # Mark as ancestor
                         # Add non-nullid parents to list of nodes to tag.
                         nodestotag.extend([p for p in self.parents(n) if
                                            p != nullid])
                     elif n in heads: # We've seen it before, is it a fake head?
                         # So it is, real heads should not be the ancestors of
                         # any other heads.
                         heads.pop(n)
             if not ancestors:
                 return nonodes
             # Now that we have our set of ancestors, we want to remove any
             # roots that are not ancestors.
 
             # If one of the roots was nullid, everything is included anyway.
             if lowestrev > -1:
                 # But, since we weren't, let's recompute the lowest rev to not
                 # include roots that aren't ancestors.
 
                 # Filter out roots that aren't ancestors of heads
                 roots = [n for n in roots if n in ancestors]
                 # Recompute the lowest revision
                 if roots:
                     lowestrev = min([self.rev(n) for n in roots])
                 else:
                     # No more roots? Return empty list
                     return nonodes
             else:
                 # We are descending from nullid, and don't need to care about
                 # any other roots.
                 lowestrev = -1
                 roots = [nullid]
         # Transform our roots list into a 'set' (i.e. a dictionary where the
         # values don't matter.
         descendents = dict.fromkeys(roots, 1)
         # Also, keep the original roots so we can filter out roots that aren't
         # 'real' roots (i.e. are descended from other roots).
         roots = descendents.copy()
         # Our topologically sorted list of output nodes.
         orderedout = []
         # Don't start at nullid since we don't want nullid in our output list,
         # and if nullid shows up in descendents, empty parents will look like
618 # and if nullid shows up in descedents, empty parents will look like
619 # they're descendents.
619 # they're descendents.
620 for r in xrange(max(lowestrev, 0), highestrev + 1):
620 for r in xrange(max(lowestrev, 0), highestrev + 1):
621 n = self.node(r)
621 n = self.node(r)
622 isdescendent = False
622 isdescendent = False
623 if lowestrev == -1: # Everybody is a descendent of nullid
623 if lowestrev == -1: # Everybody is a descendent of nullid
624 isdescendent = True
624 isdescendent = True
625 elif n in descendents:
625 elif n in descendents:
626 # n is already a descendent
626 # n is already a descendent
627 isdescendent = True
627 isdescendent = True
628 # This check only needs to be done here because all the roots
628 # This check only needs to be done here because all the roots
629 # will start being marked is descendents before the loop.
629 # will start being marked is descendents before the loop.
630 if n in roots:
630 if n in roots:
631 # If n was a root, check if it's a 'real' root.
631 # If n was a root, check if it's a 'real' root.
632 p = tuple(self.parents(n))
632 p = tuple(self.parents(n))
633 # If any of its parents are descendents, it's not a root.
633 # If any of its parents are descendents, it's not a root.
634 if (p[0] in descendents) or (p[1] in descendents):
634 if (p[0] in descendents) or (p[1] in descendents):
635 roots.pop(n)
635 roots.pop(n)
636 else:
636 else:
637 p = tuple(self.parents(n))
637 p = tuple(self.parents(n))
638 # A node is a descendent if either of its parents are
638 # A node is a descendent if either of its parents are
639 # descendents. (We seeded the dependents list with the roots
639 # descendents. (We seeded the dependents list with the roots
640 # up there, remember?)
640 # up there, remember?)
641 if (p[0] in descendents) or (p[1] in descendents):
641 if (p[0] in descendents) or (p[1] in descendents):
642 descendents[n] = 1
642 descendents[n] = 1
643 isdescendent = True
643 isdescendent = True
644 if isdescendent and ((ancestors is None) or (n in ancestors)):
644 if isdescendent and ((ancestors is None) or (n in ancestors)):
645 # Only include nodes that are both descendents and ancestors.
645 # Only include nodes that are both descendents and ancestors.
646 orderedout.append(n)
646 orderedout.append(n)
647 if (ancestors is not None) and (n in heads):
647 if (ancestors is not None) and (n in heads):
648 # We're trying to figure out which heads are reachable
648 # We're trying to figure out which heads are reachable
649 # from roots.
649 # from roots.
650 # Mark this head as having been reached
650 # Mark this head as having been reached
651 heads[n] = 1
651 heads[n] = 1
652 elif ancestors is None:
652 elif ancestors is None:
653 # Otherwise, we're trying to discover the heads.
653 # Otherwise, we're trying to discover the heads.
654 # Assume this is a head because if it isn't, the next step
654 # Assume this is a head because if it isn't, the next step
655 # will eventually remove it.
655 # will eventually remove it.
656 heads[n] = 1
656 heads[n] = 1
657 # But, obviously its parents aren't.
657 # But, obviously its parents aren't.
658 for p in self.parents(n):
658 for p in self.parents(n):
659 heads.pop(p, None)
659 heads.pop(p, None)
660 heads = [n for n in heads.iterkeys() if heads[n] != 0]
660 heads = [n for n in heads.iterkeys() if heads[n] != 0]
661 roots = roots.keys()
661 roots = roots.keys()
662 assert orderedout
662 assert orderedout
663 assert roots
663 assert roots
664 assert heads
664 assert heads
665 return (orderedout, roots, heads)
665 return (orderedout, roots, heads)
666
666
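    # A minimal standalone sketch (illustration only, not part of revlog) of
    # the ancestor-marking phase above, using a toy parent map in place of a
    # revlog. The graph and node names are hypothetical.
    def _example_mark_ancestors(heads, parents):
        seen, stack = {}, list(heads)
        while stack:
            n = stack.pop()
            if n not in seen:
                seen[n] = 1                       # mark as ancestor
                stack.extend(parents.get(n, []))  # keep walking to parents
        return seen

    # a -> b -> c with a side branch a -> d: the ancestors of 'c' are a, b, c
    assert sorted(_example_mark_ancestors(['c'],
                  {'b': ['a'], 'c': ['b'], 'd': ['a']})) == ['a', 'b', 'c']
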
    def heads(self, start=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned

        """
        if start is None:
            start = nullid
        reachable = {start: 1}
        heads = {start: 1}
        startrev = self.rev(start)

        for r in xrange(startrev + 1, self.count()):
            n = self.node(r)
            for pn in self.parents(n):
                if pn in reachable:
                    reachable[n] = 1
                    heads[n] = 1
                if pn in heads:
                    del heads[pn]
        return heads.keys()

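    # The same single forward pass, restated on a toy topological order
    # (illustration only, made-up data): a node counts as a head until some
    # later node names it as a parent.
    def _example_find_heads(order, parents):
        heads = dict.fromkeys(order, 1)
        for n in order:
            for p in parents.get(n, []):
                heads.pop(p, None)    # a parent can no longer be a head
        return sorted(heads)

    # a -> b -> c plus a -> d leaves two childless nodes: c and d
    assert _example_find_heads(['a', 'b', 'c', 'd'],
                               {'b': ['a'], 'c': ['b'], 'd': ['a']}) == ['c', 'd']
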
    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in range(p + 1, self.count()):
            n = self.node(r)
            for pn in self.parents(n):
                if pn == node:
                    c.append(n)
                    continue
                elif pn == nullid:
                    continue
        return c

    def lookup(self, id):
        """locate a node based on revision number or subset of hex nodeid"""
        try:
            rev = int(id)
            if str(rev) != id: raise ValueError
            if rev < 0: rev = self.count() + rev
            if rev < 0 or rev >= self.count(): raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            c = []
            for n in self.nodemap:
                if hex(n).startswith(id):
                    c.append(n)
            if len(c) > 1: raise RevlogError(_("Ambiguous identifier"))
            if len(c) < 1: raise RevlogError(_("No match found"))
            return c[0]

        return None

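    # The prefix rule used by lookup above, restated over plain hex strings
    # (illustration only; the data is made up).
    def _example_lookup_prefix(prefix, nodes):
        matches = [n for n in nodes if n.startswith(prefix)]
        if len(matches) > 1:
            raise ValueError("Ambiguous identifier")
        if not matches:
            raise ValueError("No match found")
        return matches[0]

    assert _example_lookup_prefix("d4", ["a1b2", "a1c3", "d4e5"]) == "d4e5"
    # "a1" would be ambiguous here: two nodes share that prefix
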
    def diff(self, a, b):
        """return a delta between two revisions"""
        return mdiff.textdiff(a, b)

    def patches(self, t, pl):
        """apply a list of patches to a string"""
        return mdiff.patches(t, pl)

    def chunk(self, rev, df=None, cachelen=4096):
        start, length = self.start(rev), self.length(rev)
        inline = self.inlinedata()
        if inline:
            start += (rev + 1) * struct.calcsize(self.indexformat)
        end = start + length
        def loadcache(df):
            cache_length = max(cachelen, length) # 4k
            if not df:
                if inline:
                    df = self.opener(self.indexfile)
                else:
                    df = self.opener(self.datafile)
            df.seek(start)
            self.chunkcache = (start, df.read(cache_length))

        if not self.chunkcache:
            loadcache(df)

        cache_start = self.chunkcache[0]
        cache_end = cache_start + len(self.chunkcache[1])
        if start >= cache_start and end <= cache_end:
            # it is cached
            offset = start - cache_start
        else:
            loadcache(df)
            offset = 0

        #def checkchunk():
        #    df = self.opener(self.datafile)
        #    df.seek(start)
        #    return df.read(length)
        #assert s == checkchunk()
        return decompress(self.chunkcache[1][offset:offset + length])

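    # The cache test above in isolation (illustration only): a read is served
    # from the cache iff the requested [start, end) range lies inside the
    # cached window; otherwise the caller reloads the window.
    def _example_cache_hit(start, length, cache_start, cache_data):
        end = start + length
        cache_end = cache_start + len(cache_data)
        if cache_start <= start and end <= cache_end:
            offset = start - cache_start
            return cache_data[offset:offset + length]
        return None

    assert _example_cache_hit(10, 4, 8, "x" * 16) == "xxxx"   # inside window
    assert _example_cache_hit(30, 4, 8, "x" * 16) is None     # past the end
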
    def delta(self, node):
        """return or calculate a delta between a node and its predecessor"""
        r = self.rev(node)
        return self.revdiff(r - 1, r)

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        b1 = self.base(rev1)
        b2 = self.base(rev2)
        if b1 == b2 and rev1 + 1 == rev2:
            return self.chunk(rev2)
        else:
            return self.diff(self.revision(self.node(rev1)),
                             self.revision(self.node(rev2)))

    def revision(self, node):
        """return an uncompressed revision of a given node"""
        if node == nullid: return ""
        if self.cache and self.cache[0] == node: return self.cache[2]

        # look up what we need to read
        text = None
        rev = self.rev(node)
        base = self.base(rev)

        if self.inlinedata():
            # we probably have the whole chunk cached
            df = None
        else:
            df = self.opener(self.datafile)

        # do we have useful data cached?
        if self.cache and self.cache[1] >= base and self.cache[1] < rev:
            base = self.cache[1]
            text = self.cache[2]
            self.loadindex(base, rev + 1)
        else:
            self.loadindex(base, rev + 1)
            text = self.chunk(base, df=df)

        bins = []
        for r in xrange(base + 1, rev + 1):
            bins.append(self.chunk(r, df=df))

        text = self.patches(text, bins)

        p1, p2 = self.parents(node)
        if node != hash(text, p1, p2):
            raise RevlogError(_("integrity check failed on %s:%d")
                              % (self.datafile, rev))

        self.cache = (node, rev, text)
        return text

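    # The shape of the reconstruction above, with a stand-in "patch" that
    # simply replaces the text (illustration only; the real revlog applies
    # binary deltas via mdiff.patches).
    def _example_reconstruct(base_text, deltas, apply_patch):
        text = base_text
        for d in deltas:
            text = apply_patch(text, d)   # replay the delta chain in order
        return text

    assert _example_reconstruct("v0", ["v1", "v2"],
                                lambda old, new: new) == "v2"
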
    def checkinlinesize(self, tr, fp=None):
        if not self.inlinedata():
            return
        if not fp:
            fp = self.opener(self.indexfile, 'r')
            fp.seek(0, 2)
        size = fp.tell()
        if size < 131072:
            return
        trinfo = tr.find(self.indexfile)
        if trinfo == None:
            raise RevlogError(_("%s not found in the transaction")
                              % self.indexfile)

        trindex = trinfo[2]
        dataoff = self.start(trindex)

        tr.add(self.datafile, dataoff)
        df = self.opener(self.datafile, 'w')
        calc = struct.calcsize(self.indexformat)
        for r in xrange(self.count()):
            start = self.start(r) + (r + 1) * calc
            length = self.length(r)
            fp.seek(start)
            d = fp.read(length)
            df.write(d)
        fp.close()
        df.close()
        fp = self.opener(self.indexfile, 'w', atomictemp=True)
        self.version &= ~(REVLOGNGINLINEDATA)
        if self.count():
            x = self.index[0]
            e = struct.pack(self.indexformat, *x)[4:]
            l = struct.pack(versionformat, self.version)
            fp.write(l)
            fp.write(e)

        for i in xrange(1, self.count()):
            x = self.index[i]
            e = struct.pack(self.indexformat, *x)
            fp.write(e)

        # if we don't call rename, the temp file will never replace the
        # real index
        fp.rename()

        tr.replace(self.indexfile, trindex * calc)
        self.chunkcache = None

    def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        d - an optional precomputed delta
        """
        if text is None: text = ""
        if p1 is None: p1 = self.tip()
        if p2 is None: p2 = nullid

        node = hash(text, p1, p2)

        if node in self.nodemap:
            return node

        n = self.count()
        t = n - 1

        if n:
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)
            if not d:
                prev = self.revision(self.tip())
                d = self.diff(prev, str(text))
            data = compress(d)
            l = len(data[1]) + len(data[0])
            dist = end - start + l

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if not n or dist > len(text) * 2:
            data = compress(text)
            l = len(data[1]) + len(data[0])
            base = n
        else:
            base = self.base(t)

        offset = 0
        if t >= 0:
            offset = self.end(t)

        if self.version == 0:
            e = (offset, l, base, link, p1, p2, node)
        else:
            e = (self.offset_type(offset, 0), l, len(text),
                 base, link, self.rev(p1), self.rev(p2), node)

        self.index.append(e)
        self.nodemap[node] = n
        entry = struct.pack(self.indexformat, *e)

        if not self.inlinedata():
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, n * len(entry))
            f = self.opener(self.datafile, "a")
            if data[0]:
                f.write(data[0])
            f.write(data[1])
            f.close()
            f = self.opener(self.indexfile, "a")
        else:
            f = self.opener(self.indexfile, "a+")
            f.seek(0, 2)
            transaction.add(self.indexfile, f.tell(), self.count() - 1)

        if len(self.index) == 1 and self.version != 0:
            l = struct.pack(versionformat, self.version)
            f.write(l)
            entry = entry[4:]

        f.write(entry)

        if self.inlinedata():
            f.write(data[0])
            f.write(data[1])
            self.checkinlinesize(transaction, f)

        self.cache = (node, n, text)
        return node

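    # The "dist > len(text) * 2" rule above in isolation (illustration only;
    # the sizes are made up). A new full version is stored once replaying the
    # delta chain would cost more than about twice the plain text.
    def _example_store_full_version(chain_bytes, text_len):
        return chain_bytes > text_len * 2

    assert not _example_store_full_version(150, 100)  # keep appending deltas
    assert _example_store_full_version(250, 100)      # store a full version
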
    def ancestor(self, a, b):
        """calculate the least common ancestor of nodes a and b"""

        # start with some short cuts for the linear cases
        if a == b:
            return a
        ra = self.rev(a)
        rb = self.rev(b)
        if ra < rb:
            last = b
            first = a
        else:
            last = a
            first = b

        # reachable won't include stop in the list, so we have to use a parent
        reachable = self.reachable(last, stop=self.parents(first)[0])
        if first in reachable:
            return first

        # calculate the distance of every node from root
        dist = {nullid: 0}
        for i in xrange(self.count()):
            n = self.node(i)
            p1, p2 = self.parents(n)
            dist[n] = max(dist[p1], dist[p2]) + 1

        # traverse ancestors in order of decreasing distance from root
        def ancestors(node):
            # we store negative distances because heap returns smallest member
            h = [(-dist[node], node)]
            seen = {}
            while h:
                d, n = heapq.heappop(h)
                if n not in seen:
                    seen[n] = 1
                    yield (-d, n)
                    for p in self.parents(n):
                        heapq.heappush(h, (-dist[p], p))

        def generations(node):
            sg, s = None, {}
            for g, n in ancestors(node):
                if g != sg:
                    if sg:
                        yield sg, s
                    sg, s = g, {n: 1}
                else:
                    s[n] = 1
            yield sg, s

        x = generations(a)
        y = generations(b)
        gx = x.next()
        gy = y.next()

        # increment each ancestor list until it is closer to root than
        # the other, or they match
        while 1:
            #print "ancestor gen %s %s" % (gx[0], gy[0])
            if gx[0] == gy[0]:
                # find the intersection
                i = [n for n in gx[1] if n in gy[1]]
                if i:
                    return i[0]
                else:
                    #print "next"
                    gy = y.next()
                    gx = x.next()
            elif gx[0] < gy[0]:
                #print "next y"
                gy = y.next()
            else:
                #print "next x"
                gx = x.next()

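    # A much simpler statement of the goal above, on a toy DAG (illustration
    # only): gather all ancestors of each node, then pick the common one
    # farthest from the root. The real method walks both ancestor sets
    # lazily, generation by generation, instead of materializing them.
    def _example_ancestors_of(n, parents):
        out, stack = {n: 1}, [n]
        while stack:
            for p in parents.get(stack.pop(), []):
                if p not in out:
                    out[p] = 1
                    stack.append(p)
        return out

    _parents = {'b': ['a'], 'c': ['a'], 'd': ['b', 'c']}
    _dist = {'a': 0, 'b': 1, 'c': 1, 'd': 2}
    _common = [n for n in _example_ancestors_of('b', _parents)
               if n in _example_ancestors_of('c', _parents)]
    assert max(_common, key=_dist.get) == 'a'   # the least common ancestor
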
    def group(self, nodelist, lookup, infocollect=None):
        """calculate a delta group

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
        have this parent as it has all history before these
        changesets. parent is parent[0].
        """
        revs = [self.rev(n) for n in nodelist]

        # if we don't have any revisions touched by these changesets, bail
        if not revs:
            yield changegroup.closechunk()
            return

        # add the parent of the first rev
        p = self.parents(self.node(revs[0]))[0]
        revs.insert(0, self.rev(p))

        # build deltas
        for d in xrange(0, len(revs) - 1):
            a, b = revs[d], revs[d + 1]
            nb = self.node(b)

            if infocollect is not None:
                infocollect(nb)

            d = self.revdiff(a, b)
            p = self.parents(nb)
            meta = nb + p[0] + p[1] + lookup(nb)
            yield changegroup.genchunk("%s%s" % (meta, d))

        yield changegroup.closechunk()

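    # The layout of one delta chunk as built above (illustration only; the
    # node values are dummies): an 80-byte header of four 20-byte nodes --
    # node, p1, p2, linked changeset -- followed by the delta itself.
    import struct as _struct
    _node = _p1 = _p2 = _cs = "\0" * 20
    _chunk = "%s%s" % (_node + _p1 + _p2 + _cs, "example delta bytes")
    assert _struct.unpack("20s20s20s20s", _chunk[:80]) == (_node, _p1, _p2, _cs)
    assert _chunk[80:] == "example delta bytes"
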
    def addgroup(self, revs, linkmapper, transaction, unique=0):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.
        """

        #track the base of the current delta log
        r = self.count()
        t = r - 1
        node = None

        base = prev = -1
        start = end = textlen = 0
        if r:
            end = self.end(t)

        ifh = self.opener(self.indexfile, "a+")
        ifh.seek(0, 2)
        transaction.add(self.indexfile, ifh.tell(), self.count())
        if self.inlinedata():
            dfh = None
        else:
            transaction.add(self.datafile, end)
            dfh = self.opener(self.datafile, "a")

        # loop through our set of deltas
        chain = None
        for chunk in revs:
            node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                # if unique:
                #    raise RevlogError(_("already have %s") % hex(node[:4]))
                chain = node
                continue
            delta = chunk[80:]

            for p in (p1, p2):
                if not p in self.nodemap:
                    raise RevlogError(_("unknown parent %s") % short(p))

            if not chain:
                # retrieve the parent revision of the delta chain
                chain = p1
                if not chain in self.nodemap:
                    raise RevlogError(_("unknown base %s") % short(chain[:4]))

            # full versions are inserted when the needed deltas become
            # comparable to the uncompressed text or when the previous
            # version is not the one we have a delta against. We use
            # the size of the previous full rev as a proxy for the
            # current size.

            if chain == prev:
                tempd = compress(delta)
                cdelta = tempd[0] + tempd[1]
                textlen = mdiff.patchedsize(textlen, delta)

            if chain != prev or (end - start + len(cdelta)) > textlen * 2:
                # flush our writes here so we can read it in revision
                if dfh:
                    dfh.flush()
                ifh.flush()
                text = self.revision(chain)
                text = self.patches(text, [delta])
                chk = self.addrevision(text, transaction, link, p1, p2)
                if chk != node:
                    raise RevlogError(_("consistency error adding group"))
                textlen = len(text)
            else:
                if self.version == 0:
                    e = (end, len(cdelta), base, link, p1, p2, node)
                else:
                    e = (self.offset_type(end, 0), len(cdelta), textlen, base,
                         link, self.rev(p1), self.rev(p2), node)
                self.index.append(e)
                self.nodemap[node] = r
                if self.inlinedata():
                    ifh.write(struct.pack(self.indexformat, *e))
                    ifh.write(cdelta)
                    self.checkinlinesize(transaction, ifh)
                    if not self.inlinedata():
                        dfh = self.opener(self.datafile, "a")
                        ifh = self.opener(self.indexfile, "a")
                else:
                    if not dfh:
                        # addrevision switched from inline to conventional
                        # reopen the index
                        dfh = self.opener(self.datafile, "a")
                        ifh = self.opener(self.indexfile, "a")
                    dfh.write(cdelta)
                    ifh.write(struct.pack(self.indexformat, *e))

            t, r, chain, prev = r, r + 1, node, node
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)

        if node is None:
            raise RevlogError(_("group to be added is empty"))
        return node

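    # The parent check above, stated alone (illustration only, toy data):
    # every incoming delta must name parents we already know.
    def _example_check_parents(p1, p2, nodemap):
        for p in (p1, p2):
            if p not in nodemap:
                raise ValueError("unknown parent %s" % p)

    _example_check_parents('n0', 'n1', {'n0': 0, 'n1': 1})   # accepted
    try:
        _example_check_parents('n0', 'n9', {'n0': 0, 'n1': 1})
    except ValueError:
        pass                                                 # rejected
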
    def strip(self, rev, minlink):
        if self.count() == 0 or rev >= self.count():
            return

        if isinstance(self.index, lazyindex):
            self.loadindexmap()

        # When stripping away a revision, we need to make sure it
        # does not actually belong to an older changeset.
        # The minlink parameter defines the oldest revision
        # we're allowed to strip away.
        while minlink > self.index[rev][-4]:
            rev += 1
            if rev >= self.count():
                return

        # first truncate the files on disk
        end = self.start(rev)
        if not self.inlinedata():
            df = self.opener(self.datafile, "a")
            df.truncate(end)
            end = rev * struct.calcsize(self.indexformat)
        else:
            end += rev * struct.calcsize(self.indexformat)

        indexf = self.opener(self.indexfile, "a")
        indexf.truncate(end)

        # then reset internal state in memory to forget those revisions
        self.cache = None
        self.chunkcache = None
        for x in xrange(rev, self.count()):
            del self.nodemap[self.node(x)]

        del self.index[rev:]

    def checksize(self):
        expected = 0
        if self.count():
            expected = self.end(self.count() - 1)

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            dd = actual - expected
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            s = struct.calcsize(self.indexformat)
            i = actual / s
            di = actual - (i * s)
            if self.inlinedata():
                databytes = 0
                for r in xrange(self.count()):
                    databytes += self.length(r)
                dd = 0
                di = actual - self.count() * s - databytes
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

@@ -1,155 +1,155 @@
# sshrepo.py - ssh repository proxy class for mercurial
#
# Copyright 2005 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from node import *
from remoterepo import *
from i18n import gettext as _
from demandload import *
demandload(globals(), "hg os re stat")

class sshrepository(remoterepository):
    def __init__(self, ui, path):
        self.url = path
        self.ui = ui

        m = re.match(r'ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?', path)
        if not m:
            raise hg.RepoError(_("couldn't parse destination %s") % path)

        self.user = m.group(2)
        self.host = m.group(3)
        self.port = m.group(5)
        self.path = m.group(7) or "."

        args = self.user and ("%s@%s" % (self.user, self.host)) or self.host
        args = self.port and ("%s -p %s") % (args, self.port) or args

        sshcmd = self.ui.config("ui", "ssh", "ssh")
        remotecmd = self.ui.config("ui", "remotecmd", "hg")
        cmd = '%s %s "%s -R %s serve --stdio"'
        cmd = cmd % (sshcmd, args, remotecmd, self.path)

        ui.note('running %s\n' % cmd)
        self.pipeo, self.pipei, self.pipee = os.popen3(cmd, 'b')

        # skip any noise generated by remote shell
        r = self.do_cmd("between", pairs=("%s-%s" % ("0"*40, "0"*40)))
        l1 = ""
        l2 = "dummy"
        max_noise = 500
        while l2 and max_noise:
            l2 = r.readline()
            self.readerr()
            if l1 == "1\n" and l2 == "\n":
                break
            if l1:
                ui.debug(_("remote: "), l1)
            l1 = l2
            max_noise -= 1
        else:
            if l1:
                ui.debug(_("remote: "), l1)
            raise hg.RepoError(_("no response from remote hg"))

    def readerr(self):
        while 1:
-            size = os.fstat(self.pipee.fileno())[stat.ST_SIZE]
+            size = util.fstat(self.pipee).st_size
            if size == 0: break
            l = self.pipee.readline()
            if not l: break
            self.ui.status(_("remote: "), l)

    def __del__(self):
        try:
            self.pipeo.close()
            self.pipei.close()
            # read the error descriptor until EOF
            for l in self.pipee:
                self.ui.status(_("remote: "), l)
            self.pipee.close()
        except:
            pass

    def dev(self):
        return -1

    def do_cmd(self, cmd, **args):
        self.ui.debug(_("sending %s command\n") % cmd)
        self.pipeo.write("%s\n" % cmd)
        for k, v in args.items():
            self.pipeo.write("%s %d\n" % (k, len(v)))
            self.pipeo.write(v)
        self.pipeo.flush()

        return self.pipei

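    # The framing written by do_cmd above, reproduced on an in-memory buffer
    # instead of the ssh pipe (illustration only): the command name, then one
    # "key length" line plus the raw value for each argument.
    def _example_frame_cmd(cmd, **args):
        import cStringIO
        buf = cStringIO.StringIO()
        buf.write("%s\n" % cmd)
        for k, v in args.items():
            buf.write("%s %d\n" % (k, len(v)))
            buf.write(v)
        return buf.getvalue()

    assert _example_frame_cmd("between", pairs="a-b") == "between\npairs 3\na-b"
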
    def call(self, cmd, **args):
        r = self.do_cmd(cmd, **args)
        l = r.readline()
        self.readerr()
        try:
            l = int(l)
        except:
            raise hg.RepoError(_("unexpected response '%s'") % l)
        return r.read(l)

    def lock(self):
        self.call("lock")
        return remotelock(self)

    def unlock(self):
        self.call("unlock")

    def heads(self):
        d = self.call("heads")
        try:
            return map(bin, d[:-1].split(" "))
        except:
            raise hg.RepoError(_("unexpected response '%s'") % (d[:400] + "..."))

    def branches(self, nodes):
        n = " ".join(map(hex, nodes))
        d = self.call("branches", nodes=n)
        try:
            br = [tuple(map(bin, b.split(" "))) for b in d.splitlines()]
            return br
        except:
            raise hg.RepoError(_("unexpected response '%s'") % (d[:400] + "..."))

    def between(self, pairs):
        n = "\n".join(["-".join(map(hex, p)) for p in pairs])
        d = self.call("between", pairs=n)
        try:
            p = [l and map(bin, l.split(" ")) or [] for l in d.splitlines()]
            return p
        except:
            raise hg.RepoError(_("unexpected response '%s'") % (d[:400] + "..."))

    def changegroup(self, nodes, kind):
        n = " ".join(map(hex, nodes))
        f = self.do_cmd("changegroup", roots=n)
        return self.pipei

    def addchangegroup(self, cg):
        d = self.call("addchangegroup")
        if d:
            raise hg.RepoError(_("push refused: %s") % d)

        while 1:
            d = cg.read(4096)
            if not d: break
            self.pipeo.write(d)
            self.readerr()

        self.pipeo.flush()

        self.readerr()
        l = int(self.pipei.readline())
        r = self.pipei.read(l)
        if not r:
            return 1
        return int(r)
@@ -1,845 +1,865 @@
1 """
1 """
2 util.py - Mercurial utility functions and platform specfic implementations
2 util.py - Mercurial utility functions and platform specfic implementations
3
3
4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
5
5
6 This software may be used and distributed according to the terms
6 This software may be used and distributed according to the terms
7 of the GNU General Public License, incorporated herein by reference.
7 of the GNU General Public License, incorporated herein by reference.
8
8
9 This contains helper routines that are independent of the SCM core and hide
9 This contains helper routines that are independent of the SCM core and hide
10 platform-specific details from the core.
10 platform-specific details from the core.
11 """
11 """
12
12
13 import os, errno
13 import os, errno
14 from i18n import gettext as _
14 from i18n import gettext as _
15 from demandload import *
15 from demandload import *
16 demandload(globals(), "cStringIO errno popen2 re shutil sys tempfile")
16 demandload(globals(), "cStringIO errno popen2 re shutil sys tempfile")
17 demandload(globals(), "threading time")
17 demandload(globals(), "threading time")
18
18
19 def pipefilter(s, cmd):
19 def pipefilter(s, cmd):
20 '''filter string S through command CMD, returning its output'''
20 '''filter string S through command CMD, returning its output'''
21 (pout, pin) = popen2.popen2(cmd, -1, 'b')
21 (pout, pin) = popen2.popen2(cmd, -1, 'b')
22 def writer():
22 def writer():
23 try:
23 try:
24 pin.write(s)
24 pin.write(s)
25 pin.close()
25 pin.close()
26 except IOError, inst:
26 except IOError, inst:
27 if inst.errno != errno.EPIPE:
27 if inst.errno != errno.EPIPE:
28 raise
28 raise
29
29
30 # we should use select instead on UNIX, but this will work on most
30 # we should use select instead on UNIX, but this will work on most
31 # systems, including Windows
31 # systems, including Windows
32 w = threading.Thread(target=writer)
32 w = threading.Thread(target=writer)
33 w.start()
33 w.start()
34 f = pout.read()
34 f = pout.read()
35 pout.close()
35 pout.close()
36 w.join()
36 w.join()
37 return f
37 return f
38
38
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hgfin')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hgfout')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if code: raise Abort(_("command '%s' failed: %s") %
                             (cmd, explain_exit(code)))
        return open(outname, 'rb').read()
    finally:
        try:
            if inname: os.unlink(inname)
        except: pass
        try:
            if outname: os.unlink(outname)
        except: pass

filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }

def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for name, fn in filtertable.iteritems():
        if cmd.startswith(name):
            return fn(s, cmd[len(name):].lstrip())
    return pipefilter(s, cmd)

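# An illustration of the dispatch above (not part of util.py). The commands
# are examples and assume a Unix-like shell with tr(1) and sed(1) available;
# a bare command falls through to pipefilter.
def _example_filter_usage():
    assert filter("some text\n", "pipe: tr a-z A-Z") == "SOME TEXT\n"
    assert filter("some text\n", "tr a-z A-Z") == "SOME TEXT\n"
    assert filter("x\n", "tempfile: sed s/x/y/ INFILE > OUTFILE") == "y\n"
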
def find_in_path(name, path, default=None):
    '''find name in search path. path can be string (will be split
    with os.pathsep), or iterable thing that returns strings. if name
    found, return path to name. else return default.'''
    if isinstance(path, str):
        path = path.split(os.pathsep)
    for p in path:
        p_name = os.path.join(p, name)
        if os.path.exists(p_name):
            return p_name
    return default

def patch(strip, patchname, ui):
    """apply the patch <patchname> to the working directory.
    a list of patched files is returned"""
    patcher = find_in_path('gpatch', os.environ.get('PATH', ''), 'patch')
    fp = os.popen('"%s" -p%d < "%s"' % (patcher, strip, patchname))
    files = {}
    for line in fp:
        line = line.rstrip()
        ui.status("%s\n" % line)
        if line.startswith('patching file '):
            pf = parse_patch_output(line)
            files.setdefault(pf, 1)
    code = fp.close()
    if code:
        raise Abort(_("patch command failed: %s") % explain_exit(code)[0])
    return files.keys()

def binary(s):
    """return true if a string is binary data using diff's heuristic"""
    if s and '\0' in s[:4096]:
        return True
    return False

def unique(g):
    """return the unique elements of iterable g"""
    seen = {}
    for f in g:
        if f not in seen:
            seen[f] = 1
            yield f

class Abort(Exception):
    """Raised if a command needs to print an error and exit."""

def always(fn): return True
def never(fn): return False

128 """Split a string into an optional pattern kind prefix and the
128 """Split a string into an optional pattern kind prefix and the
129 actual pattern."""
129 actual pattern."""
130 for prefix in 're', 'glob', 'path', 'relglob', 'relpath', 'relre':
130 for prefix in 're', 'glob', 'path', 'relglob', 'relpath', 'relre':
131 if name.startswith(prefix + ':'): return name.split(':', 1)
131 if name.startswith(prefix + ':'): return name.split(':', 1)
132 return dflt_pat, name
132 return dflt_pat, name
133
133
134 def globre(pat, head='^', tail='$'):
134 def globre(pat, head='^', tail='$'):
135 "convert a glob pattern into a regexp"
135 "convert a glob pattern into a regexp"
136 i, n = 0, len(pat)
136 i, n = 0, len(pat)
137 res = ''
137 res = ''
138 group = False
138 group = False
139 def peek(): return i < n and pat[i]
139 def peek(): return i < n and pat[i]
140 while i < n:
140 while i < n:
141 c = pat[i]
141 c = pat[i]
142 i = i+1
142 i = i+1
143 if c == '*':
143 if c == '*':
144 if peek() == '*':
144 if peek() == '*':
145 i += 1
145 i += 1
146 res += '.*'
146 res += '.*'
147 else:
147 else:
148 res += '[^/]*'
148 res += '[^/]*'
149 elif c == '?':
149 elif c == '?':
150 res += '.'
150 res += '.'
151 elif c == '[':
151 elif c == '[':
152 j = i
152 j = i
153 if j < n and pat[j] in '!]':
153 if j < n and pat[j] in '!]':
154 j += 1
154 j += 1
155 while j < n and pat[j] != ']':
155 while j < n and pat[j] != ']':
156 j += 1
156 j += 1
157 if j >= n:
157 if j >= n:
158 res += '\\['
158 res += '\\['
159 else:
159 else:
160 stuff = pat[i:j].replace('\\','\\\\')
160 stuff = pat[i:j].replace('\\','\\\\')
161 i = j + 1
161 i = j + 1
162 if stuff[0] == '!':
162 if stuff[0] == '!':
163 stuff = '^' + stuff[1:]
163 stuff = '^' + stuff[1:]
164 elif stuff[0] == '^':
164 elif stuff[0] == '^':
165 stuff = '\\' + stuff
165 stuff = '\\' + stuff
166 res = '%s[%s]' % (res, stuff)
166 res = '%s[%s]' % (res, stuff)
167 elif c == '{':
167 elif c == '{':
168 group = True
168 group = True
169 res += '(?:'
169 res += '(?:'
170 elif c == '}' and group:
170 elif c == '}' and group:
171 res += ')'
171 res += ')'
172 group = False
172 group = False
173 elif c == ',' and group:
173 elif c == ',' and group:
174 res += '|'
174 res += '|'
175 elif c == '\\':
175 elif c == '\\':
176 p = peek()
176 p = peek()
177 if p:
177 if p:
178 i += 1
178 i += 1
179 res += re.escape(p)
179 res += re.escape(p)
180 else:
180 else:
181 res += re.escape(c)
181 res += re.escape(c)
182 else:
182 else:
183 res += re.escape(c)
183 res += re.escape(c)
184 return head + res + tail
184 return head + res + tail
185
185
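# [editor's sketch -- not part of this changeset; re is already
# imported by this module] '*' stops at '/', '**' crosses it, and
# '{a,b}' becomes an alternation:
assert re.match(globre('*.py'), 'setup.py')
assert not re.match(globre('*.py'), 'src/setup.py')
assert re.match(globre('**.py'), 'src/setup.py')
assert re.match(globre('{src,doc}/*'), 'doc/Makefile')
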
186 _globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
186 _globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
187
187
188 def pathto(n1, n2):
188 def pathto(n1, n2):
189 '''return the relative path from one place to another.
189 '''return the relative path from one place to another.
190 this returns a path in the form used by the local filesystem, not hg.'''
190 this returns a path in the form used by the local filesystem, not hg.'''
191 if not n1: return localpath(n2)
191 if not n1: return localpath(n2)
192 a, b = n1.split('/'), n2.split('/')
192 a, b = n1.split('/'), n2.split('/')
193 a.reverse()
193 a.reverse()
194 b.reverse()
194 b.reverse()
195 while a and b and a[-1] == b[-1]:
195 while a and b and a[-1] == b[-1]:
196 a.pop()
196 a.pop()
197 b.pop()
197 b.pop()
198 b.reverse()
198 b.reverse()
199 return os.sep.join((['..'] * len(a)) + b)
199 return os.sep.join((['..'] * len(a)) + b)
200
200
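# [editor's sketch -- not part of this changeset; '/' as os.sep
# assumed] climb out of n1, then descend into n2:
assert pathto('foo/bar', 'foo/baz/qux') == '../baz/qux'
assert pathto('foo/bar', 'other') == '../../other'
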
201 def canonpath(root, cwd, myname):
201 def canonpath(root, cwd, myname):
202 """return the canonical path of myname, given cwd and root"""
202 """return the canonical path of myname, given cwd and root"""
203 if root == os.sep:
203 if root == os.sep:
204 rootsep = os.sep
204 rootsep = os.sep
205 else:
205 else:
206 rootsep = root + os.sep
206 rootsep = root + os.sep
207 name = myname
207 name = myname
208 if not os.path.isabs(name):
208 if not os.path.isabs(name):
209 name = os.path.join(root, cwd, name)
209 name = os.path.join(root, cwd, name)
210 name = os.path.normpath(name)
210 name = os.path.normpath(name)
211 if name.startswith(rootsep):
211 if name.startswith(rootsep):
212 name = name[len(rootsep):]
212 name = name[len(rootsep):]
213 audit_path(name)
213 audit_path(name)
214 return pconvert(name)
214 return pconvert(name)
215 elif name == root:
215 elif name == root:
216 return ''
216 return ''
217 else:
217 else:
218 # Determine whether `name' is in the hierarchy at or beneath `root',
218 # Determine whether `name' is in the hierarchy at or beneath `root',
219 # by iterating name=dirname(name) until that causes no change (can't
219 # by iterating name=dirname(name) until that causes no change (can't
220 # check name == '/', because that doesn't work on windows). For each
220 # check name == '/', because that doesn't work on windows). For each
221 # `name', compare dev/inode numbers. If they match, the list `rel'
221 # `name', compare dev/inode numbers. If they match, the list `rel'
222 # holds the reversed list of components making up the relative file
222 # holds the reversed list of components making up the relative file
223 # name we want.
223 # name we want.
224 root_st = os.stat(root)
224 root_st = os.stat(root)
225 rel = []
225 rel = []
226 while True:
226 while True:
227 try:
227 try:
228 name_st = os.stat(name)
228 name_st = os.stat(name)
229 except OSError:
229 except OSError:
230 break
230 break
231 if os.path.samestat(name_st, root_st):
231 if os.path.samestat(name_st, root_st):
232 rel.reverse()
232 rel.reverse()
233 name = os.path.join(*rel)
233 name = os.path.join(*rel)
234 audit_path(name)
234 audit_path(name)
235 return pconvert(name)
235 return pconvert(name)
236 dirname, basename = os.path.split(name)
236 dirname, basename = os.path.split(name)
237 rel.append(basename)
237 rel.append(basename)
238 if dirname == name:
238 if dirname == name:
239 break
239 break
240 name = dirname
240 name = dirname
241
241
242 raise Abort('%s not under root' % myname)
242 raise Abort('%s not under root' % myname)
243
243
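# [editor's sketch -- not part of this changeset; posix paths, and
# '/repo' is a hypothetical root] names inside the root reduce to a
# root-relative, '/'-separated path; names outside raise Abort:
assert canonpath('/repo', 'src', 'main.c') == 'src/main.c'
assert canonpath('/repo', '', '/repo') == ''
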
244 def matcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
244 def matcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
245 return _matcher(canonroot, cwd, names, inc, exc, head, 'glob', src)
245 return _matcher(canonroot, cwd, names, inc, exc, head, 'glob', src)
246
246
247 def cmdmatcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
247 def cmdmatcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
248 if os.name == 'nt':
248 if os.name == 'nt':
249 dflt_pat = 'glob'
249 dflt_pat = 'glob'
250 else:
250 else:
251 dflt_pat = 'relpath'
251 dflt_pat = 'relpath'
252 return _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src)
252 return _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src)
253
253
254 def _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src):
254 def _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src):
255 """build a function to match a set of file patterns
255 """build a function to match a set of file patterns
256
256
257 arguments:
257 arguments:
258 canonroot - the canonical root of the tree you're matching against
258 canonroot - the canonical root of the tree you're matching against
259 cwd - the current working directory, if relevant
259 cwd - the current working directory, if relevant
260 names - patterns to find
260 names - patterns to find
261 inc - patterns to include
261 inc - patterns to include
262 exc - patterns to exclude
262 exc - patterns to exclude
263 head - a regex to prepend to patterns to control whether a match is rooted
263 head - a regex to prepend to patterns to control whether a match is rooted
264
264
265 a pattern is one of:
265 a pattern is one of:
266 'glob:<rooted glob>'
266 'glob:<rooted glob>'
267 're:<rooted regexp>'
267 're:<rooted regexp>'
268 'path:<rooted path>'
268 'path:<rooted path>'
269 'relglob:<relative glob>'
269 'relglob:<relative glob>'
270 'relpath:<relative path>'
270 'relpath:<relative path>'
271 'relre:<relative regexp>'
271 'relre:<relative regexp>'
272 '<rooted path or regexp>'
272 '<rooted path or regexp>'
273
273
274 returns:
274 returns:
275 a 3-tuple containing
275 a 3-tuple containing
276 - list of explicit non-pattern names passed in
276 - list of explicit non-pattern names passed in
277 - a bool match(filename) function
277 - a bool match(filename) function
278 - a bool indicating if any patterns were passed in
278 - a bool indicating if any patterns were passed in
279
279
280 todo:
280 todo:
281 make head regex a rooted bool
281 make head regex a rooted bool
282 """
282 """
283
283
284 def contains_glob(name):
284 def contains_glob(name):
285 for c in name:
285 for c in name:
286 if c in _globchars: return True
286 if c in _globchars: return True
287 return False
287 return False
288
288
289 def regex(kind, name, tail):
289 def regex(kind, name, tail):
290 '''convert a pattern into a regular expression'''
290 '''convert a pattern into a regular expression'''
291 if kind == 're':
291 if kind == 're':
292 return name
292 return name
293 elif kind == 'path':
293 elif kind == 'path':
294 return '^' + re.escape(name) + '(?:/|$)'
294 return '^' + re.escape(name) + '(?:/|$)'
295 elif kind == 'relglob':
295 elif kind == 'relglob':
296 return head + globre(name, '(?:|.*/)', tail)
296 return head + globre(name, '(?:|.*/)', tail)
297 elif kind == 'relpath':
297 elif kind == 'relpath':
298 return head + re.escape(name) + tail
298 return head + re.escape(name) + tail
299 elif kind == 'relre':
299 elif kind == 'relre':
300 if name.startswith('^'):
300 if name.startswith('^'):
301 return name
301 return name
302 return '.*' + name
302 return '.*' + name
303 return head + globre(name, '', tail)
303 return head + globre(name, '', tail)
304
304
305 def matchfn(pats, tail):
305 def matchfn(pats, tail):
306 """build a matching function from a set of patterns"""
306 """build a matching function from a set of patterns"""
307 if not pats:
307 if not pats:
308 return
308 return
309 matches = []
309 matches = []
310 for k, p in pats:
310 for k, p in pats:
311 try:
311 try:
312 pat = '(?:%s)' % regex(k, p, tail)
312 pat = '(?:%s)' % regex(k, p, tail)
313 matches.append(re.compile(pat).match)
313 matches.append(re.compile(pat).match)
314 except re.error:
314 except re.error:
315 if src: raise Abort("%s: invalid pattern (%s): %s" % (src, k, p))
315 if src: raise Abort("%s: invalid pattern (%s): %s" % (src, k, p))
316 else: raise Abort("invalid pattern (%s): %s" % (k, p))
316 else: raise Abort("invalid pattern (%s): %s" % (k, p))
317
317
318 def buildfn(text):
318 def buildfn(text):
319 for m in matches:
319 for m in matches:
320 r = m(text)
320 r = m(text)
321 if r:
321 if r:
322 return r
322 return r
323
323
324 return buildfn
324 return buildfn
325
325
326 def globprefix(pat):
326 def globprefix(pat):
327 '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
327 '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
328 root = []
328 root = []
329 for p in pat.split(os.sep):
329 for p in pat.split(os.sep):
330 if contains_glob(p): break
330 if contains_glob(p): break
331 root.append(p)
331 root.append(p)
332 return '/'.join(root)
332 return '/'.join(root)
333
333
334 pats = []
334 pats = []
335 files = []
335 files = []
336 roots = []
336 roots = []
337 for kind, name in [patkind(p, dflt_pat) for p in names]:
337 for kind, name in [patkind(p, dflt_pat) for p in names]:
338 if kind in ('glob', 'relpath'):
338 if kind in ('glob', 'relpath'):
339 name = canonpath(canonroot, cwd, name)
339 name = canonpath(canonroot, cwd, name)
340 if name == '':
340 if name == '':
341 kind, name = 'glob', '**'
341 kind, name = 'glob', '**'
342 if kind in ('glob', 'path', 're'):
342 if kind in ('glob', 'path', 're'):
343 pats.append((kind, name))
343 pats.append((kind, name))
344 if kind == 'glob':
344 if kind == 'glob':
345 root = globprefix(name)
345 root = globprefix(name)
346 if root: roots.append(root)
346 if root: roots.append(root)
347 elif kind == 'relpath':
347 elif kind == 'relpath':
348 files.append((kind, name))
348 files.append((kind, name))
349 roots.append(name)
349 roots.append(name)
350
350
351 patmatch = matchfn(pats, '$') or always
351 patmatch = matchfn(pats, '$') or always
352 filematch = matchfn(files, '(?:/|$)') or always
352 filematch = matchfn(files, '(?:/|$)') or always
353 incmatch = always
353 incmatch = always
354 if inc:
354 if inc:
355 incmatch = matchfn(map(patkind, inc), '(?:/|$)')
355 incmatch = matchfn(map(patkind, inc), '(?:/|$)')
356 excmatch = lambda fn: False
356 excmatch = lambda fn: False
357 if exc:
357 if exc:
358 excmatch = matchfn(map(patkind, exc), '(?:/|$)')
358 excmatch = matchfn(map(patkind, exc), '(?:/|$)')
359
359
360 return (roots,
360 return (roots,
361 lambda fn: (incmatch(fn) and not excmatch(fn) and
361 lambda fn: (incmatch(fn) and not excmatch(fn) and
362 (fn.endswith('/') or
362 (fn.endswith('/') or
363 (not pats and not files) or
363 (not pats and not files) or
364 (pats and patmatch(fn)) or
364 (pats and patmatch(fn)) or
365 (files and filematch(fn)))),
365 (files and filematch(fn)))),
366 (inc or exc or (pats and pats != [('glob', '**')])) and True)
366 (inc or exc or (pats and pats != [('glob', '**')])) and True)
367
367
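# [editor's sketch -- not part of this changeset; posix paths, and
# '/repo' is a hypothetical root] the returned triple gives the walk
# roots, a match predicate, and whether any real patterns were given:
roots, match, anypats = matcher('/repo', names=['glob:src/**.py'])
assert roots == ['src'] and anypats
assert match('src/a/b.py') and not match('README')
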
368 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
368 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
369 '''enhanced shell command execution.
369 '''enhanced shell command execution.
370 run with environment maybe modified, maybe in different dir.
370 run with environment maybe modified, maybe in different dir.
371
371
372 if command fails and onerr is None, return status. if onerr is a
372 if command fails and onerr is None, return status. if onerr is a
373 ui object, print error message and return status, else raise onerr
373 ui object, print error message and return status, else raise onerr
374 as an exception.'''
374 as an exception.'''
375 oldenv = {}
375 oldenv = {}
376 for k in environ:
376 for k in environ:
377 oldenv[k] = os.environ.get(k)
377 oldenv[k] = os.environ.get(k)
378 if cwd is not None:
378 if cwd is not None:
379 oldcwd = os.getcwd()
379 oldcwd = os.getcwd()
380 try:
380 try:
381 for k, v in environ.iteritems():
381 for k, v in environ.iteritems():
382 os.environ[k] = str(v)
382 os.environ[k] = str(v)
383 if cwd is not None and oldcwd != cwd:
383 if cwd is not None and oldcwd != cwd:
384 os.chdir(cwd)
384 os.chdir(cwd)
385 rc = os.system(cmd)
385 rc = os.system(cmd)
386 if rc and onerr:
386 if rc and onerr:
387 errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
387 errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
388 explain_exit(rc)[0])
388 explain_exit(rc)[0])
389 if errprefix:
389 if errprefix:
390 errmsg = '%s: %s' % (errprefix, errmsg)
390 errmsg = '%s: %s' % (errprefix, errmsg)
391 try:
391 try:
392 onerr.warn(errmsg + '\n')
392 onerr.warn(errmsg + '\n')
393 except AttributeError:
393 except AttributeError:
394 raise onerr(errmsg)
394 raise onerr(errmsg)
395 return rc
395 return rc
396 finally:
396 finally:
397 for k, v in oldenv.iteritems():
397 for k, v in oldenv.iteritems():
398 if v is None:
398 if v is None:
399 del os.environ[k]
399 del os.environ[k]
400 else:
400 else:
401 os.environ[k] = v
401 os.environ[k] = v
402 if cwd is not None and oldcwd != cwd:
402 if cwd is not None and oldcwd != cwd:
403 os.chdir(oldcwd)
403 os.chdir(oldcwd)
404
404
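# [editor's sketch -- not part of this changeset] environment and cwd
# are restored afterwards even on failure; onerr turns a nonzero exit
# into an exception:
rc = system('make -q', environ={'LC_ALL': 'C'}, cwd='/tmp')
try:
    system('false', onerr=Abort, errprefix='hook')
except Abort, inst:
    pass            # 'hook: false exited with status 1'
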
405 def rename(src, dst):
405 def rename(src, dst):
406 """forcibly rename a file"""
406 """forcibly rename a file"""
407 try:
407 try:
408 os.rename(src, dst)
408 os.rename(src, dst)
409 except:
409 except OSError, err:
410 os.unlink(dst)
410 # on windows, rename to existing file is not allowed, so we
411 # must delete destination first. but if file is open, unlink
412 # schedules it for delete but does not delete it. rename
413 # happens immediately even for open files, so we create
414 # temporary file, delete it, rename destination to that name,
415 # then delete that. then rename is safe to do.
416 fd, temp = tempfile.mkstemp(dir=os.path.dirname(dst) or '.')
417 os.close(fd)
418 os.unlink(temp)
419 os.rename(dst, temp)
420 os.unlink(temp)
411 os.rename(src, dst)
421 os.rename(src, dst)
412
422
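# [editor's sketch -- not part of this changeset; writes scratch files
# in cwd] the forced rename replaces an existing destination, and on
# windows the temp-file dance above keeps it safe for open files:
fp = posixfile('a.new', 'wb')
fp.write('new')
fp.close()
rename('a.new', 'a.txt')      # succeeds whether or not a.txt exists
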
413 def unlink(f):
423 def unlink(f):
414 """unlink and remove the directory if it is empty"""
424 """unlink and remove the directory if it is empty"""
415 os.unlink(f)
425 os.unlink(f)
416 # try removing directories that might now be empty
426 # try removing directories that might now be empty
417 try:
427 try:
418 os.removedirs(os.path.dirname(f))
428 os.removedirs(os.path.dirname(f))
419 except OSError:
429 except OSError:
420 pass
430 pass
421
431
422 def copyfiles(src, dst, hardlink=None):
432 def copyfiles(src, dst, hardlink=None):
423 """Copy a directory tree using hardlinks if possible"""
433 """Copy a directory tree using hardlinks if possible"""
424
434
425 if hardlink is None:
435 if hardlink is None:
426 hardlink = (os.stat(src).st_dev ==
436 hardlink = (os.stat(src).st_dev ==
427 os.stat(os.path.dirname(dst)).st_dev)
437 os.stat(os.path.dirname(dst)).st_dev)
428
438
429 if os.path.isdir(src):
439 if os.path.isdir(src):
430 os.mkdir(dst)
440 os.mkdir(dst)
431 for name in os.listdir(src):
441 for name in os.listdir(src):
432 srcname = os.path.join(src, name)
442 srcname = os.path.join(src, name)
433 dstname = os.path.join(dst, name)
443 dstname = os.path.join(dst, name)
434 copyfiles(srcname, dstname, hardlink)
444 copyfiles(srcname, dstname, hardlink)
435 else:
445 else:
436 if hardlink:
446 if hardlink:
437 try:
447 try:
438 os_link(src, dst)
448 os_link(src, dst)
439 except (IOError, OSError):
449 except (IOError, OSError):
440 hardlink = False
450 hardlink = False
441 shutil.copy(src, dst)
451 shutil.copy(src, dst)
442 else:
452 else:
443 shutil.copy(src, dst)
453 shutil.copy(src, dst)
444
454
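# [editor's sketch -- not part of this changeset; hypothetical paths]
# clone-style copy: hardlink when source and destination parent share
# a device, otherwise fall back to real copies file by file:
copyfiles('/repo/.hg', '/backup/repo.hg')
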
445 def audit_path(path):
455 def audit_path(path):
446 """Abort if path contains dangerous components"""
456 """Abort if path contains dangerous components"""
447 parts = os.path.normcase(path).split(os.sep)
457 parts = os.path.normcase(path).split(os.sep)
448 if (os.path.splitdrive(path)[0] or parts[0] in ('.hg', '')
458 if (os.path.splitdrive(path)[0] or parts[0] in ('.hg', '')
449 or os.pardir in parts):
459 or os.pardir in parts):
450 raise Abort(_("path contains illegal component: %s\n") % path)
460 raise Abort(_("path contains illegal component: %s\n") % path)
451
461
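# [editor's sketch -- not part of this changeset] anything that could
# escape the repository or touch its metadata must abort:
audit_path('src/main.c')                  # fine
for bad in ('../etc/passwd', '.hg/hgrc', '/abs/path'):
    try:
        audit_path(bad)
        raise AssertionError('%s not rejected' % bad)
    except Abort:
        pass
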
452 def opener(base, audit=True):
453 """
454 return a function that opens files relative to base
455
456 this function is used to hide the details of COW semantics and
457 remote file access from higher level code.
458 """
459 p = base
460 audit_p = audit
461
462 def mktempcopy(name):
463 d, fn = os.path.split(name)
464 fd, temp = tempfile.mkstemp(prefix=fn, dir=d)
465 fp = os.fdopen(fd, "wb")
466 try:
467 fp.write(file(name, "rb").read())
468 except:
469 try: os.unlink(temp)
470 except: pass
471 raise
472 fp.close()
473 st = os.lstat(name)
474 os.chmod(temp, st.st_mode)
475 return temp
476
477 class atomictempfile(file):
478 """the file will only be copied when rename is called"""
479 def __init__(self, name, mode):
480 self.__name = name
481 self.temp = mktempcopy(name)
482 file.__init__(self, self.temp, mode)
483 def rename(self):
484 if not self.closed:
485 file.close(self)
486 rename(self.temp, self.__name)
487 def __del__(self):
488 if not self.closed:
489 try:
490 os.unlink(self.temp)
491 except: pass
492 file.close(self)
493
494 class atomicfile(atomictempfile):
495 """the file will only be copied on close"""
496 def __init__(self, name, mode):
497 atomictempfile.__init__(self, name, mode)
498 def close(self):
499 self.rename()
500 def __del__(self):
501 self.rename()
502
503 def o(path, mode="r", text=False, atomic=False, atomictemp=False):
504 if audit_p:
505 audit_path(path)
506 f = os.path.join(p, path)
507
508 if not text:
509 mode += "b" # for that other OS
510
511 if mode[0] != "r":
512 try:
513 nlink = nlinks(f)
514 except OSError:
515 d = os.path.dirname(f)
516 if not os.path.isdir(d):
517 os.makedirs(d)
518 else:
519 if atomic:
520 return atomicfile(f, mode)
521 elif atomictemp:
522 return atomictempfile(f, mode)
523 if nlink > 1:
524 rename(mktempcopy(f), f)
525 return file(f, mode)
526
527 return o
528
529 def _makelock_file(info, pathname):
462 def _makelock_file(info, pathname):
530 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
463 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
531 os.write(ld, info)
464 os.write(ld, info)
532 os.close(ld)
465 os.close(ld)
533
466
534 def _readlock_file(pathname):
467 def _readlock_file(pathname):
535 return file(pathname).read()
468 return posixfile(pathname).read()
536
469
537 def nlinks(pathname):
470 def nlinks(pathname):
538 """Return number of hardlinks for the given file."""
471 """Return number of hardlinks for the given file."""
539 return os.stat(pathname).st_nlink
472 return os.stat(pathname).st_nlink
540
473
541 if hasattr(os, 'link'):
474 if hasattr(os, 'link'):
542 os_link = os.link
475 os_link = os.link
543 else:
476 else:
544 def os_link(src, dst):
477 def os_link(src, dst):
545 raise OSError(0, _("Hardlinks not supported"))
478 raise OSError(0, _("Hardlinks not supported"))
546
479
480 def fstat(fp):
481 '''stat file object that may not have fileno method.'''
482 try:
483 return os.fstat(fp.fileno())
484 except AttributeError:
485 return os.stat(fp.name)
486
487 posixfile = file
488
547 # Platform specific variants
489 # Platform specific variants
548 if os.name == 'nt':
490 if os.name == 'nt':
549 demandload(globals(), "msvcrt")
491 demandload(globals(), "msvcrt")
550 nulldev = 'NUL:'
492 nulldev = 'NUL:'
551
493
552 class winstdout:
494 class winstdout:
553 '''stdout on windows misbehaves if sent through a pipe'''
495 '''stdout on windows misbehaves if sent through a pipe'''
554
496
555 def __init__(self, fp):
497 def __init__(self, fp):
556 self.fp = fp
498 self.fp = fp
557
499
558 def __getattr__(self, key):
500 def __getattr__(self, key):
559 return getattr(self.fp, key)
501 return getattr(self.fp, key)
560
502
561 def close(self):
503 def close(self):
562 try:
504 try:
563 self.fp.close()
505 self.fp.close()
564 except: pass
506 except: pass
565
507
566 def write(self, s):
508 def write(self, s):
567 try:
509 try:
568 return self.fp.write(s)
510 return self.fp.write(s)
569 except IOError, inst:
511 except IOError, inst:
570 if inst.errno != 0: raise
512 if inst.errno != 0: raise
571 self.close()
513 self.close()
572 raise IOError(errno.EPIPE, 'Broken pipe')
514 raise IOError(errno.EPIPE, 'Broken pipe')
573
515
574 sys.stdout = winstdout(sys.stdout)
516 sys.stdout = winstdout(sys.stdout)
575
517
576 def system_rcpath():
518 def system_rcpath():
577 try:
519 try:
578 return system_rcpath_win32()
520 return system_rcpath_win32()
579 except:
521 except:
580 return [r'c:\mercurial\mercurial.ini']
522 return [r'c:\mercurial\mercurial.ini']
581
523
582 def os_rcpath():
524 def os_rcpath():
583 '''return default os-specific hgrc search path'''
525 '''return default os-specific hgrc search path'''
584 return system_rcpath() + [os.path.join(os.path.expanduser('~'),
526 return system_rcpath() + [os.path.join(os.path.expanduser('~'),
585 'mercurial.ini')]
527 'mercurial.ini')]
586
528
587 def parse_patch_output(output_line):
529 def parse_patch_output(output_line):
588 """parses the output produced by patch and returns the file name"""
530 """parses the output produced by patch and returns the file name"""
589 pf = output_line[14:]
531 pf = output_line[14:]
590 if pf[0] == '`':
532 if pf[0] == '`':
591 pf = pf[1:-1] # Remove the quotes
533 pf = pf[1:-1] # Remove the quotes
592 return pf
534 return pf
593
535
594 def testpid(pid):
536 def testpid(pid):
595 '''return False if pid dead, True if running or not known'''
537 '''return False if pid dead, True if running or not known'''
596 return True
538 return True
597
539
598 def is_exec(f, last):
540 def is_exec(f, last):
599 return last
541 return last
600
542
601 def set_exec(f, mode):
543 def set_exec(f, mode):
602 pass
544 pass
603
545
604 def set_binary(fd):
546 def set_binary(fd):
605 msvcrt.setmode(fd.fileno(), os.O_BINARY)
547 msvcrt.setmode(fd.fileno(), os.O_BINARY)
606
548
607 def pconvert(path):
549 def pconvert(path):
608 return path.replace("\\", "/")
550 return path.replace("\\", "/")
609
551
610 def localpath(path):
552 def localpath(path):
611 return path.replace('/', '\\')
553 return path.replace('/', '\\')
612
554
613 def normpath(path):
555 def normpath(path):
614 return pconvert(os.path.normpath(path))
556 return pconvert(os.path.normpath(path))
615
557
616 makelock = _makelock_file
558 makelock = _makelock_file
617 readlock = _readlock_file
559 readlock = _readlock_file
618
560
619 def explain_exit(code):
561 def explain_exit(code):
620 return _("exited with status %d") % code, code
562 return _("exited with status %d") % code, code
621
563
622 try:
564 try:
623 # override functions with win32 versions if possible
565 # override functions with win32 versions if possible
624 from util_win32 import *
566 from util_win32 import *
625 except ImportError:
567 except ImportError:
626 pass
568 pass
627
569
628 else:
570 else:
629 nulldev = '/dev/null'
571 nulldev = '/dev/null'
630
572
631 def rcfiles(path):
573 def rcfiles(path):
632 rcs = [os.path.join(path, 'hgrc')]
574 rcs = [os.path.join(path, 'hgrc')]
633 rcdir = os.path.join(path, 'hgrc.d')
575 rcdir = os.path.join(path, 'hgrc.d')
634 try:
576 try:
635 rcs.extend([os.path.join(rcdir, f) for f in os.listdir(rcdir)
577 rcs.extend([os.path.join(rcdir, f) for f in os.listdir(rcdir)
636 if f.endswith(".rc")])
578 if f.endswith(".rc")])
637 except OSError, inst: pass
579 except OSError, inst: pass
638 return rcs
580 return rcs
639
581
640 def os_rcpath():
582 def os_rcpath():
641 '''return default os-specific hgrc search path'''
583 '''return default os-specific hgrc search path'''
642 path = []
584 path = []
643 if len(sys.argv) > 0:
585 if len(sys.argv) > 0:
644 path.extend(rcfiles(os.path.dirname(sys.argv[0]) +
586 path.extend(rcfiles(os.path.dirname(sys.argv[0]) +
645 '/../etc/mercurial'))
587 '/../etc/mercurial'))
646 path.extend(rcfiles('/etc/mercurial'))
588 path.extend(rcfiles('/etc/mercurial'))
647 path.append(os.path.expanduser('~/.hgrc'))
589 path.append(os.path.expanduser('~/.hgrc'))
648 path = [os.path.normpath(f) for f in path]
590 path = [os.path.normpath(f) for f in path]
649 return path
591 return path
650
592
651 def parse_patch_output(output_line):
593 def parse_patch_output(output_line):
652 """parses the output produced by patch and returns the file name"""
594 """parses the output produced by patch and returns the file name"""
653 pf = output_line[14:]
595 pf = output_line[14:]
654 if pf.startswith("'") and pf.endswith("'") and pf.find(" ") >= 0:
596 if pf.startswith("'") and pf.endswith("'") and pf.find(" ") >= 0:
655 pf = pf[1:-1] # Remove the quotes
597 pf = pf[1:-1] # Remove the quotes
656 return pf
598 return pf
657
599
658 def is_exec(f, last):
600 def is_exec(f, last):
659 """check whether a file is executable"""
601 """check whether a file is executable"""
660 return (os.stat(f).st_mode & 0100 != 0)
602 return (os.stat(f).st_mode & 0100 != 0)
661
603
662 def set_exec(f, mode):
604 def set_exec(f, mode):
663 s = os.stat(f).st_mode
605 s = os.stat(f).st_mode
664 if (s & 0100 != 0) == mode:
606 if (s & 0100 != 0) == mode:
665 return
607 return
666 if mode:
608 if mode:
667 # Turn on +x for every +r bit when making a file executable
609 # Turn on +x for every +r bit when making a file executable
668 # and obey umask.
610 # and obey umask.
669 umask = os.umask(0)
611 umask = os.umask(0)
670 os.umask(umask)
612 os.umask(umask)
671 os.chmod(f, s | (s & 0444) >> 2 & ~umask)
613 os.chmod(f, s | (s & 0444) >> 2 & ~umask)
672 else:
614 else:
673 os.chmod(f, s & 0666)
615 os.chmod(f, s & 0666)
674
616
675 def set_binary(fd):
617 def set_binary(fd):
676 pass
618 pass
677
619
678 def pconvert(path):
620 def pconvert(path):
679 return path
621 return path
680
622
681 def localpath(path):
623 def localpath(path):
682 return path
624 return path
683
625
684 normpath = os.path.normpath
626 normpath = os.path.normpath
685
627
686 def makelock(info, pathname):
628 def makelock(info, pathname):
687 try:
629 try:
688 os.symlink(info, pathname)
630 os.symlink(info, pathname)
689 except OSError, why:
631 except OSError, why:
690 if why.errno == errno.EEXIST:
632 if why.errno == errno.EEXIST:
691 raise
633 raise
692 else:
634 else:
693 _makelock_file(info, pathname)
635 _makelock_file(info, pathname)
694
636
695 def readlock(pathname):
637 def readlock(pathname):
696 try:
638 try:
697 return os.readlink(pathname)
639 return os.readlink(pathname)
698 except OSError, why:
640 except OSError, why:
699 if why.errno == errno.EINVAL:
641 if why.errno == errno.EINVAL:
700 return _readlock_file(pathname)
642 return _readlock_file(pathname)
701 else:
643 else:
702 raise
644 raise
703
645
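# [editor's sketch -- not part of this changeset; creates 'lock' in
# cwd] the symlink carries the lock info in its target; readlock falls
# back to a plain file for filesystems without symlinks:
makelock('pid:1234', 'lock')
assert readlock('lock') == 'pid:1234'
os.unlink('lock')
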
704 def testpid(pid):
646 def testpid(pid):
705 '''return False if pid dead, True if running or not sure'''
647 '''return False if pid dead, True if running or not sure'''
706 try:
648 try:
707 os.kill(pid, 0)
649 os.kill(pid, 0)
708 return True
650 return True
709 except OSError, inst:
651 except OSError, inst:
710 return inst.errno != errno.ESRCH
652 return inst.errno != errno.ESRCH
711
653
712 def explain_exit(code):
654 def explain_exit(code):
713 """return a 2-tuple (desc, code) describing a process's status"""
655 """return a 2-tuple (desc, code) describing a process's status"""
714 if os.WIFEXITED(code):
656 if os.WIFEXITED(code):
715 val = os.WEXITSTATUS(code)
657 val = os.WEXITSTATUS(code)
716 return _("exited with status %d") % val, val
658 return _("exited with status %d") % val, val
717 elif os.WIFSIGNALED(code):
659 elif os.WIFSIGNALED(code):
718 val = os.WTERMSIG(code)
660 val = os.WTERMSIG(code)
719 return _("killed by signal %d") % val, val
661 return _("killed by signal %d") % val, val
720 elif os.WIFSTOPPED(code):
662 elif os.WIFSTOPPED(code):
721 val = os.WSTOPSIG(code)
663 val = os.WSTOPSIG(code)
722 return _("stopped by signal %d") % val, val
664 return _("stopped by signal %d") % val, val
723 raise ValueError(_("invalid exit code"))
665 raise ValueError(_("invalid exit code"))
724
666
667 def opener(base, audit=True):
668 """
669 return a function that opens files relative to base
670
671 this function is used to hide the details of COW semantics and
672 remote file access from higher level code.
673 """
674 p = base
675 audit_p = audit
676
677 def mktempcopy(name):
678 d, fn = os.path.split(name)
679 fd, temp = tempfile.mkstemp(prefix=fn, dir=d)
680 os.close(fd)
681 fp = posixfile(temp, "wb")
682 try:
683 fp.write(posixfile(name, "rb").read())
684 except:
685 try: os.unlink(temp)
686 except: pass
687 raise
688 fp.close()
689 st = os.lstat(name)
690 os.chmod(temp, st.st_mode)
691 return temp
692
693 class atomictempfile(posixfile):
694 """the file will only be copied when rename is called"""
695 def __init__(self, name, mode):
696 self.__name = name
697 self.temp = mktempcopy(name)
698 posixfile.__init__(self, self.temp, mode)
699 def rename(self):
700 if not self.closed:
701 posixfile.close(self)
702 rename(self.temp, self.__name)
703 def __del__(self):
704 if not self.closed:
705 try:
706 os.unlink(self.temp)
707 except: pass
708 posixfile.close(self)
709
710 class atomicfile(atomictempfile):
711 """the file will only be copied on close"""
712 def __init__(self, name, mode):
713 atomictempfile.__init__(self, name, mode)
714 def close(self):
715 self.rename()
716 def __del__(self):
717 self.rename()
718
719 def o(path, mode="r", text=False, atomic=False, atomictemp=False):
720 if audit_p:
721 audit_path(path)
722 f = os.path.join(p, path)
723
724 if not text:
725 mode += "b" # for that other OS
726
727 if mode[0] != "r":
728 try:
729 nlink = nlinks(f)
730 except OSError:
731 d = os.path.dirname(f)
732 if not os.path.isdir(d):
733 os.makedirs(d)
734 else:
735 if atomic:
736 return atomicfile(f, mode)
737 elif atomictemp:
738 return atomictempfile(f, mode)
739 if nlink > 1:
740 rename(mktempcopy(f), f)
741 return posixfile(f, mode)
742
743 return o
744
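# [editor's sketch -- not part of this changeset; '/repo/.hg' is a
# hypothetical base] plain writes create missing directories; with
# atomictemp the data only appears under the real name on rename():
wopen = opener('/repo/.hg')
fp = wopen('store/journal', 'w')          # makes store/ as needed
fp.write('entry\n')
fp.close()
fp = wopen('store/journal', 'w', atomictemp=True)
fp.write('rewritten\n')
fp.rename()                               # now visible to readers
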
725 class chunkbuffer(object):
745 class chunkbuffer(object):
726 """Allow arbitrary sized chunks of data to be efficiently read from an
746 """Allow arbitrary sized chunks of data to be efficiently read from an
727 iterator over chunks of arbitrary size."""
747 iterator over chunks of arbitrary size."""
728
748
729 def __init__(self, in_iter, targetsize = 2**16):
749 def __init__(self, in_iter, targetsize = 2**16):
730 """in_iter is the iterator that's iterating over the input chunks.
750 """in_iter is the iterator that's iterating over the input chunks.
731 targetsize is how big a buffer to try to maintain."""
751 targetsize is how big a buffer to try to maintain."""
732 self.in_iter = iter(in_iter)
752 self.in_iter = iter(in_iter)
733 self.buf = ''
753 self.buf = ''
734 self.targetsize = int(targetsize)
754 self.targetsize = int(targetsize)
735 if self.targetsize <= 0:
755 if self.targetsize <= 0:
736 raise ValueError(_("targetsize must be greater than 0, was %d") %
756 raise ValueError(_("targetsize must be greater than 0, was %d") %
737 targetsize)
757 targetsize)
738 self.iterempty = False
758 self.iterempty = False
739
759
740 def fillbuf(self):
760 def fillbuf(self):
741 """Ignore target size; read every chunk from iterator until empty."""
761 """Ignore target size; read every chunk from iterator until empty."""
742 if not self.iterempty:
762 if not self.iterempty:
743 collector = cStringIO.StringIO()
763 collector = cStringIO.StringIO()
744 collector.write(self.buf)
764 collector.write(self.buf)
745 for ch in self.in_iter:
765 for ch in self.in_iter:
746 collector.write(ch)
766 collector.write(ch)
747 self.buf = collector.getvalue()
767 self.buf = collector.getvalue()
748 self.iterempty = True
768 self.iterempty = True
749
769
750 def read(self, l):
770 def read(self, l):
751 """Read L bytes of data from the iterator of chunks of data.
771 """Read L bytes of data from the iterator of chunks of data.
752 Returns less than L bytes if the iterator runs dry."""
772 Returns less than L bytes if the iterator runs dry."""
753 if l > len(self.buf) and not self.iterempty:
773 if l > len(self.buf) and not self.iterempty:
754 # Clamp to a multiple of self.targetsize
774 # Clamp to a multiple of self.targetsize
755 targetsize = self.targetsize * ((l // self.targetsize) + 1)
775 targetsize = self.targetsize * ((l // self.targetsize) + 1)
756 collector = cStringIO.StringIO()
776 collector = cStringIO.StringIO()
757 collector.write(self.buf)
777 collector.write(self.buf)
758 collected = len(self.buf)
778 collected = len(self.buf)
759 for chunk in self.in_iter:
779 for chunk in self.in_iter:
760 collector.write(chunk)
780 collector.write(chunk)
761 collected += len(chunk)
781 collected += len(chunk)
762 if collected >= targetsize:
782 if collected >= targetsize:
763 break
783 break
764 if collected < targetsize:
784 if collected < targetsize:
765 self.iterempty = True
785 self.iterempty = True
766 self.buf = collector.getvalue()
786 self.buf = collector.getvalue()
767 s, self.buf = self.buf[:l], buffer(self.buf, l)
787 s, self.buf = self.buf[:l], buffer(self.buf, l)
768 return s
788 return s
769
789
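# [editor's sketch -- not part of this changeset] fixed-size reads over
# chunks of arbitrary size, with a short read once the iterator dries up:
cb = chunkbuffer(iter(['abc', 'defgh', 'ij']))
assert cb.read(4) == 'abcd'
assert cb.read(100) == 'efghij'
assert cb.read(10) == ''
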
770 def filechunkiter(f, size = 65536):
790 def filechunkiter(f, size = 65536):
771 """Create a generator that produces all the data in the file size
791 """Create a generator that produces all the data in the file size
772 (default 65536) bytes at a time. Chunks may be less than size
792 (default 65536) bytes at a time. Chunks may be less than size
773 bytes if the chunk is the last chunk in the file, or the file is a
793 bytes if the chunk is the last chunk in the file, or the file is a
774 socket or some other type of file that sometimes reads less data
794 socket or some other type of file that sometimes reads less data
775 than is requested."""
795 than is requested."""
776 s = f.read(size)
796 s = f.read(size)
777 while len(s) > 0:
797 while len(s) > 0:
778 yield s
798 yield s
779 s = f.read(size)
799 s = f.read(size)
780
800
781 def makedate():
801 def makedate():
782 lt = time.localtime()
802 lt = time.localtime()
783 if lt[8] == 1 and time.daylight:
803 if lt[8] == 1 and time.daylight:
784 tz = time.altzone
804 tz = time.altzone
785 else:
805 else:
786 tz = time.timezone
806 tz = time.timezone
787 return time.mktime(lt), tz
807 return time.mktime(lt), tz
788
808
789 def datestr(date=None, format='%a %b %d %H:%M:%S %Y', timezone=True):
809 def datestr(date=None, format='%a %b %d %H:%M:%S %Y', timezone=True):
790 """represent a (unixtime, offset) tuple as a localized time.
810 """represent a (unixtime, offset) tuple as a localized time.
791 unixtime is seconds since the epoch, and offset is the time zone's
811 unixtime is seconds since the epoch, and offset is the time zone's
792 number of seconds away from UTC. if timezone is false, do not
812 number of seconds away from UTC. if timezone is false, do not
793 append time zone to string."""
813 append time zone to string."""
794 t, tz = date or makedate()
814 t, tz = date or makedate()
795 s = time.strftime(format, time.gmtime(float(t) - tz))
815 s = time.strftime(format, time.gmtime(float(t) - tz))
796 if timezone:
816 if timezone:
797 s += " %+03d%02d" % (-tz / 3600, ((-tz % 3600) / 60))
817 s += " %+03d%02d" % (-tz / 3600, ((-tz % 3600) / 60))
798 return s
818 return s
799
819
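# [editor's sketch -- not part of this changeset; C locale assumed for
# the day/month names] tz is seconds west of UTC, so -3600 is UTC+1:
when = (1136246400.0, -3600)              # 2006-01-03 00:00:00 UTC
assert datestr(when) == 'Tue Jan 03 01:00:00 2006 +0100'
assert datestr(when, timezone=False) == 'Tue Jan 03 01:00:00 2006'
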
800 def shortuser(user):
820 def shortuser(user):
801 """Return a short representation of a user name or email address."""
821 """Return a short representation of a user name or email address."""
802 f = user.find('@')
822 f = user.find('@')
803 if f >= 0:
823 if f >= 0:
804 user = user[:f]
824 user = user[:f]
805 f = user.find('<')
825 f = user.find('<')
806 if f >= 0:
826 if f >= 0:
807 user = user[f+1:]
827 user = user[f+1:]
808 return user
828 return user
809
829
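# [editor's sketch -- not part of this changeset]
assert shortuser('Vadim Gelfer <vadim.gelfer@gmail.com>') == 'vadim.gelfer'
assert shortuser('mpm@selenic.com') == 'mpm'
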
810 def walkrepos(path):
830 def walkrepos(path):
811 '''yield every hg repository under path, recursively.'''
831 '''yield every hg repository under path, recursively.'''
812 def errhandler(err):
832 def errhandler(err):
813 if err.filename == path:
833 if err.filename == path:
814 raise err
834 raise err
815
835
816 for root, dirs, files in os.walk(path, onerror=errhandler):
836 for root, dirs, files in os.walk(path, onerror=errhandler):
817 for d in dirs:
837 for d in dirs:
818 if d == '.hg':
838 if d == '.hg':
819 yield root
839 yield root
820 dirs[:] = []
840 dirs[:] = []
821 break
841 break
822
842
823 _rcpath = None
843 _rcpath = None
824
844
825 def rcpath():
845 def rcpath():
826 '''return hgrc search path. if env var HGRCPATH is set, use it.
846 '''return hgrc search path. if env var HGRCPATH is set, use it.
827 for each item in path, if directory, use files ending in .rc,
847 for each item in path, if directory, use files ending in .rc,
828 else use item.
848 else use item.
829 make HGRCPATH empty to only look in .hg/hgrc of current repo.
849 make HGRCPATH empty to only look in .hg/hgrc of current repo.
830 if no HGRCPATH, use default os-specific path.'''
850 if no HGRCPATH, use default os-specific path.'''
831 global _rcpath
851 global _rcpath
832 if _rcpath is None:
852 if _rcpath is None:
833 if 'HGRCPATH' in os.environ:
853 if 'HGRCPATH' in os.environ:
834 _rcpath = []
854 _rcpath = []
835 for p in os.environ['HGRCPATH'].split(os.pathsep):
855 for p in os.environ['HGRCPATH'].split(os.pathsep):
836 if not p: continue
856 if not p: continue
837 if os.path.isdir(p):
857 if os.path.isdir(p):
838 for f in os.listdir(p):
858 for f in os.listdir(p):
839 if f.endswith('.rc'):
859 if f.endswith('.rc'):
840 _rcpath.append(os.path.join(p, f))
860 _rcpath.append(os.path.join(p, f))
841 else:
861 else:
842 _rcpath.append(p)
862 _rcpath.append(p)
843 else:
863 else:
844 _rcpath = os_rcpath()
864 _rcpath = os_rcpath()
845 return _rcpath
865 return _rcpath
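# [editor's sketch -- not part of this changeset; only valid before the
# first rcpath() call, since the result is cached in _rcpath]
os.environ['HGRCPATH'] = ''     # empty: read only the repo's .hg/hgrc
assert rcpath() == []
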
@@ -1,171 +1,280 b''
1 # util_win32.py - utility functions that use win32 API
1 # util_win32.py - utility functions that use win32 API
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of
6 # This software may be used and distributed according to the terms of
7 # the GNU General Public License, incorporated herein by reference.
7 # the GNU General Public License, incorporated herein by reference.
8
8
9 # Mark Hammond's win32all package allows better functionality on
9 # Mark Hammond's win32all package allows better functionality on
10 # Windows. this module overrides definitions in util.py. if not
10 # Windows. this module overrides definitions in util.py. if not
11 # available, import of this module will fail, and generic code will be
11 # available, import of this module will fail, and generic code will be
12 # used.
12 # used.
13
13
14 import win32api
14 import win32api
15
15
16 from demandload import *
16 from demandload import *
17 from i18n import gettext as _
17 from i18n import gettext as _
18 demandload(globals(), 'errno os pywintypes win32con win32file win32process')
18 demandload(globals(), 'errno os pywintypes win32con win32file win32process')
19 demandload(globals(), 'winerror')
19 demandload(globals(), 'cStringIO winerror')
20
20
21 class WinError(OSError):
21 class WinError:
22 winerror_map = {
22 winerror_map = {
23 winerror.ERROR_ACCESS_DENIED: errno.EACCES,
23 winerror.ERROR_ACCESS_DENIED: errno.EACCES,
24 winerror.ERROR_ACCOUNT_DISABLED: errno.EACCES,
24 winerror.ERROR_ACCOUNT_DISABLED: errno.EACCES,
25 winerror.ERROR_ACCOUNT_RESTRICTION: errno.EACCES,
25 winerror.ERROR_ACCOUNT_RESTRICTION: errno.EACCES,
26 winerror.ERROR_ALREADY_ASSIGNED: errno.EBUSY,
26 winerror.ERROR_ALREADY_ASSIGNED: errno.EBUSY,
27 winerror.ERROR_ALREADY_EXISTS: errno.EEXIST,
27 winerror.ERROR_ALREADY_EXISTS: errno.EEXIST,
28 winerror.ERROR_ARITHMETIC_OVERFLOW: errno.ERANGE,
28 winerror.ERROR_ARITHMETIC_OVERFLOW: errno.ERANGE,
29 winerror.ERROR_BAD_COMMAND: errno.EIO,
29 winerror.ERROR_BAD_COMMAND: errno.EIO,
30 winerror.ERROR_BAD_DEVICE: errno.ENODEV,
30 winerror.ERROR_BAD_DEVICE: errno.ENODEV,
31 winerror.ERROR_BAD_DRIVER_LEVEL: errno.ENXIO,
31 winerror.ERROR_BAD_DRIVER_LEVEL: errno.ENXIO,
32 winerror.ERROR_BAD_EXE_FORMAT: errno.ENOEXEC,
32 winerror.ERROR_BAD_EXE_FORMAT: errno.ENOEXEC,
33 winerror.ERROR_BAD_FORMAT: errno.ENOEXEC,
33 winerror.ERROR_BAD_FORMAT: errno.ENOEXEC,
34 winerror.ERROR_BAD_LENGTH: errno.EINVAL,
34 winerror.ERROR_BAD_LENGTH: errno.EINVAL,
35 winerror.ERROR_BAD_PATHNAME: errno.ENOENT,
35 winerror.ERROR_BAD_PATHNAME: errno.ENOENT,
36 winerror.ERROR_BAD_PIPE: errno.EPIPE,
36 winerror.ERROR_BAD_PIPE: errno.EPIPE,
37 winerror.ERROR_BAD_UNIT: errno.ENODEV,
37 winerror.ERROR_BAD_UNIT: errno.ENODEV,
38 winerror.ERROR_BAD_USERNAME: errno.EINVAL,
38 winerror.ERROR_BAD_USERNAME: errno.EINVAL,
39 winerror.ERROR_BROKEN_PIPE: errno.EPIPE,
39 winerror.ERROR_BROKEN_PIPE: errno.EPIPE,
40 winerror.ERROR_BUFFER_OVERFLOW: errno.ENAMETOOLONG,
40 winerror.ERROR_BUFFER_OVERFLOW: errno.ENAMETOOLONG,
41 winerror.ERROR_BUSY: errno.EBUSY,
41 winerror.ERROR_BUSY: errno.EBUSY,
42 winerror.ERROR_BUSY_DRIVE: errno.EBUSY,
42 winerror.ERROR_BUSY_DRIVE: errno.EBUSY,
43 winerror.ERROR_CALL_NOT_IMPLEMENTED: errno.ENOSYS,
43 winerror.ERROR_CALL_NOT_IMPLEMENTED: errno.ENOSYS,
44 winerror.ERROR_CANNOT_MAKE: errno.EACCES,
44 winerror.ERROR_CANNOT_MAKE: errno.EACCES,
45 winerror.ERROR_CANTOPEN: errno.EIO,
45 winerror.ERROR_CANTOPEN: errno.EIO,
46 winerror.ERROR_CANTREAD: errno.EIO,
46 winerror.ERROR_CANTREAD: errno.EIO,
47 winerror.ERROR_CANTWRITE: errno.EIO,
47 winerror.ERROR_CANTWRITE: errno.EIO,
48 winerror.ERROR_CRC: errno.EIO,
48 winerror.ERROR_CRC: errno.EIO,
49 winerror.ERROR_CURRENT_DIRECTORY: errno.EACCES,
49 winerror.ERROR_CURRENT_DIRECTORY: errno.EACCES,
50 winerror.ERROR_DEVICE_IN_USE: errno.EBUSY,
50 winerror.ERROR_DEVICE_IN_USE: errno.EBUSY,
51 winerror.ERROR_DEV_NOT_EXIST: errno.ENODEV,
51 winerror.ERROR_DEV_NOT_EXIST: errno.ENODEV,
52 winerror.ERROR_DIRECTORY: errno.EINVAL,
52 winerror.ERROR_DIRECTORY: errno.EINVAL,
53 winerror.ERROR_DIR_NOT_EMPTY: errno.ENOTEMPTY,
53 winerror.ERROR_DIR_NOT_EMPTY: errno.ENOTEMPTY,
54 winerror.ERROR_DISK_CHANGE: errno.EIO,
54 winerror.ERROR_DISK_CHANGE: errno.EIO,
55 winerror.ERROR_DISK_FULL: errno.ENOSPC,
55 winerror.ERROR_DISK_FULL: errno.ENOSPC,
56 winerror.ERROR_DRIVE_LOCKED: errno.EBUSY,
56 winerror.ERROR_DRIVE_LOCKED: errno.EBUSY,
57 winerror.ERROR_ENVVAR_NOT_FOUND: errno.EINVAL,
57 winerror.ERROR_ENVVAR_NOT_FOUND: errno.EINVAL,
58 winerror.ERROR_EXE_MARKED_INVALID: errno.ENOEXEC,
58 winerror.ERROR_EXE_MARKED_INVALID: errno.ENOEXEC,
59 winerror.ERROR_FILENAME_EXCED_RANGE: errno.ENAMETOOLONG,
59 winerror.ERROR_FILENAME_EXCED_RANGE: errno.ENAMETOOLONG,
60 winerror.ERROR_FILE_EXISTS: errno.EEXIST,
60 winerror.ERROR_FILE_EXISTS: errno.EEXIST,
61 winerror.ERROR_FILE_INVALID: errno.ENODEV,
61 winerror.ERROR_FILE_INVALID: errno.ENODEV,
62 winerror.ERROR_FILE_NOT_FOUND: errno.ENOENT,
62 winerror.ERROR_FILE_NOT_FOUND: errno.ENOENT,
63 winerror.ERROR_GEN_FAILURE: errno.EIO,
63 winerror.ERROR_GEN_FAILURE: errno.EIO,
64 winerror.ERROR_HANDLE_DISK_FULL: errno.ENOSPC,
64 winerror.ERROR_HANDLE_DISK_FULL: errno.ENOSPC,
65 winerror.ERROR_INSUFFICIENT_BUFFER: errno.ENOMEM,
65 winerror.ERROR_INSUFFICIENT_BUFFER: errno.ENOMEM,
66 winerror.ERROR_INVALID_ACCESS: errno.EACCES,
66 winerror.ERROR_INVALID_ACCESS: errno.EACCES,
67 winerror.ERROR_INVALID_ADDRESS: errno.EFAULT,
67 winerror.ERROR_INVALID_ADDRESS: errno.EFAULT,
68 winerror.ERROR_INVALID_BLOCK: errno.EFAULT,
68 winerror.ERROR_INVALID_BLOCK: errno.EFAULT,
69 winerror.ERROR_INVALID_DATA: errno.EINVAL,
69 winerror.ERROR_INVALID_DATA: errno.EINVAL,
70 winerror.ERROR_INVALID_DRIVE: errno.ENODEV,
70 winerror.ERROR_INVALID_DRIVE: errno.ENODEV,
71 winerror.ERROR_INVALID_EXE_SIGNATURE: errno.ENOEXEC,
71 winerror.ERROR_INVALID_EXE_SIGNATURE: errno.ENOEXEC,
72 winerror.ERROR_INVALID_FLAGS: errno.EINVAL,
72 winerror.ERROR_INVALID_FLAGS: errno.EINVAL,
73 winerror.ERROR_INVALID_FUNCTION: errno.ENOSYS,
73 winerror.ERROR_INVALID_FUNCTION: errno.ENOSYS,
74 winerror.ERROR_INVALID_HANDLE: errno.EBADF,
74 winerror.ERROR_INVALID_HANDLE: errno.EBADF,
75 winerror.ERROR_INVALID_LOGON_HOURS: errno.EACCES,
75 winerror.ERROR_INVALID_LOGON_HOURS: errno.EACCES,
76 winerror.ERROR_INVALID_NAME: errno.EINVAL,
76 winerror.ERROR_INVALID_NAME: errno.EINVAL,
77 winerror.ERROR_INVALID_OWNER: errno.EINVAL,
77 winerror.ERROR_INVALID_OWNER: errno.EINVAL,
78 winerror.ERROR_INVALID_PARAMETER: errno.EINVAL,
78 winerror.ERROR_INVALID_PARAMETER: errno.EINVAL,
79 winerror.ERROR_INVALID_PASSWORD: errno.EPERM,
79 winerror.ERROR_INVALID_PASSWORD: errno.EPERM,
80 winerror.ERROR_INVALID_PRIMARY_GROUP: errno.EINVAL,
80 winerror.ERROR_INVALID_PRIMARY_GROUP: errno.EINVAL,
81 winerror.ERROR_INVALID_SIGNAL_NUMBER: errno.EINVAL,
81 winerror.ERROR_INVALID_SIGNAL_NUMBER: errno.EINVAL,
82 winerror.ERROR_INVALID_TARGET_HANDLE: errno.EIO,
82 winerror.ERROR_INVALID_TARGET_HANDLE: errno.EIO,
83 winerror.ERROR_INVALID_WORKSTATION: errno.EACCES,
83 winerror.ERROR_INVALID_WORKSTATION: errno.EACCES,
84 winerror.ERROR_IO_DEVICE: errno.EIO,
84 winerror.ERROR_IO_DEVICE: errno.EIO,
85 winerror.ERROR_IO_INCOMPLETE: errno.EINTR,
85 winerror.ERROR_IO_INCOMPLETE: errno.EINTR,
86 winerror.ERROR_LOCKED: errno.EBUSY,
86 winerror.ERROR_LOCKED: errno.EBUSY,
87 winerror.ERROR_LOCK_VIOLATION: errno.EACCES,
87 winerror.ERROR_LOCK_VIOLATION: errno.EACCES,
88 winerror.ERROR_LOGON_FAILURE: errno.EACCES,
88 winerror.ERROR_LOGON_FAILURE: errno.EACCES,
89 winerror.ERROR_MAPPED_ALIGNMENT: errno.EINVAL,
89 winerror.ERROR_MAPPED_ALIGNMENT: errno.EINVAL,
90 winerror.ERROR_META_EXPANSION_TOO_LONG: errno.E2BIG,
90 winerror.ERROR_META_EXPANSION_TOO_LONG: errno.E2BIG,
91 winerror.ERROR_MORE_DATA: errno.EPIPE,
91 winerror.ERROR_MORE_DATA: errno.EPIPE,
92 winerror.ERROR_NEGATIVE_SEEK: errno.ESPIPE,
92 winerror.ERROR_NEGATIVE_SEEK: errno.ESPIPE,
93 winerror.ERROR_NOACCESS: errno.EFAULT,
93 winerror.ERROR_NOACCESS: errno.EFAULT,
94 winerror.ERROR_NONE_MAPPED: errno.EINVAL,
94 winerror.ERROR_NONE_MAPPED: errno.EINVAL,
95 winerror.ERROR_NOT_ENOUGH_MEMORY: errno.ENOMEM,
95 winerror.ERROR_NOT_ENOUGH_MEMORY: errno.ENOMEM,
96 winerror.ERROR_NOT_READY: errno.EAGAIN,
96 winerror.ERROR_NOT_READY: errno.EAGAIN,
97 winerror.ERROR_NOT_SAME_DEVICE: errno.EXDEV,
97 winerror.ERROR_NOT_SAME_DEVICE: errno.EXDEV,
98 winerror.ERROR_NO_DATA: errno.EPIPE,
98 winerror.ERROR_NO_DATA: errno.EPIPE,
99 winerror.ERROR_NO_MORE_SEARCH_HANDLES: errno.EIO,
99 winerror.ERROR_NO_MORE_SEARCH_HANDLES: errno.EIO,
100 winerror.ERROR_NO_PROC_SLOTS: errno.EAGAIN,
100 winerror.ERROR_NO_PROC_SLOTS: errno.EAGAIN,
101 winerror.ERROR_NO_SUCH_PRIVILEGE: errno.EACCES,
101 winerror.ERROR_NO_SUCH_PRIVILEGE: errno.EACCES,
102 winerror.ERROR_OPEN_FAILED: errno.EIO,
102 winerror.ERROR_OPEN_FAILED: errno.EIO,
103 winerror.ERROR_OPEN_FILES: errno.EBUSY,
103 winerror.ERROR_OPEN_FILES: errno.EBUSY,
104 winerror.ERROR_OPERATION_ABORTED: errno.EINTR,
104 winerror.ERROR_OPERATION_ABORTED: errno.EINTR,
105 winerror.ERROR_OUTOFMEMORY: errno.ENOMEM,
105 winerror.ERROR_OUTOFMEMORY: errno.ENOMEM,
106 winerror.ERROR_PASSWORD_EXPIRED: errno.EACCES,
106 winerror.ERROR_PASSWORD_EXPIRED: errno.EACCES,
107 winerror.ERROR_PATH_BUSY: errno.EBUSY,
107 winerror.ERROR_PATH_BUSY: errno.EBUSY,
108 winerror.ERROR_PATH_NOT_FOUND: errno.ENOTDIR,
108 winerror.ERROR_PATH_NOT_FOUND: errno.ENOENT,
109 winerror.ERROR_PIPE_BUSY: errno.EBUSY,
109 winerror.ERROR_PIPE_BUSY: errno.EBUSY,
110 winerror.ERROR_PIPE_CONNECTED: errno.EPIPE,
110 winerror.ERROR_PIPE_CONNECTED: errno.EPIPE,
111 winerror.ERROR_PIPE_LISTENING: errno.EPIPE,
111 winerror.ERROR_PIPE_LISTENING: errno.EPIPE,
112 winerror.ERROR_PIPE_NOT_CONNECTED: errno.EPIPE,
112 winerror.ERROR_PIPE_NOT_CONNECTED: errno.EPIPE,
113 winerror.ERROR_PRIVILEGE_NOT_HELD: errno.EACCES,
113 winerror.ERROR_PRIVILEGE_NOT_HELD: errno.EACCES,
114 winerror.ERROR_READ_FAULT: errno.EIO,
114 winerror.ERROR_READ_FAULT: errno.EIO,
115 winerror.ERROR_SEEK: errno.EIO,
115 winerror.ERROR_SEEK: errno.EIO,
116 winerror.ERROR_SEEK_ON_DEVICE: errno.ESPIPE,
116 winerror.ERROR_SEEK_ON_DEVICE: errno.ESPIPE,
117 winerror.ERROR_SHARING_BUFFER_EXCEEDED: errno.ENFILE,
117 winerror.ERROR_SHARING_BUFFER_EXCEEDED: errno.ENFILE,
118 winerror.ERROR_SHARING_VIOLATION: errno.EACCES,
118 winerror.ERROR_SHARING_VIOLATION: errno.EACCES,
119 winerror.ERROR_STACK_OVERFLOW: errno.ENOMEM,
119 winerror.ERROR_STACK_OVERFLOW: errno.ENOMEM,
120 winerror.ERROR_SWAPERROR: errno.ENOENT,
120 winerror.ERROR_SWAPERROR: errno.ENOENT,
121 winerror.ERROR_TOO_MANY_MODULES: errno.EMFILE,
121 winerror.ERROR_TOO_MANY_MODULES: errno.EMFILE,
122 winerror.ERROR_TOO_MANY_OPEN_FILES: errno.EMFILE,
122 winerror.ERROR_TOO_MANY_OPEN_FILES: errno.EMFILE,
123 winerror.ERROR_UNRECOGNIZED_MEDIA: errno.ENXIO,
123 winerror.ERROR_UNRECOGNIZED_MEDIA: errno.ENXIO,
124 winerror.ERROR_UNRECOGNIZED_VOLUME: errno.ENODEV,
124 winerror.ERROR_UNRECOGNIZED_VOLUME: errno.ENODEV,
125 winerror.ERROR_WAIT_NO_CHILDREN: errno.ECHILD,
125 winerror.ERROR_WAIT_NO_CHILDREN: errno.ECHILD,
126 winerror.ERROR_WRITE_FAULT: errno.EIO,
126 winerror.ERROR_WRITE_FAULT: errno.EIO,
127 winerror.ERROR_WRITE_PROTECT: errno.EROFS,
127 winerror.ERROR_WRITE_PROTECT: errno.EROFS,
128 }
128 }
129
129
130 def __init__(self, err):
130 def __init__(self, err):
131 self.win_errno, self.win_function, self.win_strerror = err
131 self.win_errno, self.win_function, self.win_strerror = err
132 if self.win_strerror.endswith('.'):
133 self.win_strerror = self.win_strerror[:-1]
134
135 class WinIOError(WinError, IOError):
136 def __init__(self, err, filename=None):
137 WinError.__init__(self, err)
138 IOError.__init__(self, self.winerror_map.get(self.win_errno, 0),
139 self.win_strerror)
140 self.filename = filename
141
142 class WinOSError(WinError, OSError):
143 def __init__(self, err):
144 WinError.__init__(self, err)
132 OSError.__init__(self, self.winerror_map.get(self.win_errno, 0),
145 OSError.__init__(self, self.winerror_map.get(self.win_errno, 0),
133 self.win_strerror)
146 self.win_strerror)
134
147
135 def os_link(src, dst):
148 def os_link(src, dst):
136 # NB will only succeed on NTFS
149 # NB will only succeed on NTFS
137 try:
150 try:
138 win32file.CreateHardLink(dst, src)
151 win32file.CreateHardLink(dst, src)
139 except pywintypes.error, details:
152 except pywintypes.error, details:
140 raise WinError(details)
153 raise WinOSError(details)
141
154
142 def nlinks(pathname):
155 def nlinks(pathname):
143 """Return number of hardlinks for the given file."""
156 """Return number of hardlinks for the given file."""
144 try:
157 try:
145 fh = win32file.CreateFile(pathname,
158 fh = win32file.CreateFile(pathname,
146 win32file.GENERIC_READ, win32file.FILE_SHARE_READ,
159 win32file.GENERIC_READ, win32file.FILE_SHARE_READ,
147 None, win32file.OPEN_EXISTING, 0, None)
160 None, win32file.OPEN_EXISTING, 0, None)
148 res = win32file.GetFileInformationByHandle(fh)
161 res = win32file.GetFileInformationByHandle(fh)
149 fh.Close()
162 fh.Close()
150 return res[7]
163 return res[7]
151 except pywintypes.error:
164 except pywintypes.error:
152 return os.stat(pathname).st_nlink
165 return os.stat(pathname).st_nlink
153
166
154 def testpid(pid):
167 def testpid(pid):
155 '''return True if pid is still running or unable to
168 '''return True if pid is still running or unable to
156 determine, False otherwise'''
169 determine, False otherwise'''
157 try:
170 try:
158 handle = win32api.OpenProcess(
171 handle = win32api.OpenProcess(
159 win32con.PROCESS_QUERY_INFORMATION, False, pid)
172 win32con.PROCESS_QUERY_INFORMATION, False, pid)
160 if handle:
173 if handle:
161 status = win32process.GetExitCodeProcess(handle)
174 status = win32process.GetExitCodeProcess(handle)
162 return status == win32con.STILL_ACTIVE
175 return status == win32con.STILL_ACTIVE
163 except pywintypes.error, details:
176 except pywintypes.error, details:
164 return details[0] != winerror.ERROR_INVALID_PARAMETER
177 return details[0] != winerror.ERROR_INVALID_PARAMETER
165 return True
178 return True
166
179
167 def system_rcpath_win32():
180 def system_rcpath_win32():
168 '''return default os-specific hgrc search path'''
181 '''return default os-specific hgrc search path'''
169 proc = win32api.GetCurrentProcess()
182 proc = win32api.GetCurrentProcess()
170 filename = win32process.GetModuleFileNameEx(proc, 0)
183 filename = win32process.GetModuleFileNameEx(proc, 0)
171 return [os.path.join(os.path.dirname(filename), 'mercurial.ini')]
184 return [os.path.join(os.path.dirname(filename), 'mercurial.ini')]
185
186 class posixfile(object):
187 '''file object with posix-like semantics. on windows, normal
188 files can not be deleted or renamed if they are open. must open
189 with win32file.FILE_SHARE_DELETE. this flag does not exist on
190 windows <= nt.'''
191
192 # tried to use win32file._open_osfhandle to pass fd to os.fdopen,
193 # but does not work at all. wrap win32 file api instead.
194
195 def __init__(self, name, mode='rb'):
196 access = 0
197 if 'r' in mode or '+' in mode:
198 access |= win32file.GENERIC_READ
199 if 'w' in mode or 'a' in mode:
200 access |= win32file.GENERIC_WRITE
201 if 'r' in mode:
202 creation = win32file.OPEN_EXISTING
203 elif 'a' in mode:
204 creation = win32file.OPEN_ALWAYS
205 else:
206 creation = win32file.CREATE_ALWAYS
207 try:
208 self.handle = win32file.CreateFile(name,
209 access,
210 win32file.FILE_SHARE_READ |
211 win32file.FILE_SHARE_WRITE |
212 win32file.FILE_SHARE_DELETE,
213 None,
214 creation,
215 win32file.FILE_ATTRIBUTE_NORMAL,
216 0)
217 except pywintypes.error, err:
218 raise WinIOError(err, name)
219 self.closed = False
220 self.name = name
221 self.mode = mode
222
223 def read(self, count=-1):
224 try:
225 cs = cStringIO.StringIO()
226 while count:
227 wincount = int(count)
228 if wincount == -1:
229 wincount = 1048576
230 val, data = win32file.ReadFile(self.handle, wincount)
231 if not data: break
232 cs.write(data)
233 if count != -1:
234 count -= len(data)
235 return cs.getvalue()
236 except pywintypes.error, err:
237 raise WinIOError(err)
238
239 def write(self, data):
240 try:
241 if 'a' in self.mode:
242 win32file.SetFilePointer(self.handle, 0, win32file.FILE_END)
243 nwrit = 0
244 while nwrit < len(data):
245 val, nwrit = win32file.WriteFile(self.handle, data)
246 data = data[nwrit:]
247 except pywintypes.error, err:
248 raise WinIOError(err)
249
250 def seek(self, pos, whence=0):
251 try:
252 win32file.SetFilePointer(self.handle, int(pos), whence)
253 except pywintypes.error, err:
254 raise WinIOError(err)
255
256 def tell(self):
257 try:
258 return win32file.SetFilePointer(self.handle, 0,
259 win32file.FILE_CURRENT)
260 except pywintypes.error, err:
261 raise WinIOError(err)
262
263 def close(self):
264 if not self.closed:
265 self.handle = None
266 self.closed = True
267
268 def flush(self):
269 try:
270 win32file.FlushFileBuffers(self.handle)
271 except pywintypes.error, err:
272 raise WinIOError(err)
273
274 def truncate(self, pos=0):
275 try:
276 win32file.SetFilePointer(self.handle, int(pos),
277 win32file.FILE_BEGIN)
278 win32file.SetEndOfFile(self.handle)
279 except pywintypes.error, err:
280 raise WinIOError(err)
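# [editor's sketch -- not part of this changeset; needs windows plus the
# win32all package] FILE_SHARE_DELETE is the point of this class: the
# file stays renamable while a handle to it is open:
fp = posixfile('hg.journal', 'wb')
fp.write('data')
os.rename('hg.journal', 'hg.journal.bak')   # ok even while fp is open
fp.close()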