<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE ep-patent-document PUBLIC "-//EPO//EP PATENT DOCUMENT 1.7//EN" "ep-patent-document-v1-7.dtd">
<!-- This XML data has been generated under the supervision of the European Patent Office -->
<ep-patent-document id="EP21957505A1" file="EP21957505NWA1.xml" lang="en" country="EP" doc-number="4403508" kind="A1" date-publ="20240724" status="n" dtd-version="ep-patent-document-v1-7">
<SDOBI lang="en"><B000><eptags><B001EP>ATBECHDEDKESFRGBGRITLILUNLSEMCPTIESILTLVFIROMKCYALTRBGCZEEHUPLSKBAHRIS..MTNORSMESMMAKHTNMD..........</B001EP><B005EP>J</B005EP><B007EP>0009011-RPUB02</B007EP></eptags></B000><B100><B110>4403508</B110><B120><B121>EUROPEAN PATENT APPLICATION</B121><B121EP>published in accordance with Art. 153(4) EPC</B121EP></B120><B130>A1</B130><B140><date>20240724</date></B140><B190>EP</B190></B100><B200><B210>21957505.7</B210><B220><date>20210916</date></B220><B240><B241><date>20240220</date></B241></B240><B250>ja</B250><B251EP>en</B251EP><B260>en</B260></B200><B400><B405><date>20240724</date><bnum>202430</bnum></B405><B430><date>20240724</date><bnum>202430</bnum></B430></B400><B500><B510EP><classification-ipcr sequence="1"><text>B66B  11/02        20060101AFI20230324BHEP        </text></classification-ipcr><classification-ipcr sequence="2"><text>H04R   3/00        20060101ALI20230324BHEP        </text></classification-ipcr><classification-ipcr sequence="3"><text>G10K  15/04        20060101ALI20230324BHEP        </text></classification-ipcr></B510EP><B520EP><classifications-cpc><classification-cpc sequence="1"><text>G10K  15/04        20130101 LI20230410BCEP        </text></classification-cpc><classification-cpc sequence="2"><text>H04R   3/00        20130101 LI20230410BCEP        </text></classification-cpc><classification-cpc sequence="3"><text>B66B  11/02        20130101 LI20230410BCEP        </text></classification-cpc></classifications-cpc></B520EP><B540><B541>de</B541><B542>AKUSTISCHES SYSTEM FÜR GESCHLOSSENE RÄUME</B542><B541>en</B541><B542>ACOUSTIC SYSTEM FOR CLOSED SPACES</B542><B541>fr</B541><B542>SYSTÈME ACOUSTIQUE POUR ESPACES CLOS</B542></B540><B590><B598>4</B598></B590></B500><B700><B710><B711><snm>MITSUBISHI ELECTRIC CORPORATION</snm><iid>101904247</iid><irf>008546954</irf><adr><str>7-3, Marunouchi 2-chome</str><city>Chiyoda-ku
Tokyo 100-8310</city><ctry>JP</ctry></adr></B711></B710><B720><B721><snm>FUJIWARA Susumu</snm><adr><city>Tokyo 100-8310</city><ctry>JP</ctry></adr></B721><B721><snm>TARUISHI Keigo</snm><adr><city>Tokyo 100-8310</city><ctry>JP</ctry></adr></B721><B721><snm>AIKAWA Masami</snm><adr><city>Tokyo 100-8310</city><ctry>JP</ctry></adr></B721></B720><B740><B741><snm>Mewburn Ellis LLP</snm><iid>101783151</iid><adr><str>Aurora Building
Counterslip</str><city>Bristol BS1 6BX</city><ctry>GB</ctry></adr></B741></B740></B700><B800><B840><ctry>AL</ctry><ctry>AT</ctry><ctry>BE</ctry><ctry>BG</ctry><ctry>CH</ctry><ctry>CY</ctry><ctry>CZ</ctry><ctry>DE</ctry><ctry>DK</ctry><ctry>EE</ctry><ctry>ES</ctry><ctry>FI</ctry><ctry>FR</ctry><ctry>GB</ctry><ctry>GR</ctry><ctry>HR</ctry><ctry>HU</ctry><ctry>IE</ctry><ctry>IS</ctry><ctry>IT</ctry><ctry>LI</ctry><ctry>LT</ctry><ctry>LU</ctry><ctry>LV</ctry><ctry>MC</ctry><ctry>MK</ctry><ctry>MT</ctry><ctry>NL</ctry><ctry>NO</ctry><ctry>PL</ctry><ctry>PT</ctry><ctry>RO</ctry><ctry>RS</ctry><ctry>SE</ctry><ctry>SI</ctry><ctry>SK</ctry><ctry>SM</ctry><ctry>TR</ctry></B840><B844EP><B845EP><ctry>BA</ctry></B845EP><B845EP><ctry>ME</ctry></B845EP></B844EP><B848EP><B849EP><ctry>KH</ctry></B849EP><B849EP><ctry>MA</ctry></B849EP><B849EP><ctry>MD</ctry></B849EP><B849EP><ctry>TN</ctry></B849EP></B848EP><B860><B861><dnum><anum>JP2021034018</anum></dnum><date>20210916</date></B861><B862>ja</B862></B860><B870><B871><dnum><pnum>WO2023042323</pnum></dnum><date>20230323</date><bnum>202312</bnum></B871></B870></B800></SDOBI>
<abstract id="abst" lang="en">
<p id="pa01" num="0001">An enclosed-space sound system includes a speaker system that is located in an enclosed space and that includes a speaker unit, a memory configured to store sound content, and a sound-field control unit configured to send out a sound signal based on the sound content toward the enclosed space from the speaker system, and the sound content includes a natural environmental sound that represents an environmental sound generated in nature, and a chord serial sound obtained by combining chords that include a consonance and a dissonance.<img id="iaf01" file="imgaf001.tif" wi="78" he="104" img-content="drawing" img-format="tif"/></p>
</abstract>
<description id="desc" lang="en"><!-- EPO <DP n="1"> -->
<heading id="h0001">Technical Field</heading>
<p id="p0001" num="0001">The present disclosure relates to an enclosed-space sound system for emitting a sound to an enclosed space such as an inside of an elevator car.</p>
<heading id="h0002">Background Art</heading>
<p id="p0002" num="0002">Some elevator car is provided with a speaker for providing an audio guide to a user inside the car. Furthermore, an intercom used by a user to communicate with a person outside the car in emergency situations is provided in the car. The speaker and the intercom are, for example, provided in a car operating panel.</p>
<p id="p0003" num="0003">Furthermore, some elevator that provides not only an audio guide, but also background music (BGM) in a car has been proposed (see, for example, Patent Literature 1).</p>
<p id="p0004" num="0004">The elevator described in Patent Literature 1 has a single speaker that is provided in the car and gives a guide announcement for passenger and background music (BGM). The elevator has a microphone that is provided inside the car, outside the car, in a hoistway, or in an elevator landing area. The microphone is used to measure background noise around a place where the microphone is provided, and a microphone measurement result is used for adjustment of an announcement sound volume. Note that the background noise is noise present in a certain place even in a case where main noise has stopped in the place. Furthermore, the elevator described in Patent Literature 1 has a BGM sound volume automatic adjustment device that adjusts a sound volume of BGM emitted from the speaker provided in the car. The BGM sound volume automatic adjustment device obtains information on the elevator<!-- EPO <DP n="2"> --> and information on a building where the elevator is installed from an information center via an elevator control device or a communication control device. The BGM sound volume automatic adjustment device finds a corresponding BGM sound volume from a sound volume adjustment map set in advance on the basis of the obtained elevator information and building information and sets a sound volume of BGM emitted from the speaker provided in the car.</p>
<p id="p0005" num="0005">In general, it is required to keep some degree of sealability and quietness in an internal space of an elevator car. The same applies to an internal space of a public transport vehicle such as a train, a bus, and a taxi and a waiting space such as a waiting room of a hospital and a pharmacy. In such a special narrow enclosed space different from a general residential space, a user is together with strangers and therefore finds it hard to have even conversation. As a result, many users feel "uncomfortable" and "unpleasant", which lead to stress.</p>
<p id="p0006" num="0006">Therefore, Patent Literature 1 proposes playing music as background music (BGM) from the speaker provided in the car. In Patent Literature 1, a sound volume of the BGM is adjusted on the basis of the elevator information (e.g., a car capacity (the number of people that the car accommodates)) and building information (e.g., an intended purpose of a building) such that the BGM does not become unpleasant to the ears of a user of the elevator.</p>
<heading id="h0003">Citation List</heading>
<heading id="h0004">Patent Literature</heading>
<p id="p0007" num="0007">Patent Literature 1: <patcit id="pcit0001" dnum="JP2010222127A"><text>Japanese Unexamined Patent Application Publication No. 2010-222127</text></patcit></p>
<heading id="h0005">Summary of Invention</heading>
<heading id="h0006">Technical Problem</heading>
<p id="p0008" num="0008"><!-- EPO <DP n="3"> --> In Patent Literature 1, music is emitted from the speaker used for an announcement for passenger guide. That is, the speaker for announcement is used as a speaker for providing music. For this reason, the speaker needs to be disposed, for example, in an operating panel and needs to be light in weight, thin, small in size, and monaural reproduction due to influence of an environment in which the speaker is provided in the operating panel. As a result, sound quality during music reproduction is very poor, which is a sound emission state that is clearly different, for example, from that of music reproduced by a household audio apparatus.</p>
<p id="p0009" num="0009">Furthermore, in general, there are very few elevators that reproduce music, and in most cases, only a required minimum number of speakers, that is, only one speaker is provided. There is almost no elevator that provides music to a user of the elevator actively, constantly, or for some purpose.</p>
<p id="p0010" num="0010">Furthermore, although Patent Literature 1 proposes playing music as BGM, a type of music is not mentioned in particular.</p>
<p id="p0011" num="0011">Since the music is actually selected by an owner of the building or a person in charge of, for example, an elevator maintenance company, the music is basically selected, for example, on the basis of a personal taste of the owner of the building or the person in charge. Furthermore, since there is no sound content for elevators, existing sound content is used in general. Furthermore, at present, there is almost no attempt to create special sound content in consideration of comfort and a reduction in stress of an elevator user. As a result, even if music is played in an elevator car, the BGM may undesirably give an unpleasant feeling to a user of the elevator. Furthermore, since the same BGM is always used or the BGM is selected by a system regardless of whether or not the user likes it, there arises, for example, a problem in that a genre of the music does not meet a taste of a user of the elevator. In this case, the BGM may be perceived as noise by the user. As described above, according to the BGM reproduction of Patent Literature 1, stress resulting from user's "uncomfortable<!-- EPO <DP n="4"> --> feeling" and "unpleasant feeling" cannot be reduced, and in some cases, the stress of the user of the elevator may increase.</p>
<p id="p0012" num="0012">The present disclosure has been accomplished to solve the above problems, and an object of the present disclosure is to provide an enclosed-space sound system that can reduce stress of a user in an enclosed space by reproducing sound content obtained by combining a natural environmental sound that is generated in nature and a chord serial sound including a consonance and a dissonance.</p>
<heading id="h0007">Solution to Problem</heading>
<p id="p0013" num="0013">An enclosed-space sound system according to an embodiment of the present disclosure includes a speaker system that is located in an enclosed space and that includes a speaker unit, a memory configured to store sound content, and a sound-field control unit configured to send out a sound signal based on the sound content toward the enclosed space from the speaker system, and the sound content includes a natural environmental sound that represents an environmental sound generated in nature, and a chord serial sound obtained by combining chords that include a consonance and a dissonance.</p>
<heading id="h0008">Advantageous Effects of Invention</heading>
<p id="p0014" num="0014">With the enclosed-space sound system according to an embodiment of the present disclosure, sound content obtained by combining a natural environmental sound generated in nature and a chord serial sound including a consonance and a dissonance is reproduced, and thereby stress of a user in an enclosed space can be reduced.</p>
<heading id="h0009">Brief Description of Drawings</heading>
<p id="p0015" num="0015">
<ul id="ul0001" list-style="none" compact="compact">
<li>[<figref idref="f0001">Fig. 1] Fig. 1</figref> is a perspective view illustrating a configuration of an elevator 1 according to Embodiment 1.</li>
<li>[<figref idref="f0002">Fig. 2] Fig. 2</figref> illustrates an internal space of a car 5 of the elevator 1 according to Embodiment 1.<!-- EPO <DP n="5"> --></li>
<li>[<figref idref="f0003">Fig. 3] Fig. 3</figref> is a configuration diagram illustrating a configuration of a sound content generation device 40 that generates sound content 30 used in the sound system 13 according to Embodiment 1.</li>
<li>[<figref idref="f0004">Fig. 4] Fig. 4</figref> is an explanatory view for explaining a configuration of the sound content 30 used in the sound system 13 according to Embodiment 1.</li>
<li>[<figref idref="f0005">Fig. 5] Fig. 5</figref> is an explanatory view for explaining a configuration of a chord serial sound 30B included in the sound content 30 used in the sound system 13 according to Embodiment 1.</li>
<li>[<figref idref="f0005">Fig. 6] Fig. 6</figref> is a front view illustrating a configuration of the sound system 13 according to Embodiment 1.</li>
<li>[<figref idref="f0006">Fig. 7] Fig. 7</figref> is a plan view illustrating a layout of speaker cabinets 20 of the sound system 13 according to Embodiment 1.</li>
<li>[<figref idref="f0006">Fig. 8] Fig. 8</figref> is a side view illustrating an example of a configuration of the speaker cabinet 20 according to Embodiment 1.</li>
<li>[<figref idref="f0006">Fig. 9] Fig. 9</figref> is a front view illustrating a configuration of the speaker cabinet 20 of <figref idref="f0006">Fig. 8</figref>.</li>
<li>[<figref idref="f0007">Fig. 10] Fig. 10</figref> is a side view illustrating a configuration of a modification of the speaker cabinet 20 according to Embodiment 1.</li>
<li>[<figref idref="f0007">Fig. 11] Fig. 11</figref> is a front view illustrating a configuration of the speaker cabinet 20 of <figref idref="f0007">Fig. 10</figref>.</li>
<li>[<figref idref="f0007">Fig. 12] Fig. 12</figref> is a front view schematically illustrating a configuration of a modification of the sound system 13 according to Embodiment 1.</li>
<li>[<figref idref="f0008">Fig. 13] Fig. 13</figref> is a plan view schematically illustrating a configuration of another modification of the sound system 13 according to Embodiment 1.</li>
<li>[<figref idref="f0008">Fig. 14] Fig. 14</figref> is an explanatory view schematically illustrating a temporal change of a sound pressure level of the sound content 30 used in the sound system 13 according to Embodiment 1.</li>
<li>[<figref idref="f0009">Fig. 15] Fig. 15</figref> is a basic explanatory view of a consonance 33 and a dissonance 34 used in the sound system 13 according to Embodiment 1.<!-- EPO <DP n="6"> --></li>
<li>[<figref idref="f0009">Fig. 16] Fig. 16</figref> is an explanatory view illustrating a relationship between a "lower note" and a "higher note" that constitute a chord used in the sound system 13 according to Embodiment 1 in a form of a list by use of ordinal numbers.</li>
<li>[<figref idref="f0010">Fig. 17] Fig. 17</figref> illustrates an example of definition of the consonance 33 and the dissonance 34 used in the sound system 13 according to Embodiment 1.</li>
<li>[<figref idref="f0010">Fig. 18] Fig. 18</figref> illustrates an example of the consonance 33 used in the sound system 13 according to Embodiment 1.</li>
<li>[<figref idref="f0011">Fig. 19] Fig. 19</figref> illustrates an example of the consonance 33 used in the sound system 13 according to Embodiment 1.</li>
<li>[<figref idref="f0011">Fig. 20] Fig. 20</figref> illustrates an example of the dissonance 34 used in the sound system 13 according to Embodiment 1.</li>
<li>[<figref idref="f0012">Fig. 21] Fig. 21</figref> is an explanatory view illustrating an example of characteristics of a frequency band used as the chord serial sound 30B used in the sound system 13 according to Embodiment 1.</li>
<li>[<figref idref="f0012">Fig. 22] Fig. 22</figref> illustrates instantaneous frequency characteristics obtained in a case where FFT processing is performed on a time waveform at a position of a point (B) in <figref idref="f0004">Fig. 4</figref>.</li>
<li>[<figref idref="f0013">Fig. 23] Fig. 23</figref> illustrates instantaneous frequency characteristics obtained in a case where FFT processing is performed on a time waveform at a position of a point (A) in <figref idref="f0004">Fig. 4</figref>.</li>
<li>[<figref idref="f0013">Fig. 24] Fig. 24</figref> is an explanatory view illustrating an example of signal processing performed on the sound content 30 according to Embodiment 1.</li>
<li>[<figref idref="f0014">Fig. 25] Fig. 25</figref> is an explanatory view illustrating an example of signal processing performed on the sound content 30 according to Embodiment 1.</li>
<li>[<figref idref="f0014">Fig. 26] Fig. 26</figref> is an explanatory view illustrating an example of signal processing performed on the sound content 30 according to Embodiment 1.</li>
<li>[<figref idref="f0015">Fig. 27] Fig. 27</figref> is an explanatory view illustrating an example of signal processing performed on the sound content 30 according to Embodiment 1.</li>
<li>[<figref idref="f0015">Fig. 28] Fig. 28</figref> is a schematic view illustrating results of human subjective and physiological rating by use of an SD method.<!-- EPO <DP n="7"> --></li>
<li>[<figref idref="f0016">Fig. 29] Fig. 29</figref> illustrates an example of the additional sound 32 inserted into the natural environmental sound 30A of the sound content 30 for each season and for each living time zone.</li>
</ul></p>
<heading id="h0010">Description of Embodiments</heading>
<p id="p0016" num="0016">An embodiment of an enclosed-space sound system according to the present disclosure is described below with reference to the drawings. The present disclosure is not limited to the embodiment below and can be modified in various ways without departing from the spirit of the present disclosure. Furthermore, the present disclosure encompasses every combination of configurations that can be combined among configurations illustrated in the embodiments below and modifications of the embodiments. In the drawings, constituent elements given identical reference signs are identical or corresponding constituent elements throughout the entire specification. Note that in the drawings, a relative dimensional relationship, a shape, and others of each constituent element may be different from those of an actual one.</p>
<heading id="h0011">Embodiment 1</heading>
<p id="p0017" num="0017">An enclosed-space sound system according to Embodiment 1 is applied to an enclosed space that is required to keep some degree of sealability and quietness. Examples of the enclosed space include an internal space of an elevator car, internal spaces of public transport vehicles such as a train, a bus, and a taxi, and waiting spaces such as waiting rooms of a hospital and a pharmacy. That is, the enclosed space to which the enclosed-space sound system according to Embodiment 1 is applied is a special narrow enclosed space different from a general residential space. Furthermore, specifically, the enclosed space according to Embodiment 1 is a space that accommodates two or more people and is a space whose entrance and exit are closed and a person inside thus cannot go out for a certain period in principle. The following describes, as an example of the enclosed space, a space inside an elevator car.</p>
<p id="p0018" num="0018"><!-- EPO <DP n="8"> --> <figref idref="f0001">Fig. 1</figref> is a perspective view illustrating a configuration of an elevator 1 according to Embodiment 1. As illustrated in <figref idref="f0001">Fig. 1</figref>, the elevator 1 is installed in a building and moves up or down in a hoistway 2. A hoisting machine 3 is provided in an upper portion of the hoistway 2. A main rope 4 is suspended around a sheave 3a of the hoisting machine 3. A car 5 and a counterweight 6 are connected to respective ends of the main rope 4. The car 5 and the counterweight 6 are hung on the sheave 3a in a well bucket style by the main rope 4. Furthermore, an elevator control panel 7 is provided in an upper portion of the hoistway 2. The elevator control panel 7 is connected to the hoisting machine 3 by a communication line and is connected to the car 5 by a control cable 8. The control cable 8 transmits electric power and a control signal to the car 5. The control cable 8 is also called a travelling cable.</p>
<p id="p0019" num="0019">The car 5 includes four side boards 5a, a floor board 5b, and a ceiling board 5c. The four side boards 5a are each disposed on the corresponding one of a right side, a left side, a front side, and a back side of the car 5. Furthermore, the side board 5a on the front side among the four side boards 5a is provided with a car door 5d. The car door 5d is engaged with a landing door (not illustrated) installed at a landing area and opens and closes when the car 5 stops at a landing area of each floor.</p>
<p id="p0020" num="0020">As illustrated in <figref idref="f0001">Fig. 1</figref>, a car control device 9 and a sound-field control device 21 are provided on an upper surface of the ceiling board 5c of the car 5. The car control device 9 controls operation of each device provided in the car 5. Examples of the device provided in the car 5 include the car door 5d, a lighting device 5e (see <figref idref="f0002">Fig. 2</figref>), and a car operating panel 5f (see <figref idref="f0002">Fig. 2</figref>). The sound-field control device 21 controls overall operation of an enclosed-space sound system 13 (see <figref idref="f0005">Fig. 6</figref>), which will be described later, such that a three-dimensional sound field 27 (see <figref idref="f0005">Fig. 6</figref>) is formed in the entire internal space of the car 5. Hereinafter, the enclosed-space sound system 13 is simply referred to as a sound system 13.</p>
<p id="p0021" num="0021"><!-- EPO <DP n="9"> --> As illustrated in <figref idref="f0001">Fig. 1</figref>, a suspended ceiling 10 is fixed to a lower surface of the ceiling board 5c of the car 5. The suspended ceiling 10 is located in the internal space of the car 5. The suspended ceiling 10 has a cuboid shape. The suspended ceiling 10 has four side surfaces 10a and a lower surface 10b (see <figref idref="f0002">Fig. 2</figref>). Furthermore, the suspended ceiling 10 may further have an upper surface, which is opposite to the lower surface 10b. Alternatively, the suspended ceiling 10 may have a rectangular flat plate shape. In this case, the suspended ceiling 10 has a lower surface 10b and a plurality of support pillars (not illustrated) that fix the lower surface 10b to the ceiling board 5c of the car 5. It is desirable to provide these support pillars at four corners of the suspended ceiling 10. In an internal space of the suspended ceiling 10, the lighting device 5e (see <figref idref="f0002">Fig. 2</figref>), an emergency speaker 5g (see <figref idref="f0002">Fig. 2</figref>), and a speaker system 22 of the sound system 13 (see <figref idref="f0005">Fig. 6</figref>) are provided. Note that although the sound-field control device 21 is provided on the upper surface of the ceiling board 5c of the car 5 in the above description as illustrated in <figref idref="f0001">Fig. 1</figref>, the sound-field control device 21 may also be disposed in the internal space of the suspended ceiling 10. A gap 11 (see <figref idref="f0002">Figs. 2</figref> and <figref idref="f0005">6</figref>) of a certain distance D is present between the side surface 10a of the suspended ceiling 10 and the side board 5a of the car 5. Hereinafter, the certain distance D is referred to as a first distance D.</p>
<p id="p0022" num="0022">Although a case where the elevator 1 is a rope-type elevator is illustrated in the example of <figref idref="f0001">Fig. 1</figref>, this case is not restrictive. The elevator 1 may be, for example, an elevator of other types such as a linear elevator.</p>
<p id="p0023" num="0023"><figref idref="f0002">Fig. 2</figref> illustrates the internal space of the car 5 of the elevator 1 according to Embodiment 1. As illustrated in <figref idref="f0002">Fig. 2</figref>, the internal space of the car 5 is surrounded by the four side boards 5a, the floor board 5b, and the lower surface 10b of the suspended ceiling 10. The internal space of the car 5 has, for example, a cuboid shape. The floor board 5b is a rectangular flat surface installed in a horizontal direction. Each side board 5a is a rectangular flat surface installed in a perpendicular direction. The perpendicular direction is, for example, a vertical direction. The lower surface 10b of<!-- EPO <DP n="10"> --> the suspended ceiling 10 faces the floor board 5b. The lower surface 10b of the suspended ceiling 10 is a rectangular flat surface installed in the horizontal direction. The lighting device 5e is provided to the suspended ceiling 10. A main body of the lighting device 5e is provided in the internal space of the suspended ceiling 10. The lighting device 5e is, for example, an LED lighting device. As illustrated in <figref idref="f0002">Fig. 2</figref>, an irradiation surface 5ea of the lighting device 5e faces the floor board 5b. The lighting device 5e irradiates the internal space of the car 5 with light emitted from the irradiation surface 5ea. Furthermore, the suspended ceiling 10 is provided with the emergency speaker 5g for giving an emergency message from a control room of the building. The emergency speaker 5g may be used to give not only an emergency message, but also an audio message to a user such as "door is closing".</p>
<p id="p0024" num="0024">The side board 5a on the front side among the four side boards 5a is provided with the car door 5d, as described above. Furthermore, the side board 5a on the front side is provided with the car operating panel 5f, as illustrated in <figref idref="f0002">Fig. 2</figref>. The car operating panel 5f has a plurality of car call registration buttons provided corresponding to respective floors and door opening and closing buttons that control an openingclosing action of the car door 5d. Furthermore, the car operating panel 5f has an intercom device 5h used by a user to communicate with an outside in situations such as emergency situations.</p>
<p id="p0025" num="0025">As illustrated in <figref idref="f0002">Fig. 2</figref>, the car control device 9 is connected to the elevator control panel 7, for example, by the control cable 8 (see <figref idref="f0001">Fig. 1</figref>). As illustrated in <figref idref="f0002">Fig. 2</figref>, the car control device 9 has an input unit 9a, a control unit 9b, an output unit 9c, and a memory 9d. The input unit 9a inputs a control signal from the elevator control panel 7 to the control unit 9b. The control unit 9b controls operation of each device provided in the car 5 on the basis of the control signal. The output unit 9c outputs a drive signal to each device under control of the control unit 9b. Furthermore, the output unit 9c transmits a signal such as car call registration input through the car operating panel 5f by a user to the elevator control panel 7 under control of the control unit 9b. The<!-- EPO <DP n="11"> --> memory 9d stores, in the memory 9d, a computation result of the control unit 9b, various kinds of data and programs used for control of the control unit 9b, and others.</p>
<p id="p0026" num="0026">The sound-field control device 21 is one of constituent elements of the sound system 13. The sound-field control device 21 and the speaker system 22 (see <figref idref="f0005">Fig. 6</figref>), which will be described later, form the sound system 13. As illustrated in <figref idref="f0002">Fig. 2</figref>, the sound-field control device 21 has a sound-field control unit 21a, an output unit 21b, a memory 21c, and a timer unit 21d. The sound-field control unit 21a controls operation of the sound system 13 such that a high-quality sound field is created in the internal space of the car 5. The output unit 21b transmits a drive signal and reproduction data of a sound signal to a speaker cabinet 20 (see <figref idref="f0005">Fig. 6</figref>) under control of the sound-field control unit 21a. The memory 21c stores, in the memory 21c, sound content 30 (see <figref idref="f0004">Fig. 4</figref>) obtained by mixing down, for example, a natural environmental sound that represents a sound generated in nature and a chord serial sound obtained by combining a consonance and a dissonance. The memory 21c further stores, in the memory 21c, a computation result of the sound-field control unit 21a, various kinds of data and programs used for control of the sound-field control unit 21a, and others. The sound-field control unit 21a reproduces the sound content 30 stored in the memory 21c and sends out a sound signal based on the sound content 30 toward the internal space of the car 5 from the speaker system 22. The timer unit 21d counts current date and time and holds current date and time data. The timer unit 21d has, as the date and time data, data of month and day in an annual calendar and data of a time. The sound-field control unit 21a may acquire the date and time data from the timer unit 21d and switch sound contents 30 depending on a season and a living time zone on the basis of the date and time data.</p>
<p id="p0027" num="0027">The sound content 30 stored in the memory 21c is, for example, generated by a sound content generation device 40 that is externally provided and is stored in advance in the memory 21c of the sound-field control device 21. The sound content generation<!-- EPO <DP n="12"> --> device 40 generates the sound content 30 by combining a natural environmental sound 30A (see <figref idref="f0004">Fig. 4</figref>) and a chord serial sound 30B (see <figref idref="f0004">Fig. 4</figref>).</p>
<heading id="h0012">[Sound Content Generation Device 40]</heading>
<p id="p0028" num="0028"><figref idref="f0003">Fig. 3</figref> is a configuration diagram illustrating a configuration of the sound content generation device 40 that generates the sound content 30 used in the sound system 13 according to Embodiment 1. <figref idref="f0004">Fig. 4</figref> is an explanatory view for explaining a configuration of the sound content 30 used in the sound system 13 according to Embodiment 1. In <figref idref="f0004">Fig. 4</figref>, the horizontal axis represents time and the vertical axis represents a sound pressure level. The sound content 30 is, for example, generated by the sound content generation device 40. The sound content 30 includes the natural environmental sound 30A and the chord serial sound 30B. The natural environmental sound 30A is a sound that represents an environmental sound generated in nature. The chord serial sound 30B is a combination of chords that include a consonance and a dissonance. The sound content 30 is formed by mixing down the natural environmental sound 30A and the chord serial sound 30B. That is, the sound content 30 is obtained by adding the chord serial sound 30B to the natural environmental sound 30A. The natural environmental sound 30A and the chord serial sound 30B are concurrently emitted into the car 5 from the speaker system 22. The example illustrated in <figref idref="f0004">Fig. 4</figref> is an example illustrating temporal changes of the natural environmental sound 30A and the chord serial sound 30B and an example of time waveforms of the natural environmental sound 30A and the chord serial sound 30B before mixing-down.</p>
<p id="p0029" num="0029">As illustrated in <figref idref="f0003">Fig. 3</figref>, the sound content generation device 40 includes an input unit 41, a natural environmental sound generation unit 42, a chord serial sound generation unit 43, a signal processing unit 44, a mixing-down processing unit 45, an output unit 46, and a memory 47.</p>
<p id="p0030" num="0030"><!-- EPO <DP n="13"> --> The input unit 41 has a first input unit 41a and a second input unit 41b. The first input unit 41a receives material data of a natural environmental sound that expresses a sound generated in nature from a recorder (not illustrated), a memory (not illustrated), or a sound material database 60 that is externally provided. The material data of the natural environmental sound may be either recorded sound data obtained by recording a sound that is actually generated in nature or pseudo data that is artificially generated and sounds like a sound in nature. The second input unit 41b receives sound data of a consonance and a dissonance from a musical instrument (not illustrated) or the sound material database 60 that is externally provided. The sound material database 60 is a database in which various kinds of sound materials such as the recorded sound data obtained by recording a sound that is actually generated in nature, the pseudo data that is artificially generated and sounds like a sound in nature, sound effects, chords, and human voice are stored.</p>
<p id="p0031" num="0031">The natural environmental sound generation unit 42 generates the natural environmental sound 30A by use of the material data of the natural environmental sound input to the first input unit 41a. As illustrated in <figref idref="f0004">Fig. 4</figref>, the natural environmental sound 30A includes a natural background sound (hereinafter referred to as a natural BG sound) 31 and an additional sound 32 added to the natural BG sound 31. The natural BG sound 31 is a sound generated by an environmental state in nature. The natural BG sound 31 includes at least one of a sound of trees shaking in the wind, a sound of water flowing in a river or sea, a sound of a crowd, and a sound of movement of an artificial object such as a car and a train. The additional sound 32 is a sound generated by behavior of a living organism in nature. The additional sound 32 includes at least one of chirping of one or more birds, a sound of wings of one or more flying birds, a flying sound at takeoff of one or more birds, a sound of chirping of one or more insects, a cry of one or more animals, and human voice. The natural environmental sound generation unit 42 adds the additional sound 32 to the natural BG sound 31 on the basis of a preset timing adjustment rule. The timing adjustment rule is stored in the memory 47. Note that two or more kinds of timing adjustment rules may be stored in<!-- EPO <DP n="14"> --> the memory 47. In this case, a timing adjustment rule to be used is selected from among the two or more kinds of timing adjustment rules by a user. Alternatively, a timing of addition of the additional sound 32 to the natural BG sound 31 may be input by a user who operates the sound content generation device 40 without using the timing adjustment rule in the memory 47. As illustrated in <figref idref="f0004">Fig. 
4</figref>, the natural BG sound 31 is set such that the natural BG sound 31 is to be continuously emitted over a whole time length L of the natural environmental sound 30A. The whole time length L of the natural environmental sound 30A is 2 minutes or less. The additional sound 32 is individually set in each time section 35 obtained by dividing the time length L. Each time section 35 thus has a time length set in accordance with the timing adjustment rule. A sound pressure level of the additional sound 32 is higher than a sound pressure level of the natural BG sound 31. The sound pressure level of the additional sound 32 is set higher than the sound pressure level of the natural BG sound 31 on the basis of a preset sound pressure adjustment rule. The sound pressure adjustment rule is stored in the memory 47. Two or more kinds of sound pressure adjustment rules may be stored in the memory 47. In this case, a sound pressure adjustment rule to be used is selected from among the two or more kinds of sound pressure adjustment rules by a user. Alternatively, sound pressure levels of the natural BG sound 31 and the additional sound 32 may be input by a user who operates the sound content generation device 40 without using the sound pressure adjustment rule in the memory 47.</p>
<p id="p0032" num="0032">The chord serial sound generation unit 43 generates the chord serial sound 30B by use of sound data of a consonance and a dissonance input to the second input unit 41b. The chord serial sound 30B is a combination of a consonance 33 and a dissonance 34. <figref idref="f0005">Fig. 5</figref> is an explanatory view for explaining a configuration of the chord serial sound 30B included in the sound content 30 used in the sound system 13 according to Embodiment 1. The chord serial sound 30B is generated by the chord serial sound generation unit 43 provided in the sound content generation device 40. As illustrated in <figref idref="f0005">Fig. 5</figref>, the chord serial sound 30B is constituted such that the consonance 33 and the dissonance 34 are alternately arranged, for example, in an<!-- EPO <DP n="15"> --> order of consonance 33 → dissonance 34 → consonance 33. A time length L1 of the consonance 33 is identical to a time length L2 of the dissonance 34 or is longer than the time length L2. The following describes an example of time allocation of the time length L1 of the consonance 33 and the time length L2 of the dissonance 34. However, the time allocation can be decided in any ways as appropriate without being limited to the following examples.</p>
<p id="p0033" num="0033">
<ol id="ol0001" compact="compact" ol-style="">
<li>(a) The time length L1 of the consonance 33 is set to 2 seconds, and the time length L2 of the dissonance 34 is set to 1 second.</li>
<li>(b) The time length L1 of the consonance 33 is set to 3 seconds, and the time length L2 of the dissonance 34 is set to 1 second.</li>
<li>(c) The time length L1 of the consonance 33 is set to 2 seconds, and the time length L2 of the dissonance 34 is set to 2 seconds.</li>
<li>(d) The time length L1 of the consonance 33 is set to 1 second, and the time length L2 of the dissonance 34 is set to 1 second.</li>
</ol></p>
<p id="p0034" num="0034">In this way, the consonance 33 and the dissonance 34 are alternately arranged on the basis of a preset time allocation rule. The time allocation rule is stored in the memory 47. As indicated by (a) to (d) above, the memory 47 stores two or more kinds of time allocation rules. The user selects a time allocation rule to be used from among these time allocation rules. The whole time length L of the chord serial sound 30B is 2 minutes or less. Since the chord serial sound 30B is repeatedly reproduced, a chord at an end of the chord serial sound 30B is set to the dissonance 34 in a case where a chord at a beginning of the chord serial sound 30B is the consonance 33, as illustrated in the example of <figref idref="f0005">Fig. 5</figref>. Conversely, in a case where the chord at the beginning of the chord serial sound 30B is the dissonance 34, the chord at the end of the chord serial sound 30B is set to the consonance 33. In this way, in a case where the chord serial sound 30B is repeatedly reproduced in a loop, it is possible to prevent the consonances 33 or the dissonances 34 from being arranged side by side at a joint<!-- EPO <DP n="16"> --> part. Therefore, the consonance 33 and the dissonance 34 are arranged successively even at the joint part.</p>
<p id="p0035" num="0035">The signal processing unit 44 performs one or more kinds of signal processing on the natural environmental sound 30A generated by the natural environmental sound generation unit 42 and the chord serial sound 30B generated by the chord serial sound generation unit 43 as needed. A timing of the signal processing may be before mixing-down processing performed by the mixing-down processing unit 45 or may be after the mixing-down processing. Furthermore, the signal processing unit 44 may perform the signal processing on only one of the natural environmental sound 30A and the chord serial sound 30B. Examples of the signal processing include a plurality of kinds of processing such as generation of an introduction part 36 and others, which will be described later, adjustment of a sound pressure level, and phase control processing. The signal processing will be described later.</p>
<p id="p0036" num="0036">The mixing-down processing unit 45 generates the sound content 30 by mixing down the natural environmental sound 30A and the chord serial sound 30B. The mixing-down processing unit 45 mixes down the natural environmental sound 30A and the chord serial sound 30B on the basis of a preset synchronization timing adjustment rule. The synchronization timing adjustment rule is stored in the memory 47.</p>
<p id="p0037" num="0037">A hardware configuration of the car control device 9 is described below. Functions of the input unit 9a, the control unit 9b, and the output unit 9c of the car control device 9 are implemented by a processing circuit. The processing circuit is dedicated hardware or a processor. The dedicated hardware is, for example, an application specific integrated circuit (ASIC) or a field programmable gate array (FPGA). The processor executes a program stored in a memory. The memory 9d is a memory. The memory is a non-volatile or volatile semiconductor memory such as a random access memory (RAM), a read only memory (ROM), a flash memory, and an erasable<!-- EPO <DP n="17"> --> programmable ROM (EPROM) or a disc such as a magnetic disc, a flexible disc, and an optical disc.</p>
<p id="p0038" num="0038">A hardware configuration of the sound-field control device 21 is described. Functions of the sound-field control unit 21a, the output unit 21b, and the timer unit 21d of the sound-field control device 21 are implemented by a processing circuit. The processing circuit is dedicated hardware or a processor. The dedicated hardware and processor may be identical to those described above, and therefore description of the dedicated hardware and processor is omitted. The memory 21c is a memory. The memory may be identical to that described above, and therefore description of the memory is omitted.</p>
<p id="p0039" num="0039">A hardware configuration of the sound content generation device 40 is described. Functions of the input unit 41, the natural environmental sound generation unit 42, the chord serial sound generation unit 43, the signal processing unit 44, the mixing-down processing unit 45, and the output unit 46 of the sound content generation device 40 are implemented by a processing circuit. The processing circuit is dedicated hardware or a processor. The dedicated hardware and processor may be identical to those described above, and therefore description of the dedicated hardware and processor is omitted. The memory 47 is a memory. The memory may be identical to that described above, and therefore description of the memory is omitted.</p>
<p id="p0040" num="0040">Note that software configurations of the sound material database 60 and the sound content generation device 40 may be, for example, virtual singer software that is an application of a singing voice synthesizing technique such as vocaloid (registered trademark).</p>
<p id="p0041" num="0041"><figref idref="f0005">Fig. 6</figref> is a front view illustrating a configuration of the sound system 13 according to Embodiment 1. <figref idref="f0006">Fig. 7</figref> is a plan view illustrating a layout of the speaker cabinets 20 of the sound system 13 according to Embodiment 1. <figref idref="f0006">Fig. 7</figref> illustrates a state where<!-- EPO <DP n="18"> --> the suspended ceiling 10 of the car 5 is looked up at from the floor board 5b of the car 5. In <figref idref="f0005">Figs. 6</figref> and <figref idref="f0006">7</figref>, a height direction of the car 5 is a Y direction, a width direction of the car 5 is an X direction, and a depth direction of the car 5 is a Z direction. The Y direction is, for example, a vertical direction. When left, right, front, and back in the car 5 are defined, the X direction is a left-right direction of the car 5, and the Z direction is a front-back direction of the car 5, as illustrated in <figref idref="f0006">Fig. 7</figref>.</p>
<p id="p0042" num="0042">As illustrated in <figref idref="f0005">Fig. 6</figref>, the sound system 13 includes the speaker system 22 provided to a ceiling of the enclosed space and the sound-field control device 21. The speaker system 22 includes one or more speaker cabinets 20. Each speaker cabinet 20 includes one or more speaker units 23. The sound system 13 forms a sound field 27 and emits a sound to a user of the car 5. In Embodiment 1, the sound content 30 obtained by mixing down the natural environmental sound 30A that is generated in nature such as a murmur of a stream or chirping of a bird and the chord serial sound 30B obtained by combining the consonance 33 and the dissonance 34 is used as the sound. In Embodiment 1, a sound-field environment of two or more channels is created in the enclosed space, and the sound content 30 is reproduced in this sound-field environment. In this way, the sound content 30 can be emitted to the enclosed space from plural directions, and a "comfortable feeling" can be given to an auditory sense of a user in the enclosed space. As a result, an unpleasant element such as stress during stay in a narrow space can be reduced.</p>
<p id="p0043" num="0043">The natural environmental sound 30A that is a main part of the sound content 30 is constituted such that a user can feel, from the sound, a season such as spring, summer, autumn, and winter, which, for example, anyone who lives in Japan can feel, and a living time zone such as dawn, daytime, evening, and night. This allows the user to perceive a sense of a time zone and a sense of a season from the "sound" even in an enclosed space where an external environment cannot be seen. Furthermore, the natural environmental sound 30A has a content configuration without noisiness or other state excluding an unpleasant factor such as noise and is thus formed not to give an<!-- EPO <DP n="19"> --> auditory unpleasant feeling. Specifically, the natural environmental sound 30A is a combination of a sound source type such as a flow of wind and a river or chirping of a bird in nature, a time zone, and a frequency band.</p>
<p id="p0044" num="0044">In Embodiment 1, the speaker system 22 includes two speaker cabinets 20, as illustrated in <figref idref="f0005">Fig. 6</figref>. However, the number of speaker cabinets 20 is not limited to two and may be any number of two or more. This can form the sound field 27 of two or more channels in the enclosed space. Each speaker cabinet 20 is provided in the internal space of the suspended ceiling 10, as illustrated in <figref idref="f0005">Fig. 6</figref>. However, a position of each speaker cabinet 20 is not limited to an inside of the suspended ceiling 10, and each speaker cabinet 20 may be provided to at least one of the ceiling board 5c of the car 5, the side board 5a of the car 5, and the floor board 5b of the car 5. Each speaker cabinet 20 includes the speaker unit 23 and a housing 25. Note that although the speaker system 22 includes the speaker cabinets 20 in Embodiment 1, this case is not restrictive. That is, the speaker system 22 may include only one or more speaker units 23 without including the speaker cabinets 20. Furthermore, although the speaker unit 23 and the speaker cabinet 20 are disposed in the suspended ceiling 10 in Embodiment 1, this case is not restrictive. That is, the speaker unit 23 and the speaker cabinet 20 may be disposed at another position such as the side board 5a of the car 5. Furthermore, although the number of speaker cabinets 20 provided in the car 5 is any number of two or more in the above description, this case is not restrictive. That is, the number of speaker cabinets 20 provided in the car 5 and the number of speaker units 23 may be any number of one or more, and these numbers may be decided as appropriate depending on a capacity, an intended purpose, and other features of the car 5.</p>
<p id="p0045" num="0045"><figref idref="f0006">Fig. 8</figref> is a side view illustrating an example of a configuration of the speaker cabinet 20 according to Embodiment 1. <figref idref="f0006">Fig. 9</figref> is a front view illustrating the configuration of the speaker cabinet 20 of <figref idref="f0006">Fig. 8</figref>. As illustrated in <figref idref="f0006">Figs. 8 and 9</figref>, the speaker cabinet 20 includes the speaker unit 23 and the housing 25. The speaker unit<!-- EPO <DP n="20"> --> 23 is housed in the housing 25. The speaker unit 23 is provided on a front surface 25a of the housing 25 and has an emission surface 23a that emits a sound toward an outside. The housing 25 has, for example, a cuboid shape. The housing 25 is a hollow sealed device. The emission surface 23a of the speaker unit 23 is fitted into an installation hole provided in the front surface 25a of the housing 25 and is exposed to an outside through the installation hole. All other parts of the speaker unit 23 are provided in the housing 25. A sound from the emission surface 23a of the speaker unit 23 is thus emitted only in a direction indicated by arrow A in <figref idref="f0006">Fig. 8</figref> and is not emitted to an outside through parts of the housing 25 other than the emission surface 23a.</p>
<p id="p0046" num="0046"><figref idref="f0007">Fig. 10</figref> is a side view illustrating a configuration of a modification of the speaker cabinet 20 according to Embodiment 1. <figref idref="f0007">Fig. 11</figref> is a front view illustrating the configuration of the speaker cabinet 20 of <figref idref="f0007">Fig. 10</figref>. As illustrated in <figref idref="f0007">Figs. 10 and 11</figref>, the speaker cabinet 20 may have two or more speaker units 23 housed in the housing 25. In this case, for example, one speaker unit 23-1 may be a full-range speaker, and the other speaker unit 23-2 may be a tweeter. The full-range speaker reproduces a range from a low range to a high range by itself. In Embodiment 1, in a case where the single speaker unit 23 is housed in the housing 25 of the speaker cabinet 20, the speaker unit 23 is a full-range speaker. The tweeter is a speaker exclusive for a high range used to assist the full-range speaker. It is difficult to reproduce a range from a low range to a high range by a single speaker, and sound quality may become insufficient. In such a case, the tweeter is used to supplement the insufficiency. The two or more speaker units 23 disposed in the housing 25 may be different kinds of speaker units as described above or may be speaker units of an identical kind. It is, however, desirable that one speaker is a full-range speaker and the other speaker is a speaker exclusive for a low range or exclusive for a high range used to assist the full-range speaker. In this case, it is possible to cope with a wide frequency band from a low range to a high range and to perform sound emission for each narrow frequency band. As described above, in a case where one speaker cabinet 20 includes a plurality of speaker units 23, an improvement in sound quality and enlargement of a<!-- EPO <DP n="21"> --> reproduction band can be achieved by the speaker cabinet 20 alone. 
As a result, it is possible to easily obtain a "high-sound-quality system" that can cover a wide frequency band.</p>
<heading id="h0013">[Indirect Sound Emission]</heading>
<p id="p0047" num="0047">Description is provided with reference back to <figref idref="f0005">Figs. 6</figref> and <figref idref="f0006">7</figref>. As illustrated in <figref idref="f0005">Figs. 6</figref> and <figref idref="f0006">7</figref>, the speaker cabinets 20 are disposed in the internal space of the suspended ceiling 10. A height of the suspended ceiling 10 in the Y direction (a height direction of the car 5) is, for example, approximately 5 cm. As illustrated in <figref idref="f0003">Fig. 3</figref>, a height H1 of the housing 25 of the speaker cabinet 20 in the Y direction (the height direction of the car 5) is thus less than or equal to 5 cm. Alternatively, the height H1 is in a range of 3 cm to 20 cm. Therefore, the height H1 of the housing 25 is limited by the height of the suspended ceiling 10 in the Y direction (the height direction of the car 5). Furthermore, the emission surface 23a of the speaker unit 23 is disposed and faces the side board 5a of the car 5, as illustrated in <figref idref="f0005">Figs. 6</figref> and <figref idref="f0006">7</figref>. The emission surface 23a is disposed along an edge of the side surface 10a of the suspended ceiling 10. As illustrated in <figref idref="f0006">Fig. 7</figref>, the emission surface 23a is located within a same plane as the side surface 10a of the suspended ceiling 10. A position of the emission surface 23a in the X direction (a width direction of the car 5) thus matches or almost matches a position of the side surface 10a of the suspended ceiling 10 in the X direction. The side surface 10a of the suspended ceiling 10 has an opening at a position corresponding to the position of the emission surface 23a. Note that the whole side surface 10a of the suspended ceiling 10 may be opened. A sound emitted from the emission surface 23a is thus not blocked by the side surface 10a of the suspended ceiling 10. 
Furthermore, as described above, the gap 11 of the first distance D is present between the side surface 10a of the suspended ceiling 10 and the side board 5a of the car 5. The first distance D is approximately 5 cm. Note that the first distance D is set as appropriate within a range of 2 cm to 20 cm, desirably within a range of 3 cm to 10 cm in accordance with the specifications of the car 5 of the elevator 1. As illustrated in <figref idref="f0005">Figs. 6</figref> and <figref idref="f0006">7</figref>, a sound emitted from the emission surface 23a of the<!-- EPO <DP n="22"> --> speaker unit 23 is emitted in the direction indicated by arrow A. Then, the sound is reflected by the side board 5a of the car 5 and becomes a reflected sound. The reflected sound travels in a direction indicated by arrow B, as illustrated in <figref idref="f0005">Figs. 6</figref> and <figref idref="f0006">7</figref>. As described above, in Embodiment 1, the speaker unit 23 performs "indirect sound emission" of emitting a sound to a user by utilizing reflection of the side board 5a of the car 5.</p>
<p id="p0048" num="0048">In Embodiment 1, the emission surface 23a of the speaker unit 23 is disposed in proximity with the side board 5a of the car 5 and faces the side board 5a of the car 5 with the gap 11 having the first distance D interposed between the emission surface 23a and the car 5. The first distance D is, for example, approximately 5 cm, as described above. Therefore, a sound emitted from the emission surface 23a of the speaker unit 23 is reflected by the side board 5a of the car 5 immediately after the emission before a sound pressure level decreases.</p>
<p id="p0049" num="0049">Furthermore, as illustrated in <figref idref="f0006">Fig. 7</figref>, the speaker cabinet 20 is disposed further rearward than a central part of the suspended ceiling 10 in the Z direction (a depth direction of the car 5). Note that a position of the speaker cabinet 20 in the Z direction is not limited to this position, and the speaker cabinet 20 may be provided in a central part of the suspended ceiling 10 in the Z direction or may be provided further forward than the central part of the suspended ceiling 10 in the Z direction. Furthermore, as illustrated in <figref idref="f0005">Fig. 6</figref>, the speaker cabinet 20 is disposed in a central portion of the suspended ceiling 10 in the Y direction (the height direction of the car 5). Note that the position of the speaker cabinet 20 in the Y direction is not limited to this position, and may be a position above the central portion of the suspended ceiling 10 in the Y direction or may be a position below the central portion.</p>
<p id="p0050" num="0050">The speaker unit 23 provided in one of the two speaker cabinets 20 illustrated in <figref idref="f0006">Fig. 7</figref> is referred to as a speaker unit 23R. Furthermore, the speaker unit 23 provided in the other of the two speaker cabinets 20 is referred to as a speaker unit 23L. The speaker unit 23R<!-- EPO <DP n="23"> --> and the speaker unit 23L are disposed apart from each other. Note that the speaker cabinet 20 that houses the speaker unit 23R and the speaker cabinet 20 that houses the speaker unit 23L are disposed apart from each other by a certain distance centered on a central portion of the suspended ceiling 10 in the X direction. The certain distance is referred to as a second distance D2. The second distance D2 is determined by a dimension of the car 5 in the X direction, the first distance D, and a dimension of the housing 25 in the X direction. The speaker unit 23R and the speaker unit 23L are disposed such that back surfaces of the speaker unit 23R and the speaker unit 23L face each other. As illustrated in <figref idref="f0006">Fig. 7</figref>, the emission surface 23a of the speaker unit 23R is thus disposed and faces the side board 5a of the car 5 on the right side. On the other hand, the emission surface 23a of the speaker unit 23L is disposed and faces the side board 5a of the car 5 on the left side. Each of the emission surfaces 23a of the speaker units 23R and 23L is disposed and faces the gap 11. Each of the emission surfaces 23a of the speaker units 23R and 23L is disposed within a same plane as the side surface 10a of the suspended ceiling 10 on the corresponding one of the left side and the right side.</p>
<p id="p0051" num="0051">In general, a user stands and faces the car door 5d in the car 5 of the elevator 1. Therefore, a sound emitted from the speaker unit 23R mainly reaches the right ear of the user, and a sound emitted from the speaker unit 23L mainly reaches the left ear of the user. Hereinafter, the sound emitted from the speaker unit 23R is referred to as a "right-side sound", and the sound emitted from the speaker unit 23L is referred to as a "left-side sound".</p>
<heading id="h0014">[Direct Sound Emission]</heading>
<p id="p0052" num="0052">A direction in which the speaker cabinet 20 is provided is not limited to the case of <figref idref="f0005">Figs. 6</figref> and <figref idref="f0006">7</figref>. <figref idref="f0007">Fig. 12</figref> is a front view schematically illustrating a configuration of a modification of the sound system 13 according to Embodiment 1.</p>
<p id="p0053" num="0053"><!-- EPO <DP n="24"> --> In <figref idref="f0007">Fig. 12</figref>, two speaker units 23R-1 and 23L-1 are provided and face the floor board 5b of the car 5. Emission surfaces 23a of the speaker units 23R-1 and 23L-1 are thus disposed and face the floor board 5b of the car 5, as illustrated in <figref idref="f0007">Fig. 12</figref>. Note that the speaker cabinet 20 that houses the speaker unit 23R-1 and the speaker cabinet 20 that houses the speaker unit 23L-1 are disposed apart from each other by a certain distance centered on the central portion of the suspended ceiling 10 in the X direction. The certain distance is referred to as a third distance D3. The third distance D3 may be identical to the second distance D2 illustrated in <figref idref="f0006">Fig. 7</figref> or may be different from the second distance D2.</p>
<p id="p0054" num="0054">As illustrated in <figref idref="f0007">Fig. 12</figref>, each of the emission surfaces 23a of the speaker units 23R-1 and 23L-1 is disposed within a same plane as the lower surface 10b of the suspended ceiling 10. A position of each emission surface 23a in the Y direction (the height direction of the car 5) thus matches or almost matches a position of the lower surface 10b of the suspended ceiling 10 in the Y direction. Furthermore, portions of the emission surfaces 23a of the speaker units 23R-1 and 23L-1 are fitted into respective attachment holes provided in the lower surface 10b of the suspended ceiling 10. Each of the emission surfaces 23a of the speaker units 23R-1 and 23L-1 is exposed to an outside through the attachment hole. A sound emitted from each of the emission surfaces 23a of the speaker units 23R-1 and 23L-1 is thus not blocked by the lower surface 10b of the suspended ceiling 10.</p>
<p id="p0055" num="0055">As illustrated in <figref idref="f0007">Fig. 12</figref>, a sound emitted from the speaker units 23R-1 and 23L-1 is emitted in the direction indicated by arrow A from the emission surface 23a. As described above, the speaker units 23R-1 and 23L-1 perform "direct sound emission" of directly emitting a sound to a user from the suspended ceiling 10.</p>
<heading id="h0015">[Combination of Indirect Sound Emission and Direct Sound Emission]</heading>
<p id="p0056" num="0056"><figref idref="f0008">Fig. 13</figref> is a plan view schematically illustrating a configuration of another modification of the sound system 13 according to Embodiment 1. <figref idref="f0008">Fig. 13</figref> illustrates a<!-- EPO <DP n="25"> --> state where the lower surface 10b of the suspended ceiling 10 is viewed from the floor board 5b. In <figref idref="f0008">Fig. 13</figref>, four speaker units 23R-1, 23R-2, 23L-1, and 23L-2 are provided. In <figref idref="f0008">Fig. 13</figref>, two speaker units 23R-2 and 23L-2 among the four speaker units 23R-1, 23R-2, 23L-1, and 23L-2 are provided and face the side board 5a of the car 5 on the front side. Furthermore, the other two speaker units 23R-1 and 23L-1 are provided and face the floor board 5b of the car 5. The emission surfaces 23a of the speaker units 23R-1 and 23L-1 are thus disposed and face the floor board 5b of the car 5, as illustrated in <figref idref="f0007">Fig. 12</figref>.</p>
<p id="p0057" num="0057">Description is provided in more detail. As illustrated in <figref idref="f0008">Fig. 13</figref>, the two speaker units 23R-2 and 23L-2 on the front side are provided and face the side board 5a of the car 5 on the front side. The speaker cabinet 20 that houses the speaker unit 23R-2 and the speaker cabinet 20 that houses the speaker unit 23L-2 are disposed apart from each other by a certain distance centered on the central portion of the suspended ceiling 10 in the X direction. The certain distance may be, for example, identical to the third distance D3 illustrated in <figref idref="f0007">Fig. 12</figref> or may be, for example, different from the third distance D3.</p>
<p id="p0058" num="0058">Each of the emission surfaces 23a of the speaker units 23R-2 and 23L-2 is thus disposed and faces the side board 5a of the car 5. Furthermore, each of the emission surfaces 23a is disposed along a side of the side surface 10a of the suspended ceiling 10. A position of each emission surface 23a in the Z direction (the depth direction of the car 5) thus matches or almost matches a position of the side surface 10a of the suspended ceiling 10 in the Z direction.</p>
<p id="p0059" num="0059">As described above, the gap 11 of the first distance D is present between the side surface 10a of the suspended ceiling 10 and the side board 5a of the car 5. As illustrated in <figref idref="f0008">Fig. 13</figref>, a sound emitted from the speaker units 23R-2 and 23L-2 is emitted in a direction indicated by arrow A from the emission surface 23a. Then, the sound is reflected by the side board 5a of the car 5 and becomes a reflected sound. The<!-- EPO <DP n="26"> --> reflected sound travels in a direction indicated by arrow B, as illustrated in <figref idref="f0008">Fig. 13</figref>. As described above, the speaker units 23R-2 and 23L-2 perform "indirect sound emission" of emitting a sound to a user from the suspended ceiling 10 by utilizing reflection of the side board 5a of the car 5.</p>
<p id="p0060" num="0060">On the other hand, the two speaker units 23R-1 and 23L-1 on the back side are provided and face the floor board 5b of the car 5, as described above with reference to <figref idref="f0007">Fig. 12</figref>. Accordingly, as described above, the two speaker units 23R-1 and 23L-1 on the back side perform "direct sound emission" of directly emitting a sound to a user from the suspended ceiling 10. In Embodiment 1, "indirect sound emission" and "direct sound emission" may be combined, as in the modification of <figref idref="f0008">Fig. 13</figref>. Note that in this case, in <figref idref="f0008">Fig. 13</figref>, the speaker units 23R and 23L illustrated in <figref idref="f0006">Fig. 7</figref> may be provided instead of the speaker units 23R-2 and 23L-2.</p>
<p id="p0061" num="0061">The speaker unit 23 may be provided at any place on the lower surface 10b of the suspended ceiling 10 in the car 5. Examples of a pattern in which the speaker unit 23 is provided include a case where the speaker units 23 are located on respective right and left sides as illustrated in <figref idref="f0006">Fig. 7</figref>, a case where the speaker units 23 are provided on respective front and back sides, and a case where the speaker units 23 are provided at respective corners of the lower surface 10b of the suspended ceiling 10, and these cases can be combined freely. Note, however, that sound quality is good in a case where the speaker units 23 are apart from each other to some degree. Therefore, in Embodiment 1, the speaker cabinets 20 that each house the speaker unit 23 are disposed apart from each other by the second distance D2 or the third distance D3.</p>
<heading id="h0016">[Height at which Speaker Cabinet 20 Is Provided]</heading>
<p id="p0062" num="0062">The speaker cabinet 20 may be provided inside the floor board 5b of the car 5. However, in a case where the number of users is large, it is difficult for a sound signal emitted from under the feet of the users to reach positions of ears of the users since the users' bodies themselves serve as sound absorbers and reflectors. As a result, the<!-- EPO <DP n="27"> --> sound field 27 based on high-sound-quality sound reproduction cannot be created in the car 5. Therefore, in Embodiment 1, the speaker cabinet 20 is basically provided at a position higher than the chest of a user to accomplish high-sound-quality reproduction. It is therefore desirable to provide the speaker cabinet 20 in the suspended ceiling 10 or on an upper portion of the side board 5a of the car 5.</p>
<heading id="h0017">[Sound Field 27]</heading>
<p id="p0063" num="0063">The sound field 27 generated by the sound system 13 is, for example, a range indicated by the broken line in <figref idref="f0005">Fig. 6</figref>. Specifically, a height H2 of a lower limit 27a of the sound field 27 is, for example, approximately 1.0 m to 1.8 m, desirably 1.6 m to 1.8 m from the floor board 5b of the car 5. Furthermore, a height of an upper limit of the sound field 27 is, for example, 1.8 m to 2.0 m from the floor board 5b of the car 5. It is therefore desirable to form the sound field 27 such that a height of the sound field 27 from the floor board 5b is in a range of 1.6 m to 1.8 m. Therefore, the sound field 27 is generated above the lower limit 27a in the car 5. As a result, the sound field 27 is formed around the head of a user, as illustrated in <figref idref="f0005">Fig. 6</figref>. Note that the height H2 of the lower limit 27a of the sound field 27 is set on the basis of an average body height of users (excluding junior-high-school kids and younger kids). Note that in a range of a height from the floor board 5b of 0 m to less than 1.6 m, a good sound field cannot be formed in a case where a plurality of users are in the car 5 since a sound is blocked or absorbed by the bodies of the users, as described above. Furthermore, in a range of the height from the floor board 5b exceeding 1.8 m, the sound field 27 deviates above the heads of users, and it becomes hard for the users to hear. The range where the sound field 27 is generated is not limited to the range of 1.6 m to 1.8 m. That is, the height H2 of the lower limit 27a of the sound field 27 is desirably, for example, in a range of 1.0 m to 1.8 m from the floor board 5b of the car 5 since it is only necessary that the sound field 27 is generated in a range higher than the chest of a user on the basis of an average body height of users (excluding junior-high-school kids and younger kids).</p>
<heading id="h0018">[Configuration of Sound Content 30]</heading><!-- EPO <DP n="28"> -->
<p id="p0064" num="0064">Next, a configuration of the sound content 30 according to Embodiment 1 is described in detail with reference to <figref idref="f0004">Fig. 4</figref>. The sound content 30 is sound data for reproducing a sound signal sent out from the speaker system 22 under control of the sound-field control unit 21a. As described above, the sound content 30 is obtained by mixing down the natural environmental sound 30A and the chord serial sound 30B obtained by combining the consonance 33 and the dissonance 34. The upper stage of <figref idref="f0004">Fig. 4</figref> is an example of a time waveform of the natural environmental sound 30A, and the lower stage of <figref idref="f0004">Fig. 4</figref> is an example of a time waveform of the chord serial sound 30B.</p>
<p id="p0065" num="0065">In the natural environmental sound 30A illustrated in the upper stage of <figref idref="f0004">Fig. 4</figref>, (1) to (3) indicate the additional sound 32. Specifically, (1) indicates a cry of one or more animals, (2) indicates chirping of one or more birds, and (3) indicates a flying sound at takeoff of one or more birds and chirping of one or more birds. In the natural environmental sound 30A illustrated in the upper stage of <figref idref="f0004">Fig. 4</figref>, (4) indicates the natural BG sound 31. Specifically, (4) includes at least one of a sound of trees shaking in the wind, a sound of water flowing in a river or sea, a sound of a crowd, human voice, and a sound of movement of an artificial object such as a car and a train. As described above, the natural BG sound 31 is set throughout the whole time length L of the sound content 30. On the other hand, the additional sound 32 is temporally spaced apart from another one.</p>
<p id="p0066" num="0066">In the upper stage and the lower stage of <figref idref="f0004">Fig. 4</figref>, (5) indicates a state of fade-in of the sound content 30, and (6) indicates a state of fade-out of the sound content 30. Note that the "fade-in" means that a sound pressure level of sound content gradually increases, and fade-in processing means processing of gradually increasing a sound pressure level of sound content. Furthermore, the "fade-out" means that a sound pressure level of sound content gradually decreases, and fade-out processing means processing of gradually decreasing a sound pressure level of sound content. That is, the fade-in processing is performed on a beginning part of the sound content 30, and<!-- EPO <DP n="29"> --> the fade-out processing is performed on an end part of the sound content 30. Therefore, in a case where the sound content 30 is reproduced in a loop, a sound pressure level at a joint part 30a (see <figref idref="f0008">Fig. 14</figref>) where the beginning part of the sound content 30 and the end part of the sound content 30 are joined to each other is lowest.</p>
<p id="p0067" num="0067">The chord serial sound 30B illustrated in the lower stage of <figref idref="f0004">Fig. 4</figref> is constituted such that the consonance 33 and the dissonance 34 are alternately arranged with passage of time, as illustrated in <figref idref="f0005">Fig. 5</figref>. As described above with reference to <figref idref="f0005">Fig. 5</figref>, the time length L1 of the consonance 33 is longer than or equal to the time length L2 of the dissonance 34. Furthermore, one of a chord at a beginning and a chord at an end of the chord serial sound 30B is the consonance 33, and the other one of the chords is the dissonance 34.</p>
<p id="p0068" num="0068">In Embodiment 1, the whole time length L of the sound content 30 (i.e., a sound signal) is set to 2 minutes or less. That is, the time length L of the sound content 30 is 2 minutes (i.e., 120 seconds) at maximum. Although an up-down movement time of the car 5 of the elevator 1 depends on a height of the building, the movement time of the car 5 is approximately 2 minutes or less in many cases even when the building is tall. The reason is as follows. A space in the car 5 is an enclosed space. When users are restrained for a long time in the enclosed space, the users cannot even move, and therefore a stressful condition continues. Furthermore, in the car 5, strangers are extremely close to each other in the enclosed space. It can be said that this is an undesirable state from the perspective of crime prevention. For these reasons, in many cases, an actual travelling time of the car 5 of the elevator 1 is limited to 90 seconds or less, and even in a case of a super tall building, the actual travelling time of the car 5 is limited to 90 seconds or less to 120 seconds or less. Therefore, in Embodiment 1, the time length L of a single piece of sound content 30 is set to 2 minutes or less, that is, 90 seconds or less to 120 seconds or less. In the example of <figref idref="f0004">Fig. 4</figref>, the time length L of the sound content 30 is set to 90 seconds. Furthermore, the sound content 30 set to 2 minutes or less is repeatedly reproduced continuously<!-- EPO <DP n="30"> --> in the car 5 under control of the sound-field control unit 21a. The sound content 30 thus repeatedly reproduced is generated and has a melody that changes on a constant cycle.</p>
<p id="p0069" num="0069">However, the sound content 30 may be used under an environment other than an elevator. In this case, the time length L of the sound content 30 may be longer than 2 minutes. However, even in this case, one of a chord at a beginning and a chord at an end of the chord serial sound 30B is the consonance 33, and the other one of the chords is the dissonance 34. In this way, the consonance 33 and the dissonance 34 are linked at the joint part 30a of the sound content 30 when the sound content 30 is reproduced in a loop.</p>
<p id="p0070" num="0070">Furthermore, when the sound content 30 is reproduced in a loop, an abnormal sound (pop noise) sometimes occurs at the joint part 30a of the sound content 30. The abnormal sound (pop noise) is, for example, caused due to performance of an acoustic circuit or a data recording device in which the sound content 30 is stored, especially degradation of the data recording device in which the sound content 30 is stored. Since a user in the car 5 is in a quiet environment, the abnormal sound (pop noise) is remarkably easily heard by the user and is sometimes perceived as a very unpleasant abnormal sound. To prevent an unpleasant feeling caused by the abnormal sound (pop noise), the sound content 30 has the following configuration in Embodiment 1.</p>
<p id="p0071" num="0071"><figref idref="f0008">Fig. 14</figref> is an explanatory view schematically illustrating a temporal change of a sound pressure level of the sound content 30 used in the sound system 13 according to Embodiment 1. <figref idref="f0008">Fig. 14</figref> schematically illustrates a sound pressure level of the whole sound content 30 combining a sound pressure level of the natural environmental sound 30A and a sound pressure level of the chord serial sound 30B of the sound content 30.</p>
<p id="p0072" num="0072">As illustrated in <figref idref="f0008">Fig. 14</figref>, the sound content 30 is reproduced in a loop on a cycle corresponding to the time length L. In <figref idref="f0008">Fig. 14</figref>, (5) indicates a fade-in state of the sound<!-- EPO <DP n="31"> --> content 30, and (6) indicates a fade-out state of the sound content 30. In this way, fade-in processing of gradually increasing a sound pressure level is performed at a beginning part of the sound content 30 at which reproduction starts, and fade-out processing of gradually decreasing a sound pressure level is performed at an end part of the sound content 30 at which reproduction ends. Therefore, in a case where the sound content 30 is reproduced in a loop, the (6) fade-out processing and the (5) fade-in processing are performed at the joint part 30a of the sound content 30, as illustrated in <figref idref="f0008">Fig. 14</figref>. A time length L3 of the joint part 30a is set to 3 seconds or less. In the fade-out processing, the sound pressure level of the sound content 30 is decreased by 6 dB as compared with the original sound pressure level. In the fade-in processing, the sound pressure level of the sound content 30 decreased by the fade-out processing is increased by 6 dB to become the original sound pressure level. Note that an amount of the increase or decrease of the sound pressure level is not limited to 6 dB and may be 6 dB ± α (α is any value).</p>
<p id="p0073" num="0073">By thus performing the fade-out processing at the joint part 30a such that the sound pressure level of the sound content 30 is decreased by 6 dB as compared with the original sound pressure level, it is possible to prevent an abnormal sound (pop noise) that occurs at the joint part 30a from being remarkably heard. Furthermore, in a quiet space of a general silent elevator, many users have an uncomfortable feeling and an unpleasant feeling. In Embodiment 1, the fade-out processing and the fade-in processing of the joint part 30a are performed within 3 seconds in total, and therefore only instantaneous silence occurs. It is therefore possible to prevent a user from having an unpleasant feeling, which a user has in a quiet space of a general elevator.</p>
<p id="p0074" num="0074">In general, the car 5 stops at a floor designated by a user in response to a user's operation of a button while the car 5 is moving up and down. During that time, the sound content 30 is repeatedly reproduced. Therefore, a user cannot always hear the sound content 30 from the beginning part. Some users of the elevator 1 may hear the sound content 30 in the middle of reproduction. Furthermore, some users may use the<!-- EPO <DP n="32"> --> elevator 1 to move one floor (e.g., from the fourth floor to the fifth floor) although many users use the elevator 1 to move plural floors (e.g., from the first floor to the tenth floor).</p>
<p id="p0075" num="0075">In a case where a user uses the elevator 1 to move only one floor, it generally takes 10 seconds or less to complete a series of actions "the user gets on the car 5" → "the car 5 moves" → "the car 5 stops". When music is reproduced in a usual way in the car 5, reproduction of the music does not end in 10 seconds, and therefore the user is forced to stop listening to the music in the middle of reproduction of the music. Some users may like the music and want to listen to the music longer, but even such users need to get off the car 5 at a floor designated by the users. This may rather end up giving stress to the users. In Embodiment 1, the sound content 30 that does not give stress and an unpleasant feeling, for example, even to a user who uses the elevator 1 to move one floor is thus emitted. Specifically, not music that is a "sound having meaning", but "natural environmental sound 30A (a sound generated in nature)" having no special meaning = "sound having no meaning" is used so that stress and an unpleasant feeling are not given even in a case where the elevator 1 is used for a short time. Furthermore, as described later, the "chord serial sound 30B" is merely a collection of a series of chords in which the consonance 33 and the dissonance 34 are arranged, unlike general music. Therefore, the "chord serial sound 30B" is also a "sound having no meaning". In a case where a "sound having no meaning" is reproduced, even in a case where a user is forced to stop listening to the sound in the middle of reproduction of the sound, a possibility of giving stress to the user is extremely low.</p>
<p id="p0076" num="0076">A sound pressure level of the chord serial sound 30B is lower by 3 dB to 6 dB in average than a sound pressure level of the natural environmental sound 30A. Accordingly, the natural environmental sound 30A becomes a main sound of the sound content 30, and the chord serial sound 30B becomes a background sound (back sound) of the natural environmental sound 30A. The natural environmental sound 30A is obtained by adding the additional sound 32 such as chirping of a bird to the natural BG<!-- EPO <DP n="33"> --> sound 31 such as a murmur of a stream, as described above. Therefore, the natural environmental sound 30A fluctuates in sound pressure level and tends to be intermittent. On the other hand, the chord serial sound 30B has a musical pitch and tone configuration of a constant cycle. Therefore, even in a case where the sound pressure level of the natural environmental sound 30A temporarily becomes low, the chord serial sound 30B is presented to a user with an almost identical sound pressure level.</p>
<heading id="h0019">[Chord Serial Sound 30B]</heading>
<p id="p0077" num="0077">Next, a configuration of the chord serial sound 30B is described. As described above with reference to <figref idref="f0005">Fig. 5</figref>, the chord serial sound 30B is a collection of a series of chords in which the consonance 33 and the dissonance 34 are arranged. The consonance 33 and the dissonance 34 are described below.</p>
<p id="p0078" num="0078"><figref idref="f0009">Fig. 15</figref> is a basic explanatory view of the consonance 33 and the dissonance 34 used in the sound system 13 according to Embodiment 1. As illustrated in <figref idref="f0009">Fig. 15</figref>, the consonance 33 and the dissonance 34 that form the chord serial sound 30B are each a chord constituted by sounds within 1 octave.</p>
<p id="p0079" num="0079">As illustrated in <figref idref="f0009">Fig. 15</figref>, as a basic of a chord, how much two notes are apart from each other is expressed in stages by use of ordinal numbers. Hereinafter, the two notes are referred to as a "lower note" and a "higher note". In <figref idref="f0009">Fig. 15</figref>, each "higher note" is expressed by use of an ordinal number in a case where the "lower note" is low "do". A note of the same pitch is called "perfect first" or "unison", an interval of one semitone is called "minor second", and an interval of two semitones is called "major second". Similarly, an interval of three semitones is called "minor third", and an interval of four semitones is called "major third". Furthermore, an interval of 12 semitones is called "perfect eighth" or "octave". Therefore, as illustrated in <figref idref="f0009">Fig. 15</figref>, in a case where the "lower note" is low "do", "re" is called "major second", "mi" is called "major third", and high "do" is called "perfect eighth". <figref idref="f0009">Fig. 16</figref> is an explanatory view illustrating a<!-- EPO <DP n="34"> --> relationship between the "lower note" and the "higher note" that constitutes a chord used in the sound system 13 according to Embodiment 1 in the form of a list by use of ordinal numbers.</p>
<p id="p0080" num="0080">In a case where two or more notes occur simultaneously, either a consonant state or a dissonant state occurs. The consonance 33 is a chord in the consonant state where the two or more notes that occur simultaneously are harmonious. The dissonance 34 is a chord in the dissonant state where the two or more notes that occur simultaneously are unharmonious. However, there is no clear distinction between the consonant state and the dissonant state, and a degree of harmoniousness between two notes is an important element. In acousticopsychology, the "lower note" and the "higher note" sound more harmonious to human ears as a ratio of the number of oscillations (frequency) of the "lower note" and the number of oscillations (frequency) of the "higher note" becomes closer to a simple integer ratio, and sound less harmonious to human ears as the ratio becomes complicated. <figref idref="f0010">Fig. 17</figref> illustrates an example of definition of the consonance 33 and the dissonance 34 used in the sound system 13 according to Embodiment 1. As illustrated in <figref idref="f0010">Fig. 17</figref>, the consonance 33 includes, for example, perfect first, perfect eighth, perfect fifth, perfect fourth, major third, minor third, major sixth, and minor sixth. Furthermore, the dissonance 34 includes, for example, major second, minor second, major seventh, minor seventh, and others. <figref idref="f0010">Figs. 18</figref> and <figref idref="f0011">19</figref> illustrate examples of the consonance 33 used in the sound system 13 according to Embodiment 1. <figref idref="f0011">Fig. 20</figref> illustrates an example of the dissonance 34 used in the sound system 13 according to Embodiment 1. In Embodiment 1, the consonance 33 and the dissonance 34 to be used are selected as appropriate from among these consonances 33 and dissonances 34. By alternately arranging the selected consonance 33 and dissonance 34, the chord serial sound 30B is generated.</p>
<p id="p0081" num="0081">The dissonance 34 has a ratio of the numbers of oscillations (frequency) that is not an integer ratio, unlike the consonance 33. That is, in the dissonance 34, one note has a frequency component that is not an integer multiple of a frequency of the other<!-- EPO <DP n="35"> --> note. Therefore, the dissonance 34 can present a tone change. In the chord serial sound 30B, the consonance 33 and the dissonance 34 are alternately reproduced. Therefore, a user alternately hears a tone of a constant cycle produced by the consonance 33 and a tone causing a periodic change produced by the dissonance 34 and therefore feels an auditory change. As a result, a cocktail-party effect is produced, and therefore user's attention is focused on the chord serial sound 30B, and an unpleasant element such as stress is reduced. Note that the cocktail-party effect is brain activity of unconsciously hearing out only information related to oneself or information interesting to an individual from among ambient sounds. A human brain has sorting capability of naturally distinguishing voice of a conversation partner even though an ambient noise level is quite high in a gathering where a large number of people are conversing such as a cocktail party. This sorting capability is the cocktail-party effect.</p>
<p id="p0082" num="0082">The chord serial sound 30B, in which the dissonance 34 is inserted between the consonances 33, allows a user to feel an auditory change. As a result, once the user becomes aware of the chord serial sound 30B, user's attention is focused on the chord serial sound 30B due to the cocktail-party effect. Therefore, the user subconsciously listens to the chord serial sound 30B. This lessens a user's sense of being in an enclosed space of the car 5 of the elevator. As a result, user's uncomfortable feeling and unpleasant feeling are reduced.</p>
<p id="p0083" num="0083">In general, a dissonance produces "tension" that makes a person uneasy or excited, and a consonance produces "relaxation" that makes a person peaceful or calm. When the "relaxed" state continues, a person feels bored, and the cocktail-party effect does not occur. Therefore, in the chord serial sound 30B according to Embodiment 1, "tension" of the dissonance 34 is inserted between "relaxation" of the consonances 33. That is, the chord serial sound 30B is repetition of "relaxation" and "tension". When "relaxation" occurs after "tension" that sometimes occurs, a user is freed from "tension"<!-- EPO <DP n="36"> --> and feels comfortable. As a result, the user can feel expanse of sound and can have a sense of openness, a refreshing feeling, and a comfortable feeling.</p>
<p id="p0084" num="0084"><figref idref="f0012">Fig. 21</figref> is an explanatory view illustrating an example of characteristics of a frequency band used as the chord serial sound 30B used in the sound system 13 according to Embodiment 1. In <figref idref="f0012">Fig. 21</figref>, the horizontal axis represents a frequency, and the vertical axis represents a sound pressure level. In <figref idref="f0012">Fig. 21</figref>, the thick line 50 represents a main band of the chord serial sound 30B, and the thin line 51 represents a sub band of the chord serial sound 30B.</p>
<p id="p0085" num="0085">As indicated by the thick line 50 in <figref idref="f0012">Fig. 21</figref>, the main band of the chord serial sound 30B is a frequency band higher than or equal to approximately 100 Hz and less than 800 Hz. A frequency of the chord serial sound 30B is thus basically the range of the main band. The chord serial sound 30B is constituted by sounds within 1 octave as described above so that the frequency does not markedly change. The reason is described below. Human auditory characteristics have a high hearing sensitivity to a frequency band higher than approximately 1 kHz. Therefore, when the frequency of the chord serial sound 30B is markedly changed from a low frequency band to a high frequency band, only a sound of a high frequency is heard by a user, and for example, a phenomenon that the natural environmental sound 30A becomes hard to hear occurs. In this case, there is no harmony between the natural environmental sound 30A and the chord serial sound 30B. This gives an auditory unpleasant feeling to the user. In Embodiment 1, the chord serial sound 30B thus basically uses the main band so that the frequency does not markedly change. As a result, harmony between the natural environmental sound 30A and the chord serial sound 30B is kept.</p>
<p id="p0086" num="0086">Note that in a case where it is desired to bring about change to the chord serial sound 30B for some reason, the chord serial sound 30B may partially use a sub band of a frequency band of 800 Hz to 2 kHz. Note that in a case where the sub band is used in the chord serial sound 30B, a time length of the sound is set to a short time shorter<!-- EPO <DP n="37"> --> than or equal to 2 seconds so that an auditory function produced by a melody as a musical sound remains although the auditory cocktail-party effect is less likely to occur.</p>
<heading id="h0020">[Natural Environmental Sound 30A]</heading>
<p id="p0087" num="0087">Next, a configuration of the natural environmental sound 30A is described. As described above with reference to <figref idref="f0004">Fig. 4</figref>, the natural environmental sound 30A is formed by adding the additional sound 32 such as chirping of a bird to the natural BG sound 31 such as a murmur of a stream. The natural environmental sound 30A is an environmental sound combining sounds from a plurality of sound sources existing in nature. Note that a sound source of an environmental sound may be an artificially created sound source.</p>
<p id="p0088" num="0088">A sound source configuration is, for example, as follows:
<ol id="ol0002" compact="compact" ol-style="">
<li>(1): a cry of one or more animals</li>
<li>(2): chirping of one or more birds</li>
<li>(3): a flying sound at takeoff of one or more birds</li>
<li>(4): at least one of a sound of trees shaking in the wind, a sound of water flowing in a river or sea, a sound of a crowd, human voice, and a sound of movement of an artificial object such as a car or a train.</li>
</ol></p>
<p id="p0089" num="0089">Among the above sound sources, the sound sources (1) to (3) are sound sources that constitute the additional sound 32 of <figref idref="f0004">Fig. 4</figref> and are sounds that call up, in a user, an image of behavior of a living organism in nature. Each of the sound sources (1) to (3) is a sound source (hereinafter referred to as a first sound source) by use of, for example, a living organism living in nature. A sound of each of the sound sources (1) to (3) is a sound generated from the first sound source, that is, a sound based on behavior of a living organism in nature. On the other hand, a sound of the sound source (4) is a sound that constitutes the natural BG sound 31 of <figref idref="f0004">Fig. 4</figref> and is a sound that calls up, in a user, an image of an environmental state in nature. The sound source (4) is a sound source (hereinafter referred to as a second sound source) by use of an environment in<!-- EPO <DP n="38"> --> nature. A sound of the sound source (4) is a sound generated from the second sound source, that is, a sound based on an environmental state in nature.</p>
<p id="p0090" num="0090">Not all of the time sections 35 that constitute the natural environmental sound 30A have an identical time length, and the time sections 35 are set to at least two kinds of time lengths. That is, the time sections 35 may be set to two or more kinds of time lengths such as 2 seconds, 3 seconds, 5 seconds, and 8 seconds.</p>
<p id="p0091" num="0091">The natural BG sound 31 is set such that the natural BG sound 31 is to be continuously emitted throughout all of the plurality of time sections 35. The additional sound 32 is individually set for each of the time sections 35 and is emitted for each of the time sections 35. A sound pressure level of the additional sound 32 is higher than a sound pressure level of the natural BG sound 31. A difference between the sound pressure level of the additional sound 32 and the sound pressure level of the natural BG sound 31 is 10 dB or more. When the difference in sound pressure level is too large, a user has an unpleasant feeling, and therefore an upper limit is set to approximately 20 dB. In this way, in Embodiment 1, the sound pressure level of the additional sound 32 is made higher by a range of +10 dB to +20 dB (instantaneous) than the sound pressure level of the natural BG sound 31. Thereby, the additional sound 32 is presented as a signal having a clearer sound pressure level than the natural BG sound 31.</p>
<p id="p0092" num="0092">Furthermore, as illustrated in the example of <figref idref="f0004">Fig. 4</figref>, not all of the time sections 35 have the additional sound 32, and there is a time section 35 for which the additional sound 32 is not set. In the example of <figref idref="f0004">Fig. 4</figref>, a time section 35 (hereinafter referred to as a "first time section") "with additional sound" to which the additional sound 32 is added and a time section 35 (hereinafter referred to as a "second time section") "without additional sound" to which the additional sound 32 is not added are set. This is because there is a high possibility of giving a "noisy" impression to a user when the additional sound 32 is set in all of the time sections 35. In Embodiment 1, the time<!-- EPO <DP n="39"> --> sections 35 are arranged such that at least one of adjacent time sections 35 becomes the second time section "without additional sound" to give a "pleasant" impression to a user. That is, at least one second time section "without additional sound" is disposed between adjacent first time sections "with additional sound". On the other hand, two or more second time sections "without additional sound" may be successively disposed.</p>
<p id="p0093" num="0093">Furthermore, the natural environmental sound 30A has an introduction part 36 that includes one or more time sections 35, an ending part 38 that includes one or more time sections 35, and an intermediate part 37 that is set between the introduction part 36 and the ending part 38 and includes one time section 35. In the example of <figref idref="f0004">Fig. 4</figref>, the introduction part 36 includes four time sections 35, the intermediate part 37 includes one time section 35, and the ending part 38 includes four time sections 35. The number of time sections 35 described above is merely an example, and the number of time sections 35 is not limited to this example. Although a case where the intermediate part 37 includes one time section 35 is described above, the intermediate part 37 may include two or more time sections 35.</p>
<p id="p0094" num="0094">Note that the time section 35 that constitutes the intermediate part 37 may have a longest time length among the plurality of time sections 35. Specifically, in a case where time lengths of the other time sections 35 are 2 seconds to 8 seconds, the time section 35 that constitutes the intermediate part 37 may have a time length of approximately 15 seconds. Among the plurality of time sections 35 included in the introduction part 36, a time length of a time section 35 having a maximum possible time length is referred to as a first time length. Among the time sections 35 included in the ending part 38, a time length of a time section 35 having a maximum possible time length is referred to as a second time length. In this case, when the time length of the time section 35 included in the intermediate part 37 is referred to as a third time length, the third time length may be set longer than each of the first time length and the second time length.</p>
<p id="p0095" num="0095"><!-- EPO <DP n="40"> --> By reproducing the natural environmental sound 30A including the additional sound 32 together with the chord serial sound 30B in the car 5, a "sense of tension" of keeping unnecessary quietness that brings a unique "uncomfortable feeling" in the elevator 1 can be reduced. Therefore, the natural environmental sound 30A according to Embodiment 1 uses a sound in nature. Furthermore, a series of presented sounds of the natural environmental sound 30A gradually varies in sound intensity with passage of time in a way such as the introduction part 36 → the intermediate part 37 → the ending part 38, as in the case of general music. Specifically, in the natural environmental sound 30A, it is desirable to set a sound pressure level of the additional sound 32 highest and set a time length of the additional sound 32 longest in the intermediate part 37.</p>
<p id="p0096" num="0096">To accomplish such desirable settings, a maximum possible value of the sound pressure level of the additional sound 32 in the time section 35 included in the introduction part 36 is referred to as a first level. A maximum possible value of the sound pressure level of the additional sound 32 in the time section 35 included in the ending part 38 is referred to as a second level. A maximum possible value of the sound pressure level of the additional sound 32 in the time section 35 included in the intermediate part 37 is referred to as a third level. In this case, in Embodiment 1, the third level is set higher than the first level and the second level, as illustrated in <figref idref="f0004">Fig. 4</figref>. In the example of <figref idref="f0004">Fig. 4</figref>, the third level is set approximately 1.5 times to 4 times higher than each of the first level and the second level. Therefore, a user hears the additional sound 32 of a high sound pressure level in the intermediate part 37 after hearing the additional sound 32 of a low sound pressure level in the introduction part 36. In this way, the user receives a change in sound intensity of the additional sound 32 with passage of time and does not hear a sudden change in sound. As a result, the user can hear a reproduced sound of the sound content 30 without a feeling of strangeness. Note that the maximum possible value of the sound pressure level of the additional sound 32 in the time section 35 included in the intermediate part 37 is set as the third level in the above description, an average value of the sound pressure level of the<!-- EPO <DP n="41"> --> additional sound 32 in the time section 35 included in the intermediate part 37 may be set as the third level.</p>
<p id="p0097" num="0097">Next, frequency bands of the natural BG sound 31 and the additional sound 32 are described with reference to <figref idref="f0012">Figs. 22</figref> and <figref idref="f0013">23</figref>. <figref idref="f0012">Fig. 22</figref> illustrates instantaneous frequency characteristics obtained by performing fast Fourier transform (FFT) processing on a time waveform at a position of the point (B) in <figref idref="f0004">Fig. 4</figref>. That is, <figref idref="f0012">Fig. 22</figref> illustrates instantaneous frequency characteristics of the additional sound 32. <figref idref="f0013">Fig. 23</figref> illustrates instantaneous frequency characteristics obtained by performing FFT processing on a time waveform at a position of the point (A) in <figref idref="f0004">Fig. 4</figref>. That is, <figref idref="f0013">Fig. 23</figref> illustrates instantaneous frequency characteristics of the natural BG sound 31. In <figref idref="f0012">Figs. 22</figref> and <figref idref="f0013">23</figref>, the horizontal axis represents a frequency, and the vertical axis represents a sound pressure level.</p>
<p id="p0098" num="0098">When <figref idref="f0012">Figs. 22</figref> and <figref idref="f0013">23</figref> are compared, a large change can be observed in frequency characteristics between 2,000 Hz to 10,000 Hz in <figref idref="f0012">Fig. 22</figref>. That is, in <figref idref="f0012">Fig. 22</figref>, a sound pressure level in a frequency band between 2,000 Hz to 10,000 Hz is remarkably high as compared with other parts. On the other hand, in <figref idref="f0013">Fig. 23</figref>, a large change in frequency characteristics is not observed in any frequency band. That is, the change in frequency characteristics observed in <figref idref="f0012">Fig. 22</figref> indicates a change in characteristics that occurs when the additional sound 32 is made larger by 10 dB or more than the natural BG sound 31 as described above. A user hears this change in sound pressure level, and thereby recognizes a sound of a frequency band whose sound pressure level has changed with certainty and subconsciously has a posture of listening to the sound of the frequency band. As a result, the user can concentrate on listening to the sound and bring about a change in mood.</p>
<p id="p0099" num="0099">Although the sound pressure level is changed in the frequency band between 2,000 Hz to 10,000 Hz in the example of <figref idref="f0012">Fig. 22</figref>, this is not restrictive. That is, it is important to change a sound pressure level in a frequency band of 800 Hz or higher.<!-- EPO <DP n="42"> --> This is because a frequency band that is easy for humans to hear is a band of 800 Hz to 15 kHz (a range indicated by the dotted-line frames in <figref idref="f0012">Figs. 22</figref> and <figref idref="f0013">23</figref>). By controlling a frequency in this band, user's attention can be focused on the sound, and control of increasing an interest in the sound can be performed since a physiological reaction of trying to listen to the sound is also utilized. For this reason, in Embodiment 1, the frequency of the additional sound 32 is set to 800 Hz or higher.</p>
<heading id="h0021">[Signal Processing in Signal Processing Unit 44]</heading>
<p id="p0100" num="0100">Next, signal processing performed by the signal processing unit 44 is described. The signal processing unit 44 performs the following signal processing on the natural environmental sound 30A and the chord serial sound 30B included in the sound content 30. Note, however, that the signal processing need not necessarily be performed and need just be performed as needed.</p>
<heading id="h0022">[Signal Processing on Natural Environmental Sound 30A]</heading>
<p id="p0101" num="0101">Phase processing such as reverberation and panning is not performed on the natural BG sound 31 included in the natural environmental sound 30A. However, in a case where it is determined that the natural BG sound 31 is in such a sound source state that a stereo feeling is auditorily low, the signal processing may be performed. Specifically, to auditorily obtain a sense of expanse of the sound, at least one of the following two kinds of signal processing (i) and (ii) may be performed on left and right signals of the natural BG sound 31. Note, here, that a sound emitted from the speaker unit 23R illustrated in <figref idref="f0006">Fig. 7</figref> is referred to as a "right-side signal", and a sound emitted from the speaker unit 23L illustrated in <figref idref="f0006">Fig. 7</figref> is referred to as a "left-side signal".</p>
<p id="p0102" num="0102">
<ol id="ol0003" compact="compact" ol-style="">
<li>(i) A delay time of 300 ms or less is set for one of the right-side signal and the left-side signal of the natural BG sound 31 such that the one is delayed as compared with the other one.</li>
<li>(ii) A gain difference in a range of ±3 dB to 6 dB is set for a sound pressure level of one of the right-side signal and the left-side signal of the natural BG sound 31 such<!-- EPO <DP n="43"> --> that the sound pressure level of the one becomes different from a sound pressure level of the other one.</li>
</ol></p>
<p id="p0103" num="0103">The signal processing (i) is described. A delay time is set for the natural BG sound 31 of the right-side signal such that the natural BG sound 31 of the right-side signal is delayed as compared with the natural BG sound 31 of the left-side signal. The delay time is set as appropriate within a range of more than 0 ms and 300 ms or less. This can produce a sense of expanse of the sound. Note that although a timing of emission of the left-side signal is made earlier than that of the right-side signal in Embodiment 1, the timing of emission of the right-side signal may be made earlier than that of the left-side signal.</p>
<p id="p0104" num="0104">Next, the signal processing (ii) is described. A gain difference is set for the sound pressure level of the right-side signal such that the sound pressure level of the right-side signal becomes higher than that of the left-side signal. An absolute value of a difference between the sound pressure level of the right-side signal and the sound pressure level of the left-side signal is in a range of 3 dB or more and 6 dB or less. This can produce a sense of expanse of the sound. Note that although the sound pressure level of the right side is made higher than that of the left side in Embodiment 1 since a dominant ear of a human is usually a right ear, the sound pressure level of the left side may be made higher than that of the right side.</p>
<p id="p0105" num="0105">Next, signal processing performed on the additional sound 32 is described with reference to <figref idref="f0013 f0014 f0015">Figs. 24 to 27. Figs. 24 to 27</figref> are explanatory views for explaining an example of signal processing performed on the sound content 30 according to Embodiment 1 according to Embodiment 1. The following describes, as an example, a case where the signal processing is performed on the additional sound 32. Note that the signal processing may be performed on the chord serial sound 30B. A case where the signal processing is performed on the chord serial sound 30B will be described later. In <figref idref="f0013">Fig. 24</figref>, panning processing of the left and right signals is performed. In <figref idref="f0013">Fig. 24</figref>, the<!-- EPO <DP n="44"> --> horizontal axis represents time, and the vertical axis represents an angle. <figref idref="f0013">Fig. 24</figref> illustrates a case where panning processing for making a user to feel as if a sound source has moved from right to left is performed. In <figref idref="f0014">Fig. 25</figref>, stereo widening processing is performed as signal processing. In <figref idref="f0014">Fig. 25</figref>, the horizontal axis represents time, and the vertical axis represents a stereo widening rate. <figref idref="f0014">Fig. 25</figref> illustrates a case where phase control processing is performed such that "wideness" and "narrowness" are repeatedly obtained throughout the whole time length of the sound content 30. <figref idref="f0014">Fig. 26</figref> illustrates an original waveform before reverberation processing is performed on the additional sound 32 used in the sound system 13 according to Embodiment 1. <figref idref="f0015">Fig. 
27</figref> illustrates a waveform in a state where a reverberation component has been deleted from the original waveform of the additional sound 32 by performing reverberation processing on the additional sound 32 used in the sound system 13 according to Embodiment 1. The signal processing illustrated in <figref idref="f0013 f0014 f0015">Figs. 24 to 27</figref> is performed as needed on the additional sound 32 included in the natural environmental sound 30A.</p>
<p id="p0106" num="0106">As described above, signal processing such as panning processing, stereo widening processing, and reverberation processing is performed as needed on the additional sound 32 in the natural environmental sound 30A. Note that in a case where the signal processing is performed, the signal processing is performed on the basis of an auditory sense, and at least one of the following two kinds of signal processing (iii), (iv), and (v) is performed.</p>
<p id="p0107" num="0107">
<ul id="ul0002" list-style="none" compact="compact">
<li>(iii) The panning processing of the left and right signals of the additional sound 32 freely changes within a range of 90 degrees to -90 degrees within the whole time length (e.g., 90 seconds) of the sound content 30.</li>
<li>(iv) The stereo widening processing of the left and right signals of the additional sound 32 freely changes a phase difference within a range of 20% to 240% of the left and right signals of the additional sound 32 within the whole time length (e.g., 90 seconds) of the sound content 30.<!-- EPO <DP n="45"> --></li>
<li>(v) The reverberation processing of the left and right signals of the additional sound 32 adjusts a reverberation component within a range of -100 ms to +100 ms of the left and right signals of the additional sound 32. That is, a reverberation component is deleted from the original waveform of the additional sound 32 or a reverberation component is added to the original waveform of the additional sound 32.</li>
</ul></p>
<p id="p0108" num="0108">The signal processing (iii) is described. In the panning processing of <figref idref="f0013">Fig. 24</figref>, panning of the left and right signals of the additional sound 32 is changed in a range of 90 degrees to -90 degrees within the whole time length (e.g., 90 seconds) of the sound content 30. As a result, a user has an impression that a sound source has moved from right to left. Therefore, in a case where the panning processing illustrated in <figref idref="f0008">Fig. 14</figref> is performed on a sound of wings of a flying bird, which is the sound source (5), a user can have an impression that a bird has taken off and moved from right to left.</p>
<p id="p0109" num="0109">The signal processing (iv) is described. In the stereo widening processing of <figref idref="f0014">Fig. 25</figref>, a phase difference is changed in a range of 20% to 240% of the left and right signals of the additional sound 32 within the whole time length (e.g., 90 seconds) of the sound content 30. In <figref idref="f0014">Fig. 25</figref>, a phase difference of 100% is a standard, and a user feels "narrowness" in a case where the phase difference is less than 100%. On the other hand, in a case where the phase difference is larger than 100%, the user is given an impression that a space has widened, and the user feels "wideness". <figref idref="f0014">Fig. 25</figref> is an example in which the processing is performed such that the "wideness" and the "narrowness" are repeatedly obtained within 90 seconds. In <figref idref="f0014">Fig. 25</figref>, in a period of 30 seconds, the "wideness" is gradually increased in the former 15 seconds, and the "narrowness" is gradually increased in the latter 15 seconds.</p>
<p id="p0110" num="0110">The signal processing (v) is described. Sounds emitted from the speaker unit 23R and the speaker unit 23L illustrated in <figref idref="f0006">Fig. 7</figref> are each first reflected by the side board 5a of the car 5 and then reaches user's ears. This is a sound that reaches the user by a shortest distance. However, actually, there is a sound that reaches the<!-- EPO <DP n="46"> --> user's ears after being reflected plural times by other parts such as the floor board 5b of the car 5, the lower surface 10b of the suspended ceiling 10, and the side board 5a of the car 5. Such a sound reflected plural times is called early reflection. A delay time of the early reflection is approximately several ms to 100 ms. The sound loses energy and an amount of the energy gradually attenuates every time the sound is reflected. Such an attenuating sound is called late reverberation. The delay time of the early reflection sound and a delay time and an attenuation time of the late reverberation sound vary depending on a material of the side board 5a and the floor board 5b of the car 5, the lower surface 10b of the suspended ceiling 10, and other parts and a capacity, a shape, and other features of the car 5. Therefore, the signal processing unit 44 deletes or adds a reverberation component in a range of -100 ms to +100 ms of the left and right signals of the additional sound 32 as needed. When there are too many reverberation components, the sound becomes offensive to the user. In this case, therefore, a reverberation component is deleted. <figref idref="f0014">Fig. 26</figref> illustrates the original waveform of the additional sound 32 before the reverberation processing, and <figref idref="f0015">Fig. 27</figref> illustrates a waveform in a state where a reverberation component is deleted from the original waveform of the additional sound 32 by performing the reverberation processing. 
When there are too few reverberation components, there is no sound expanse, and the sound becomes bleak and dull. In this case, a reverberation component is added. An increase-decrease amount is desirably in a range of 300 ms ± 100 ms in a case where a speed of a direct sound is 300 ms. By thus adjusting a time length of a reverberation component according to a material of the car 5 and other factors, it becomes easier for a user to hear the sound content 30, and the user can have a comfortable feeling.</p>
<p id="p0111" num="0111">The panning processing of <figref idref="f0013">Fig. 24</figref>, the stereo widening processing of <figref idref="f0014">Fig. 25</figref>, and the reverberation processing of <figref idref="f0015">Fig. 27</figref> are, for example, implemented by phase control processing. By adjusting a time difference Δt between signals by the phase control processing, the panning processing of <figref idref="f0013">Fig. 24</figref>, the stereo widening processing of <figref idref="f0014">Fig. 25</figref>, and the reverberation processing of <figref idref="f0015">Fig. 27</figref> can be implemented. Note that a<!-- EPO <DP n="47"> --> method for accomplishing the panning processing of <figref idref="f0013">Fig. 24</figref>, the stereo widening processing of <figref idref="f0014">Fig. 25</figref>, and the reverberation processing of <figref idref="f0015">Fig. 27</figref> is not limited to the phase control processing and may be any of other generally-known existing methods.</p>
<heading id="h0023">[Signal Processing on Chord Serial Sound 30B]</heading>
<p id="p0112" num="0112">The signal processing unit 44 also performs the signal processing such as panning, stereo widening, and reverberation described with reference to <figref idref="f0013">Figs. 24</figref> and <figref idref="f0014">25</figref> on the chord serial sound 30B. Since a method of these kinds of signal processing on the chord serial sound 30B is identical to the method of the signal processing performed on the additional sound 32 included in the natural environmental sound 30A, description of the method of signal processing on the chord serial sound 30B is omitted. Note that as for an effect produced in a case where the panning processing is performed on the chord serial sound 30B, a user can have an impression that a sound has moved from right to left, as in the case of the additional sound 32. Furthermore, in a case where the stereo widening processing is performed on the chord serial sound 30B, a user can be given an impression that a space has widened and the user can feel "wideness", as in the case of the additional sound 32. Furthermore, in a case where the reverberation processing is performed on the chord serial sound 30B, a user can have a "comfortable feeling", as in the case of the additional sound 32.</p>
<p id="p0113" num="0113">Alternatively, the signal processing described with reference to <figref idref="f0013 f0014 f0015">Figs. 24 to 27</figref> need not be performed on the chord serial sound 30B. An effect produced in this case is described. The signal processing such as panning, stereo widening, and reverberation is performed on the additional sound 32 in the natural environmental sound 30A. Furthermore, the natural BG sound 31 such as a murmur of a stream and the additional sound 32 such as chirping of a bird include an innate sense of movement by the nature of the sounds. Therefore, for example, in a case where the capacity of the car 5 is small, an influence such as expanding the sound field 27 more than necessary may occur when the signal processing is also performed on the chord serial sound 30B. In this case, there is a possibility that a sense of sound-image localization<!-- EPO <DP n="48"> --> of an emission sound emitted to a user in the car 5 is disturbed and the user has an auditory unpleasant feeling. Whether or not to perform the signal processing such as panning, stereo widening, and reverberation on the chord serial sound 30B thus may be determined as appropriate on the basis of the capacity of the car 5 and other factors.</p>
<heading id="h0024">[Control of Sound Pressure Level of Sound Content 30 during Stoppage of Car 5]</heading>
<p id="p0114" num="0114">Next, control of the sound pressure level of the sound content 30 in a case where the car 5 is stopped at a floor is described. The control of the sound pressure level of the sound content 30 that is being reproduced is performed by the sound-field control unit 21a of the sound-field control device 21 illustrated in <figref idref="f0002">Fig. 2</figref>. In a state where the car 5 is stopped at a floor and the car door 5d is opened, for example, a problem in that a user cannot hear an audio guide at the floor sometimes occurs. To cope with the problem, the sound-field control unit 21a thus may perform fade-out processing of gradually decreasing the sound pressure level of the sound content 30 before the car 5 stops at the floor. In this case, for example, the sound-field control unit 21a is capable of independently controlling the sound pressure level of the natural environmental sound 30A and the sound pressure level of the chord serial sound 30B. In this case, the sound-field control unit 21a need not decrease the sound pressure level of the chord serial sound 30B of the sound content 30 in a state where the car 5 is stopped at the floor and the car door 5d is opened. That is, in this case, the sound-field control unit 21a need not perform the fade-out processing on the sound pressure level of the chord serial sound 30B of the sound content 30. The reason is described below. As described above, the sound pressure level of the chord serial sound 30B is lower by a range of 3 dB to 6 dB in average than the sound pressure level of the natural environmental sound 30A. Therefore, even in a case where the chord serial sound 30B is emitted, for example, the problem in that the user cannot hear an audio guide at the floor does not occur. Conversely, the user may feel uneasy when the user cannot hear the sound content 30 due to a decrease of the sound pressure level of the whole sound content 30 when the car 5 stops at the floor. 
By continuously emitting the chord serial sound 30B at the same sound pressure level in the car 5, the user's sense of<!-- EPO <DP n="49"> --> uneasiness thus can be reduced. Furthermore, for example, in a case where a visually impaired person is waiting for an elevator at a landing area, the user can recognize the position of the car 5 by the chord serial sound 30B flowing out from the car 5 that is stopped. Therefore, the user can be guided toward the position of the car 5 by the chord serial sound 30B.</p>
<heading id="h0025">[Regarding Rating of Sound Content 30]</heading>
<p id="p0115" num="0115"><figref idref="f0015">Fig. 28</figref> is a schematic view illustrating results of human subjective and physiological rating by use of a semantic differential scale (SD) method. <figref idref="f0015">Fig. 28</figref> illustrates an example of a result of a subject test in which a user of the elevator 1 that was actually operating rated a subjective amount for an adequacy factor when the specifications of the sound content 30 were changed. Note that <figref idref="f0004">Fig. 4</figref> illustrates the sound content 30 that obtained a best rate in terms of comfort in the rating result of <figref idref="f0015">Fig. 28</figref>.</p>
<p id="p0116" num="0116">As for the sound content 30, a rating result by use of an SD method of rating an impression on a sound on a multi-point scale by use of a plurality of adjective pairs illustrated in <figref idref="f0015">Fig. 28</figref> was used. In the rating result obtained by the factor analysis, the sound content 30 according to Embodiment 1 obtained a high rate.</p>
<p id="p0117" num="0117"><figref idref="f0015">Fig. 28</figref> illustrates an example of adjective pairs used in the rating by use of the SD method. As illustrated in <figref idref="f0015">Fig. 28</figref>, the human subjective and physiological sound quality rating result by use of the SD method used seven adjective pairs, for each of which a rating was given on a five-point scale. Specifically, the seven adjective pairs are "safe-uneasy", "free-unfree", "relaxed-tense", "opened-closed", "refreshingannoying", "wide-narrow", and "comfortable-unpleasant". That is, in <figref idref="f0015">Fig. 28</figref>, targets to be rated include comfort and a sense of expanse.</p>
<p id="p0118" num="0118"><figref idref="f0015">Fig. 28</figref> illustrates a result obtained by conducting a test on 40 men and women, both young and old. A proportion of men and women of the subjects is 1 : 1, that is,<!-- EPO <DP n="50"> --> the subjects include 20 men and 20 women. Ages of the subjects are in their twenties to sixties. The subjects are not acquainted with each other. <figref idref="f0015">Fig. 28</figref> illustrates an average of obtained results. In each of the adjective pairs in <figref idref="f0015">Fig. 28</figref>, an adjective on the left side is an adjective corresponding to "pleasant" or "good", and an adjective on the right side is an adjective corresponding to "unpleasant" or "bad".</p>
<p id="p0119" num="0119">In <figref idref="f0015">Fig. 28</figref>, sound content emitted to the subjects is as follows:
<ol id="ol0004" compact="compact" ol-style="">
<li>(a): the sound content 30 according to Embodiment 1</li>
<li>(b): only the natural environmental sound 30A of the sound content 30</li>
<li>(c): music (pop music including a vocal)</li>
<li>(d): music (a symphony that does not include a vocal (a generally known song))</li>
<li>(e): a typical car room (no sound)</li>
</ol></p>
<p id="p0120" num="0120">Here, the sound content (a) is the sound content 30 according to Embodiment 1. That is, the sound content (a) includes the natural environmental sound 30A and the chord serial sound 30B. The sound content (b) includes only the natural environmental sound 30A of the sound content 30 according to Embodiment 1.</p>
<p id="p0121" num="0121">The sound content (c) is pop music including singing voice. The sound content (d) is a symphony that does not include singing voice. Correspondingly, the sound content (a) and (b) is "sound having no meaning", and the sound content (c) and (d) is "music" = "sound having meaning".</p>
<p id="p0122" num="0122">Furthermore, (e) is a state of some typical car room. That is, (e) is a silent state where no sound content is emitted in the car 5.</p>
<p id="p0123" num="0123"><figref idref="f0015">Fig. 28</figref> illustrates results of subjective and physiological ratings given by the subjects who heard the sound content (a) to (e) in the car 5. As a result, as illustrated in <figref idref="f0015">Fig. 28</figref>, in each adjective pair, the result of the sound content (a) is best, and the results of the sound content (c) and (d) are bad in general. This result shows that<!-- EPO <DP n="51"> --> users have preferences concerning the symphony of the sound content (d). Even an opinion "I can't find any reason to listen to a symphony in the car 5 of the elevator" was given. Furthermore, as for the pop music of the sound content (c), users' likings have large influence, and therefore reactions are separated into "pleasant" and "unpleasant".</p>
<p id="p0124" num="0124">Note that the silence in (e) was rated the worst in each adjective pair. That is, a silent state where no sound content is emitted in the car 5 is the most unpleasant for a user.</p>
<p id="p0125" num="0125">As is clear from <figref idref="f0015">Fig. 28</figref>, both of the result of the rating of the pop music of the sound content (c) and the result of the rating of the symphony of the sound content (d) shift from a "pleasant" element to an "unpleasant" element, as compared with the sound content 30 according to Embodiment 1. In particular, as for the symphony of the sound content (d), a lot of negative opinions "tense" and "narrow" were given by the subjects. This gives an impression that the symphony of the sound content (d) is unsuitable for the car 5. That is, it was clearly confirmed from the result of factor analysis on each sound content illustrated in <figref idref="f0015">Fig. 28</figref> that a better result was obtained for the sound content (a) than the other kinds of sound content. Therefore, it was confirmed that the sound content 30 constituted by a "sound having no meaning" according to Embodiment 1 can give a sense of safety, a sense of openness, and comfort to the subjects.</p>
<p id="p0126" num="0126">The result of the sound content (b) is good in general as compared with the sound content (c) and (d). The sound content (b) includes only the natural environmental sound 30A and does not include the chord serial sound 30B. However, when the sound content (a) and the sound content (b) are compared, the result of the sound content (a) according to Embodiment 1 is good in general. In particular, the sound content (a) has higher levels in terms of "free", "relaxed", "refreshing", and "comfortable" than does the sound content (b). This result shows that in a case where both of the natural environmental sound 30A and the chord serial sound 30B are<!-- EPO <DP n="52"> --> concurrently emitted in the car 5, a user can feel more comfortable than in a case where only the natural environmental sound 30A is emitted. In a portion of the natural environmental sound 30A where the additional sound 32 is not emitted and only the natural BG sound 31 is emitted, for example, only a sound of lapping of waves at a sand beach is emitted. In this case, there is a possibility that a user feels quietness in the car 5 and feels tense or closed. Therefore, in Embodiment 1, the natural environmental sound 30A and the chord serial sound 30B are concurrently emitted. In this case, even in the portion where the additional sound 32 is not emitted, the natural BG sound 31 and the chord serial sound 30B are concurrently emitted. Since the chord serial sound 30B includes the dissonance 34, the cocktail-party effect can be expected, as described above. Therefore, user's attention is focused on a sound produced by the natural BG sound 31 and the chord serial sound 30B, and an unpleasant element such as stress in an enclosed space is reduced.</p>
<p id="p0127" num="0127">As described above, it was confirmed from the rating result of <figref idref="f0015">Fig. 28</figref> that a sound generated in nature that everybody has heard before auditorily gives a sense of safety in a sealed narrow enclosed space such as an elevator used by strangers. Furthermore, by combining the chord serial sound 30B including the dissonance 34 with the natural environmental sound 30A, the user can be made more relaxed and comfortable. On the other hand, as for music content, likes and dislikes of users have an influence although it depends on contents, and it can be said that the users are given a sense different from a case of a natural sound since the users always hear the same song.</p>
<p id="p0128" num="0128">In the above description, a case where one or two pieces of sound content 30 are stored in the memory 21c of the sound-field control device 21 illustrated in <figref idref="f0002">Fig. 2</figref> is mainly described. However, this is not restrictive. The memory 21c may store a plurality of pieces of sound content 30, each of which is prepared for a corresponding season and living time zone. <figref idref="f0016">Fig. 29</figref> illustrates an example of a sound source of the additional sound 32 inserted into the natural environmental sound 30A of<!-- EPO <DP n="53"> --> the sound content 30 for each season and for each living time zone. As illustrated in <figref idref="f0016">Fig. 29</figref>, a kind of living organism used in the additional sound 32 is changed depending on a season and a living time zone. The natural BG sound 31 is any one of the sounds included in the sound source (4).</p>
<p id="p0129" num="0129">In this case, at least 16 (four seasons × four living time zones) pieces of sound content 30 are thus created. Specifically, for example, in a case where the season is "spring" and the living time zone is "early morning", the sound content 30 is created by adding, as the additional sound 32, at least one of a sparrow, a swallow, a Japanese bush warbler, and a Japanese burrowing cricket to the natural BG sound 31 that is any one of the sounds included in the sound source (4). Furthermore, for example, in a case where the season is "autumn" and the living time zone is "night", the sound content 30 is created by adding, as the additional sound 32, at least one of a horned owl, a Japanese bell cricket, and a pine cricket to the natural BG sound 31 that is any one of the sounds included in the sound source (4). In this way, the sound content 30 is prepared in advance for each season and for each living time zone, and these different pieces of sound content 30 thus prepared are stored in the memory 21c. The sound-field control unit 21a acquires current date and time data from the timer unit 21d and switches the sound content 30 to one corresponding to actual season and living time zone on the basis of the date and time data.</p>
<p id="p0130" num="0130">In Embodiment 1, the sound content 30 may be prepared for each season and for each living time zone, and sound contents 30 may be switched among these pieces of sound content 30 thus prepared according to actual season and living time zone, as described above. In this case, a user can auditorily feel season's transition, a change of a living time zone, and others without being bored. This is highly likely to lead to "healing" and "refreshing" of the user. Furthermore, some users who recognize switching of sound contents 30 may get a feeling of excitement and find it fun to use the car 5 of the elevator 1. In this way, by switching sound contents 30, stress of a user can be further reduced.<!-- EPO <DP n="54"> --></p>
<p id="p0131" num="0131">As described above, with the sound system 13 according to Embodiment 1, a combination of a plurality of sound sources generated in nature is reproduced, and concurrently the chord serial sound 30B obtained by combining the consonance 33 and the dissonance 34 is reproduced. By emitting the sound content 30 obtained by combining a natural sound and a chord toward a target enclosed space, stress of a user in an enclosed space can be reduced. As described above, in Embodiment 1, the chord serial sound 30B includes the consonance 33 and the dissonance 34. Furthermore, the chord serial sound 30B has periodicity indicated by (a) and (b) below. (a): The chord serial sound 30B has periodicity since the chord serial sound 30B having a time length L of 2 minutes or less is reproduced in a loop. (b): The chord serial sound 30B has periodicity since the dissonance 34 is inserted between the consonances 33 and, for example, a dissonance of 1 second follows a consonance of 2 seconds. Therefore, a tone change appears in the chord serial sound 30B on a constant cycle. By combining the chord serial sound 30B having such a periodic tone change and the natural environmental sound 30A, the space in the car 5 is made comfortable.</p>
<p id="p0132" num="0132">In Embodiment 1, the basic number of installed speaker cabinets 20 is two. By thus disposing the two or more speaker cabinets 20 in any ways, the sound content 30 is emitted toward the target enclosed space from plural directions. This can form a three-dimensional sound-field environment and obtain a more natural sense of sound field.</p>
<p id="p0133" num="0133">Furthermore, as illustrated in <figref idref="f0007">Figs. 10 and 11</figref>, the number of speaker units 23 mounted in each speaker cabinet 20 may be two or more. In this case, one speaker is a full-range speaker, and the other speaker is a speaker exclusive for a low range or exclusive for a high range used to assist the full-range speaker. In this way, the speaker cabinet 20 alone can cope with a range from a low range to a high range and can emit a sound for each narrow band of the wide frequency band. As a result, an<!-- EPO <DP n="55"> --> improvement in sound quality and enlargement of a reproduction band can be achieved, and a "high-sound-quality system" that can cover a wide frequency band can be easily obtained.</p>
<p id="p0134" num="0134">However, these cases are not restrictive, and the number of speaker cabinets 20 and the number of speaker units 23 may be each one. Even in this case, the sound-field control unit 21a emits the sound content 30 obtained by combining the natural environmental sound 30A and the chord serial sound 30B into the car 5. This leads to "healing" and "refreshing" of a user in the enclosed space, thereby allowing a further reduction in stress of the user.</p>
<p id="p0135" num="0135">In Embodiment 1, by emission of the above sound signal, a sound-field space is created above the head or chest of a user in an enclosed space such as the car 5 of the elevator 1 where people who do not know each other are often gathered. Therefore, the user can auditorily feel that the narrow space is wide once the user gets on the car 5. As a result, stress resulting from an "uncomfortable feeling" and an "unpleasant feeling", which the user has when being with a stranger under a narrow environment, can be reduced.</p>
<p id="p0136" num="0136">In Embodiment 1, a sound signal based on the sound content 30 obtained by combining the natural environmental sound 30A and the chord serial sound 30B is sent out from the speaker system 22. Such an emission sound by use of a sound generated in nature can make a user auditorily feel that a narrow space is wide even in an enclosed space such as the car 5 of the elevator 1 where people who do not know each other are often gathered, thereby reducing stress. Furthermore, a sound generated in nature is a "sound having no meaning" and is therefore not affected, for example, by users' favorite genres, and a possibility that users' opinions are divided is low. Furthermore, in a case where the sound content 30 is a "sound having no meaning", a user has no particular desire to hear the sound content 30 from the start or hear the sound content 30 to the end. Therefore, even when the user gets on or gets<!-- EPO <DP n="56"> --> off the car 5 in the middle of reproduction of the sound content 30, no special stress is given to the user.</p>
<p id="p0137" num="0137">Furthermore, as illustrated in <figref idref="f0016">Fig. 29</figref>, the sound content 30 may be prepared for each season and for each living time zone, and sound contents 30 may be switched according to actual season and living time zone. In this case, a user can feel, for example, season's transition and a change of a living time zone without being bored. This is highly likely to lead to "healing" and "refreshing" of the user. As a result, stress of the user can be further reduced.</p>
<p id="p0138" num="0138">Note that although the internal space of the car 5 of the elevator 1 is described as an example of the enclosed space in Embodiment 1, the enclosed space may be a waiting room of a hospital or a pharmacy. In a case where the enclosed space is a waiting room of a hospital or a pharmacy, the housing 25 of each speaker cabinet 20 is disposed on an upper surface of a ceiling board of the waiting room. That is, the housing 25 of each speaker cabinet 20 is provided in a ceiling space above the ceiling board. Furthermore, a height at which the sound field 27 is generated is, for example, set to a range of 1.2 m to 1.4 m by taking into consideration that a user is sitting on a chair.</p>
<p id="p0139" num="0139">Furthermore, the enclosed space may be an internal space of an automobile or a train. Examples of the automobile include a passenger car and a bus. In a case where the enclosed space is an internal space of a passenger car such as a taxi, the housing 25 of each speaker cabinet 20 is disposed in a ceiling of the internal space or in a dashboard at a driver's seat. In this case, a height at which the sound field 27 is generated is, for example, set to a range of 1.2 m to 1.4 m by taking into consideration that a user is sitting on a seat of the passenger car. On the other hand, in a case where the enclosed space is an internal space of a train or a bus, the housing 25 of each speaker cabinet 20 is disposed in a ceiling of the internal space. In this case, a height at which the sound field 27 is generated may be, for example, set to a range of<!-- EPO <DP n="57"> --> 1.6 m to 1.8 m in consideration of a standing user or may be set to a range of 1.2 m to 1.4 m in consideration of a user sitting on a seat.</p>
<heading id="h0026">Reference Signs List</heading>
<p id="p0140" num="0140">1: elevator, 2: hoistway, 3: hoisting machine, 3a: sheave, 4: main rope, 5: car, 5a: side board, 5b: floor board, 5c: ceiling board, 5d: car door, 5e: lighting device, 5ea: irradiation surface, 5f: car operating panel, 5g: emergency speaker, 5h: intercom device, 6: counterweight, 7: elevator control panel, 8: control cable, 9: car control device, 9a: input unit, 9b: control unit, 9c: output unit, 9d: memory, 10: suspended ceiling, 10a: side surface, 10b: lower surface, 11: gap, 13: enclosed-space sound system (sound system), 20: speaker cabinet, 21: sound-field control device, 21a: sound-field control unit, 21b: output unit, 21c: memory, 21d: timer unit, 22: speaker system, 23: speaker unit, 23-1: speaker unit, 23-2: speaker unit, 23L: speaker unit, 23L-1: speaker unit, 23L-2: speaker unit, 23R: speaker unit, 23R-1: speaker unit, 23R-2: speaker unit, 23a: emission surface, 25: housing, 25a: front surface, 27: sound field, 27a: lower limit, 30: sound content, 30A: natural environmental sound, 30B: chord serial sound, 30a: joint part, 31: natural BG sound, 32: additional sound, 33: consonance, 34: dissonance, 35: time section, 36: introduction part, 37: intermediate part, 38: ending part, 40: sound content generation device, 41: input unit, 41a: first input unit, 41b: second input unit, 42: natural environmental sound generation unit, 43: chord serial sound generation unit, 44: signal processing unit, 45: mixing-down processing unit, 46: output unit, 47: memory, 50: thick line, 51: thin line, 60: sound material database</p>
</description>
<claims id="claims01" lang="en"><!-- EPO <DP n="58"> -->
<claim id="c-en-0001" num="0001">
<claim-text>An enclosed-space sound system, comprising:
<claim-text>a speaker system that is located in an enclosed space and that includes a speaker unit;</claim-text>
<claim-text>a memory configured to store sound content; and</claim-text>
<claim-text>a sound-field control unit configured to send out a sound signal based on the sound content toward the enclosed space from the speaker system,</claim-text>
<claim-text>the sound content including</claim-text>
<claim-text>a natural environmental sound that represents an environmental sound generated in nature, and</claim-text>
<claim-text>a chord serial sound obtained by combining chords that include a consonance and a dissonance.</claim-text></claim-text></claim>
<claim id="c-en-0002" num="0002">
<claim-text>The enclosed-space sound system of claim 1, wherein
<claim-text>the sound content is obtained by adding the chord serial sound to the natural environmental sound, and</claim-text>
<claim-text>the natural environmental sound and the chord serial sound are concurrently emitted from the speaker system.</claim-text></claim-text></claim>
<claim id="c-en-0003" num="0003">
<claim-text>The enclosed-space sound system of claim 1 or 2, wherein the chord serial sound includes the consonance and the dissonance that are alternately arranged.</claim-text></claim>
<claim id="c-en-0004" num="0004">
<claim-text>The enclosed-space sound system of claim 3, wherein
<claim-text>a chord at a beginning of the chord serial sound is one of the consonance and the dissonance, and</claim-text>
<claim-text>a chord at an end of the chord serial sound is an other one of the consonance and the dissonance.</claim-text></claim-text></claim>
<claim id="c-en-0005" num="0005">
<claim-text><!-- EPO <DP n="59"> --> The enclosed-space sound system of any one of claims 1 to 4, wherein a time length of the consonance is identical to a time length of the dissonance or is longer than the time length of the dissonance.</claim-text></claim>
<claim id="c-en-0006" num="0006">
<claim-text>The enclosed-space sound system of any one of claims 1 to 5, wherein the chord serial sound includes the consonance within 1 octave and the dissonance within 1 octave.</claim-text></claim>
<claim id="c-en-0007" num="0007">
<claim-text>The enclosed-space sound system of any one of claims 1 to 6, wherein a sound pressure level of the natural environmental sound is higher than a sound pressure level of the chord serial sound.</claim-text></claim>
<claim id="c-en-0008" num="0008">
<claim-text>The enclosed-space sound system of claim 7, wherein a difference between the sound pressure level of the natural environmental sound and the sound pressure level of the chord serial sound is in a range of 3 dB or more and 6 dB or less.</claim-text></claim>
<claim id="c-en-0009" num="0009">
<claim-text>The enclosed-space sound system of any one of claims 1 to 8, wherein
<claim-text>the natural environmental sound includes a natural background sound that represents a sound generated by an environmental state in the nature and an additional sound added to the natural background sound, and</claim-text>
<claim-text>the natural environmental sound is a combination of the natural background sound and the additional sound and is obtained by adding the additional sound to the natural background sound.</claim-text></claim-text></claim>
<claim id="c-en-0010" num="0010">
<claim-text>The enclosed-space sound system of claim 9, wherein the natural background sound includes at least one of a sound of trees shaking in wind, a sound of water flowing in a river or sea, a sound of a crowd, a sound of movement of an artificial object, and human voice.</claim-text></claim>
<claim id="c-en-0011" num="0011">
<claim-text>The enclosed-space sound system of claim 9 or 10, wherein<!-- EPO <DP n="60"> -->
<claim-text>the additional sound is a sound generated by behavior of a living organism in the nature, and</claim-text>
<claim-text>the additional sound includes at least one of chirping of one or more birds, a flying sound of one or more flying birds, chirping of one or more insects, and a cry of one or more animals.</claim-text></claim-text></claim>
<claim id="c-en-0012" num="0012">
<claim-text>The enclosed-space sound system of any one of claims 9 to 11, wherein a sound pressure level of the additional sound is higher than a sound pressure level of the natural background sound.</claim-text></claim>
<claim id="c-en-0013" num="0013">
<claim-text>The enclosed-space sound system of claim 12, wherein a difference between the sound pressure level of the additional sound and the sound pressure level of the natural background sound is 10 dB or more.</claim-text></claim>
<claim id="c-en-0014" num="0014">
<claim-text>The enclosed-space sound system of any one of claims 1 to 13, wherein
<claim-text>a time length of the whole sound content is 2 minutes or less, and</claim-text>
<claim-text>the sound-field control unit repeatedly and continuously sends out the sound signal based on the sound content from the speaker system.</claim-text></claim-text></claim>
<claim id="c-en-0015" num="0015">
<claim-text>The enclosed-space sound system of claim 14, wherein fade-in processing of gradually increasing a sound pressure level of the sound content is performed on a beginning part of the sound content and fade-out processing of gradually decreasing the sound pressure level of the sound content is performed on an end part of the sound content, so that a sound pressure level of a joint part where the beginning part of the sound content and the end part of the sound content are joined to each other is lowest in a case where the sound signal based on the sound content is repeatedly and continuously sent out from the speaker system.</claim-text></claim>
<claim id="c-en-0016" num="0016">
<claim-text><!-- EPO <DP n="61"> --> The enclosed-space sound system of any one of claims 1 to 15, wherein a main frequency band of the chord serial sound is set more than or equal to 100 Hz and less than or equal to 800 Hz.</claim-text></claim>
<claim id="c-en-0017" num="0017">
<claim-text>The enclosed-space sound system of claim 9 or any one of claims 10 to 16 as dependent on claim 9, wherein a frequency band of the additional sound is set more than or equal to 800 Hz and less than or equal to 15 kHz.</claim-text></claim>
<claim id="c-en-0018" num="0018">
<claim-text>The enclosed-space sound system of any one of claims 1 to 17, wherein
<claim-text>the enclosed space is an internal space of a car of an elevator, and</claim-text>
<claim-text>the speaker unit is located at at least one of an inside of a suspended ceiling fixed to a ceiling board of the car of the elevator, the ceiling board of the car, a side board of the car, and a floor board of the car.</claim-text></claim-text></claim>
</claims>
<drawings id="draw" lang="en"><!-- EPO <DP n="62"> -->
<figure id="f0001" num="1"><img id="if0001" file="imgf0001.tif" wi="122" he="179" img-content="drawing" img-format="tif"/></figure><!-- EPO <DP n="63"> -->
<figure id="f0002" num="2"><img id="if0002" file="imgf0002.tif" wi="131" he="208" img-content="drawing" img-format="tif"/></figure><!-- EPO <DP n="64"> -->
<figure id="f0003" num="3"><img id="if0003" file="imgf0003.tif" wi="129" he="174" img-content="drawing" img-format="tif"/></figure><!-- EPO <DP n="65"> -->
<figure id="f0004" num="4"><img id="if0004" file="imgf0004.tif" wi="140" he="188" img-content="drawing" img-format="tif"/></figure><!-- EPO <DP n="66"> -->
<figure id="f0005" num="5,6"><img id="if0005" file="imgf0005.tif" wi="122" he="233" img-content="drawing" img-format="tif"/></figure><!-- EPO <DP n="67"> -->
<figure id="f0006" num="7,8,9"><img id="if0006" file="imgf0006.tif" wi="116" he="233" img-content="drawing" img-format="tif"/></figure><!-- EPO <DP n="68"> -->
<figure id="f0007" num="10,11,12"><img id="if0007" file="imgf0007.tif" wi="106" he="233" img-content="drawing" img-format="tif"/></figure><!-- EPO <DP n="69"> -->
<figure id="f0008" num="13,14"><img id="if0008" file="imgf0008.tif" wi="133" he="223" img-content="drawing" img-format="tif"/></figure><!-- EPO <DP n="70"> -->
<figure id="f0009" num="15,16"><img id="if0009" file="imgf0009.tif" wi="156" he="233" img-content="drawing" img-format="tif"/></figure><!-- EPO <DP n="71"> -->
<figure id="f0010" num="17,18"><img id="if0010" file="imgf0010.tif" wi="132" he="233" img-content="drawing" img-format="tif"/></figure><!-- EPO <DP n="72"> -->
<figure id="f0011" num="19,20"><img id="if0011" file="imgf0011.tif" wi="109" he="233" img-content="drawing" img-format="tif"/></figure><!-- EPO <DP n="73"> -->
<figure id="f0012" num="21,22"><img id="if0012" file="imgf0012.tif" wi="152" he="233" img-content="drawing" img-format="tif"/></figure><!-- EPO <DP n="74"> -->
<figure id="f0013" num="23,24"><img id="if0013" file="imgf0013.tif" wi="156" he="233" img-content="drawing" img-format="tif"/></figure><!-- EPO <DP n="75"> -->
<figure id="f0014" num="25,26"><img id="if0014" file="imgf0014.tif" wi="160" he="207" img-content="drawing" img-format="tif"/></figure><!-- EPO <DP n="76"> -->
<figure id="f0015" num="27,28"><img id="if0015" file="imgf0015.tif" wi="122" he="233" img-content="drawing" img-format="tif"/></figure><!-- EPO <DP n="77"> -->
<figure id="f0016" num="29"><img id="if0016" file="imgf0016.tif" wi="99" he="233" img-content="drawing" img-format="tif"/></figure>
</drawings>
<search-report-data id="srep" lang="en" srep-office="EP" date-produced=""><doc-page id="srep0001" file="srep0001.tif" wi="153" he="233" type="tif"/><doc-page id="srep0002" file="srep0002.tif" wi="153" he="233" type="tif"/></search-report-data>
<ep-reference-list id="ref-list">
<heading id="ref-h0001"><b>REFERENCES CITED IN THE DESCRIPTION</b></heading>
<p id="ref-p0001" num=""><i>This list of references cited by the applicant is for the reader's convenience only. It does not form part of the European patent document. Even though great care has been taken in compiling the references, errors or omissions cannot be excluded and the EPO disclaims all liability in this regard.</i></p>
<heading id="ref-h0002"><b>Patent documents cited in the description</b></heading>
<p id="ref-p0002" num="">
<ul id="ref-ul0001" list-style="bullet">
<li><patcit id="ref-pcit0001" dnum="JP2010222127A"><document-id><country>JP</country><doc-number>2010222127</doc-number><kind>A</kind></document-id></patcit><crossref idref="pcit0001">[0007]</crossref></li>
</ul></p>
</ep-reference-list>
</ep-patent-document>
