diff --git a/locale/fa/404.md b/locale/fa/404.md new file mode 100644 index 0000000000000..14c482bdfffba --- /dev/null +++ b/locale/fa/404.md @@ -0,0 +1,7 @@ +--- +layout: page.hbs +permalink: false +title: 404 +--- +## 404: Page could not be found +### ENOENT: no such file or directory diff --git a/locale/fa/about/community.md b/locale/fa/about/community.md new file mode 100644 index 0000000000000..895fa82ab6c23 --- /dev/null +++ b/locale/fa/about/community.md @@ -0,0 +1,56 @@ +--- +title: Community Committee +layout: about.hbs +--- + +# Community Committee + +The Community Committee (CommComm) is a top-level committee in the Node.js Foundation. The CommComm has authority over outward-facing community outreach efforts, including: + - Community [Evangelism](https://github.com/nodejs/evangelism) + - Education Initiatives + - Cultural Direction of Node.js Foundation + - Community Organization Outreach + - Translation and Internationalization + - Project Moderation/Mediation + - Public Outreach and [Publications](https://medium.com/the-node-js-collection) + +There are four types of involvement with the Community Committee: + + - A **Contributor** is any individual creating or commenting on an issue or pull request. + - A **Collaborator** is a contributor who has been given write access to the repository. + - An **Observer** is any individual who has requested or been requested to attend a CommComm meeting. It is also the first step to becoming a Member. + - A **Member** is a collaborator with voting rights who has met the requirements of participation and has been voted in through the CommComm voting process. + +For the current list of Community Committee members, see the project's [README.md](https://github.com/nodejs/community-committee). + +## Contributors and Collaborators + +It is the mission of the CommComm to further build out the Node.js Community. If you're reading this, you're already a part of that community – and as a part of the Node.js Community, we'd love to have your help! + +The [nodejs/community-committee](https://github.com/nodejs/community-committee) GitHub repository is a great place to start. Check out the [issues labeled "Good first issue"](https://github.com/nodejs/community-committee/labels/good%20first%20issue) to see where we're looking for help. If you have your own ideas on how we can engage and build the community, feel free to open your own issues, create pull requests with improvements to our existing work, or help us by sharing your thoughts and ideas in the ongoing discussions we're having on GitHub. + +You can further participate in our ongoing efforts around community building - like localization, evangelism, the Node.js Collection, and others - by digging into their respective repositories and getting involved! + +Before diving in, please be sure to read the [Collaborator Guide](https://github.com/nodejs/community-committee/blob/master/COLLABORATOR_GUIDE.md). + +If you're interested in participating in the Community Committee as a committee member, you should read the section below on **Observers and Membership**, and create an issue asking to be an Observer in our next Community Committee meeting. You can find a great example of such an issue [here](https://github.com/nodejs/community-committee/issues/142). + +## Observers and Membership + +If you're interested in becoming more deeply involved with the Community Committee and its projects, we encourage you to become an active observer, and work toward achieving member status. To become a member you must: + + 1. 
Attend the bi-weekly meetings, investigate issues tagged as good first issue, file issues and pull requests, and provide insight via GitHub as a contributor or collaborator. + 2. Request to become an Observer by filing an issue. Once added as an Observer to meetings, we will track attendance and participation for 3 months, in accordance with our governance guidelines. You can find a great example of such an issue [here](https://github.com/nodejs/community-committee/issues/142). + 3. Once you have met the 3-month minimum attendance and participation expectations, the CommComm will vote to add you as a member. + +Membership is for 6 months. The group will ask on a regular basis if the expiring members would like to stay on. A member just needs to reply to renew. There is no fixed size of the CommComm. However, the expected target is between 9 and 12. You can read more about membership, and other administrative details, in our [Governance Guide](https://github.com/nodejs/community-committee/blob/master/GOVERNANCE.md). + +Regular CommComm meetings are held bi-monthly in a Zoom video conference, and broadcast live to the public on YouTube. Any community member or contributor can ask that something be added to the next meeting's agenda by logging a GitHub Issue. + +Meeting announcements and agendas are posted before the meeting begins in the organization's [GitHub issues](https://github.com/nodejs/community-committee/issues). You can also find the regularly scheduled meetings on the [Node.js Calendar](https://nodejs.org/calendar). To follow Node.js meeting livestreams on YouTube, subscribe to the Node.js Foundation [YouTube channel](https://www.youtube.com/channel/UCQPYJluYC_sn_Qz_XE-YbTQ). Be sure to click the bell to be notified of new videos! + +## Consensus Seeking Process + +The CommComm follows a [Consensus Seeking](https://en.wikipedia.org/wiki/Consensus-seeking_decision-making) decision-making model. + +When an agenda item appears to have reached consensus, the moderator will ask "Does anyone object?" as a final call for dissent. If consensus with no objections cannot be reached, a majority-wins vote is called. It is expected that the majority of decisions made by the CommComm are reached via the consensus-seeking process and that voting is only used as a last resort. \ No newline at end of file diff --git a/locale/fa/about/governance.md b/locale/fa/about/governance.md new file mode 100644 index 0000000000000..720a6fe30d06c --- /dev/null +++ b/locale/fa/about/governance.md @@ -0,0 +1,139 @@ +--- +title: Project Governance +layout: about.hbs +--- +# Project Governance + +## Technical Steering Committee + +The project is jointly governed by a Technical Steering Committee (TSC) +which is responsible for high-level guidance of the project. + +The TSC has final authority over this project including: + +* Technical direction +* Project governance and process (including this policy) +* Contribution policy +* GitHub repository hosting +* Conduct guidelines +* Maintaining the list of additional Collaborators + +Initial membership invitations to the TSC were given to individuals who +had been active contributors, and who have significant +experience with the management of the project. Membership is +expected to evolve over time according to the needs of the project. + +For the current list of TSC members, see the project +[README.md](https://github.com/nodejs/node/blob/master/README.md#tsc-technical-steering-committee). 
+ +## Collaborators + +The [nodejs/node](https://github.com/nodejs/node) GitHub repository is +maintained by the TSC and additional Collaborators who are added by the +TSC on an ongoing basis. + +Individuals making significant and valuable contributions are made +Collaborators and given commit-access to the project. These +individuals are identified by the TSC and their addition as +Collaborators is discussed during the weekly TSC meeting. + +_Note:_ If you make a significant contribution and are not considered +for commit-access, log an issue or contact a TSC member directly and it +will be brought up in the next TSC meeting. + +Modifications of the contents of the nodejs/node repository are made on +a collaborative basis. Anybody with a GitHub account may propose a +modification via pull request and it will be considered by the project +Collaborators. All pull requests must be reviewed and accepted by a +Collaborator with sufficient expertise who is able to take full +responsibility for the change. In the case of pull requests proposed +by an existing Collaborator, an additional Collaborator is required +for sign-off. Consensus should be sought if additional Collaborators +participate and there is disagreement around a particular +modification. See _Consensus Seeking Process_ below for further detail +on the consensus model used for governance. + +Collaborators may opt to elevate significant or controversial +modifications, or modifications that have not found consensus, to the +TSC for discussion by assigning the ***tsc-agenda*** tag to a pull +request or issue. The TSC should serve as the final arbiter where +required. + +For the current list of Collaborators, see the project +[README.md](https://github.com/nodejs/node/blob/master/README.md#current-project-team-members). + +A guide for Collaborators is maintained in +[COLLABORATOR_GUIDE.md](https://github.com/nodejs/node/blob/master/COLLABORATOR_GUIDE.md). + +## TSC Membership + +TSC seats are not time-limited. There is no fixed size of the TSC. +However, the expected target is between 6 and 12, to ensure adequate +coverage of important areas of expertise, balanced with the ability to +make decisions efficiently. + +There is no specific set of requirements or qualifications for TSC +membership beyond these rules. + +The TSC may add additional members to the TSC by a standard TSC motion. + +A TSC member may be removed from the TSC by voluntary resignation, or by +a standard TSC motion. + +Changes to TSC membership should be posted in the agenda, and may be +suggested as any other agenda item (see "TSC Meetings" below). + +No more than 1/3 of the TSC members may be affiliated with the same +employer. If removal or resignation of a TSC member, or a change of +employment by a TSC member, creates a situation where more than 1/3 of +the TSC membership shares an employer, then the situation must be +immediately remedied by the resignation or removal of one or more TSC +members affiliated with the over-represented employer(s). + +## TSC Meetings + +The TSC meets weekly on a Google Hangout On Air. The meeting is run by +a designated moderator approved by the TSC. Each meeting should be +published to YouTube. + +Items that are considered contentious, or that modify governance, +contribution policy, TSC membership, or the release process, are added +to the TSC agenda. + +The intention of the agenda is not to approve or review all patches. +That should happen continuously on GitHub and be handled by the larger +group of Collaborators. 
+ +Any community member or contributor can ask that something be added to +the next meeting's agenda by logging a GitHub Issue. Any Collaborator, +TSC member or the moderator can add the item to the agenda by adding +the ***tsc-agenda*** tag to the issue. + +Prior to each TSC meeting, the moderator will share the Agenda with +members of the TSC. TSC members can add any items they like to the +agenda at the beginning of each meeting. The moderator and the TSC +cannot veto or remove items. + +The TSC may invite persons or representatives from certain projects to +participate in a non-voting capacity. These invitees currently are: + +* A representative from [build](https://github.com/node-forward/build) + chosen by that project. + +The moderator is responsible for summarizing the discussion of each +agenda item and sending it as a pull request after the meeting. + +## Consensus Seeking Process + +The TSC follows a +[Consensus Seeking](http://en.wikipedia.org/wiki/Consensus-seeking_decision-making) +decision-making model. + +When an agenda item appears to have reached consensus, the moderator +will ask "Does anyone object?" as a final call for dissent. + +If an agenda item cannot reach consensus, a TSC member can call for +either a closing vote or a vote to table the issue until the next +meeting. The call for a vote must be approved by a majority of the TSC +or else the discussion will continue. A simple majority wins. diff --git a/locale/fa/about/index.md b/locale/fa/about/index.md new file mode 100644 index 0000000000000..b22b176df4a7c --- /dev/null +++ b/locale/fa/about/index.md @@ -0,0 +1,69 @@ +--- +layout: about.hbs +title: About +trademark: Trademark +--- +# About Node.js® + +As an asynchronous event-driven JavaScript runtime, Node is designed to build +scalable network applications. In the following "hello world" example, many +connections can be handled concurrently. Upon each connection, the callback is +fired, but if there is no work to be done, Node will sleep. + +```javascript +const http = require('http'); + +const hostname = '127.0.0.1'; +const port = 3000; + +const server = http.createServer((req, res) => { + res.statusCode = 200; + res.setHeader('Content-Type', 'text/plain'); + res.end('Hello World\n'); +}); + +server.listen(port, hostname, () => { + console.log(`Server running at http://${hostname}:${port}/`); +}); +``` + +This is in contrast to today's more common concurrency model, where OS threads +are employed. Thread-based networking is relatively inefficient and very +difficult to use. Furthermore, users of Node are free from worries of +dead-locking the process, since there are no locks. Almost no function in Node +directly performs I/O, so the process never blocks. Because nothing blocks, +scalable systems are very reasonable to develop in Node. + +If some of this language is unfamiliar, there is a full article on +[Blocking vs Non-Blocking][]. + +--- + +Node is similar in design to, and influenced by, systems like Ruby's +[Event Machine][] or Python's [Twisted][]. Node takes the event model a bit +further. It presents an [event loop][] as a runtime construct instead of as a library. In other systems, there is always a blocking call to start the +event loop. +Typically, behavior is defined through callbacks at the beginning of a script, +and a server is started at the end through a blocking call like +`EventMachine::run()`. In Node there is no such start-the-event-loop call. Node +simply enters the event loop after executing the input script. 
Node exits the +event loop when there are no more callbacks to perform. This behavior is like +browser JavaScript — the event loop is hidden from the user. + +HTTP is a first class citizen in Node, designed with streaming and low latency +in mind. This makes Node well suited for the foundation of a web library or +framework. + +Just because Node is designed without threads doesn't mean you cannot take +advantage of multiple cores in your environment. Child processes can be spawned +by using our [`child_process.fork()`][] API, and are designed to be easy to +communicate with. Built upon that same interface is the [`cluster`][] module, +which allows you to share sockets between processes to enable load balancing +over your cores. + +[Blocking vs Non-Blocking]: https://nodejs.org/en/docs/guides/blocking-vs-non-blocking/ +[`child_process.fork()`]: https://nodejs.org/api/child_process.html#child_process_child_process_fork_modulepath_args_options +[`cluster`]: https://nodejs.org/api/cluster.html +[event loop]: https://nodejs.org/en/docs/guides/event-loop-timers-and-nexttick/ +[Event Machine]: https://github.com/eventmachine/eventmachine +[Twisted]: http://twistedmatrix.com/ diff --git a/locale/fa/about/privacy.md b/locale/fa/about/privacy.md new file mode 100644 index 0000000000000..7a9e098289fe7 --- /dev/null +++ b/locale/fa/about/privacy.md @@ -0,0 +1,94 @@ +--- +title: Privacy Policy +layout: about.hbs +--- +# Privacy Policy + +NODE.JS FOUNDATION (the “Foundation”) is committed to protecting the privacy of its users. This Privacy Policy (or the “Policy”) applies to its websites (whether currently or in the future supported, hosted or maintained, including without limitation nodejs.org, the “Sites”) and describes the information the Foundation collects about users of the Sites (“users”) and how that information may be used. + +Read the Privacy Policy carefully. By using any Site, you will be deemed to have accepted the terms of the Policy. If you do not agree to accept the terms of the Privacy Policy, you are directed to discontinue accessing or otherwise using the Sites or any materials obtained from the Sites. + +## Changes to the Privacy Policy +The Foundation reserves the right to update and change this Privacy Policy from time to time. Each time a user uses the Sites, the current version of the Privacy Policy applies. Accordingly, a user should check the date of this Privacy Policy (which appears at the top) and review for any changes since the last version. If a user does not agree to the Privacy Policy, the user should not use any of the Sites. Continued use of any of the Sites following any revision of this Privacy Policy constitutes acceptance of any change. + +## What Does this Privacy Policy Cover? +This Privacy Policy covers the Foundation’s treatment of aggregate information collected by the Sites and personal information that you provide in connection with your use of the Sites. This Policy does not apply to the practices of third parties that the Foundation does not own or control, including but not limited to third party services you access through the Foundation, or to individuals that the Foundation does not employ or manage. + +## Children Under 13 Years of Age +Unless specifically indicated within a Site, the Sites are not intended for minor children not of age (including without limitation those under 13), and they should not use the Sites. If you are under 18, you may use the Site only with involvement of a parent or guardian or if you are an emancipated minor. 
Except as specifically indicated within a Site, we do not knowingly collect or solicit information from, market to or accept services from children. If we become aware that a child under 13 has provided us with personal information without parental consent, we will take reasonable steps to remove such information and terminate the child’s account. If you become aware that a child has provided us with personally identifiable information without parental consent, please contact us at privacy@nodejs.org so we may remove the information. + +## Information About Users that the Foundation Collects +On the Sites, users may order products or services, and register to receive materials. Information collected on the Sites includes community forum content, diaries, profiles, photographs, names, unique identifiers (e.g., social media handles or usernames), contact and billing information (e.g., email address, postal address, telephone, fax), and transaction information. In order to access certain personalized services on the Sites, you may be asked to also create and store a username and password for an account from the Foundation. + +In order to tailor the Foundation’s subsequent communications to users and continuously improve the Sites’ products and services, the Foundation may also ask users to provide information regarding their interests, demographics, experience and detailed contact preferences. The Foundation and third party advertising companies may track information concerning a user’s use of the Sites, such as a user’s IP address. + +## How the Foundation Uses the Information Collected +The Foundation may use collected information for any lawful purpose related to the Foundation’s business, including, but not limited to: + +- To understand a user’s needs and create content that is relevant to the user; +- To generate statistical studies; +- To conduct market research and planning by sending user surveys; +- To notify user referrals of services, information, or products when a user requests that the Foundation send such information to referrals; +- To improve services, information, and products; +- To help a user complete a transaction, or provide services or customer support; +- To communicate back to the user; +- To update the user on services, information, and products; +- To personalize a Site for the user; +- To notify the user of any changes with a Site that may affect the user; +- To enforce terms of use on a Site; and +- To allow the user to purchase products, access services, or otherwise engage in activities the user selects. + +User names, identifications (“IDs”), and email addresses (as well as any additional information that a user may choose to post) may be publicly available on a Site when users voluntarily and publicly disclose personal information, such as when a user posts information in conjunction with content subject to an Open Source license, or as part of a message posted to a public forum or a publicly released software application. The personal information you may provide to the Foundation may reveal or allow others to discern aspects of your life that are not expressly stated in your profile (for example, your picture or your name may reveal your hair color, race or approximate age). 
By providing personal information to us when you create or update your account and profile or post a photograph, you are expressly and voluntarily accepting our Terms of Use and freely accepting and agreeing to our processing of your personal information in ways set out by this Privacy Policy. Supplying information to us, including any information deemed “sensitive” by applicable law, is entirely voluntary on your part. You may withdraw your consent to the Foundation’s collection and processing of your information by closing your account. You should be aware that your information may continue to be viewable to others after you close your account, such as on cached pages on Internet search engines. Users may not be able to change or remove public postings once posted. Such information may be used by visitors of these pages to send unsolicited messages. The Foundation is not responsible for any consequences which may occur from the third-party use of information that a user chooses to submit to public pages. + +## Opt Out +A user will always be able to make the decision whether to proceed with any activity that requests personal information including personally identifiable information. If a user does not provide requested information, the user may not be able to complete certain transactions. + +Users are not licensed to add other users to a Site (even users who entered into transactions with them) or to their mailing lists without written consent. +The Foundation encourages users to evaluate privacy and security policies of any of the Sites’ transaction partners before entering into transactions or choosing to disclose information. + +## Email +The Foundation may use (or provide to The Linux Foundation or other third party contractors to use) contact information received by the Foundation to email any user with respect to any Foundation or project of The Linux Foundation (a “Project”) opportunity, event or other matter. + +If a user no longer wishes to receive emails from the Foundation or any Project or any Site, the Foundation will (or, if applicable, have The Linux Foundation) provide instructions in each of its emails on how to be removed from any lists. The Foundation will make commercially reasonable efforts to honor such requests. + +## Photographs +Users may have the opportunity to submit photographs to the Sites for product promotions, contests, and other purposes to be disclosed at the time of request. In these circumstances, the Sites are designed to allow the public to view, download, save, and otherwise access the photographs posted. By submitting a photograph, users waive any privacy expectations users have with respect to the security of such photographs, and the Foundation’s use or exploitation of users’ likeness. You may submit a photograph only if you are the copyright holder or if you are authorized to do so under license by the copyright holder, and by submitting a photograph you agree to indemnify and hold the Foundation, its directors, officers, employees and agents harmless from any claims arising out of your submission. By submitting a photograph, you grant the Foundation a perpetual, worldwide, royalty-free license to use the photograph in any media now known or hereinafter invented for any business purpose that the Foundation, at its sole discretion, may decide. + +## Links to Third-Party Sites and Services +The Sites may permit you to access or link to third party websites and information on the Internet, and other websites may contain links to the Sites. 
When a user uses these links, the user leaves the Sites. The Foundation has not reviewed these third party sites, does not control, and is not responsible for, any of the third party sites, their content or privacy practices. The privacy and security practices of websites accessed from the Sites are not covered by this Privacy Policy, and the Foundation is not responsible for the privacy or security practices or the content of such websites, including but not limited to the third party services you access through the Foundation. If a user decides to access any of the linked sites, the Foundation encourages the user to read the privacy statements of those sites. The user accesses such sites at user’s own risk. + +We may receive information when you use your account to log into a third-party site or application in order to recommend tailored content or advertising to you and to improve your user experience on our site. We may provide reports containing aggregated impression information to third parties to measure Internet traffic and usage patterns. + +## Service Orders +To purchase services, users may be asked to be directed to a third party site, such as PayPal, to pay for their purchases. If applicable, the third party site may collect payment information directly to facilitate a transaction. The Foundation will only record the result of the transaction and any references to the transaction record provided by the third party site. The Foundation is not responsible for the services provided or information collected on such third party sites. + +## Sharing of Information +The Foundation may disclose personal or aggregate information that is associated with your profile as described in this Privacy Policy, as permitted by law or as reasonably necessary to: (1) comply with a legal requirement or process, including, but not limited to, civil and criminal subpoenas, court orders or other compulsory disclosures; (2) investigate and enforce this Privacy Policy or our then-current Terms of Use, if any; (3) respond to claims of a violation of the rights of third parties; (4) respond to customer service inquiries; (5) protect the rights, property, or safety of the Foundation, our users, or the public; or (6) as part of the sale of all or a portion of the assets of the Foundation or as a change in control of the organization or one of its affiliates or in preparation for any of these events. The Foundation reserves the right to supply any such information to any organization into which the Foundation may merge in the future or to which it may make any transfer. Any third party to which the Foundation transfers or sells all or any of its assets will have the right to use the personal and other information that you provide in the manner set out in this Privacy Policy. + +## Is Information About Me Secure? +To keep your information safe, prevent unauthorized access or disclosure, maintain data accuracy, and ensure the appropriate use of information, the Foundation implements industry-standard physical, electronic, and managerial procedures to safeguard and secure the information the Foundation collects. However, the Foundation does not guarantee that unauthorized third parties will never defeat measures taken to prevent improper use of personally identifiable information. + +Access to users’ nonpublic personally identifiable information is restricted to the Foundation and Linux Foundation personnel, including contractors for each such organization on a need-to-know basis. 
+ +User passwords are keys to accounts. Use unique numbers, letters, and special characters for passwords and do not disclose passwords to other people in order to prevent loss of account control. Users are responsible for all actions taken in their accounts. Notify the Foundation of any password compromises, and change passwords periodically to maintain account protection. + +In the event the Foundation becomes aware that the security of a Site has been compromised or a user’s personally identifiable information has been disclosed to unrelated third parties as a result of external activity, including but not limited to security attacks or fraud, the Foundation reserves the right to take reasonably appropriate measures, including but not limited to, investigation and reporting, and notification to and cooperation with law enforcement authorities. + +While our aim is to keep data from unauthorized or unsafe access, modification or destruction, no method of transmission on the Internet, or method of electronic storage, is 100% secure and we cannot guarantee its absolute security. + +## Data Protection +Given the international scope of the Foundation, personal information may be visible to persons outside your country of residence, including to persons in countries that your own country’s privacy laws and regulations deem deficient in ensuring an adequate level of protection for such information. If you are unsure whether this privacy statement is in conflict with applicable local rules, you should not submit your information. If you are located within the European Union, you should note that your information will be transferred to the United States, which is deemed by the European Union to have inadequate data protection. Nevertheless, in accordance with local laws implementing the European Union Privacy Directive on the protection of individuals with regard to the processing of personal data and on the free movement of such data, individuals located in countries outside of the United States of America who submit personal information do thereby consent to the general use of such information as provided in this Privacy Policy and to its transfer to and/or storage in the United States of America. By utilizing any Site and/or directly providing personal information to us, you hereby agree to and acknowledge your understanding of the terms of this Privacy Policy, and consent to have your personal data transferred to and processed in the United States and/or in other jurisdictions as determined by the Foundation, notwithstanding your country of origin, or country, state and/or province of residence. If you do not want your personal information collected and used by the Foundation, please do not visit or use the Sites. + +## Governing Law +This Privacy Policy is governed by the laws of the State of California, United States of America, without giving any effect to the principles of conflicts of law. + +## California Privacy Rights +The California Online Privacy Protection Act (“CalOPPA”) permits customers who are California residents and who have provided the Foundation with “personal information” as defined in CalOPPA to request certain information about the disclosure of information to third parties for their direct marketing purposes. If you are a California resident with a question regarding this provision, please contact privacy@nodejs.org. 
+ +Please note that the Foundation does not respond to “do not track” signals or other similar mechanisms intended to allow California residents to opt-out of Internet tracking under CalOPPA. The Foundation may track and/or disclose your online activities over time and across different websites to third parties when you use our services. + +## What to Do in the Event of Lost or Stolen Information +If you become aware that any information provided by or submitted to our Site or through our Product is lost, stolen, or used without permission, you must promptly notify us at privacy@nodejs.org. + +## Questions or Concerns +If you have any questions or concerns regarding privacy at the Foundation, please send us a detailed message to [privacy@nodejs.org](mailto:privacy@nodejs.org). diff --git a/locale/fa/about/releases.md b/locale/fa/about/releases.md new file mode 100644 index 0000000000000..2028456e8c100 --- /dev/null +++ b/locale/fa/about/releases.md @@ -0,0 +1,86 @@ +--- +layout: about.hbs +title: Releases +--- +# Releases + +The core team defines the roadmap's scope, as informed by Node.js' community. +Releases happen as often as necessary and practical, but never before work is +complete. Bugs are unavoidable, but pressure to ship a release will never +prevail over ensuring the software is correct. The commitment to quality +software is a core tenet of the Node.js project. + +## Patches + +Patch releases: + +- Include bug, performance, and security fixes. +- Do not add or change public interfaces. +- Do not alter the expected behavior of a given interface. +- Can correct behavior if it is out-of-sync with the documentation. +- Do not introduce changes which make seamless upgrades impossible. + +## Minors + +Minor releases: + +- Include additions and/or refinements of APIs and subsystems. +- Do not generally change APIs or introduce backwards-incompatible breaking +changes, except where unavoidable. +- Are mostly additive releases. + +## Majors + +Major releases: + +- Usually introduce backwards-incompatible, breaking changes. +- Identify the API Node.js intends to support for the foreseeable future. +- Require conversation, care, collaboration and appropriate scoping by the team +and its users. + +## Scoping Features + +The team can add features and APIs into Node.js when: + +- The need is clear. +- The API or feature has known consumers. +- The API is clean, useful, and easy to use. + +When implementing core functionality for Node.js, the team or community may +identify another lower-level API which could have utility beyond Node.js. When +identified, Node.js can expose it for consumers. + +For example, consider the [`EventEmitter`] interface. The need to have an event +subscription model for core modules to consume was clear, and that abstraction +had utility beyond the Node.js core. It was not the case that its interface +couldn't be implemented externally to Node.js; instead, Node.js needed the +abstraction for itself, and also exposed it for use by Node.js consumers. + +Alternatively, it may be that many in the community adopt a pattern to handle +common needs which Node.js does not satisfy. It may be clear that Node.js +should deliver, by default, an API or feature for all Node.js consumers. +Another possibility is a commonly-used compiled asset which is difficult to +deliver across environments. Given this, Node.js may incorporate those changes +directly. + +The core team does not take lightly the decision to add a new API to Node.js. 
+Node.js has a strong commitment to backwards compatibility. As such, community +input and conversation must occur before the team takes action. Even if an API +is otherwise suitable for addition, the team must identify potential consumers. + +## Deprecation + +On occasion, the team must deprecate a feature or API of Node.js. Before coming +to any final conclusion, the team must identify the consumers of the API and how +they use it. Some questions to ask are: + +- If this API is widely used by the community, what is the need for flagging it +as deprecated? +- Do we have a replacement API, or is there a transition path? +- How long does the API remain deprecated before removal? +- Is there an external module that consumers can easily substitute? + +The team takes the same careful consideration when deprecating a Node.js API as +they do when adding another. + +[`EventEmitter`]: https://nodejs.org/api/events.html#events_class_eventemitter diff --git a/locale/fa/about/resources.md b/locale/fa/about/resources.md new file mode 100644 index 0000000000000..1686722289027 --- /dev/null +++ b/locale/fa/about/resources.md @@ -0,0 +1,31 @@ +--- +layout: about.hbs +title: Logos and Graphics +--- +# Resources + +## Logo Downloads + + Please review the [trademark policy](/about/trademark/) for information about permissible use of Node.js® logos and marks. + + Guidelines for the visual display of the Node.js mark are described in + the [Visual Guidelines](/static/documents/foundation-visual-guidelines.pdf). +
+| Node.js on light background | Node.js on dark background |
+| --- | --- |
+| Node.js standard AI | Node.js reversed AI |
+| Node.js standard with less color AI | Node.js reversed with less color AI |
diff --git a/locale/fa/about/trademark.md b/locale/fa/about/trademark.md new file mode 100644 index 0000000000000..530b5cd11d6d5 --- /dev/null +++ b/locale/fa/about/trademark.md @@ -0,0 +1,28 @@ +--- +layout: about.hbs +title: Trademark Policy +--- +# Trademark Policy + +The Node.js trademarks, service marks, and graphics marks are symbols of the +quality, performance, and ease of use that people have come to associate with +the Node.js software and project. To ensure that the Node.js marks continue to +symbolize these qualities, we must ensure that the marks are only used in ways +that do not mislead people or cause them to confuse Node.js with other software +of lower quality. If we don’t ensure the marks are used in this way, it can not +only confuse users, it can also make it impossible to use the mark to protect +against people who maliciously exploit the mark in the future. The primary goal +of this policy is to make sure that this doesn’t happen to the Node.js mark, so +that the community and users of Node.js are always protected in the future. + +At the same time, we’d like community members to feel comfortable spreading the +word about Node.js and participating in the Node.js community. Keeping that +goal in mind, we’ve tried to make the policy as flexible and easy to understand +as legally possible. + +Please read the [full policy](/static/documents/trademark-policy.pdf). +If you have any questions, don't hesitate to +[email us](mailto:trademark@nodejs.org). + +Guidelines for the visual display of the Node.js mark are described in +the [Visual Guidelines](/static/documents/foundation-visual-guidelines.pdf). diff --git a/locale/fa/about/working-groups.md b/locale/fa/about/working-groups.md new file mode 100644 index 0000000000000..3fb1c7e5b47da --- /dev/null +++ b/locale/fa/about/working-groups.md @@ -0,0 +1,241 @@ +--- +layout: about.hbs +title: Working Groups +--- +# Core Working Groups + + +Core Working Groups are created by the +[Technical Steering Committee (TSC)](https://github.com/nodejs/TSC/blob/master/TSC-Charter.md). + +## Current Working Groups + +* [Addon API](#addon-api) +* [Benchmarking](#benchmarking) +* [Build](#build) +* [Diagnostics](#diagnostics) +* [Docker](#docker) +* [Evangelism](#evangelism) +* [i18n](#i18n) +* [Release](#release) +* [Security](#security) +* [Streams](#streams) +* [Website](#website) + +### [Addon API](https://github.com/nodejs/nan) + +The Addon API Working Group is responsible for maintaining the NAN project and +corresponding _nan_ package in npm. The NAN project makes available an +abstraction layer for native add-on authors for Node.js, +assisting in the writing of code that is compatible with many actively used +versions of Node.js, V8 and libuv. + +Responsibilities include: +* Maintaining the [NAN](https://github.com/nodejs/nan) GitHub repository, + including code, issues and documentation. +* Maintaining the [addon-examples](https://github.com/nodejs/node-addon-examples) + GitHub repository, including code, issues and documentation. +* Maintaining the C++ Addon API within the Node.js project, in subordination to + the Node.js TSC. +* Maintaining the Addon documentation within the Node.js project, in + subordination to the Node.js TSC. +* Maintaining the _nan_ package in npm, releasing new versions as appropriate. +* Messaging about the future of the Node.js and NAN interface to give the + community advance notice of changes. + +The current members can be found in their +[README](https://github.com/nodejs/nan#collaborators). 
+ +### [Benchmarking](https://github.com/nodejs/benchmarking) + +The purpose of the Benchmark Working Group is to gain consensus +on an agreed set of benchmarks that can be used to: + +* track and evangelize performance gains made between Node.js releases +* avoid performance regressions between releases + +Responsibilities include: +* Identifying one or more benchmarks that reflect customer usage. + More than one will likely be needed to cover typical Node.js use cases, + including low latency and high concurrency. +* Working to get community consensus on the list chosen. +* Adding regular execution of chosen benchmarks to Node.js builds. +* Tracking/publicizing performance between builds/releases. + +### [Build](https://github.com/nodejs/build) + +The Build Working Group's purpose is to create and maintain a distributed +automation infrastructure. + +Responsibilities include: +* Producing packages for all target platforms. +* Running tests. +* Running performance testing and comparisons. +* Creating and managing build-containers. + +### [Diagnostics](https://github.com/nodejs/diagnostics) + +The Diagnostics Working Group's purpose is to surface a set of comprehensive, +documented, and extensible diagnostic interfaces for use by Node.js tools and +JavaScript VMs. + +Responsibilities include: +* Collaborating with V8 to integrate `v8_inspector` into Node.js. +* Collaborating with V8 to integrate `trace_event` into Node.js. +* Collaborating with Core to refine `async_wrap` and `async_hooks`. +* Maintaining and improving OS trace system integration (e.g. ETW, LTTNG, dtrace). +* Documenting diagnostic capabilities and APIs in Node.js and its components. +* Exploring opportunities and gaps, discussing feature requests, and addressing + conflicts in Node.js diagnostics. +* Fostering an ecosystem of diagnostics tools for Node.js. +* Defining and adding interfaces/APIs in order to allow dumps to be generated + when needed. +* Defining and adding common structures to the dumps generated in order to + support tools that want to introspect those dumps. + +### [Docker](https://github.com/nodejs/docker-node) + +The Docker Working Group's purpose is to build, maintain, and improve official +Docker images for the Node.js project. + +Responsibilities include: +* Keeping the official Docker images updated in line with new Node.js releases. +* Deciding on and implementing image improvements and/or fixes. +* Maintaining and improving the images' documentation. + +### [Evangelism](https://github.com/nodejs/evangelism) + +The Evangelism Working Group promotes the accomplishments +of Node.js and lets the community know how they can get involved. + +Responsibilities include: +* Facilitating project messaging. +* Managing official project social media. +* Handling the promotion of speakers for meetups and conferences. +* Handling the promotion of community events. +* Publishing regular update summaries and other promotional + content. + +### i18n + +The i18n Working Groups handle more than just translations. They +are endpoints for community members to collaborate with each +other in their language of choice. + +Each team is organized around a common spoken language. Each +language community might then produce multiple localizations for +various project resources. + +Responsibilities include: +* Translating any Node.js materials they believe are relevant to their + community. +* Reviewing processes for keeping translations up to date and of high quality. +* Managing and monitoring social media channels in their language. 
+* Promoting Node.js speakers for meetups and conferences in their language. + +Each language community maintains its own membership. + +* [nodejs-ar - Arabic (العَرَبِيَّة)](https://github.com/nodejs/nodejs-ar) +* [nodejs-bg - Bulgarian (български)](https://github.com/nodejs/nodejs-bg) +* [nodejs-bn - Bengali (বাংলা)](https://github.com/nodejs/nodejs-bn) +* [nodejs-zh-CN - Chinese (中文)](https://github.com/nodejs/nodejs-zh-CN) +* [nodejs-cs - Czech (Čeština)](https://github.com/nodejs/nodejs-cs) +* [nodejs-da - Danish (Dansk)](https://github.com/nodejs/nodejs-da) +* [nodejs-de - German (Deutsch)](https://github.com/nodejs/nodejs-de) +* [nodejs-el - Greek (Ελληνικά)](https://github.com/nodejs/nodejs-el) +* [nodejs-es - Spanish (Español)](https://github.com/nodejs/nodejs-es) +* [nodejs-fa - Persian (فارسی)](https://github.com/nodejs/nodejs-fa) +* [nodejs-fi - Finnish (Suomi)](https://github.com/nodejs/nodejs-fi) +* [nodejs-fr - French (Français)](https://github.com/nodejs/nodejs-fr) +* [nodejs-he - Hebrew (עברית)](https://github.com/nodejs/nodejs-he) +* [nodejs-hi - Hindi (हिन्दी)](https://github.com/nodejs/nodejs-hi) +* [nodejs-hu - Hungarian (Magyar)](https://github.com/nodejs/nodejs-hu) +* [nodejs-id - Indonesian (Bahasa Indonesia)](https://github.com/nodejs/nodejs-id) +* [nodejs-it - Italian (Italiano)](https://github.com/nodejs/nodejs-it) +* [nodejs-ja - Japanese (日本語)](https://github.com/nodejs/nodejs-ja) +* [nodejs-ka - Georgian (ქართული)](https://github.com/nodejs/nodejs-ka) +* [nodejs-ko - Korean (한국어)](https://github.com/nodejs/nodejs-ko) +* [nodejs-mk - Macedonian (Македонски)](https://github.com/nodejs/nodejs-mk) +* [nodejs-ms - Malay (بهاس ملايو‎)](https://github.com/nodejs/nodejs-ms) +* [nodejs-nl - Dutch (Nederlands)](https://github.com/nodejs/nodejs-nl) +* [nodejs-no - Norwegian (Norsk)](https://github.com/nodejs/nodejs-no) +* [nodejs-pl - Polish (Język Polski)](https://github.com/nodejs/nodejs-pl) +* [nodejs-pt - Portuguese (Português)](https://github.com/nodejs/nodejs-pt) +* [nodejs-ro - Romanian (Română)](https://github.com/nodejs/nodejs-ro) +* [nodejs-ru - Russian (Русский)](https://github.com/nodejs/nodejs-ru) +* [nodejs-sv - Swedish (Svenska)](https://github.com/nodejs/nodejs-sv) +* [nodejs-ta - Tamil (தமிழ்)](https://github.com/nodejs/nodejs-ta) +* [nodejs-tr - Turkish (Türkçe)](https://github.com/nodejs/nodejs-tr) +* [nodejs-zh-TW - Taiwanese (國語)](https://github.com/nodejs/nodejs-zh-TW) +* [nodejs-uk - Ukrainian (Українська)](https://github.com/nodejs/nodejs-uk) +* [nodejs-vi - Vietnamese (Tiếng Việt)](https://github.com/nodejs/nodejs-vi) + +### [Release](https://github.com/nodejs/LTS) + +The Release Working Group manages the release process for Node.js. + +Responsibilities include: +* Define the release process. +* Define the content of releases. +* Generate and create releases. +* Test releases. +* Manage the Long Term Support and Current branches including + backporting changes to these branches. +* Define the policy for what gets backported to release streams. + +### [Security](https://github.com/nodejs/security-wg) + +The Security Working Group manages all aspects and processes linked to Node.js security. + +Responsibilities include: +* Define and maintain security policies and procedures for: + * the core Node.js project + * other projects maintained by the Node.js Technical Steering Committee (TSC). +* Work with the Node Security Platform to bring community vulnerability data into + the foundation as a shared asset. 
+* Ensure the vulnerability data is updated in an efficient and timely manner. + For example, ensuring there are well-documented processes for reporting + vulnerabilities in community modules. +* Review and recommend processes for handling of security reports (but not the + actual administration of security reports, which are reviewed by a group of people + directly delegated to by the TSC). +* Define and maintain policies and procedures for the coordination of security + concerns within the external Node.js open source ecosystem. +* Offer help to npm package maintainers to fix high-impact security bugs. +* Maintain and make available data on disclosed security vulnerabilities in: + * the core Node.js project + * other projects maintained by the Node.js Foundation technical group + * the external Node.js open source ecosystem +* Promote the improvement of security practices within the Node.js ecosystem. +* Recommend security improvements for the core Node.js project. +* Facilitate and promote the expansion of a healthy security service and product + provider ecosystem. + +### [Streams](https://github.com/nodejs/readable-stream) + +The Streams Working Group is dedicated to the support and improvement of the +Streams API as used in Node.js and the npm ecosystem. We seek to create a +composable API that solves the problem of representing multiple occurrences +of an event over time in a humane, low-overhead fashion. Improvements to the +API will be driven by the needs of the ecosystem; interoperability and +backwards compatibility with other solutions and prior versions are paramount +in importance. + +Responsibilities include: +* Addressing stream issues on the Node.js issue tracker. +* Authoring and editing stream documentation within the Node.js project. +* Reviewing changes to stream subclasses within the Node.js project. +* Redirecting changes to streams from the Node.js project to this project. +* Assisting in the implementation of stream providers within Node.js. +* Recommending versions of `readable-stream` to be included in Node.js. +* Messaging about the future of streams to give the community advance notice of changes. + +### [Website](https://github.com/nodejs/nodejs.org) + +The Website Working Group's purpose is to build and maintain a public +website for the Node.js project. + +Responsibilities include: +* Developing and maintaining a build and automation system for nodejs.org. +* Ensuring the site is regularly updated with changes made to Node.js, like + releases and features. +* Fostering and enabling a community of translators. diff --git a/locale/fa/docs/es6.md b/locale/fa/docs/es6.md new file mode 100644 index 0000000000000..953eb3d5ed65c --- /dev/null +++ b/locale/fa/docs/es6.md @@ -0,0 +1,45 @@ +--- +title: ECMAScript 2015 (ES6) and beyond +layout: docs.hbs +--- +# ECMAScript 2015 (ES6) and beyond + +Node.js is built against modern versions of [V8](https://developers.google.com/v8/). By keeping up-to-date with the latest releases of this engine, we ensure new features from the [JavaScript ECMA-262 specification](http://www.ecma-international.org/publications/standards/Ecma-262.htm) are brought to Node.js developers in a timely manner, as well as continued performance and stability improvements. + +All ECMAScript 2015 (ES6) features are split into three groups for **shipping**, **staged**, and **in progress** features: + +* All **shipping** features, which V8 considers stable, are turned **on by default on Node.js** and do **NOT** require any kind of runtime flag. 
+* **Staged** features, which are almost-completed features that are not considered stable by the V8 team, require a runtime flag: `--harmony`. +* **In progress** features can be activated individually by their respective harmony flag, although this is highly discouraged except for testing purposes. Note: these flags are exposed by V8 and will potentially change without any deprecation notice. + +## Which features ship with which Node.js version by default? + +The website [node.green](http://node.green) provides an excellent overview of supported ECMAScript features in various versions of Node.js, based on kangax's compat-table. + +## Which features are in progress? + +New features are constantly being added to the V8 engine. Generally speaking, expect them to land in a future Node.js release, although the timing is unknown. + +You may list all the *in progress* features available in each Node.js release by grepping the output of the `--v8-options` argument. Please note that these are incomplete and possibly broken features of V8, so use them at your own risk: + +```bash +node --v8-options | grep "in progress" +``` + +## What about the performance of a particular feature? + +The V8 team is constantly working to improve the performance of new language features to eventually reach parity with their transpiled or native counterparts in EcmaScript 5 and earlier. The current progress there is tracked on the website [six-speed](https://fhinkel.github.io/six-speed), which shows the performance of ES2015 and ESNext features compared to their native ES5 counterparts. + +The work on optimizing features introduced with ES2015 and beyond is coordinated via a [performance plan](https://docs.google.com/document/d/1EA9EbfnydAmmU_lM8R_uEMQ-U_v4l9zulePSBkeYWmY), where the V8 team gathers and coordinates areas that need improvement, and design documents to tackle those problems. + +## I have my infrastructure set up to leverage the --harmony flag. Should I remove it? + +The current behaviour of the `--harmony` flag on Node.js is to enable **staged** features only. After all, it is now a synonym of `--es_staging`. As mentioned above, these are completed features that have not yet been considered stable. If you want to play it safe, especially in production environments, consider removing this runtime flag until it ships by default on V8 and, consequently, on Node.js. If you keep this enabled, you should be prepared for further Node.js upgrades to break your code if V8 changes its semantics to more closely follow the standard. + +## How do I find which version of V8 ships with a particular version of Node.js? + +Node.js provides a simple way to list all dependencies and respective versions that ship with a specific binary through the `process` global object. In the case of the V8 engine, type the following in your terminal to retrieve its version: + +```bash +node -p process.versions.v8 +``` diff --git a/locale/fa/docs/guides/abi-stability.md b/locale/fa/docs/guides/abi-stability.md new file mode 100644 index 0000000000000..b010c70a7b0e2 --- /dev/null +++ b/locale/fa/docs/guides/abi-stability.md @@ -0,0 +1,118 @@ +--- +title: ABI Stability +layout: docs.hbs +--- + +# ABI Stability + +## Introduction +An Application Binary Interface (ABI) is a way for programs to call functions +and use data structures from other compiled programs. It is the compiled version +of an Application Programming Interface (API). 
In other words, the header files +describing the classes, functions, data structures, enumerations, and constants +which enable an application to perform a desired task correspond by way of +compilation to a set of addresses and expected parameter values and memory +structure sizes and layouts with which the provider of the ABI was compiled. + +The application using the ABI must be compiled such that the available +addresses, expected parameter values, and memory structure sizes and layouts +agree with those with which the ABI provider was compiled. This is usually +accomplished by compiling against the headers provided by the ABI provider. + +Since the provider of the ABI and the user of the ABI may be compiled at +different times with different versions of the compiler, a portion of the +responsibility for ensuring ABI compatibility lies with the compiler. Different +versions of the compiler, perhaps provided by different vendors, must all +produce the same ABI from a header file with a certain content, and must produce +code for the application using the ABI that accesses the API described in a +given header according to the conventions of the ABI resulting from the +description in the header. Modern compilers have a fairly good track record of +not breaking the ABI compatibility of the applications they compile. + +The remaining responsibility for ensuring ABI compatibility lies with the team +maintaining the header files which provide the API that results, upon +compilation, in the ABI that is to remain stable. Changes to the header files +can be made, but the nature of the changes has to be closely tracked to ensure +that, upon compilation, the ABI does not change in a way that will render +existing users of the ABI incompatible with the new version. + +## ABI Stability in Node.js +Node.js provides header files maintained by several independent teams. For +example, header files such as `node.h` and `node_buffer.h` are maintained by +the Node.js team. `v8.h` is maintained by the V8 team, which, although in close +co-operation with the Node.js team, is independent, with its own schedule +and priorities. Thus, the Node.js team has only partial control over the +changes that are introduced in the headers the project provides. As a result, +the Node.js project has adopted [semantic versioning](https://semver.org/). +This ensures that the APIs provided by the project will result in a stable ABI +for all minor and patch versions of Node.js released within one major version. +In practice, this means that the Node.js project has committed itself to +ensuring that a Node.js native addon compiled against a given major version of +Node.js will load successfully when loaded by any Node.js minor or patch version +within the major version against which it was compiled. + +## N-API +Demand has arisen for equipping Node.js with an API that results in an ABI that +remains stable across multiple Node.js major versions. The motivation for +creating such an API is as follows: +* The JavaScript language has remained compatible with itself since its very +early days, whereas the ABI of the engine executing the JavaScript code changes +with every major version of Node.js. This means that applications consisting of +Node.js packages written entirely in JavaScript need not be recompiled, +reinstalled, or redeployed as a new major version of Node.js is dropped into +the production environment in which such applications run. 
In contrast, if an +application depends on a package that contains a native addon, the application +has to be recompiled, reinstalled, and redeployed whenever a new major version +of Node.js is introduced into the production environment. This disparity +between Node.js packages containing native addons and those that are written +entirely in JavaScript has added to the maintenance burden of production +systems which rely on native addons. + +* Other projects have started to produce JavaScript interfaces that are +essentially alternative implementations of Node.js. Since these projects are +usually built on a different JavaScript engine than V8, their native addons +necessarily take on a different structure and use a different API. Nevertheless, +using a single API for a native addon across different implementations of the +Node.js JavaScript API would allow these projects to take advantage of the +ecosystem of JavaScript packages that has accrued around Node.js. + +* Node.js may contain a different JavaScript engine in the future. This means +that, externally, all Node.js interfaces would remain the same, but the V8 +header file would be absent. Such a step would cause the disruption of the +Node.js ecosystem in general, and that of the native addons in particular, if +an API that is JavaScript engine agnostic is not first provided by Node.js and +adopted by native addons. + +To these ends Node.js has introduced N-API in version 8.6.0 and marked it as a +stable component of the project as of Node.js 8.12.0. The API is defined in the +headers [`node_api.h`][] and [`node_api_types.h`][], and provides a forward- +compatibility guarantee that crosses the Node.js major version boundary. The +guarantee can be stated as follows: + +**A given version *n* of N-API will be available in the major version of +Node.js in which it was published, and in all subsequent versions of Node.js, +including subsequent major versions.** + +A native addon author can take advantage of the N-API forward compatibility +guarantee by ensuring that the addon makes use only of APIs defined in +`node_api.h` and data structures and constants defined in `node_api_types.h`. +By doing so, the author facilitates adoption of their addon by indicating to +production users that the maintenance burden for their application will increase +no more by the addition of the native addon to their project than it would by +the addition of a package written purely in JavaScript. + +N-API is versioned because new APIs are added from time to time. Unlike +semantic versioning, N-API versioning is cumulative. That is, each version of +N-API conveys the same meaning as a minor version in the semver system, meaning +that all changes made to N-API will be backwards compatible. Additionally, new +N-APIs are added under an experimental flag to give the community an opportunity +to vet them in a production environment. Experimental status means that, +although care has been taken to ensure that the new API will not have to be +modified in an ABI-incompatible way in the future, it has not yet been +sufficiently proven in production to be correct and useful as designed and, as +such, may undergo ABI-incompatible changes before it is finally incorporated +into a forthcoming version of N-API. That is, an experimental N-API is not yet +covered by the forward compatibility guarantee. 
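+
+Because N-API versions are cumulative, a script can check at runtime which
+N-API version the running binary supports. A minimal sketch (assuming a
+Node.js version new enough to expose `process.versions.napi`):
+
+```js
+// Prints the highest N-API version supported by this Node.js binary, e.g. 3.
+// A native addon's required N-API version can be checked against this number.
+console.log(process.versions.napi);
+```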
+ +[`node_api.h`]: https://github.com/nodejs/node/blob/master/src/node_api.h +[`node_api_types.h`]: https://github.com/nodejs/node/blob/master/src/node_api_types.h diff --git a/locale/fa/docs/guides/anatomy-of-an-http-transaction.md b/locale/fa/docs/guides/anatomy-of-an-http-transaction.md new file mode 100644 index 0000000000000..289514b9c5537 --- /dev/null +++ b/locale/fa/docs/guides/anatomy-of-an-http-transaction.md @@ -0,0 +1,430 @@ +--- +title: Anatomy of an HTTP Transaction +layout: docs.hbs +--- + +# Anatomy of an HTTP Transaction + +The purpose of this guide is to impart a solid understanding of the process of +Node.js HTTP handling. We'll assume that you know, in a general sense, how HTTP +requests work, regardless of language or programming environment. We'll also +assume a bit of familiarity with Node.js [`EventEmitters`][] and [`Streams`][]. +If you're not quite familiar with them, it's worth taking a quick read through +the API docs for each of those. + +## Create the Server + +Any node web server application will at some point have to create a web server +object. This is done by using [`createServer`][]. + +```javascript +const http = require('http'); + +const server = http.createServer((request, response) => { + // magic happens here! +}); +``` + +The function that's passed in to [`createServer`][] is called once for every +HTTP request that's made against that server, so it's called the request +handler. In fact, the [`Server`][] object returned by [`createServer`][] is an +[`EventEmitter`][], and what we have here is just shorthand for creating a +`server` object and then adding the listener later. + +```javascript +const server = http.createServer(); +server.on('request', (request, response) => { + // the same kind of magic happens here! +}); +``` + +When an HTTP request hits the server, node calls the request handler function +with a few handy objects for dealing with the transaction, `request` and +`response`. We'll get to those shortly. + +In order to actually serve requests, the [`listen`][] method needs to be called +on the `server` object. In most cases, all you'll need to pass to `listen` is +the port number you want the server to listen on. There are some other options +too, so consult the [API reference][]. + +## Method, URL and Headers + +When handling a request, the first thing you'll probably want to do is look at +the method and URL, so that appropriate actions can be taken. Node makes this +relatively painless by putting handy properties onto the `request` object. + +```javascript +const { method, url } = request; +``` +> **Note:** The `request` object is an instance of [`IncomingMessage`][]. + +The `method` here will always be a normal HTTP method/verb. The `url` is the +full URL without the server, protocol or port. For a typical URL, this means +everything after and including the third forward slash. + +Headers are also not far away. They're in their own object on `request` called +`headers`. + +```javascript +const { headers } = request; +const userAgent = headers['user-agent']; +``` + +It's important to note here that all headers are represented in lower-case only, +regardless of how the client actually sent them. This simplifies the task of +parsing headers for whatever purpose. + +If some headers are repeated, then their values are overwritten or joined +together as comma-separated strings, depending on the header. In some cases, +this can be problematic, so [`rawHeaders`][] is also available. 
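+
+For example (a short sketch; the header values are just for illustration),
+`headers` always exposes lower-cased names, while `rawHeaders` preserves
+exactly what the client sent, as an alternating list of names and values:
+
+```javascript
+// Suppose the client sent "User-Agent: curl/7.58.0".
+const userAgent = request.headers['user-agent']; // 'curl/7.58.0'
+
+// rawHeaders keeps the original casing and ordering:
+// [ 'User-Agent', 'curl/7.58.0', ... ]
+console.log(request.rawHeaders);
+```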
+ +## Request Body + +When receiving a `POST` or `PUT` request, the request body might be important to +your application. Getting at the body data is a little more involved than +accessing request headers. The `request` object that's passed in to a handler +implements the [`ReadableStream`][] interface. This stream can be listened to or +piped elsewhere just like any other stream. We can grab the data right out of +the stream by listening to the stream's `'data'` and `'end'` events. + +The chunk emitted in each `'data'` event is a [`Buffer`][]. If you know it's +going to be string data, the best thing to do is collect the data in an array, +then at the `'end'`, concatenate and stringify it. + +```javascript +let body = []; +request.on('data', (chunk) => { + body.push(chunk); +}).on('end', () => { + body = Buffer.concat(body).toString(); + // at this point, `body` has the entire request body stored in it as a string +}); +``` + +> **Note:** This may seem a tad tedious, and in many cases, it is. Luckily, +there are modules like [`concat-stream`][] and [`body`][] on [`npm`][] which can +help hide away some of this logic. It's important to have a good understanding +of what's going on before going down that road, and that's why you're here! + +## A Quick Thing About Errors + +Since the `request` object is a [`ReadableStream`][], it's also an +[`EventEmitter`][] and behaves like one when an error happens. + +An error in the `request` stream presents itself by emitting an `'error'` event +on the stream. **If you don't have a listener for that event, the error will be +*thrown*, which could crash your Node.js program.** You should therefore add an +`'error'` listener on your request streams, even if you just log it and +continue on your way. (Though it's probably best to send some kind of HTTP error +response. More on that later.) + +```javascript +request.on('error', (err) => { + // This prints the error message and stack trace to `stderr`. + console.error(err.stack); +}); +``` + +There are other ways of [handling these errors][] such as +other abstractions and tools, but always be aware that errors can and do happen, +and you're going to have to deal with them. + +## What We've Got so Far + +At this point, we've covered creating a server, and grabbing the method, URL, +headers and body out of requests. When we put that all together, it might look +something like this: + +```javascript +const http = require('http'); + +http.createServer((request, response) => { + const { headers, method, url } = request; + let body = []; + request.on('error', (err) => { + console.error(err); + }).on('data', (chunk) => { + body.push(chunk); + }).on('end', () => { + body = Buffer.concat(body).toString(); + // At this point, we have the headers, method, url and body, and can now + // do whatever we need to in order to respond to this request. + }); +}).listen(8080); // Activates this server, listening on port 8080. +``` + +If we run this example, we'll be able to *receive* requests, but not *respond* +to them. In fact, if you hit this example in a web browser, your request would +time out, as nothing is being sent back to the client. + +So far we haven't touched on the `response` object at all, which is an instance +of [`ServerResponse`][], which is a [`WritableStream`][]. It contains many +useful methods for sending data back to the client. We'll cover that next. + +## HTTP Status Code + +If you don't bother setting it, the HTTP status code on a response will always +be 200. 
Of course, not every HTTP response warrants this, and at some point
+you'll definitely want to send a different status code. To do that, you can set
+the `statusCode` property.
+
+```javascript
+response.statusCode = 404; // Tell the client that the resource wasn't found.
+```
+
+There are some other shortcuts to this, as we'll see soon.
+
+## Setting Response Headers
+
+Headers are set through a convenient method called [`setHeader`][].
+
+```javascript
+response.setHeader('Content-Type', 'application/json');
+response.setHeader('X-Powered-By', 'bacon');
+```
+
+When setting the headers on a response, their names are case-insensitive.
+If you set a header repeatedly, the last value you set is the value that gets
+sent.
+
+## Explicitly Sending Header Data
+
+The methods of setting the headers and status code that we've already discussed
+assume that you're using "implicit headers". This means you're counting on node
+to send the headers for you at the correct time before you start sending body
+data.
+
+If you want, you can *explicitly* write the headers to the response stream.
+To do this, there's a method called [`writeHead`][], which writes the status
+code and the headers to the stream.
+
+```javascript
+response.writeHead(200, {
+  'Content-Type': 'application/json',
+  'X-Powered-By': 'bacon'
+});
+```
+
+Once you've set the headers (either implicitly or explicitly), you're ready to
+start sending response data.
+
+## Sending Response Body
+
+Since the `response` object is a [`WritableStream`][], writing a response body
+out to the client is just a matter of using the usual stream methods.
+
+```javascript
+response.write('<html>');
+response.write('<body>');
+response.write('<h1>Hello, World!</h1>');
+response.write('</body>');
+response.write('</html>');
+response.end();
+```
+
+The `end` function on streams can also take in some optional data to send as the
+last bit of data on the stream, so we can simplify the example above as follows.
+
+```javascript
+response.end('<html><body><h1>Hello, World!</h1></body></html>');
+```
+
+> **Note:** It's important to set the status and headers *before* you start
+writing chunks of data to the body. This makes sense, since headers come before
+the body in HTTP responses.
+
+## Another Quick Thing About Errors
+
+The `response` stream can also emit `'error'` events, and at some point you're
+going to have to deal with that as well. All of the advice for `request` stream
+errors still applies here.
+
+## Put It All Together
+
+Now that we've learned about making HTTP responses, let's put it all together.
+Building on the earlier example, we're going to make a server that sends back
+all of the data that was sent to us by the user. We'll format that data as JSON
+using `JSON.stringify`.
+
+```javascript
+const http = require('http');
+
+http.createServer((request, response) => {
+  const { headers, method, url } = request;
+  let body = [];
+  request.on('error', (err) => {
+    console.error(err);
+  }).on('data', (chunk) => {
+    body.push(chunk);
+  }).on('end', () => {
+    body = Buffer.concat(body).toString();
+    // BEGINNING OF NEW STUFF
+
+    response.on('error', (err) => {
+      console.error(err);
+    });
+
+    response.statusCode = 200;
+    response.setHeader('Content-Type', 'application/json');
+    // Note: the 2 lines above could be replaced with this next one:
+    // response.writeHead(200, {'Content-Type': 'application/json'})
+
+    const responseBody = { headers, method, url, body };
+
+    response.write(JSON.stringify(responseBody));
+    response.end();
+    // Note: the 2 lines above could be replaced with this next one:
+    // response.end(JSON.stringify(responseBody))
+
+    // END OF NEW STUFF
+  });
+}).listen(8080); // Activates this server, listening on port 8080.
+```
+
+## Echo Server Example
+
+Let's simplify the previous example to make a simple echo server, which just
+sends whatever data is received in the request right back in the response. All
+we need to do is grab the data from the request stream and write that data to
+the response stream, similar to what we did previously.
+
+```javascript
+const http = require('http');
+
+http.createServer((request, response) => {
+  let body = [];
+  request.on('data', (chunk) => {
+    body.push(chunk);
+  }).on('end', () => {
+    body = Buffer.concat(body).toString();
+    response.end(body);
+  });
+}).listen(8080);
+```
+
+Now let's tweak this. We want to only send an echo under the following
+conditions:
+
+* The request method is POST.
+* The URL is `/echo`.
+
+In any other case, we want to simply respond with a 404.
+
+```javascript
+const http = require('http');
+
+http.createServer((request, response) => {
+  if (request.method === 'POST' && request.url === '/echo') {
+    let body = [];
+    request.on('data', (chunk) => {
+      body.push(chunk);
+    }).on('end', () => {
+      body = Buffer.concat(body).toString();
+      response.end(body);
+    });
+  } else {
+    response.statusCode = 404;
+    response.end();
+  }
+}).listen(8080);
+```
+
+> **Note:** By checking the URL in this way, we're doing a form of "routing".
+Other forms of routing can be as simple as `switch` statements or as complex as
+whole frameworks like [`express`][]. If you're looking for something that does
+routing and nothing else, try [`router`][].
+
+Great! Now let's take a stab at simplifying this. Remember, the `request` object
+is a [`ReadableStream`][] and the `response` object is a [`WritableStream`][].
+That means we can use [`pipe`][] to direct data from one to the other. That's
+exactly what we want for an echo server!
+ +```javascript +const http = require('http'); + +http.createServer((request, response) => { + if (request.method === 'POST' && request.url === '/echo') { + request.pipe(response); + } else { + response.statusCode = 404; + response.end(); + } +}).listen(8080); +``` + +Yay streams! + +We're not quite done yet though. As mentioned multiple times in this guide, +errors can and do happen, and we need to deal with them. + +To handle errors on the request stream, we'll log the error to `stderr` and send +a 400 status code to indicate a `Bad Request`. In a real-world application, +though, we'd want to inspect the error to figure out what the correct status code +and message would be. As usual with errors, you should consult the +[`Error` documentation][]. + +On the response, we'll just log the error to `stderr`. + +```javascript +const http = require('http'); + +http.createServer((request, response) => { + request.on('error', (err) => { + console.error(err); + response.statusCode = 400; + response.end(); + }); + response.on('error', (err) => { + console.error(err); + }); + if (request.method === 'POST' && request.url === '/echo') { + request.pipe(response); + } else { + response.statusCode = 404; + response.end(); + } +}).listen(8080); +``` + +We've now covered most of the basics of handling HTTP requests. At this point, +you should be able to: + +* Instantiate an HTTP server with a request handler function, and have it listen +on a port. +* Get headers, URL, method and body data from `request` objects. +* Make routing decisions based on URL and/or other data in `request` objects. +* Send headers, HTTP status codes and body data via `response` objects. +* Pipe data from `request` objects and to `response` objects. +* Handle stream errors in both the `request` and `response` streams. + +From these basics, Node.js HTTP servers for many typical use cases can be +constructed. There are plenty of other things these APIs provide, so be sure to +read through the API docs for [`EventEmitters`][], [`Streams`][], and [`HTTP`][]. 
+
+
+
+[`EventEmitters`]: https://nodejs.org/api/events.html
+[`Streams`]: https://nodejs.org/api/stream.html
+[`createServer`]: https://nodejs.org/api/http.html#http_http_createserver_requestlistener
+[`Server`]: https://nodejs.org/api/http.html#http_class_http_server
+[`listen`]: https://nodejs.org/api/http.html#http_server_listen_port_hostname_backlog_callback
+[API reference]: https://nodejs.org/api/http.html
+[`IncomingMessage`]: https://nodejs.org/api/http.html#http_class_http_incomingmessage
+[`ReadableStream`]: https://nodejs.org/api/stream.html#stream_class_stream_readable
+[`rawHeaders`]: https://nodejs.org/api/http.html#http_message_rawheaders
+[`Buffer`]: https://nodejs.org/api/buffer.html
+[`concat-stream`]: https://www.npmjs.com/package/concat-stream
+[`body`]: https://www.npmjs.com/package/body
+[`npm`]: https://www.npmjs.com
+[`EventEmitter`]: https://nodejs.org/api/events.html#events_class_eventemitter
+[handling these errors]: https://nodejs.org/api/errors.html
+[`domains`]: https://nodejs.org/api/domain.html
+[`ServerResponse`]: https://nodejs.org/api/http.html#http_class_http_serverresponse
+[`setHeader`]: https://nodejs.org/api/http.html#http_response_setheader_name_value
+[`WritableStream`]: https://nodejs.org/api/stream.html#stream_class_stream_writable
+[`writeHead`]: https://nodejs.org/api/http.html#http_response_writehead_statuscode_statusmessage_headers
+[`express`]: https://www.npmjs.com/package/express
+[`router`]: https://www.npmjs.com/package/router
+[`pipe`]: https://nodejs.org/api/stream.html#stream_readable_pipe_destination_options
+[`Error` documentation]: https://nodejs.org/api/errors.html
+[`HTTP`]: https://nodejs.org/api/http.html
diff --git a/locale/fa/docs/guides/backpressuring-in-streams.md b/locale/fa/docs/guides/backpressuring-in-streams.md
new file mode 100644
index 0000000000000..c02406cb87120
--- /dev/null
+++ b/locale/fa/docs/guides/backpressuring-in-streams.md
@@ -0,0 +1,636 @@
+---
+title: Backpressuring in Streams
+layout: docs.hbs
+---
+
+# Backpressuring in Streams
+
+There is a general problem that occurs during data handling called
+[`backpressure`][], which describes a buildup of data behind a buffer during
+data transfer. When the receiving end of the transfer has complex operations,
+or is slower for whatever reason, there is a tendency for data from the
+incoming source to accumulate, like a clog.
+
+To solve this problem, there must be a delegation system in place to ensure a
+smooth flow of data from one source to another. Different communities have
+resolved this issue in ways unique to their programs; Unix pipes and TCP
+sockets are good examples of this, and the technique is often referred to as
+_flow control_. In Node.js, streams have been the adopted solution.
+
+The purpose of this guide is to further detail what backpressure is, and how
+exactly streams address this in Node.js' source code. The second part of
+the guide will introduce suggested best practices to ensure your application's
+code is safe and optimized when implementing streams.
+
+We assume a little familiarity with the general definition of
+[`backpressure`][], [`Buffer`][], and [`EventEmitters`][] in Node.js, as well as
+some experience with [`Stream`][]. If you haven't read through those docs,
+it's not a bad idea to take a look at the API documentation first, as it will
+help expand your understanding while reading this guide.
+
+## The Problem with Data Handling
+
+In a computer system, data is transferred from one process to another through
+pipes, sockets, and signals.
In Node.js, we find a similar mechanism called
+[`Stream`][]. Streams are great! They do so much for Node.js and almost every
+part of the internal codebase utilizes that module. As a developer, you
+are more than encouraged to use them too!
+
+```javascript
+const readline = require('readline');
+
+// process.stdin and process.stdout are both instances of Streams
+const rl = readline.createInterface({
+  input: process.stdin,
+  output: process.stdout
+});
+
+rl.question('Why should you use streams? ', (answer) => {
+  console.log(`Maybe it's ${answer}, maybe it's because they are awesome! :)`);
+
+  rl.close();
+});
+```
+
+A good example of why the backpressure mechanism implemented through streams is
+a great optimization can be demonstrated by comparing the internal system tools
+from Node.js' [`Stream`][] implementation.
+
+In one scenario, we will take a large file (approximately 9 GB) and compress it
+using the familiar [`zip(1)`][] tool.
+
+```
+$ zip The.Matrix.1080p.mkv.zip The.Matrix.1080p.mkv
+```
+
+While that will take a few minutes to complete, in another shell we may run
+a script that uses Node.js' [`zlib`][] module, which wraps another
+compression tool, [`gzip(1)`][].
+
+```javascript
+const gzip = require('zlib').createGzip();
+const fs = require('fs');
+
+const inp = fs.createReadStream('The.Matrix.1080p.mkv');
+const out = fs.createWriteStream('The.Matrix.1080p.mkv.gz');
+
+inp.pipe(gzip).pipe(out);
+```
+
+To test the results, try opening each compressed file. The file compressed by
+the [`zip(1)`][] tool will notify you the file is corrupt, whereas the
+compression finished by [`Stream`][] will decompress without error.
+
+Note: In this example, we use `.pipe()` to get the data source from one end
+to the other. However, notice there are no proper error handlers attached. If
+a chunk of data were to fail to be properly received, the `Readable` source or
+`gzip` stream would not be destroyed. [`pump`][] is a utility tool that
+properly destroys all the streams in a pipeline if one of them fails or closes,
+and is a must-have in this case!
+
+[`pump`][] is only necessary for Node.js 8.x or earlier; for Node.js 10.x
+and later, [`pipeline`][] was introduced to replace [`pump`][]. It is a module
+method that pipes between streams, forwards errors, properly cleans up, and
+provides a callback when the pipeline is complete.
+
+Here is an example of using pipeline:
+
+```javascript
+const { pipeline } = require('stream');
+const fs = require('fs');
+const zlib = require('zlib');
+
+// Use the pipeline API to easily pipe a series of streams
+// together and get notified when the pipeline is fully done.
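+// If any stream in the chain fails or closes early, pipeline destroys
+// all of the streams and surfaces the error in the final callback.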
+// A pipeline to gzip a potentially huge video file efficiently:
+
+pipeline(
+  fs.createReadStream('The.Matrix.1080p.mkv'),
+  zlib.createGzip(),
+  fs.createWriteStream('The.Matrix.1080p.mkv.gz'),
+  (err) => {
+    if (err) {
+      console.error('Pipeline failed', err);
+    } else {
+      console.log('Pipeline succeeded');
+    }
+  }
+);
+```
+
+You can also call [`promisify`][] on pipeline to use it with `async` / `await`:
+
+```javascript
+const stream = require('stream');
+const fs = require('fs');
+const util = require('util');
+const zlib = require('zlib');
+
+const pipeline = util.promisify(stream.pipeline);
+
+async function run() {
+  try {
+    await pipeline(
+      fs.createReadStream('The.Matrix.1080p.mkv'),
+      zlib.createGzip(),
+      fs.createWriteStream('The.Matrix.1080p.mkv.gz'),
+    );
+    console.log('Pipeline succeeded');
+  } catch (err) {
+    console.error('Pipeline failed', err);
+  }
+}
+```
+
+## Too Much Data, Too Quickly
+
+There are instances where a [`Readable`][] stream might give data to the
+[`Writable`][] much too quickly — much more than the consumer can handle!
+
+When that occurs, the consumer will begin to queue all the chunks of data for
+later consumption. The write queue will get longer and longer, and because of
+this more data must be kept in memory until the entire process has completed.
+
+Writing to a disk is a lot slower than reading from a disk; thus, when we are
+trying to compress a file and write it to our hard disk, backpressure will
+occur because the disk writes will not be able to keep up with the speed of
+the reads.
+
+```javascript
+// Secretly the stream is saying: "whoa, whoa! hang on, this is way too much!"
+// Data will begin to build up on the read-side of the data buffer as
+// `write` tries to keep up with the incoming data flow.
+inp.pipe(gzip).pipe(outputFile);
+```
+
+This is why a backpressure mechanism is important. If a backpressure system were
+not present, the process would use up your system's memory, effectively slowing
+down other processes, and monopolizing a large part of your system until
+completion.
+
+This results in a few things:
+
+* Slowing down all other current processes
+* A very overworked garbage collector
+* Memory exhaustion
+
+In the following examples, we will modify the [return value][] of the
+`.write()` function so that it is always `true`, which effectively disables
+backpressure support in Node.js core. In any reference to the 'modified' binary,
+we are talking about running the `node` binary without the `return ret;` line,
+and instead with the replaced `return true;`.
+
+## Excess Drag on Garbage Collection
+
+Let's take a look at a quick benchmark. Using the same example from above, we
+ran a few time trials to get a median time for both binaries.
+
+
+```
+   trial (#)  | `node` binary (ms) | modified `node` binary (ms)
+=================================================================
+      1       |       56924        |          55011
+      2       |       52686        |          55869
+      3       |       59479        |          54043
+      4       |       54473        |          55229
+      5       |       52933        |          59723
+=================================================================
+average time: |       55299        |          55975
+```
+
+Both take around a minute to run, so there's not much of a difference at all,
+but let's take a closer look to confirm whether our suspicions are correct. We
+use the Linux tool [`dtrace`][] to evaluate what's happening with the V8 garbage
+collector.
+
+The GC (garbage collector) measured time indicates the intervals of a full cycle
+of a single sweep done by the garbage collector:
+
+
+```
+approx.
time (ms) | GC (ms) | modified GC (ms) +================================================= + 0 | 0 | 0 + 1 | 0 | 0 + 40 | 0 | 2 + 170 | 3 | 1 + 300 | 3 | 1 + + * * * + * * * + * * * + + 39000 | 6 | 26 + 42000 | 6 | 21 + 47000 | 5 | 32 + 50000 | 8 | 28 + 54000 | 6 | 35 +``` +While the two processes start off the same and seem to work the GC at the same +rate, it becomes evident that after a few seconds with a properly working +backpressure system in place, it spreads the GC load across consistent +intervals of 4-8 milliseconds until the end of the data transfer. + +However, when a backpressure system is not in place, the V8 garbage collection +starts to drag out. The normal binary called the GC approximately __75__ +times in a minute, whereas, the modified binary fires only __36__ times. + +This is the slow and gradual debt accumulating from growing memory usage. As +data gets transferred, without a backpressure system in place, more memory is +being used for each chunk transfer. + +The more memory that is being allocated, the more the GC has to take care of in +one sweep. The bigger the sweep, the more the GC needs to decide what can be +freed up, and scanning for detached pointers in a larger memory space will +consume more computing power. + +## Memory Exhaustion + +To determine the memory consumption of each binary, we've clocked each process +with `/usr/bin/time -lp sudo ./node ./backpressure-example/zlib.js` +individually. + +This is the output on the normal binary: + + +```javascript +Respecting the return value of .write() +============================================= +real 58.88 +user 56.79 +sys 8.79 + 87810048 maximum resident set size + 0 average shared memory size + 0 average unshared data size + 0 average unshared stack size + 19427 page reclaims + 3134 page faults + 0 swaps + 5 block input operations + 194 block output operations + 0 messages sent + 0 messages received + 1 signals received + 12 voluntary context switches + 666037 involuntary context switches +``` + +The maximum byte size occupied by virtual memory turns out to be approximately +87.81 mb. + +And now changing the [return value][] of the [`.write()`][] function, we get: + + +```javascript +Without respecting the return value of .write(): +================================================== +real 54.48 +user 53.15 +sys 7.43 +1524965376 maximum resident set size + 0 average shared memory size + 0 average unshared data size + 0 average unshared stack size + 373617 page reclaims + 3139 page faults + 0 swaps + 18 block input operations + 199 block output operations + 0 messages sent + 0 messages received + 1 signals received + 25 voluntary context switches + 629566 involuntary context switches +``` + +The maximum byte size occupied by virtual memory turns out to be approximately +1.52 gb. + +Without streams in place to delegate the backpressure, there is an order of +magnitude greater of memory space being allocated - a huge margin of +difference between the same process! + +This experiment shows how optimized and cost-effective Node.js' backpressure +mechanism is for your computing system. Now, let's do a break down on how it +works! + +## How Does Backpressure Resolve These Issues? + +There are different functions to transfer data from one process to another. In +Node.js, there is an internal built-in function called [`.pipe()`][]. There are +[other packages][] out there you can use too! 
Ultimately though, at the basic +level of this process, we have two separate components: the _source_ of the +data and the _consumer_. + +When [`.pipe()`][] is called from the source, it signals to the consumer that +there is data to be transferred. The pipe function helps to set up the +appropriate backpressure closures for the event triggers. + +In Node.js the source is a [`Readable`][] stream and the consumer is the +[`Writable`][] stream (both of these may be interchanged with a [`Duplex`][] or +a [`Transform`][] stream, but that is out-of-scope for this guide). + +The moment that backpressure is triggered can be narrowed exactly to the return +value of a [`Writable`][]'s [`.write()`][] function. This return value is +determined by a few conditions, of course. + +In any scenario where the data buffer has exceeded the [`highWaterMark`][] or +the write queue is currently busy, [`.write()`][] will return `false`. + +When a `false` value is returned, the backpressure system kicks in. It will +pause the incoming [`Readable`][] stream from sending any data and wait until +the consumer is ready again. Once the data buffer is emptied, a [`.drain()`][] +event will be emitted and resume the incoming data flow. + +Once the queue is finished, backpressure will allow data to be sent again. +The space in memory that was being used will free itself up and prepare for the +next batch of data. + +This effectively allows a fixed amount of memory to be used at any given +time for a [`.pipe()`][] function. There will be no memory leakage, no +infinite buffering, and the garbage collector will only have to deal with +one area in memory! + +So, if backpressure is so important, why have you (probably) not heard of it? +Well the answer is simple: Node.js does all of this automatically for you. + +That's so great! But also not so great when we are trying to understand how to +implement our own custom streams. + +Note: In most machines, there is a byte size that determines when a buffer +is full (which will vary across different machines). Node.js allows you to set +your own custom [`highWaterMark`][], but commonly, the default is set to 16kb +(16384, or 16 for objectMode streams). In instances where you might +want to raise that value, go for it, but do so with caution! + +## Lifecycle of `.pipe()` + +To achieve a better understanding of backpressure, here is a flow-chart on the +lifecycle of a [`Readable`][] stream being [piped][] into a [`Writable`][] +stream: + + +```javascript + +===================+ + x--> Piping functions +--> src.pipe(dest) | + x are set up during |===================| + x the .pipe method. | Event callbacks | + +===============+ x |-------------------| + | Your Data | x They exist outside | .on('close', cb) | + +=======+=======+ x the data flow, but | .on('data', cb) | + | x importantly attach | .on('drain', cb) | + | x events, and their | .on('unpipe', cb) | ++---------v---------+ x respective callbacks. | .on('error', cb) | +| Readable Stream +----+ | .on('finish', cb) | ++-^-------^-------^-+ | | .on('end', cb) | + ^ | ^ | +-------------------+ + | | | | + | ^ | | + ^ ^ ^ | +-------------------+ +=================+ + ^ | ^ +----> Writable Stream +---------> .write(chunk) | + | | | +-------------------+ +=======+=========+ + | | | | + | ^ | +------------------v---------+ + ^ | +-> if (!chunk) | Is this chunk too big? | + ^ | | emit .end(); | Is the queue busy? 
| + | | +-> else +-------+----------------+---+ + | ^ | emit .write(); | | + | ^ ^ +--v---+ +---v---+ + | | ^-----------------------------------< No | | Yes | + ^ | +------+ +---v---+ + ^ | | + | ^ emit .pause(); +=================+ | + | ^---------------^-----------------------+ return false; <-----+---+ + | +=================+ | + | | + ^ when queue is empty +============+ | + ^------------^-----------------------< Buffering | | + | |============| | + +> emit .drain(); | ^Buffer^ | | + +> emit .resume(); +------------+ | + | ^Buffer^ | | + +------------+ add chunk to queue | + | <---^---------------------< + +============+ +``` + +Note: If you are setting up a pipeline to chain together a few streams to +manipulate your data, you will most likely be implementing [`Transform`][] +stream. + +In this case, your output from your [`Readable`][] stream will enter in the +[`Transform`][] and will pipe into the [`Writable`][]. + +```javascript +Readable.pipe(Transformable).pipe(Writable); +``` + +Backpressure will be automatically applied, but note the both the incoming and +outgoing `highWaterMark` of the [`Transform`][] stream may be manipulated and +will effect the backpressure system. + +## Backpressure Guidelines + +Since [Node.js v0.10][], the [`Stream`][] class has offered the ability to +modify the behaviour of the [`.read()`][] or [`.write()`][] by using the +underscore version of these respective functions ([`._read()`][] and +[`._write()`][]). + +There are guidelines documented for [implementing Readable streams][] and +[implementing Writable streams][]. We will assume you've read these over, and +the next section will go a little bit more in-depth. + +## Rules to Abide By When Implementing Custom Streams + +The golden rule of streams is __to always respect backpressure__. What +constitutes as best practice is non-contradictory practice. So long as you are +careful to avoid behaviours that conflict with internal backpressure support, +you can be sure you're following good practice. + +In general, + +1. Never `.push()` if you are not asked. +2. Never call `.write()` after it returns false but wait for 'drain' instead. +3. Streams changes between different Node.js versions, and the library you use. +Be careful and test things. + +Note: In regards to point 3, an incredibly useful package for building +browser streams is [`readable-stream`][]. Rodd Vagg has written a +[great blog post][] describing the utility of this library. In short, it +provides a type of automated graceful degradation for [`Readable`][] streams, +and supports older versions of browsers and Node.js. + +## Rules specific to Readable Streams + +So far, we have taken a look at how [`.write()`][] affects backpressure and have +focused much on the [`Writable`][] stream. Because of Node.js' functionality, +data is technically flowing downstream from [`Readable`][] to [`Writable`][]. +However, as we can observe in any transmission of data, matter, or energy, the +source is just as important as the destination and the [`Readable`][] stream +is vital to how backpressure is handled. + +Both these processes rely on one another to communicate effectively, if +the [`Readable`][] ignores when the [`Writable`][] stream asks for it to stop +sending in data, it can be just as problematic to when the [`.write()`][]'s return +value is incorrect. + +So, as well with respecting the [`.write()`][] return, we must also respect the +return value of [`.push()`][] used in the [`._read()`][] method. 
If
+[`.push()`][] returns a `false` value, the stream will stop reading from the
+source. Otherwise, it will continue without pause.
+
+Here is an example of bad practice using [`.push()`][]:
+
+```javascript
+// This is problematic as it completely ignores the return value from push,
+// which may be a signal for backpressure from the destination stream!
+class MyReadable extends Readable {
+  _read(size) {
+    let chunk;
+    while (null !== (chunk = getNextChunk())) {
+      this.push(chunk);
+    }
+  }
+}
+```
+
+Additionally, from outside the custom stream, there are pitfalls in ignoring
+backpressure. In this counter-example of good practice, the application's code
+forces data through whenever it is available (signaled by the
+[`.data` event][]):
+
+```javascript
+// This ignores the backpressure mechanisms Node.js has set in place,
+// and unconditionally pushes through data, regardless of whether the
+// destination stream is ready for it or not.
+readable.on('data', (data) =>
+  writable.write(data)
+);
+```
+
+## Rules specific to Writable Streams
+
+Recall that a [`.write()`][] may return true or false depending on some
+conditions. Luckily for us, when building our own [`Writable`][] stream,
+the [`stream state machine`][] will handle our callbacks and determine when to
+handle backpressure and optimize the flow of data for us.
+
+However, when we want to use a [`Writable`][] directly, we must respect the
+[`.write()`][] return value and pay close attention to these conditions:
+
+* If the write queue is busy, [`.write()`][] will return false.
+* If the data chunk is too large, [`.write()`][] will return false (the limit
+is indicated by the variable, [`highWaterMark`][]).
+
+
+```javascript
+// This writable is invalid because of the async nature of JavaScript callbacks.
+// Without a return statement for each callback prior to the last,
+// there is a great chance multiple callbacks will be called.
+class MyWritable extends Writable {
+  _write(chunk, encoding, callback) {
+    if (chunk.toString().indexOf('a') >= 0)
+      callback();
+    else if (chunk.toString().indexOf('b') >= 0)
+      callback();
+    callback();
+  }
+}
+
+// The proper way to write this would be:
+  if (chunk.contains('a'))
+    return callback();
+  else if (chunk.contains('b'))
+    return callback();
+  callback();
+```
+
+There are also some things to look out for when implementing [`._writev()`][].
+The function is coupled with [`.cork()`][], but there is a common mistake when
+writing:
+
+```javascript
+// Using .uncork() twice here makes two calls on the C++ layer, rendering the
+// cork/uncork technique useless.
+ws.cork();
+ws.write('hello ');
+ws.write('world ');
+ws.uncork();
+
+ws.cork();
+ws.write('from ');
+ws.write('Matteo');
+ws.uncork();
+
+// The correct way to write this is to utilize process.nextTick(), which fires
+// on the next event loop tick.
+ws.cork();
+ws.write('hello ');
+ws.write('world ');
+process.nextTick(doUncork, ws);
+
+ws.cork();
+ws.write('from ');
+ws.write('Matteo');
+process.nextTick(doUncork, ws);
+
+// as a global function
+function doUncork(stream) {
+  stream.uncork();
+}
+```
+
+[`.cork()`][] can be called as many times as we want; we just need to be careful
+to call [`.uncork()`][] the same number of times to make it flow again.
+
+## Conclusion
+
+Streams are an often-used module in Node.js. They are important to the internal
+structure and, for developers, to expand and connect across the Node.js module
+ecosystem.
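+
+As a recap in code, here is a minimal sketch (using the same hypothetical
+`getNextChunk()` data source as the counter-example above, returning `null`
+when no data is left) of a `_read()` that respects the return value of
+[`.push()`][]:
+
+```javascript
+const { Readable } = require('stream');
+
+class BackpressureAwareReadable extends Readable {
+  _read(size) {
+    let chunk;
+    while (null !== (chunk = getNextChunk())) {
+      // push() returning false signals backpressure: stop pushing until
+      // Node.js calls _read() again.
+      if (!this.push(chunk)) {
+        return;
+      }
+    }
+    this.push(null); // no data left: end the stream
+  }
+}
+```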
+ +Hopefully, you will now be able to troubleshoot, safely code your own +[`Writable`][] and [`Readable`][] streams with backpressure in mind, and share +your knowledge with colleagues and friends. + +Be sure to read up more on [`Stream`][] for other API functions to help +improve and unleash your streaming capabilities when building an application with +Node.js. + + +[`Stream`]: https://nodejs.org/api/stream.html +[`Buffer`]: https://nodejs.org/api/buffer.html +[`EventEmitters`]: https://nodejs.org/api/events.html +[`Writable`]: https://nodejs.org/api/stream.html#stream_writable_streams +[`Readable`]: https://nodejs.org/api/stream.html#stream_readable_streams +[`Duplex`]: https://nodejs.org/api/stream.html#stream_duplex_and_transform_streams +[`Transform`]: https://nodejs.org/api/stream.html#stream_duplex_and_transform_streams +[`zlib`]: https://nodejs.org/api/zlib.html +[`.drain()`]: https://nodejs.org/api/stream.html#stream_event_drain +[`.data` event]: https://nodejs.org/api/stream.html#stream_event_data +[`.read()`]: https://nodejs.org/docs/latest/api/stream.html#stream_readable_read_size +[`.write()`]: https://nodejs.org/api/stream.html#stream_writable_write_chunk_encoding_callback +[`._read()`]: https://nodejs.org/docs/latest/api/stream.html#stream_readable_read_size_1 +[`._write()`]: https://nodejs.org/docs/latest/api/stream.html#stream_writable_write_chunk_encoding_callback_1 +[`._writev()`]: https://nodejs.org/api/stream.html#stream_writable_writev_chunks_callback +[`.cork()`]: https://nodejs.org/api/stream.html#stream_writable_cork +[`.uncork()`]: https://nodejs.org/api/stream.html#stream_writable_uncork + +[`.push()`]: https://nodejs.org/docs/latest/api/stream.html#stream_readable_push_chunk_encoding + +[implementing Writable streams]: https://nodejs.org/docs/latest/api/stream.html#stream_implementing_a_writable_stream +[implementing Readable streams]: https://nodejs.org/docs/latest/api/stream.html#stream_implementing_a_readable_stream + +[other packages]: https://github.com/sindresorhus/awesome-nodejs#streams +[`backpressure`]: https://en.wikipedia.org/wiki/Back_pressure#Backpressure_in_information_technology +[Node.js v0.10]: https://nodejs.org/docs/v0.10.0/ +[`highWaterMark`]: https://nodejs.org/api/stream.html#stream_buffering +[return value]: https://github.com/nodejs/node/blob/55c42bc6e5602e5a47fb774009cfe9289cb88e71/lib/_stream_writable.js#L239 + +[`readable-stream`]: https://github.com/nodejs/readable-stream +[great blog post]:https://r.va.gg/2014/06/why-i-dont-use-nodes-core-stream-module.html + +[`dtrace`]: http://dtrace.org/blogs/about/ +[`zip(1)`]: https://linux.die.net/man/1/zip +[`gzip(1)`]: https://linux.die.net/man/1/gzip +[`stream state machine`]: https://en.wikipedia.org/wiki/Finite-state_machine + +[`.pipe()`]: https://nodejs.org/docs/latest/api/stream.html#stream_readable_pipe_destination_options +[piped]: https://nodejs.org/docs/latest/api/stream.html#stream_readable_pipe_destination_options +[`pump`]: https://github.com/mafintosh/pump +[`pipeline`]: https://nodejs.org/api/stream.html#stream_stream_pipeline_streams_callback +[`promisify`]: https://nodejs.org/api/util.html#util_util_promisify_original diff --git a/locale/fa/docs/guides/blocking-vs-non-blocking.md b/locale/fa/docs/guides/blocking-vs-non-blocking.md new file mode 100644 index 0000000000000..6f9238e91d46f --- /dev/null +++ b/locale/fa/docs/guides/blocking-vs-non-blocking.md @@ -0,0 +1,148 @@ +--- +title: Overview of Blocking vs Non-Blocking +layout: docs.hbs +--- + +# Overview of Blocking vs 
Non-Blocking + +This overview covers the difference between **blocking** and **non-blocking** +calls in Node.js. This overview will refer to the event loop and libuv but no +prior knowledge of those topics is required. Readers are assumed to have a +basic understanding of the JavaScript language and Node.js callback pattern. + +> "I/O" refers primarily to interaction with the system's disk and +> network supported by [libuv](http://libuv.org/). + + +## Blocking + +**Blocking** is when the execution of additional JavaScript in the Node.js +process must wait until a non-JavaScript operation completes. This happens +because the event loop is unable to continue running JavaScript while a +**blocking** operation is occurring. + +In Node.js, JavaScript that exhibits poor performance due to being CPU intensive +rather than waiting on a non-JavaScript operation, such as I/O, isn't typically +referred to as **blocking**. Synchronous methods in the Node.js standard library +that use libuv are the most commonly used **blocking** operations. Native +modules may also have **blocking** methods. + +All of the I/O methods in the Node.js standard library provide asynchronous +versions, which are **non-blocking**, and accept callback functions. Some +methods also have **blocking** counterparts, which have names that end with +`Sync`. + + +## Comparing Code + +**Blocking** methods execute **synchronously** and **non-blocking** methods +execute **asynchronously**. + +Using the File System module as an example, this is a **synchronous** file read: + +```js +const fs = require('fs'); +const data = fs.readFileSync('/file.md'); // blocks here until file is read +``` + +And here is an equivalent **asynchronous** example: + +```js +const fs = require('fs'); +fs.readFile('/file.md', (err, data) => { + if (err) throw err; +}); +``` + +The first example appears simpler than the second but has the disadvantage of +the second line **blocking** the execution of any additional JavaScript until +the entire file is read. Note that in the synchronous version if an error is +thrown it will need to be caught or the process will crash. In the asynchronous +version, it is up to the author to decide whether an error should throw as +shown. + +Let's expand our example a little bit: + +```js +const fs = require('fs'); +const data = fs.readFileSync('/file.md'); // blocks here until file is read +console.log(data); +// moreWork(); will run after console.log +``` + +And here is a similar, but not equivalent asynchronous example: + +```js +const fs = require('fs'); +fs.readFile('/file.md', (err, data) => { + if (err) throw err; + console.log(data); +}); +// moreWork(); will run before console.log +``` + +In the first example above, `console.log` will be called before `moreWork()`. In +the second example `fs.readFile()` is **non-blocking** so JavaScript execution +can continue and `moreWork()` will be called first. The ability to run +`moreWork()` without waiting for the file read to complete is a key design +choice that allows for higher throughput. + + +## Concurrency and Throughput + +JavaScript execution in Node.js is single threaded, so concurrency refers to the +event loop's capacity to execute JavaScript callback functions after completing +other work. Any code that is expected to run in a concurrent manner must allow +the event loop to continue running as non-JavaScript operations, like I/O, are +occurring. 
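+
+Here is a minimal sketch of that idea (the file name is just a placeholder):
+the timer callback is free to fire while the file read is still in flight,
+because the read is **non-blocking**:
+
+```js
+const fs = require('fs');
+
+fs.readFile('/file.md', (err, data) => {
+  if (err) throw err;
+  console.log('file read complete');
+});
+
+// This callback does not have to wait for the file read to finish.
+setTimeout(() => console.log('timer fired while the read was pending'), 0);
+```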
+ +As an example, let's consider a case where each request to a web server takes +50ms to complete and 45ms of that 50ms is database I/O that can be done +asynchronously. Choosing **non-blocking** asynchronous operations frees up that +45ms per request to handle other requests. This is a significant difference in +capacity just by choosing to use **non-blocking** methods instead of +**blocking** methods. + +The event loop is different than models in many other languages where additional +threads may be created to handle concurrent work. + + +## Dangers of Mixing Blocking and Non-Blocking Code + +There are some patterns that should be avoided when dealing with I/O. Let's look +at an example: + +```js +const fs = require('fs'); +fs.readFile('/file.md', (err, data) => { + if (err) throw err; + console.log(data); +}); +fs.unlinkSync('/file.md'); +``` + +In the above example, `fs.unlinkSync()` is likely to be run before +`fs.readFile()`, which would delete `file.md` before it is actually read. A +better way to write this that is completely **non-blocking** and guaranteed to +execute in the correct order is: + + +```js +const fs = require('fs'); +fs.readFile('/file.md', (readFileErr, data) => { + if (readFileErr) throw readFileErr; + console.log(data); + fs.unlink('/file.md', (unlinkErr) => { + if (unlinkErr) throw unlinkErr; + }); +}); +``` + +The above places a **non-blocking** call to `fs.unlink()` within the callback of +`fs.readFile()` which guarantees the correct order of operations. + + +## Additional Resources + +- [libuv](http://libuv.org/) +- [About Node.js](https://nodejs.org/en/about/) diff --git a/locale/fa/docs/guides/buffer-constructor-deprecation.md b/locale/fa/docs/guides/buffer-constructor-deprecation.md new file mode 100644 index 0000000000000..d5d0c914e2499 --- /dev/null +++ b/locale/fa/docs/guides/buffer-constructor-deprecation.md @@ -0,0 +1,288 @@ +--- +title: Porting to the Buffer.from()/Buffer.alloc() API +layout: docs.hbs +--- + +# Porting to the `Buffer.from()`/`Buffer.alloc()` API + + +## Overview + +This guide explains how to migrate to safe `Buffer` constructor methods. The migration fixes the following deprecation warning: + +
+> The Buffer() and new Buffer() constructors are not recommended for use due to security and usability concerns. Please use the new Buffer.alloc(), Buffer.allocUnsafe(), or Buffer.from() construction methods instead.
+
+- [Variant 1: Drop support for Node.js ≤ 4.4.x and 5.0.0 - 5.9.x](#variant-1) (*recommended*)
+- [Variant 2: Use a polyfill](#variant-2)
+- [Variant 3: Manual detection, with safeguards](#variant-3)
+
+### Finding problematic bits of code using `grep`
+
+Just run `grep -nrE '[^a-zA-Z](Slow)?Buffer\s*\(' --exclude-dir node_modules`.
+
+It will find all the potentially unsafe places in your own code (with some considerably unlikely
+exceptions).
+
+### Finding problematic bits of code using Node.js 8
+
+If you’re using Node.js ≥ 8.0.0 (which is recommended), Node.js exposes multiple options that help with finding the relevant pieces of code:
+
+- `--trace-warnings` will make Node.js show a stack trace for this warning and other warnings that are printed by Node.js.
+- `--trace-deprecation` does the same thing, but only for deprecation warnings.
+- `--pending-deprecation` will show more types of deprecation warnings. In particular, it will show the `Buffer()` deprecation warning, even on Node.js 8.
+
+You can set these flags using environment variables:
+
+```bash
+$ export NODE_OPTIONS='--trace-warnings --pending-deprecation'
+$ cat example.js
+'use strict';
+const foo = new Buffer('foo');
+$ node example.js
+(node:7147) [DEP0005] DeprecationWarning: The Buffer() and new Buffer() constructors are not recommended for use due to security and usability concerns. Please use the new Buffer.alloc(), Buffer.allocUnsafe(), or Buffer.from() construction methods instead.
+    at showFlaggedDeprecation (buffer.js:127:13)
+    at new Buffer (buffer.js:148:3)
+    at Object.<anonymous> (/path/to/example.js:2:13)
+    [... more stack trace lines ...]
+```
+
+### Finding problematic bits of code using linters
+
+ESLint rules [no-buffer-constructor](https://eslint.org/docs/rules/no-buffer-constructor)
+or
+[node/no-deprecated-api](https://github.com/mysticatea/eslint-plugin-node/blob/master/docs/rules/no-deprecated-api.md)
+also find calls to the deprecated `Buffer()` API. Those rules are included in some presets.
+
+There is a drawback, though: these rules don't always
+[work correctly](https://github.com/chalker/safer-buffer#why-not-safe-buffer) when `Buffer` is
+overridden, e.g. with a polyfill, so it is recommended to combine them with one of the other
+methods described above.
+
+
+## Variant 1: Drop support for Node.js ≤ 4.4.x and 5.0.0 - 5.9.x
+
+This is the recommended solution nowadays; it implies only minimal overhead.
+
+The Node.js 5.x release line has been unsupported since July 2016, and the Node.js 4.x release line reaches its End of Life in April 2018 (→ [Schedule](https://github.com/nodejs/Release#release-schedule)). This means that these versions of Node.js will *not* receive any updates, even in case of security issues, so using these release lines should be avoided, if at all possible.
+
+What you would do in this case is convert all `new Buffer()` or `Buffer()` calls to use `Buffer.alloc()` or `Buffer.from()`, in the following way:
+
+- For `new Buffer(number)`, replace it with `Buffer.alloc(number)`.
+- For `new Buffer(string)` (or `new Buffer(string, encoding)`), replace it with `Buffer.from(string)` (or `Buffer.from(string, encoding)`).
+- For all other combinations of arguments (these are much rarer), also replace `new Buffer(...arguments)` with `Buffer.from(...arguments)`.
+
+Note that `Buffer.alloc()` is also _faster_ on the current Node.js versions than
+`new Buffer(size).fill(0)`, which is what you would otherwise need to ensure zero-filling.
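+
+As a quick sketch of those conversions (the variable names are just for
+illustration):
+
+```js
+// Before: deprecated constructor calls.
+const a = new Buffer(16);              // uninitialized memory before Node.js 8!
+const b = new Buffer('hello', 'utf8');
+
+// After: safe replacements.
+const a2 = Buffer.alloc(16);           // 16 zero-filled bytes
+const b2 = Buffer.from('hello', 'utf8');
+```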
+
+Enabling ESLint rule [no-buffer-constructor](https://eslint.org/docs/rules/no-buffer-constructor)
+or
+[node/no-deprecated-api](https://github.com/mysticatea/eslint-plugin-node/blob/master/docs/rules/no-deprecated-api.md)
+is recommended to avoid accidental unsafe `Buffer` API usage.
+
+There is also a [JSCodeshift codemod](https://github.com/joyeecheung/node-dep-codemod#dep005)
+for automatically migrating `Buffer` constructors to `Buffer.alloc()` or `Buffer.from()`.
+Note that it currently only works with cases where the arguments are literals or where the
+constructor is invoked with two arguments.
+
+_If you currently support those older Node.js versions and dropping support for them is not possible, or if you support older branches of your packages, consider using [Variant 2](#variant-2)
+or [Variant 3](#variant-3) on older branches, so people using those older branches will also receive
+the fix. That way, you will eradicate potential issues caused by unguarded `Buffer` API usage and
+your users will not observe a runtime deprecation warning when running your code on Node.js 10._
+
+
+## Variant 2: Use a polyfill
+
+There are three different polyfills available:
+
+- **[safer-buffer](https://www.npmjs.com/package/safer-buffer)** is a drop-in replacement for the
+  entire `Buffer` API, that will _throw_ when using `new Buffer()`.
+
+  You would take exactly the same steps as in [Variant 1](#variant-1), but with a polyfill
+  `const Buffer = require('safer-buffer').Buffer` in all files where you use the new `Buffer` API.
+
+  Do not use the old `new Buffer()` API. In any files where the line above is added,
+  using the old `new Buffer()` API will _throw_.
+
+- **[buffer-from](https://www.npmjs.com/package/buffer-from) and/or
+  [buffer-alloc](https://www.npmjs.com/package/buffer-alloc)** are
+  [ponyfills](https://ponyfill.com/) for their respective part of the `Buffer` API. You only need
+  to add the package(s) corresponding to the API you are using.
+
+  You would import the module needed with an appropriate name, e.g.
+  `const bufferFrom = require('buffer-from')` and then use that instead of the call to
+  `new Buffer()`, e.g. `new Buffer('test')` becomes `bufferFrom('test')`.
+
+  A downside of this approach is that migrating off these ponyfills requires slightly more
+  code changes (as you would be using e.g. `Buffer.from()` under a different name).
+
+- **[safe-buffer](https://www.npmjs.com/package/safe-buffer)** is also a drop-in replacement for
+  the entire `Buffer` API, but using `new Buffer()` will still work as before.
+
+  A downside to this approach is that it will allow you to also use the older `new Buffer()` API
+  in your code, which is problematic since it can cause issues in your code, and will start
+  emitting runtime deprecation warnings starting with Node.js 10
+  ([read more here](https://github.com/chalker/safer-buffer#why-not-safe-buffer)).
+
+Note that in either case, it is important that you also remove all calls to the old `Buffer`
+API manually — just throwing in `safe-buffer` doesn't fix the problem by itself, it just provides
+a polyfill for the new API. I have seen people make that mistake.
+
+Enabling ESLint rule [no-buffer-constructor](https://eslint.org/docs/rules/no-buffer-constructor)
+or
+[node/no-deprecated-api](https://github.com/mysticatea/eslint-plugin-node/blob/master/docs/rules/no-deprecated-api.md)
+is recommended.
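+
+For example, with `safer-buffer` the only addition at the top of each affected
+file is the import (a short sketch):
+
+```js
+const Buffer = require('safer-buffer').Buffer;
+
+const a = Buffer.alloc(16);     // works like Buffer.alloc() in core
+const b = Buffer.from('hello'); // works like Buffer.from() in core
+// new Buffer('hello')          // would throw, by design
+```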
+
+_Don't forget to drop the polyfill usage once you drop support for Node.js < 4.5.0._
+
+
+## Variant 3: Manual detection, with safeguards
+
+This is useful if you create `Buffer` instances in only a few places (e.g. one), or if you have
+your own wrapper around them.
+
+### `Buffer(0)`
+
+This special case for creating empty buffers can be safely replaced with `Buffer.concat([])`, which
+returns the same result all the way down to Node.js 0.8.x.
+
+### `Buffer(notNumber)`
+
+Before:
+
+```js
+const buf = new Buffer(notNumber, encoding);
+```
+
+After:
+
+```js
+let buf;
+if (Buffer.from && Buffer.from !== Uint8Array.from) {
+  buf = Buffer.from(notNumber, encoding);
+} else {
+  if (typeof notNumber === 'number') {
+    throw new Error('The "size" argument must not be of type number.');
+  }
+  buf = new Buffer(notNumber, encoding);
+}
+```
+
+`encoding` is optional.
+
+Note that the `typeof notNumber` check before `new Buffer()` is required (for cases when the
+`notNumber` argument is not hard-coded) and _is not caused by the deprecation of the `Buffer`
+constructor_ — it's exactly _why_ the `Buffer` constructor is deprecated. Ecosystem packages
+lacking this type check caused numerous security issues — situations where unsanitized user input
+could end up in `Buffer(arg)` created problems ranging from DoS to leaking sensitive information
+from process memory to the attacker.
+
+When the `notNumber` argument is hardcoded (e.g. literal `"abc"` or `[0,1,2]`), the `typeof` check
+can be omitted.
+
+Also, note that using TypeScript does not fix this problem for you — when libraries written in
+TypeScript are used from JS, or when user input ends up there, it behaves exactly as pure JS, as
+all type checks are translation-time only and are not present in the actual JS code which TS
+compiles to.
+
+### `Buffer(number)`
+
+For Node.js 0.10.x (and below) support:
+
+```js
+var buf;
+if (Buffer.alloc) {
+  buf = Buffer.alloc(number);
+} else {
+  buf = new Buffer(number);
+  buf.fill(0);
+}
+```
+
+Otherwise (Node.js ≥ 0.12.x):
+
+```js
+const buf = Buffer.alloc ? Buffer.alloc(number) : new Buffer(number).fill(0);
+```
+
+## Regarding `Buffer.allocUnsafe()`
+
+Be extra cautious when using `Buffer.allocUnsafe()`:
+ * Don't use it if you don't have a good reason to:
+   * e.g. you probably won't ever see a performance difference for small buffers; in fact, those
+     might be even faster with `Buffer.alloc()`;
+   * if your code is not in the hot code path, you also probably won't notice a difference;
+   * keep in mind that zero-filling minimizes the potential risks.
+ * If you use it, make sure that you never return the buffer in a partially-filled state:
+   * if you are writing to it sequentially, always truncate it to the actual written length.
+
+Errors in handling buffers allocated with `Buffer.allocUnsafe()` could result in various issues,
+ranging from undefined behavior of your code to sensitive data (user input, passwords, certs)
+leaking to a remote attacker.
+
+_Note that the same applies to `new Buffer()` usage without zero-filling, depending on the Node.js
+version (and lacking type checks also adds DoS to the list of potential problems)._
+
+
+## FAQ
+
+
+### What is wrong with the `Buffer` constructor?
+
+The `Buffer` constructor could be used to create a buffer in many different ways:
+
+- `new Buffer(42)` creates a `Buffer` of 42 bytes.
Before Node.js 8, this buffer contained
+  *arbitrary memory* for performance reasons, which could include anything ranging from
+  program source code to passwords and encryption keys.
+- `new Buffer('abc')` creates a `Buffer` that contains the UTF-8-encoded version of
+  the string `'abc'`. A second argument could specify another encoding: for example,
+  `new Buffer(string, 'base64')` could be used to convert a Base64 string into the original
+  sequence of bytes that it represents.
+- There are several other combinations of arguments.
+
+This meant that in code like `var buffer = new Buffer(foo);`, *it is not possible to tell
+what exactly the contents of the generated buffer are* without knowing the type of `foo`.
+
+Sometimes, the value of `foo` comes from an external source. For example, this function
+could be exposed as a service on a web server, converting a UTF-8 string into its Base64 form:
+
+```js
+function stringToBase64(req, res) {
+  // The request body should have the format of `{ string: 'foobar' }`.
+  const rawBytes = new Buffer(req.body.string);
+  const encoded = rawBytes.toString('base64');
+  res.end(JSON.stringify({ encoded }));
+}
+```
+
+Note that this code does *not* validate the type of `req.body.string`:
+
+- `req.body.string` is expected to be a string. If this is the case, all goes well.
+- `req.body.string` is controlled by the client that sends the request.
+- If `req.body.string` is the *number* `50`, the `rawBytes` would be a `Buffer` of `50` bytes:
+  - Before Node.js 8, the content would be uninitialized
+  - Since Node.js 8, the content would be `50` bytes with the value `0`
+
+Because of the missing type check, an attacker could intentionally send a number
+as part of the request. Using this, they can either:
+
+- Read uninitialized memory. This **will** leak passwords, encryption keys and other
+  kinds of sensitive information. (Information leak)
+- Force the program to allocate a large amount of memory. For example, when specifying
+  `500000000` as the input value, each request will allocate 500MB of memory.
+  This can be used to either exhaust the memory available to the program completely
+  and make it crash, or slow it down significantly. (Denial of Service)
+
+Both of these scenarios are considered serious security issues in a real-world
+web server context.
+
+When using `Buffer.from(req.body.string)` instead, passing a number will always
+throw an exception, giving controlled behavior that can always be handled by
+the program.
+
+
+### The `Buffer()` constructor has been deprecated for a while. Is this really an issue?
+
+Surveys of code in the `npm` ecosystem have shown that the `Buffer()` constructor is still
+widely used. This includes new code, and overall usage of such code has actually been
+*increasing*.
diff --git a/locale/fa/docs/guides/debugging-getting-started.md b/locale/fa/docs/guides/debugging-getting-started.md
new file mode 100644
index 0000000000000..176300442f103
--- /dev/null
+++ b/locale/fa/docs/guides/debugging-getting-started.md
@@ -0,0 +1,262 @@
+---
+title: Debugging - Getting Started
+layout: docs.hbs
+---
+
+# Debugging Guide
+
+This guide will help you get started debugging your Node.js apps and scripts.
+
+## Enable Inspector
+
+When started with the **--inspect** switch, a Node.js process listens via WebSockets
+for diagnostic commands as defined by the [Inspector Protocol][],
+by default at host and port 127.0.0.1:9229. Each process is also assigned a
+unique [UUID][] (e.g. `0f2c936f-b1cd-4ac9-aab3-f63b0f33d55e`).
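+
+For example, starting a (hypothetical) `server.js` with the inspector enabled prints output
+similar to the following, with the WebSocket URL containing the assigned UUID:
+
+```bash
+$ node --inspect server.js
+Debugger listening on ws://127.0.0.1:9229/0f2c936f-b1cd-4ac9-aab3-f63b0f33d55e
+For help see https://nodejs.org/en/docs/inspector
+```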
+
+Inspector clients must know and specify the host address, port, and UUID to connect
+to the WebSocket interface. The full URL is
+`ws://127.0.0.1:9229/0f2c936f-b1cd-4ac9-aab3-f63b0f33d55e`, though the actual host,
+port, and UUID will of course vary per instance.
+
+Inspector also includes an HTTP endpoint to serve metadata about the debuggee,
+including its WebSocket URL, UUID, and Chrome DevTools URL. Get this metadata
+by sending an HTTP request to `http://[host:port]/json/list`. This returns a
+JSON object like the following; use the `webSocketDebuggerUrl` property as the
+URL to connect directly to Inspector.
+
+
+```json
+{
+  "description": "node.js instance",
+  "devtoolsFrontendUrl": "chrome-devtools://devtools/bundled/inspector.html?experiments=true&v8only=true&ws=127.0.0.1:9229/0f2c936f-b1cd-4ac9-aab3-f63b0f33d55e",
+  "faviconUrl": "https://nodejs.org/static/favicon.ico",
+  "id": "0f2c936f-b1cd-4ac9-aab3-f63b0f33d55e",
+  "title": "node",
+  "type": "node",
+  "url": "file://",
+  "webSocketDebuggerUrl": "ws://127.0.0.1:9229/0f2c936f-b1cd-4ac9-aab3-f63b0f33d55e"
+}
+```
+
+A Node.js process started *without* `--inspect` can also be instructed to start
+listening for debugging messages by signaling it with `SIGUSR1` (on Linux and
+OS X). As of Node 7 this activates the legacy Debugger API; in Node 8 and later
+it will activate the Inspector API.
+
+---
+## Security Implications
+
+Since the debugger has full access to the Node.js execution environment, a
+malicious actor able to connect to this port may be able to execute arbitrary
+code on behalf of the Node process. It is important to understand the security
+implications of exposing the debugger port on public and private networks.
+
+### Exposing the debug port publicly is unsafe
+
+If the debugger is bound to a public IP address, or to 0.0.0.0, any client that
+can reach your IP address will be able to connect to the debugger without any
+restriction and will be able to run arbitrary code.
+
+By default `node --inspect` binds to 127.0.0.1. You explicitly need to provide a
+public IP address or 0.0.0.0, etc., if you intend to allow external connections
+to the debugger. Doing so may expose you to a potentially significant security
+threat. We suggest ensuring that appropriate firewalls and access controls are
+in place to prevent a security exposure.
+
+See the section on '[Enabling remote debugging scenarios](#enabling-remote-debugging-scenarios)' for advice on how
+to safely allow remote debugger clients to connect.
+
+### Local applications have full access to the inspector
+
+Even if you bind the inspector port to 127.0.0.1 (the default), any applications
+running locally on your machine will have unrestricted access. This is by design,
+to allow local debuggers to attach conveniently.
+
+### Browsers, WebSockets and same-origin policy
+
+Websites open in a web browser can make WebSocket and HTTP requests under the
+browser security model. An initial HTTP connection is necessary to obtain a
+unique debugger session id. The same-origin policy prevents websites from being
+able to make this HTTP connection. For additional security against
+[DNS rebinding attacks](https://en.wikipedia.org/wiki/DNS_rebinding), Node.js
+verifies that the 'Host' headers for the connection specify either an IP
+address or precisely `localhost` or `localhost6`.
+
+These security policies disallow connecting to a remote debug server by
+specifying the hostname.
You can work around this restriction by specifying
+either the IP address or by using ssh tunnels as described below.
+
+## Inspector Clients
+
+Several commercial and open source tools can connect to Node's Inspector. Basic
+info on these follows:
+
+#### [node-inspect](https://github.com/nodejs/node-inspect)
+
+* CLI debugger supported by the Node.js Foundation which uses the [Inspector Protocol][].
+* A version is bundled with Node and can be used with `node inspect myscript.js`.
+* The latest version can also be installed independently (e.g. `npm install -g node-inspect`)
+  and used with `node-inspect myscript.js`.
+
+#### [Chrome DevTools](https://github.com/ChromeDevTools/devtools-frontend) 55+
+
+* **Option 1**: Open `chrome://inspect` in a Chromium-based
+  browser. Click the Configure button and ensure your target host and port
+  are listed.
+* **Option 2**: Copy the `devtoolsFrontendUrl` from the output of `/json/list`
+  (see above) or the `--inspect` hint text and paste it into Chrome.
+* **Option 3**: Install the Chrome Extension NIM (Node Inspector Manager):
+  https://chrome.google.com/webstore/detail/nim-node-inspector-manage/gnhhdgbaldcilmgcpfddgdbkhjohddkj
+
+#### [Visual Studio Code](https://github.com/microsoft/vscode) 1.10+
+
+* In the Debug panel, click the settings icon to open `.vscode/launch.json`.
+  Select "Node.js" for initial setup.
+
+#### [Visual Studio](https://github.com/Microsoft/nodejstools) 2017
+
+* Choose "Debug > Start Debugging" from the menu or hit F5.
+* [Detailed instructions](https://github.com/Microsoft/nodejstools/wiki/Debugging).
+
+#### [JetBrains WebStorm](https://www.jetbrains.com/webstorm/) 2017.1+ and other JetBrains IDEs
+
+* Create a new Node.js debug configuration and hit Debug. `--inspect` will be used
+  by default for Node.js 7+. To disable, uncheck `js.debugger.node.use.inspect` in
+  the IDE Registry.
+
+#### [chrome-remote-interface](https://github.com/cyrus-and/chrome-remote-interface)
+
+* Library to ease connections to Inspector Protocol endpoints.
+
+---
+
+## Command-line options
+
+The following table lists the impact of various runtime flags on debugging:
+
+| Flag | Meaning |
+| ---- | ------- |
+| `--inspect` | Enable inspector agent; listen on default address and port (127.0.0.1:9229) |
+| `--inspect=[host:port]` | Enable inspector agent; bind to address or hostname `host` (default: 127.0.0.1); listen on port `port` (default: 9229) |
+| `--inspect-brk` | Enable inspector agent; listen on default address and port (127.0.0.1:9229); break before user code starts |
+| `--inspect-brk=[host:port]` | Enable inspector agent; bind to address or hostname `host` (default: 127.0.0.1); listen on port `port` (default: 9229); break before user code starts |
+| `node inspect script.js` | Spawn child process to run user's script under `--inspect` flag; use main process to run CLI debugger |
+| `node inspect --port=xxxx script.js` | Spawn child process to run user's script under `--inspect` flag; use main process to run CLI debugger; listen on port `port` (default: 9229) |
+ +--- + +## Enabling remote debugging scenarios + +We recommend that you never have the debugger listen on a public IP address. If +you need to allow remote debugging connections we recommend the use of ssh +tunnels instead. We provide the following example for illustrative purposes only. +Please understand the security risk of allowing remote access to a privileged +service before proceeding. + +Let's say you are running Node on remote machine, remote.example.com, that you +want to be able to debug. On that machine, you should start the node process +with the inspector listening only to localhost (the default). + +```bash +$ node --inspect server.js +``` + +Now, on your local machine from where you want to initiate a debug client +connection, you can setup an ssh tunnel: + +```bash +$ ssh -L 9221:localhost:9229 user@remote.example.com +``` + +This starts a ssh tunnel session where a connection to port 9221 on your local +machine will be forwarded to port 9229 on remote.example.com. You can now attach +a debugger such as Chrome DevTools or Visual Studio Code to localhost:9221, +which should be able to debug as if the Node.js application was running locally. + +--- + +## Legacy Debugger + +**The legacy debugger has been deprecated as of Node 7.7.0. Please use --inspect +and Inspector instead.** + +When started with the **--debug** or **--debug-brk** switches in version 7 and +earlier, Node.js listens for debugging commands defined by the discontinued +V8 Debugging Protocol on a TCP port, by default `5858`. Any debugger client +which speaks this protocol can connect to and debug the running process; a +couple popular ones are listed below. + +The V8 Debugging Protocol is no longer maintained or documented. + +#### [Built-in Debugger](https://nodejs.org/dist/latest-v6.x/docs/api/debugger.html) + +Start `node debug script_name.js` to start your script under Node's builtin +command-line debugger. Your script starts in another Node process started with +the `--debug-brk` option, and the initial Node process runs the `_debugger.js` +script and connects to your target. + +#### [node-inspector](https://github.com/node-inspector/node-inspector) + +Debug your Node.js app with Chrome DevTools by using an intermediary process +which translates the Inspector Protocol used in Chromium to the V8 Debugger +protocol used in Node.js. + + + +[Inspector Protocol]: https://chromedevtools.github.io/debugger-protocol-viewer/v8/ +[UUID]: https://tools.ietf.org/html/rfc4122 diff --git a/locale/fa/docs/guides/domain-postmortem.md b/locale/fa/docs/guides/domain-postmortem.md new file mode 100644 index 0000000000000..6426c2a73361a --- /dev/null +++ b/locale/fa/docs/guides/domain-postmortem.md @@ -0,0 +1,444 @@ +--- +title: Domain Module Postmortem +layout: docs.hbs +--- + +# Domain Module Postmortem + +## Usability Issues + +### Implicit Behavior + +It's possible for a developer to create a new domain and then simply run +`domain.enter()`. Which then acts as a catch-all for any exception in the +future that couldn't be observed by the thrower. Allowing a module author to +intercept the exceptions of unrelated code in a different module. Preventing +the originator of the code from knowing about its own exceptions. 
+
+Here's an example of how one indirectly linked module can affect another:
+
+```js
+// module a.js
+const b = require('./b');
+const c = require('./c');
+
+
+// module b.js
+const d = require('domain').create();
+d.on('error', () => { /* silence everything */ });
+d.enter();
+
+
+// module c.js
+const dep = require('some-dep');
+dep.method(); // Uh-oh! This method doesn't actually exist.
+```
+
+Since module `b` enters the domain but never exits it, any uncaught exception
+will be swallowed, leaving module `c` in the dark as to why the entire script
+didn't run and leaving `module.exports` potentially only partially populated.
+Doing this is not the same as listening for `'uncaughtException'`, as the
+latter is explicitly meant to catch errors globally. The other issue is that
+domains are processed prior to any `'uncaughtException'` handlers, and prevent
+them from running.
+
+Another issue is that domains route errors automatically if no `'error'`
+handler was set on the event emitter. There is no opt-in mechanism for this,
+and the behavior automatically propagates across the entire asynchronous
+chain. This may seem useful at first, but once asynchronous calls are two or
+more modules deep and one of them doesn't include an error handler, the
+creator of the domain will suddenly be catching unexpected exceptions, and the
+thrower's exception will go unnoticed by its author.
+
+The following is a simple example of how a missing `'error'` handler allows
+the active domain to hijack the error:
+
+```js
+const domain = require('domain');
+const net = require('net');
+const d = domain.create();
+d.on('error', (err) => console.error(err.message));
+
+d.run(() => net.createServer((c) => {
+  c.end();
+  c.write('bye');
+}).listen(8000));
+```
+
+Even manually removing the connection via `d.remove(c)` does not prevent the
+connection's error from being automatically intercepted.
+
+A failing that plagues both error routing and exception handling is the
+inconsistency in how errors are bubbled. The following is an example of how
+nested domains will and won't bubble the exception based on when it happens:
+
+```js
+const domain = require('domain');
+const net = require('net');
+const d = domain.create();
+d.on('error', () => console.error('d intercepted an error'));
+
+d.run(() => {
+  const server = net.createServer((c) => {
+    const e = domain.create(); // No 'error' handler being set.
+    e.run(() => {
+      // This will not be caught by d's error handler.
+      setImmediate(() => {
+        throw new Error('thrown from setImmediate');
+      });
+      // Though this one will bubble to d's error handler.
+      throw new Error('immediately thrown');
+    });
+  }).listen(8080);
+});
+```
+
+It may be expected that nested domains always remain nested and will always
+propagate the exception up the domain stack, or that exceptions will never
+automatically bubble. Unfortunately both situations occur, leading to
+potentially confusing behavior that may even be prone to difficult-to-debug
+timing conflicts.
+
+
+### API Gaps
+
+While APIs based on `EventEmitter` can use `bind()` and errback-style
+callbacks can use `intercept()`, alternative APIs that implicitly bind to the
+active domain must be executed inside of `run()`. This means that if module
+authors wanted to support domains using a mechanism other than those
+mentioned, they had to implement domain support manually, instead of being
+able to leverage the implicit mechanisms already in place.
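+
+As a sketch of the two explicit mechanisms mentioned above (the file path here is just for
+illustration):
+
+```js
+const domain = require('domain');
+const fs = require('fs');
+const d = domain.create();
+d.on('error', (er) => console.error('domain caught:', er.message));
+
+// bind(): wraps a callback so that any exception it throws is
+// routed to d's 'error' handler.
+fs.readFile('/no/such/file', d.bind((er, data) => {
+  if (er) throw er; // ends up in d's 'error' handler
+}));
+
+// intercept(): like bind(), but the errback-style error argument is
+// intercepted and routed to d, so the callback only runs on success.
+fs.readFile('/no/such/file', d.intercept((data) => {
+  // only reached if readFile succeeded
+}));
+```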
+
+
+### Error Propagation
+
+Propagating errors across nested domains is not straightforward, if it is even
+possible. Existing documentation shows a simple example of how to `close()` an
+`http` server if there is an error in the request handler. What it does not
+explain is how to close the server if the request handler creates another
+domain instance for another async request. Take the following as a simple
+example of how error propagation fails:
+
+```js
+const d1 = domain.create();
+d1.foo = true; // custom member to make more visible in console
+d1.on('error', (er) => { /* handle error */ });
+
+d1.run(() => setTimeout(() => {
+  const d2 = domain.create();
+  d2.bar = 43;
+  d2.on('error', (er) => console.error(er.message, domain._stack));
+  d2.run(() => {
+    setTimeout(() => {
+      setTimeout(() => {
+        throw new Error('outer');
+      });
+      throw new Error('inner');
+    });
+  });
+}));
+```
+
+Even in the case that the domain instances are being used for local storage,
+so that access to resources is made available, there is still no way to allow
+the error to continue propagating from `d2` back to `d1`. Quick inspection may
+tell us that simply throwing from `d2`'s domain `'error'` handler would allow
+`d1` to then catch the exception and execute its own error handler. That is
+not the case, though. Upon inspection of `domain._stack` you'll see that the
+stack only contains `d2`.
+
+This may be considered a failing of the API, but even if it did operate in
+this way, there is still the issue of transmitting the fact that a branch in
+the asynchronous execution has failed, and that all further operations in that
+branch must cease. In the example of the http request handler, if we fire off
+several asynchronous requests and each one then `write()`'s data back to the
+client, many more errors will arise from attempting to `write()` to a closed
+handle. More on this in _Resource Cleanup on Exception_.
+
+
+### Resource Cleanup on Exception
+
+The following script contains a more complex example of properly cleaning up
+in a small resource dependency tree in the case that an exception occurs in a
+given connection or any of its dependencies.
Breaking down the script into its +basic operations: + +```js +'use strict'; + +const domain = require('domain'); +const EE = require('events'); +const fs = require('fs'); +const net = require('net'); +const util = require('util'); +const print = process._rawDebug; + +const pipeList = []; +const FILENAME = '/tmp/tmp.tmp'; +const PIPENAME = '/tmp/node-domain-example-'; +const FILESIZE = 1024; +let uid = 0; + +// Setting up temporary resources +const buf = Buffer.alloc(FILESIZE); +for (let i = 0; i < buf.length; i++) + buf[i] = ((Math.random() * 1e3) % 78) + 48; // Basic ASCII +fs.writeFileSync(FILENAME, buf); + +function ConnectionResource(c) { + EE.call(this); + this._connection = c; + this._alive = true; + this._domain = domain.create(); + this._id = Math.random().toString(32).substr(2).substr(0, 8) + (++uid); + + this._domain.add(c); + this._domain.on('error', () => { + this._alive = false; + }); +} +util.inherits(ConnectionResource, EE); + +ConnectionResource.prototype.end = function end(chunk) { + this._alive = false; + this._connection.end(chunk); + this.emit('end'); +}; + +ConnectionResource.prototype.isAlive = function isAlive() { + return this._alive; +}; + +ConnectionResource.prototype.id = function id() { + return this._id; +}; + +ConnectionResource.prototype.write = function write(chunk) { + this.emit('data', chunk); + return this._connection.write(chunk); +}; + +// Example begin +net.createServer((c) => { + const cr = new ConnectionResource(c); + + const d1 = domain.create(); + fs.open(FILENAME, 'r', d1.intercept((fd) => { + streamInParts(fd, cr, 0); + })); + + pipeData(cr); + + c.on('close', () => cr.end()); +}).listen(8080); + +function streamInParts(fd, cr, pos) { + const d2 = domain.create(); + const alive = true; + d2.on('error', (er) => { + print('d2 error:', er.message); + cr.end(); + }); + fs.read(fd, Buffer.alloc(10), 0, 10, pos, d2.intercept((bRead, buf) => { + if (!cr.isAlive()) { + return fs.close(fd); + } + if (cr._connection.bytesWritten < FILESIZE) { + // Documentation says callback is optional, but doesn't mention that if + // the write fails an exception will be thrown. + const goodtogo = cr.write(buf); + if (goodtogo) { + setTimeout(() => streamInParts(fd, cr, pos + bRead), 1000); + } else { + cr._connection.once('drain', () => streamInParts(fd, cr, pos + bRead)); + } + return; + } + cr.end(buf); + fs.close(fd); + })); +} + +function pipeData(cr) { + const pname = PIPENAME + cr.id(); + const ps = net.createServer(); + const d3 = domain.create(); + const connectionList = []; + d3.on('error', (er) => { + print('d3 error:', er.message); + cr.end(); + }); + d3.add(ps); + ps.on('connection', (conn) => { + connectionList.push(conn); + conn.on('data', () => {}); // don't care about incoming data. 
+    conn.on('close', () => {
+      connectionList.splice(connectionList.indexOf(conn), 1);
+    });
+  });
+  cr.on('data', (chunk) => {
+    for (let i = 0; i < connectionList.length; i++) {
+      connectionList[i].write(chunk);
+    }
+  });
+  cr.on('end', () => {
+    for (let i = 0; i < connectionList.length; i++) {
+      connectionList[i].end();
+    }
+    ps.close();
+  });
+  pipeList.push(pname);
+  ps.listen(pname);
+}
+
+process.on('SIGINT', () => process.exit());
+process.on('exit', () => {
+  try {
+    for (let i = 0; i < pipeList.length; i++) {
+      fs.unlinkSync(pipeList[i]);
+    }
+    fs.unlinkSync(FILENAME);
+  } catch (e) { }
+});
+
+```
+
+- When a new connection happens, concurrently:
+  - Open a file on the file system
+  - Open a pipe to a unique socket
+- Read a chunk of the file asynchronously
+- Write the chunk to both the TCP connection and any listening sockets
+- If any of these resources errors, notify all other attached resources that
+  they need to clean up and shut down
+
+As we can see from this example, a lot more must be done to properly clean up
+resources when something fails than what can be done strictly through the
+domain API. All that domains offer is an exception aggregation mechanism. Even
+the potentially useful ability to propagate data with the domain is easily
+countered, in this example, by passing the needed resources as a function
+argument.
+
+One problem domains perpetuated was the supposed simplicity of being able to
+continue execution of the application despite an unexpected exception,
+contrary to what the documentation stated. This example demonstrates the
+fallacy behind that idea.
+
+Attempting proper resource cleanup on unexpected exception becomes more complex
+as the application itself grows in complexity. This example only has 3 basic
+resources in play, all of them with a clear dependency path. If an
+application uses something like shared resources or resource reuse, the
+difficulty of cleaning up, and of properly testing that cleanup has been done,
+grows greatly.
+
+In the end, in terms of handling errors, domains aren't much more than a
+glorified `'uncaughtException'` handler, except with more implicit and
+unobservable behavior by third parties.
+
+
+### Resource Propagation
+
+Another use case for domains was to use them to propagate data along
+asynchronous data paths. One problematic point is the ambiguity of when to
+expect the correct domain when there are multiple in the stack (which must be
+assumed if the async stack works with other modules). There is also a conflict
+between being able to depend on a domain for error handling while also having
+it available to retrieve the necessary data.
+
+The following is an involved example demonstrating the failure of using
+domains to propagate data along asynchronous stacks:
+
+```js
+const domain = require('domain');
+const net = require('net');
+
+const server = net.createServer((c) => {
+  // Use a domain to propagate data across events within the
+  // connection so that we don't have to pass arguments
+  // everywhere.
+  const d = domain.create();
+  d.data = { connection: c };
+  d.add(c);
+  // Mock class that does some useless async data transformation
+  // for demonstration purposes.
+  const ds = new DataStream(dataTransformed);
+  c.on('data', (chunk) => ds.data(chunk));
+}).listen(8080, () => console.log('listening on 8080'));
+
+function dataTransformed(chunk) {
+  // FAIL! Because the DataStream instance also created a
+  // domain we have now lost the active domain we had
+  // hoped to use.
+  domain.active.data.connection.write(chunk);
+}
+
+function DataStream(cb) {
+  this.cb = cb;
+  // DataStream wants to use domains for data propagation too!
+  // Unfortunately this will conflict with any domain that
+  // already exists.
+  this.domain = domain.create();
+  this.domain.data = { inst: this };
+}
+
+DataStream.prototype.data = function data(chunk) {
+  // This code is self contained, but pretend it's a complex
+  // operation that crosses at least one other module. So
+  // passing along "this", etc., is not easy.
+  this.domain.run(() => {
+    // Simulate an async operation that does the data transform.
+    setImmediate(() => {
+      for (let i = 0; i < chunk.length; i++)
+        chunk[i] = ((chunk[i] + Math.random() * 100) % 96) + 33;
+      // Grab the instance from the active domain and use that
+      // to call the user's callback.
+      const self = domain.active.data.inst;
+      self.cb(chunk);
+    });
+  });
+};
+```
+
+The above shows that it is difficult to have more than one asynchronous API
+attempt to use domains to propagate data. This example could possibly be fixed
+by assigning `parent: domain.active` in the `DataStream` constructor, then
+restoring it via `domain.active = domain.active.data.parent` just before the
+user's callback is called. Also, the instantiation of `DataStream` in the
+`'connection'` callback must be run inside `d.run()`, instead of simply using
+`d.add(c)`, otherwise there will be no active domain.
+
+In short, for this to have any chance of working, usage would need to adhere
+strictly to a set of guidelines that would be difficult to enforce or test.
+
+
+## Performance Issues
+
+A significant deterrent from using domains is the overhead. Using node's
+built-in http benchmark, `http_simple.js`, without domains it can handle over
+22,000 requests/second, whereas if it's run with `NODE_USE_DOMAINS=1` that
+number drops to under 17,000 requests/second. In this case there is only
+a single global domain. If we edit the benchmark so the http request callback
+creates a new domain instance, performance drops further to 15,000
+requests/second.
+
+While this probably wouldn't affect a server only serving a few hundred or even
+a thousand requests per second, the amount of overhead is directly proportional
+to the number of asynchronous requests made. So if a single connection needs to
+connect to several other services, all of those will contribute to the overall
+latency of delivering the final product to the client.
+
+Using `AsyncWrap` and tracking the number of times
+`init`/`pre`/`post`/`destroy` are called in the mentioned benchmark, we find
+that the sum of all events called is over 170,000 times per second. This means
+even adding 1 microsecond of overhead per call for any type of setup or tear
+down will result in a 17% performance loss. Granted, this is for the optimized
+scenario of the benchmark, but I believe this demonstrates the necessity for a
+mechanism such as domain to be as cheap to run as possible.
+
+
+## Looking Ahead
+
+The domain module has been soft deprecated since Dec 2014, but has not yet been
+removed because node offers no alternative functionality at the moment. As of
+this writing there is ongoing work building out the `AsyncWrap` API and a
+proposal for Zones being prepared for the TC39. Once there is suitable
+functionality to replace domains, the module will undergo the full deprecation
+cycle and eventually be removed from core.
diff --git a/locale/fa/docs/guides/dont-block-the-event-loop.md b/locale/fa/docs/guides/dont-block-the-event-loop.md new file mode 100644 index 0000000000000..9e3e2c79905e1 --- /dev/null +++ b/locale/fa/docs/guides/dont-block-the-event-loop.md @@ -0,0 +1,476 @@ +--- +title: Don't Block the Event Loop (or the Worker Pool) +layout: docs.hbs +--- + +# Don't Block the Event Loop (or the Worker Pool) + +## Should you read this guide? +If you're writing anything more complicated than a brief command-line script, reading this should help you write higher-performance, more-secure applications. + +This document is written with Node servers in mind, but the concepts apply to complex Node applications as well. +Where OS-specific details vary, this document is Linux-centric. + +## TL; DR +Node.js runs JavaScript code in the Event Loop (initialization and callbacks), and offers a Worker Pool to handle expensive tasks like file I/O. +Node scales well, sometimes better than more heavyweight approaches like Apache. +The secret to Node's scalability is that it uses a small number of threads to handle many clients. +If Node can make do with fewer threads, then it can spend more of your system's time and memory working on clients rather than on paying space and time overheads for threads (memory, context-switching). +But because Node has only a few threads, you must structure your application to use them wisely. + +Here's a good rule of thumb for keeping your Node server speedy: +*Node is fast when the work associated with each client at any given time is "small"*. + +This applies to callbacks on the Event Loop and tasks on the Worker Pool. + +## Why should I avoid blocking the Event Loop and the Worker Pool? +Node uses a small number of threads to handle many clients. +In Node there are two types of threads: one Event Loop (aka the main loop, main thread, event thread, etc.), and a pool of `k` Workers in a Worker Pool (aka the threadpool). + +If a thread is taking a long time to execute a callback (Event Loop) or a task (Worker), we call it "blocked". +While a thread is blocked working on behalf of one client, it cannot handle requests from any other clients. +This provides two motivations for blocking neither the Event Loop nor the Worker Pool: + +1. Performance: If you regularly perform heavyweight activity on either type of thread, the *throughput* (requests/second) of your server will suffer. +2. Security: If it is possible that for certain input one of your threads might block, a malicious client could submit this "evil input", make your threads block, and keep them from working on other clients. This would be a [Denial of Service](https://en.wikipedia.org/wiki/Denial-of-service_attack) attack. + +## A quick review of Node + +Node uses the Event-Driven Architecture: it has an Event Loop for orchestration and a Worker Pool for expensive tasks. + +### What code runs on the Event Loop? +When they begin, Node applications first complete an initialization phase, `require`'ing modules and registering callbacks for events. +Node applications then enter the Event Loop, responding to incoming client requests by executing the appropriate callback. +This callback executes synchronously, and may register asynchronous requests to continue processing after it completes. +The callbacks for these asynchronous requests will also be executed on the Event Loop. + +The Event Loop will also fulfill the non-blocking asynchronous requests made by its callbacks, e.g., network I/O. 
+ +In summary, the Event Loop executes the JavaScript callbacks registered for events, and is also responsible for fulfilling non-blocking asynchronous requests like network I/O. + +### What code runs on the Worker Pool? +Node's Worker Pool is implemented in libuv ([docs](http://docs.libuv.org/en/v1.x/threadpool.html)), which exposes a general task submission API. + +Node uses the Worker Pool to handle "expensive" tasks. +This includes I/O for which an operating system does not provide a non-blocking version, as well as particularly CPU-intensive tasks. + +These are the Node module APIs that make use of this Worker Pool: +1. I/O-intensive + 1. [DNS](https://nodejs.org/api/dns.html): `dns.lookup()`, `dns.lookupService()`. + 2. [File System](https://nodejs.org/api/fs.html#fs_threadpool_usage): All file system APIs except `fs.FSWatcher()` and those that are explicitly synchronous use libuv's threadpool. +2. CPU-intensive + 1. [Crypto](https://nodejs.org/api/crypto.html): `crypto.pbkdf2()`, `crypto.randomBytes()`, `crypto.randomFill()`. + 2. [Zlib](https://nodejs.org/api/zlib.html#zlib_threadpool_usage): All zlib APIs except those that are explicitly synchronous use libuv's threadpool. + +In many Node applications, these APIs are the only sources of tasks for the Worker Pool. Applications and modules that use a [C++ add-on](https://nodejs.org/api/addons.html) can submit other tasks to the Worker Pool. + +For the sake of completeness, we note that when you call one of these APIs from a callback on the Event Loop, the Event Loop pays some minor setup costs as it enters the Node C++ bindings for that API and submits a task to the Worker Pool. +These costs are negligible compared to the overall cost of the task, which is why the Event Loop is offloading it. +When submitting one of these tasks to the Worker Pool, Node provides a pointer to the corresponding C++ function in the Node C++ bindings. + +### How does Node decide what code to run next? +Abstractly, the Event Loop and the Worker Pool maintain queues for pending events and pending tasks, respectively. + +In truth, the Event Loop does not actually maintain a queue. +Instead, it has a collection of file descriptors that it asks the operating system to monitor, using a mechanism like [epoll](http://man7.org/linux/man-pages/man7/epoll.7.html) (Linux), [kqueue](https://developer.apple.com/library/content/documentation/Darwin/Conceptual/FSEvents_ProgGuide/KernelQueues/KernelQueues.html) (OSX), event ports (Solaris), or [IOCP](https://msdn.microsoft.com/en-us/library/windows/desktop/aa365198.aspx) (Windows). +These file descriptors correspond to network sockets, any files it is watching, and so on. +When the operating system says that one of these file descriptors is ready, the Event Loop translates it to the appropriate event and invokes the callback(s) associated with that event. +You can learn more about this process [here](https://www.youtube.com/watch?v=P9csgxBgaZ8). + +In contrast, the Worker Pool uses a real queue whose entries are tasks to be processed. +A Worker pops a task from this queue and works on it, and when finished the Worker raises an "At least one task is finished" event for the Event Loop. + +### What does this mean for application design? +In a one-thread-per-client system like Apache, each pending client is assigned its own thread. +If a thread handling one client blocks, the operating system will interrupt it and give another client a turn. 
+The operating system thus ensures that clients that require a small amount of work are not penalized by clients that require more work.
+
+Because Node handles many clients with few threads, if a thread blocks handling one client's request, then pending client requests may not get a turn until the thread finishes its callback or task.
+*The fair treatment of clients is thus the responsibility of your application*.
+This means that you shouldn't do too much work for any client in any single callback or task.
+
+This is part of why Node can scale well, but it also means that you are responsible for ensuring fair scheduling.
+The next sections talk about how to ensure fair scheduling for the Event Loop and for the Worker Pool.
+
+## Don't block the Event Loop
+The Event Loop notices each new client connection and orchestrates the generation of a response.
+All incoming requests and outgoing responses pass through the Event Loop.
+This means that if the Event Loop spends too long at any point, all current and new clients will not get a turn.
+
+You should make sure you never block the Event Loop.
+In other words, each of your JavaScript callbacks should complete quickly.
+This of course also applies to your `await`'s, your `Promise.then`'s, and so on.
+
+A good way to ensure this is to reason about the ["computational complexity"](https://en.wikipedia.org/wiki/Time_complexity) of your callbacks.
+If your callback takes a constant number of steps no matter what its arguments are, then you'll always give every pending client a fair turn.
+If your callback takes a different number of steps depending on its arguments, then you should think about how long the arguments might be.
+
+Example 1: A constant-time callback.
+
+```javascript
+app.get('/constant-time', (req, res) => {
+  res.sendStatus(200);
+});
+```
+
+Example 2: An `O(n)` callback. This callback will run quickly for small `n` and more slowly for large `n`.
+
+```javascript
+app.get('/countToN', (req, res) => {
+  let n = req.query.n;
+
+  // n iterations before giving someone else a turn
+  for (let i = 0; i < n; i++) {
+    console.log(`Iter ${i}`);
+  }
+
+  res.sendStatus(200);
+});
+```
+
+Example 3: An `O(n^2)` callback. This callback will still run quickly for small `n`, but for large `n` it will run much more slowly than the previous `O(n)` example.
+
+```javascript
+app.get('/countToN2', (req, res) => {
+  let n = req.query.n;
+
+  // n^2 iterations before giving someone else a turn
+  for (let i = 0; i < n; i++) {
+    for (let j = 0; j < n; j++) {
+      console.log(`Iter ${i}.${j}`);
+    }
+  }
+
+  res.sendStatus(200);
+});
+```
+
+### How careful should you be?
+Node uses the Google V8 engine for JavaScript, which is quite fast for many common operations.
+Exceptions to this rule are regexps and JSON operations, discussed below.
+
+However, for complex tasks you should consider bounding the input and rejecting inputs that are too long.
+That way, even if your callback has large complexity, by bounding the input you ensure the callback cannot take more than the worst-case time on the longest acceptable input.
+You can then evaluate the worst-case cost of this callback and determine whether its running time is acceptable in your context.
+
+### Blocking the Event Loop: REDOS
+One common way to block the Event Loop disastrously is by using a "vulnerable" [regular expression](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions).
+
+#### Avoiding vulnerable regular expressions
+A regular expression (regexp) matches an input string against a pattern.
+We usually think of a regexp match as requiring a single pass through the input string --- `O(n)` time where `n` is the length of the input string.
+In many cases, a single pass is indeed all it takes.
+Unfortunately, in some cases the regexp match might require an exponential number of trips through the input string --- `O(2^n)` time.
+An exponential number of trips means that if the engine requires `x` trips to determine a match, it will need `2*x` trips if we add only one more character to the input string.
+Since the number of trips is linearly related to the time required, the effect of this evaluation will be to block the Event Loop.
+
+A *vulnerable regular expression* is one on which your regular expression engine might take exponential time, exposing you to [REDOS](https://www.owasp.org/index.php/Regular_expression_Denial_of_Service_-_ReDoS) on "evil input".
+Whether or not your regular expression pattern is vulnerable (i.e. the regexp engine might take exponential time on it) is actually a difficult question to answer, and varies depending on whether you're using Perl, Python, Ruby, Java, JavaScript, etc., but here are some rules of thumb that apply across all of these languages:
+
+1. Avoid nested quantifiers like `(a+)*`. Node's regexp engine can handle some of these quickly, but others are vulnerable.
+2. Avoid OR's with overlapping clauses, like `(a|a)*`. Again, these are sometimes-fast.
+3. Avoid using backreferences, like `(a.*) \1`. No regexp engine can guarantee evaluating these in linear time.
+4. If you're doing a simple string match, use `indexOf` or the local equivalent. It will be cheaper and will never take more than `O(n)`.
+
+If you aren't sure whether your regular expression is vulnerable, remember that Node generally doesn't have trouble reporting a *match* even for a vulnerable regexp and a long input string.
+The exponential behavior is triggered when there is a mismatch, but Node can't be certain until it tries many paths through the input string.
+
+#### A REDOS example
+Here is an example vulnerable regexp exposing its server to REDOS:
+
+```javascript
+app.get('/redos-me', (req, res) => {
+  let filePath = req.query.filePath;
+
+  // REDOS
+  if (filePath.match(/(\/.+)+$/)) {
+    console.log('valid path');
+  }
+  else {
+    console.log('invalid path');
+  }
+
+  res.sendStatus(200);
+});
+```
+
+The vulnerable regexp in this example is a (bad!) way to check for a valid path on Linux.
+It matches strings that are a sequence of "/"-delimited names, like "/a/b/c".
+It is dangerous because it violates rule 1: it has a doubly-nested quantifier.
+
+If a client queries with filePath `///.../\n` (100 /'s followed by a newline character that the regexp's "." won't match), then the regexp match will take effectively forever, blocking the Event Loop.
+This client's REDOS attack causes all other clients not to get a turn until the regexp match finishes.
+
+For this reason, you should be leery of using complex regular expressions to validate user input.
+
+#### Anti-REDOS Resources
+There are some tools to check your regexps for safety, like
+- [safe-regex](https://github.com/substack/safe-regex)
+- [rxxr2](http://www.cs.bham.ac.uk/~hxt/research/rxxr2/)
+
+However, neither of these will catch all vulnerable regexps.
+
+Another approach is to use a different regexp engine.
+You could use the [node-re2](https://github.com/uhop/node-re2) module, which uses Google's blazing-fast [RE2](https://github.com/google/re2) regexp engine.
+But be warned, RE2 is not 100% compatible with Node's regexps, so check for regressions if you swap in the node-re2 module to handle your regexps.
+Particularly complicated regexps are not supported by node-re2.
+
+If you're trying to match something "obvious", like a URL or a file path, find an example in a [regexp library](http://www.regexlib.com) or use an npm module, e.g. [ip-regex](https://www.npmjs.com/package/ip-regex).
+
+### Blocking the Event Loop: Node core modules
+Several Node core modules have synchronous expensive APIs, including:
+- [Encryption](https://nodejs.org/api/crypto.html)
+- [Compression](https://nodejs.org/api/zlib.html)
+- [File system](https://nodejs.org/api/fs.html)
+- [Child process](https://nodejs.org/api/child_process.html)
+
+These APIs are expensive, because they involve significant computation (encryption, compression), require I/O (file I/O), or potentially both (child process). These APIs are intended for scripting convenience, but are not intended for use in the server context. If you execute them on the Event Loop, they will take far longer to complete than a typical JavaScript instruction, blocking the Event Loop.
+
+In a server, *you should not use the following synchronous APIs from these modules*:
+- Encryption:
+  - `crypto.randomBytes` (synchronous version)
+  - `crypto.randomFillSync`
+  - `crypto.pbkdf2Sync`
+  - You should also be careful about providing large input to the encryption and decryption routines.
+- Compression:
+  - `zlib.inflateSync`
+  - `zlib.deflateSync`
+- File system:
+  - Do not use the synchronous file system APIs. For example, if the file you access is in a [distributed file system](https://en.wikipedia.org/wiki/Clustered_file_system#Distributed_file_systems) like [NFS](https://en.wikipedia.org/wiki/Network_File_System), access times can vary widely.
+- Child process:
+  - `child_process.spawnSync`
+  - `child_process.execSync`
+  - `child_process.execFileSync`
+
+This list is reasonably complete as of Node v9.
+
+### Blocking the Event Loop: JSON DOS
+`JSON.parse` and `JSON.stringify` are other potentially expensive operations.
+While these are `O(n)` in the length of the input, for large `n` they can take surprisingly long.
+
+If your server manipulates JSON objects, particularly those from a client, you should be cautious about the size of the objects or strings you work with on the Event Loop.
+
+Example: JSON blocking. We create an object `obj` of size 2^21, `JSON.stringify` it, run `indexOf` on the string, and then `JSON.parse` it. The `JSON.stringify`'d string is 50MB. It takes 0.7 seconds to stringify the object, 0.03 seconds to `indexOf` on the 50MB string, and 1.3 seconds to parse the string.
+
+```javascript
+var obj = { a: 1 };
+var niter = 20;
+
+var before, str, pos, res, took;
+
+for (var i = 0; i < niter; i++) {
+  obj = { obj1: obj, obj2: obj }; // Doubles in size each iter
+}
+
+before = process.hrtime();
+str = JSON.stringify(obj);
+took = process.hrtime(before);
+console.log('JSON.stringify took ' + took);
+
+before = process.hrtime();
+pos = str.indexOf('nomatch');
+took = process.hrtime(before);
+console.log('Pure indexof took ' + took);
+
+before = process.hrtime();
+res = JSON.parse(str);
+took = process.hrtime(before);
+console.log('JSON.parse took ' + took);
+```
+
+There are npm modules that offer asynchronous JSON APIs.
See for example:
+- [JSONStream](https://www.npmjs.com/package/JSONStream), which has stream APIs.
+- [Big-Friendly JSON](https://github.com/philbooth/bfj), which has stream APIs as well as asynchronous versions of the standard JSON APIs using the partitioning-on-the-Event-Loop paradigm outlined below.
+
+### Complex calculations without blocking the Event Loop
+Suppose you want to do complex calculations in JavaScript without blocking the Event Loop.
+You have two options: partitioning or offloading.
+
+#### Partitioning
+You could *partition* your calculations so that each runs on the Event Loop but regularly yields (gives turns to) other pending events.
+In JavaScript it's easy to save the state of an ongoing task in a closure, as shown in example 2 below.
+
+For a simple example, suppose you want to compute the average of the numbers `1` to `n`.
+
+Example 1: Un-partitioned average, costs `O(n)`
+```javascript
+let sum = 0;
+for (let i = 1; i <= n; i++)
+  sum += i;
+let avg = sum / n;
+console.log('avg: ' + avg);
+```
+
+Example 2: Partitioned average, each of the `n` asynchronous steps costs `O(1)`.
+```javascript
+function asyncAvg(n, avgCB) {
+  // Save ongoing sum in JS closure.
+  var sum = 0;
+  function help(i, cb) {
+    sum += i;
+    if (i == n) {
+      cb(sum);
+      return;
+    }
+
+    // "Asynchronous recursion".
+    // Schedule next operation asynchronously.
+    setImmediate(help.bind(null, i+1, cb));
+  }
+
+  // Start the helper, with CB to call avgCB.
+  help(1, function(sum){
+    var avg = sum/n;
+    avgCB(avg);
+  });
+}
+
+asyncAvg(n, function(avg){
+  console.log('avg of 1-n: ' + avg);
+});
+```
+
+You can apply this principle to array iterations and so forth.
+
+#### Offloading
+If you need to do something more complex, partitioning is not a good option.
+This is because partitioning uses only the Event Loop, and you won't benefit from the multiple cores that are almost certainly available on your machine.
+*Remember, the Event Loop should orchestrate client requests, not fulfill them itself.*
+For a complicated task, move the work off of the Event Loop onto a Worker Pool.
+
+##### How to offload
+You have two options for a destination Worker Pool to which to offload work.
+1. You can use the built-in Node Worker Pool by developing a [C++ addon](https://nodejs.org/api/addons.html). On older versions of Node, build your C++ addon using [NAN](https://github.com/nodejs/nan), and on newer versions use [N-API](https://nodejs.org/api/n-api.html). [node-webworker-threads](https://www.npmjs.com/package/webworker-threads) offers a JavaScript-only way to access Node's Worker Pool.
+2. You can create and manage your own Worker Pool dedicated to computation rather than Node's I/O-themed Worker Pool. The most straightforward way to do this is by using [Child Process](https://nodejs.org/api/child_process.html) or [Cluster](https://nodejs.org/api/cluster.html).
+
+You should *not* simply create a [Child Process](https://nodejs.org/api/child_process.html) for every client.
+You can receive client requests more quickly than you can create and manage children, and your server might become a [fork bomb](https://en.wikipedia.org/wiki/Fork_bomb).
+
+##### Downside of offloading
+The downside of the offloading approach is that it incurs overhead in the form of *communication costs*.
+Only the Event Loop is allowed to see the "namespace" (JavaScript state) of your application.
+From a Worker, you cannot manipulate a JavaScript object in the Event Loop's namespace.
+Instead, you have to serialize and deserialize any objects you wish to share.
+Then the Worker can operate on its own copy of these object(s) and return the modified object (or a "patch") to the Event Loop.
+
+For serialization concerns, see the section on JSON DOS.
+
+##### Some suggestions for offloading
+You may wish to distinguish between CPU-intensive and I/O-intensive tasks because they have markedly different characteristics.
+
+A CPU-intensive task only makes progress when its Worker is scheduled, and the Worker must be scheduled onto one of your machine's [logical cores](https://nodejs.org/api/os.html#os_os_cpus).
+If you have 4 logical cores and 5 Workers, one of these Workers cannot make progress.
+As a result, you are paying overhead (memory and scheduling costs) for this Worker and getting no return for it.
+
+I/O-intensive tasks involve querying an external service provider (DNS, file system, etc.) and waiting for its response.
+While a Worker with an I/O-intensive task is waiting for its response, it has nothing else to do and can be de-scheduled by the operating system, giving another Worker a chance to submit its request.
+Thus, *I/O-intensive tasks will be making progress even while the associated thread is not running*.
+External service providers like databases and file systems have been highly optimized to handle many pending requests concurrently.
+For example, a file system will examine a large set of pending write and read requests to merge conflicting updates and to retrieve files in an optimal order (e.g. see [these slides](http://researcher.ibm.com/researcher/files/il-AVISHAY/01-block_io-v1.3.pdf)).
+
+If you rely on only one Worker Pool, e.g. the Node Worker Pool, then the differing characteristics of CPU-bound and I/O-bound work may harm your application's performance.
+
+For this reason, you might wish to maintain a separate Computation Worker Pool.
+
+#### Offloading: conclusions
+For simple tasks, like iterating over the elements of an arbitrarily long array, partitioning might be a good option.
+If your computation is more complex, offloading is a better approach: the communication costs, i.e. the overhead of passing serialized objects between the Event Loop and the Worker Pool, are offset by the benefit of using multiple cores.
+
+However, if your server relies heavily on complex calculations, you should think about whether Node is really a good fit. Node excels for I/O-bound work, but for expensive computation it might not be the best option.
+
+If you take the offloading approach, see the section on not blocking the Worker Pool.
+
+## Don't block the Worker Pool
+Node has a Worker Pool composed of `k` Workers.
+If you are using the Offloading paradigm discussed above, you might have a separate Computational Worker Pool, to which the same principles apply.
+In either case, let us assume that `k` is much smaller than the number of clients you might be handling concurrently.
+This is in keeping with Node's "one thread for many clients" philosophy, the secret to its scalability.
+
+As discussed above, each Worker completes its current Task before proceeding to the next one on the Worker Pool queue.
+
+Now, there will be variation in the cost of the Tasks required to handle your clients' requests.
+Some Tasks can be completed quickly (e.g. reading short or cached files, or producing a small number of random bytes), and others will take longer (e.g. reading larger or uncached files, or generating more random bytes).
+Your goal should be to *minimize the variation in Task times*, and you should use *Task partitioning* to accomplish this. + +### Minimizing the variation in Task times +If a Worker's current Task is much more expensive than other Tasks, then it will be unavailable to work on other pending Tasks. +In other words, *each relatively long Task effectively decreases the size of the Worker Pool by one until it is completed*. +This is undesirable because, up to a point, the more Workers in the Worker Pool, the greater the Worker Pool throughput (tasks/second) and thus the greater the server throughput (client requests/second). +One client with a relatively expensive Task will decrease the throughput of the Worker Pool, in turn decreasing the throughput of the server. + +To avoid this, you should try to minimize variation in the length of Tasks you submit to the Worker Pool. +While it is appropriate to treat the external systems accessed by your I/O requests (DB, FS, etc.) as black boxes, you should be aware of the relative cost of these I/O requests, and should avoid submitting requests you can expect to be particularly long. + +Two examples should illustrate the possible variation in task times. + +#### Variation example: Long-running file system reads +Suppose your server must read files in order to handle some client requests. +After consulting Node's [File system](https://nodejs.org/api/fs.html) APIs, you opted to use `fs.readFile()` for simplicity. +However, `fs.readFile()` is ([currently](https://github.com/nodejs/node/pull/17054)) not partitioned: it submits a single `fs.read()` Task spanning the entire file. +If you read shorter files for some users and longer files for others, `fs.readFile()` may introduce significant variation in Task lengths, to the detriment of Worker Pool throughput. + +For a worst-case scenario, suppose an attacker can convince your server to read an *arbitrary* file (this is a [directory traversal vulnerability](https://www.owasp.org/index.php/Path_Traversal)). +If your server is running Linux, the attacker can name an extremely slow file: [`/dev/random`](http://man7.org/linux/man-pages/man4/random.4.html). +For all practical purposes, `/dev/random` is infinitely slow, and every Worker asked to read from `/dev/random` will never finish that Task. +An attacker then submits `k` requests, one for each Worker, and no other client requests that use the Worker Pool will make progress. + +#### Variation example: Long-running crypto operations +Suppose your server generates cryptographically secure random bytes using [`crypto.randomBytes()`](https://nodejs.org/api/crypto.html#crypto_crypto_randombytes_size_callback). +`crypto.randomBytes()` is not partitioned: it creates a single `randomBytes()` Task to generate as many bytes as you requested. +If you create fewer bytes for some users and more bytes for others, `crypto.randomBytes()` is another source of variation in Task lengths. + +### Task partitioning +Tasks with variable time costs can harm the throughput of the Worker Pool. +To minimize variation in Task times, as far as possible you should *partition* each Task into comparable-cost sub-Tasks. +When each sub-Task completes it should submit the next sub-Task, and when the final sub-Task completes it should notify the submitter. + +To continue the `fs.readFile()` example, you should instead use `fs.read()` (manual partitioning) or `ReadStream` (automatically partitioned). 
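+
+As a sketch of the automatically partitioned approach (the file name is hypothetical):
+
+```javascript
+const fs = require('fs');
+
+// ReadStream reads the file in chunks, so each underlying read
+// submitted to the Worker Pool has a bounded, comparable cost.
+const stream = fs.createReadStream('/path/to/large/file');
+const chunks = [];
+stream.on('data', (chunk) => chunks.push(chunk));
+stream.on('error', (err) => console.error(err));
+stream.on('end', () => {
+  const contents = Buffer.concat(chunks);
+  // Handle contents...
+});
+```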
+ +The same principle applies to CPU-bound tasks; the `asyncAvg` example might be inappropriate for the Event Loop, but it is well suited to the Worker Pool. + +When you partition a Task into sub-Tasks, shorter Tasks expand into a small number of sub-Tasks, and longer Tasks expand into a larger number of sub-Tasks. +Between each sub-Task of a longer Task, the Worker to which it was assigned can work on a sub-Task from another, shorter, Task, thus improving the overall Task throughput of the Worker Pool. + +Note that the number of sub-Tasks completed is not a useful metric for the throughput of the Worker Pool. +Instead, concern yourself with the number of *Tasks* completed. + +### Avoiding Task partitioning +Recall that the purpose of Task partitioning is to minimize the variation in Task times. +If you can distinguish between shorter Tasks and longer Tasks (e.g. summing an array vs. sorting an array), you could create one Worker Pool for each class of Task. +Routing shorter Tasks and longer Tasks to separate Worker Pools is another way to minimize Task time variation. + +In favor of this approach, partitioning Tasks incurs overhead (the costs of creating a Worker Pool Task representation and of manipulating the Worker Pool queue), and avoiding partitioning saves you the costs of additional trips to the Worker Pool. +It also keeps you from making mistakes in partitioning your Tasks. + +The downside of this approach is that Workers in all of these Worker Pools will incur space and time overheads and will compete with each other for CPU time. +Remember that each CPU-bound Task makes progress only while it is scheduled. +As a result, you should only consider this approach after careful analysis. + +### Worker Pool: conclusions +Whether you use only the Node Worker Pool or maintain separate Worker Pool(s), you should optimize the Task throughput of your Pool(s). + +To do this, minimize the variation in Task times by using Task partitioning. + +## The risks of npm modules +While the Node core modules offer building blocks for a wide variety of applications, sometimes something more is needed. Node developers benefit tremendously from the [npm ecosystem](https://www.npmjs.com/), with hundreds of thousands of modules offering functionality to accelerate your development process. + +Remember, however, that the majority of these modules are written by third-party developers and are generally released with only best-effort guarantees. A developer using an npm module should be concerned about two things, though the latter is frequently forgotten. +1. Does it honor its APIs? +2. Might its APIs block the Event Loop or a Worker? +Many modules make no effort to indicate the cost of their APIs, to the detriment of the community. + +For simple APIs you can estimate the cost of the APIs; the cost of string manipulation isn't hard to fathom. +But in many cases it's unclear how much an API might cost. + +*If you are calling an API that might do something expensive, double-check the cost. Ask the developers to document it, or examine the source code yourself (and submit a PR documenting the cost).* + +Remember, even if the API is asynchronous, you don't know how much time it might spend on a Worker or on the Event Loop in each of its partitions. +For example, suppose in the `asyncAvg` example given above, each call to the helper function summed *half* of the numbers rather than one of them. 
+Then this function would still be asynchronous, but the cost of each partition would be `O(n)`, not `O(1)`, making it much less safe to use for arbitrary values of `n`. + +## Conclusion +Node has two types of threads: one Event Loop and `k` Workers. +The Event Loop is responsible for JavaScript callbacks and non-blocking I/O, and a Worker executes tasks corresponding to C++ code that completes an asynchronous request, including blocking I/O and CPU-intensive work. +Both types of threads work on no more than one activity at a time. +If any callback or task takes a long time, the thread running it becomes *blocked*. +If your application makes blocking callbacks or tasks, this can lead to degraded throughput (clients/second) at best, and complete denial of service at worst. + +To write a high-throughput, more DoS-proof web server, you must ensure that on benign and on malicious input, neither your Event Loop nor your Workers will block. diff --git a/locale/fa/docs/guides/event-loop-timers-and-nexttick.md b/locale/fa/docs/guides/event-loop-timers-and-nexttick.md new file mode 100644 index 0000000000000..f0896b04ee20a --- /dev/null +++ b/locale/fa/docs/guides/event-loop-timers-and-nexttick.md @@ -0,0 +1,492 @@ +--- +title: The Node.js Event Loop, Timers, and process.nextTick() +layout: docs.hbs +--- + +# The Node.js Event Loop, Timers, and `process.nextTick()` + +## What is the Event Loop? + +The event loop is what allows Node.js to perform non-blocking I/O +operations — despite the fact that JavaScript is single-threaded — by +offloading operations to the system kernel whenever possible. + +Since most modern kernels are multi-threaded, they can handle multiple +operations executing in the background. When one of these operations +completes, the kernel tells Node.js so that the appropriate callback +may be added to the **poll** queue to eventually be executed. We'll explain +this in further detail later in this topic. + +## Event Loop Explained + +When Node.js starts, it initializes the event loop, processes the +provided input script (or drops into the [REPL][], which is not covered in +this document) which may make async API calls, schedule timers, or call +`process.nextTick()`, then begins processing the event loop. + +The following diagram shows a simplified overview of the event loop's +order of operations. + +``` + ┌───────────────────────────┐ +┌─>│ timers │ +│ └─────────────┬─────────────┘ +│ ┌─────────────┴─────────────┐ +│ │ pending callbacks │ +│ └─────────────┬─────────────┘ +│ ┌─────────────┴─────────────┐ +│ │ idle, prepare │ +│ └─────────────┬─────────────┘ ┌───────────────┐ +│ ┌─────────────┴─────────────┐ │ incoming: │ +│ │ poll │<─────┤ connections, │ +│ └─────────────┬─────────────┘ │ data, etc. │ +│ ┌─────────────┴─────────────┐ └───────────────┘ +│ │ check │ +│ └─────────────┬─────────────┘ +│ ┌─────────────┴─────────────┐ +└──┤ close callbacks │ + └───────────────────────────┘ +``` + +*note: each box will be referred to as a "phase" of the event loop.* + +Each phase has a FIFO queue of callbacks to execute. While each phase is +special in its own way, generally, when the event loop enters a given +phase, it will perform any operations specific to that phase, then +execute callbacks in that phase's queue until the queue has been +exhausted or the maximum number of callbacks has executed. When the +queue has been exhausted or the callback limit is reached, the event +loop will move to the next phase, and so on. 
+
+Since any of these operations may schedule _more_ operations and new
+events processed in the **poll** phase are queued by the kernel, poll
+events can be queued while polling events are being processed. As a
+result, long-running callbacks can allow the poll phase to run much
+longer than a timer's threshold. See the [**timers**](#timers) and
+[**poll**](#poll) sections for more details.
+
+_**NOTE:** There is a slight discrepancy between the Windows and the
+Unix/Linux implementation, but that's not important for this
+demonstration. The most important parts are here. There are actually
+seven or eight steps, but the ones we care about (the ones that Node.js
+actually uses) are those above._
+
+
+## Phases Overview
+
+* **timers**: this phase executes callbacks scheduled by `setTimeout()`
+  and `setInterval()`.
+* **pending callbacks**: executes I/O callbacks deferred to the next loop
+  iteration.
+* **idle, prepare**: only used internally.
+* **poll**: retrieve new I/O events; execute I/O related callbacks (almost
+  all with the exception of close callbacks, the ones scheduled by timers,
+  and `setImmediate()`); node will block here when appropriate.
+* **check**: `setImmediate()` callbacks are invoked here.
+* **close callbacks**: some close callbacks, e.g. `socket.on('close', ...)`.
+
+Between each run of the event loop, Node.js checks if it is waiting for
+any asynchronous I/O or timers and shuts down cleanly if there are not
+any.
+
+## Phases in Detail
+
+### timers
+
+A timer specifies the **threshold** _after which_ a provided callback
+_may be executed_ rather than the **exact** time a person _wants it to
+be executed_. Timer callbacks will run as early as they can be
+scheduled after the specified amount of time has passed; however,
+Operating System scheduling or the running of other callbacks may delay
+them.
+
+_**Note**: Technically, the [**poll** phase](#poll) controls when timers
+are executed._
+
+For example, say you schedule a timeout to execute after a 100 ms
+threshold, then your script starts asynchronously reading a file which
+takes 95 ms:
+
+```js
+const fs = require('fs');
+
+function someAsyncOperation(callback) {
+  // Assume this takes 95ms to complete
+  fs.readFile('/path/to/file', callback);
+}
+
+const timeoutScheduled = Date.now();
+
+setTimeout(() => {
+  const delay = Date.now() - timeoutScheduled;
+
+  console.log(`${delay}ms have passed since I was scheduled`);
+}, 100);
+
+
+// do someAsyncOperation which takes 95 ms to complete
+someAsyncOperation(() => {
+  const startCallback = Date.now();
+
+  // do something that will take 10ms...
+  while (Date.now() - startCallback < 10) {
+    // do nothing
+  }
+});
+```
+
+When the event loop enters the **poll** phase, it has an empty queue
+(`fs.readFile()` has not completed), so it will wait for the number of ms
+remaining until the soonest timer's threshold is reached. While it is
+waiting, 95 ms pass, `fs.readFile()` finishes reading the file, and its
+callback, which takes 10 ms to complete, is added to the **poll** queue and
+executed. When the callback finishes, there are no more callbacks in the
+queue, so the event loop will see that the threshold of the soonest
+timer has been reached and then wrap back to the **timers** phase to execute
+the timer's callback. In this example, you will see that the total delay
+between the timer being scheduled and its callback being executed will
+be 105 ms.
+ +Note: To prevent the **poll** phase from starving the event loop, [libuv][] +(the C library that implements the Node.js +event loop and all of the asynchronous behaviors of the platform) +also has a hard maximum (system dependent) before it stops polling for +more events. + +### pending callbacks + +This phase executes callbacks for some system operations such as types +of TCP errors. For example if a TCP socket receives `ECONNREFUSED` when +attempting to connect, some \*nix systems want to wait to report the +error. This will be queued to execute in the **pending callbacks** phase. + +### poll + +The **poll** phase has two main functions: + +1. Calculating how long it should block and poll for I/O, then +2. Processing events in the **poll** queue. + +When the event loop enters the **poll** phase _and there are no timers +scheduled_, one of two things will happen: + +* _If the **poll** queue **is not empty**_, the event loop will iterate +through its queue of callbacks executing them synchronously until +either the queue has been exhausted, or the system-dependent hard limit +is reached. + +* _If the **poll** queue **is empty**_, one of two more things will +happen: + * If scripts have been scheduled by `setImmediate()`, the event loop + will end the **poll** phase and continue to the **check** phase to + execute those scheduled scripts. + + * If scripts **have not** been scheduled by `setImmediate()`, the + event loop will wait for callbacks to be added to the queue, then + execute them immediately. + +Once the **poll** queue is empty the event loop will check for timers +_whose time thresholds have been reached_. If one or more timers are +ready, the event loop will wrap back to the **timers** phase to execute +those timers' callbacks. + +### check + +This phase allows a person to execute callbacks immediately after the +**poll** phase has completed. If the **poll** phase becomes idle and +scripts have been queued with `setImmediate()`, the event loop may +continue to the **check** phase rather than waiting. + +`setImmediate()` is actually a special timer that runs in a separate +phase of the event loop. It uses a libuv API that schedules callbacks to +execute after the **poll** phase has completed. + +Generally, as the code is executed, the event loop will eventually hit +the **poll** phase where it will wait for an incoming connection, request, +etc. However, if a callback has been scheduled with `setImmediate()` +and the **poll** phase becomes idle, it will end and continue to the +**check** phase rather than waiting for **poll** events. + +### close callbacks + +If a socket or handle is closed abruptly (e.g. `socket.destroy()`), the +`'close'` event will be emitted in this phase. Otherwise it will be +emitted via `process.nextTick()`. + +## `setImmediate()` vs `setTimeout()` + +`setImmediate` and `setTimeout()` are similar, but behave in different +ways depending on when they are called. + +* `setImmediate()` is designed to execute a script once the current +**poll** phase completes. +* `setTimeout()` schedules a script to be run after a minimum threshold +in ms has elapsed. + +The order in which the timers are executed will vary depending on the +context in which they are called. If both are called from within the +main module, then timing will be bound by the performance of the process +(which can be impacted by other applications running on the machine). + +For example, if we run the following script which is not within an I/O +cycle (i.e. 
the main module), the order in which the two timers are
+executed is non-deterministic, as it is bound by the performance of the
+process:
+
+
+```js
+// timeout_vs_immediate.js
+setTimeout(() => {
+  console.log('timeout');
+}, 0);
+
+setImmediate(() => {
+  console.log('immediate');
+});
+```
+
+```
+$ node timeout_vs_immediate.js
+timeout
+immediate
+
+$ node timeout_vs_immediate.js
+immediate
+timeout
+```
+
+However, if you move the two calls within an I/O cycle, the immediate
+callback is always executed first:
+
+```js
+// timeout_vs_immediate.js
+const fs = require('fs');
+
+fs.readFile(__filename, () => {
+  setTimeout(() => {
+    console.log('timeout');
+  }, 0);
+  setImmediate(() => {
+    console.log('immediate');
+  });
+});
+```
+
+```
+$ node timeout_vs_immediate.js
+immediate
+timeout
+
+$ node timeout_vs_immediate.js
+immediate
+timeout
+```
+
+The main advantage to using `setImmediate()` over `setTimeout()` is
+`setImmediate()` will always be executed before any timers if scheduled
+within an I/O cycle, independently of how many timers are present.
+
+## `process.nextTick()`
+
+### Understanding `process.nextTick()`
+
+You may have noticed that `process.nextTick()` was not displayed in the
+diagram, even though it's a part of the asynchronous API. This is because
+`process.nextTick()` is not technically part of the event loop. Instead,
+the `nextTickQueue` will be processed after the current operation
+completes, regardless of the current phase of the event loop.
+
+Looking back at our diagram, any time you call `process.nextTick()` in a
+given phase, all callbacks passed to `process.nextTick()` will be
+resolved before the event loop continues. This can create some bad
+situations because **it allows you to "starve" your I/O by making
+recursive `process.nextTick()` calls**, which prevents the event loop
+from reaching the **poll** phase.
+
+### Why would that be allowed?
+
+Why would something like this be included in Node.js? Part of it is a
+design philosophy where an API should always be asynchronous even where
+it doesn't have to be. Take this code snippet for example:
+
+```js
+function apiCall(arg, callback) {
+  if (typeof arg !== 'string')
+    return process.nextTick(callback,
+                            new TypeError('argument should be string'));
+}
+```
+
+The snippet does an argument check and if it's not correct, it will pass
+the error to the callback. The API was updated fairly recently to allow
+passing arguments to `process.nextTick()`; any arguments passed after the
+callback will be propagated as the arguments to the callback, so you
+don't have to nest functions.
+
+What we're doing is passing an error back to the user but only *after*
+we have allowed the rest of the user's code to execute. By using
+`process.nextTick()` we guarantee that `apiCall()` always runs its
+callback *after* the rest of the user's code and *before* the event loop
+is allowed to proceed. To achieve this, the JS call stack is allowed to
+unwind then immediately execute the provided callback, which allows a
+person to make recursive calls to `process.nextTick()` without reaching a
+`RangeError: Maximum call stack size exceeded` from v8.
+
+This philosophy can lead to some potentially problematic situations.
+Take this snippet for example:
+
+```js
+let bar;
+
+// this has an asynchronous signature, but calls callback synchronously
+function someAsyncApiCall(callback) { callback(); }
+
+// the callback is called before `someAsyncApiCall` completes.
+someAsyncApiCall(() => {
+  // since someAsyncApiCall hasn't completed, bar hasn't been assigned any value
+  console.log('bar', bar); // undefined
+});
+
+bar = 1;
+```
+
+The user defines `someAsyncApiCall()` to have an asynchronous signature,
+but it actually operates synchronously. When it is called, the callback
+provided to `someAsyncApiCall()` is called in the same phase of the
+event loop because `someAsyncApiCall()` doesn't actually do anything
+asynchronously. As a result, the callback tries to reference `bar` even
+though it may not have that variable in scope yet, because the script has not
+been able to run to completion.
+
+By placing the callback in a `process.nextTick()`, the script still has the
+ability to run to completion, allowing all the variables, functions,
+etc., to be initialized prior to the callback being called. It also has
+the advantage of not allowing the event loop to continue. It may be
+useful for the user to be alerted to an error before the event loop is
+allowed to continue. Here is the previous example using `process.nextTick()`:
+
+```js
+let bar;
+
+function someAsyncApiCall(callback) {
+  process.nextTick(callback);
+}
+
+someAsyncApiCall(() => {
+  console.log('bar', bar); // 1
+});
+
+bar = 1;
+```
+
+Here's another real world example:
+
+```js
+const server = net.createServer(() => {}).listen(8080);
+
+server.on('listening', () => {});
+```
+
+When only a port is passed, the port is bound immediately. So, the
+`'listening'` callback could be called immediately. The problem is that the
+`.on('listening')` callback will not have been set by that time.
+
+To get around this, the `'listening'` event is queued in a `nextTick()`
+to allow the script to run to completion. This allows the user to set
+any event handlers they want.
+
+## `process.nextTick()` vs `setImmediate()`
+
+We have two calls that are similar as far as users are concerned, but
+their names are confusing.
+
+* `process.nextTick()` fires immediately on the same phase
+* `setImmediate()` fires on the following iteration or 'tick' of the
+event loop
+
+In essence, the names should be swapped. `process.nextTick()` fires more
+immediately than `setImmediate()`, but this is an artifact of the past
+which is unlikely to change. Making this switch would break a large
+percentage of the packages on npm. Every day more new modules are being
+added, which means every day we wait, more potential breakages occur.
+While they are confusing, the names themselves won't change.
+
+*We recommend developers use `setImmediate()` in all cases because it's
+easier to reason about (and it leads to code that's compatible with a
+wider variety of environments, like browser JS.)*
+
+## Why use `process.nextTick()`?
+
+There are two main reasons:
+
+1. Allow users to handle errors, clean up any then-unneeded resources, or
+perhaps try the request again before the event loop continues.
+
+2. At times it's necessary to allow a callback to run after the call
+stack has unwound but before the event loop continues.
+
+One example is to match the user's expectations. Simple example:
+
+```js
+const server = net.createServer();
+server.on('connection', (conn) => { });
+
+server.listen(8080);
+server.on('listening', () => { });
+```
+
+Say that `listen()` is run at the beginning of the event loop, but the
+listening callback is placed in a `setImmediate()`. Unless a
+hostname is passed, binding to the port will happen immediately.
For +the event loop to proceed, it must hit the **poll** phase, which means +there is a non-zero chance that a connection could have been received +allowing the connection event to be fired before the listening event. + +Another example is running a function constructor that was to, say, +inherit from `EventEmitter` and it wanted to call an event within the +constructor: + +```js +const EventEmitter = require('events'); +const util = require('util'); + +function MyEmitter() { + EventEmitter.call(this); + this.emit('event'); +} +util.inherits(MyEmitter, EventEmitter); + +const myEmitter = new MyEmitter(); +myEmitter.on('event', () => { + console.log('an event occurred!'); +}); +``` + +You can't emit an event from the constructor immediately +because the script will not have processed to the point where the user +assigns a callback to that event. So, within the constructor itself, +you can use `process.nextTick()` to set a callback to emit the event +after the constructor has finished, which provides the expected results: + +```js +const EventEmitter = require('events'); +const util = require('util'); + +function MyEmitter() { + EventEmitter.call(this); + + // use nextTick to emit the event once a handler is assigned + process.nextTick(() => { + this.emit('event'); + }); +} +util.inherits(MyEmitter, EventEmitter); + +const myEmitter = new MyEmitter(); +myEmitter.on('event', () => { + console.log('an event occurred!'); +}); +``` + +[libuv]: http://libuv.org +[REPL]: https://nodejs.org/api/repl.html#repl_repl diff --git a/locale/fa/docs/guides/getting-started-guide.md b/locale/fa/docs/guides/getting-started-guide.md new file mode 100644 index 0000000000000..a66d897400f47 --- /dev/null +++ b/locale/fa/docs/guides/getting-started-guide.md @@ -0,0 +1,28 @@ +--- +title: Getting Started Guide +layout: docs.hbs +--- + +# How do I start with Node.js after I installed it? + +Once you have installed Node, let's try building our first web server. 
+Create a file named "app.js", and paste the following code:
+
+```javascript
+const http = require('http');
+
+const hostname = '127.0.0.1';
+const port = 3000;
+
+const server = http.createServer((req, res) => {
+  res.statusCode = 200;
+  res.setHeader('Content-Type', 'text/plain');
+  res.end('Hello World\n');
+});
+
+server.listen(port, hostname, () => {
+  console.log(`Server running at http://${hostname}:${port}/`);
+});
+```
+
+After that, run your web server using `node app.js`, visit http://localhost:3000, and you will see the message 'Hello World'.
diff --git a/locale/fa/docs/guides/index.md b/locale/fa/docs/guides/index.md
new file mode 100644
index 0000000000000..2709e17e05615
--- /dev/null
+++ b/locale/fa/docs/guides/index.md
@@ -0,0 +1,32 @@
+---
+title: Guides
+layout: docs.hbs
+---
+
+# Guides
+
+## General
+
+- [Getting Started Guide](getting-started-guide/)
+- [Debugging - Getting Started](debugging-getting-started/)
+- [Easy profiling for Node.js Applications](simple-profiling/)
+- [Dockerizing a Node.js web app](nodejs-docker-webapp/)
+- [Migrating to safe Buffer constructors](buffer-constructor-deprecation/)
+
+
+## Node.js core concepts
+
+- [Overview of Blocking vs Non-Blocking](blocking-vs-non-blocking/)
+- [The Node.js Event Loop, Timers, and process.nextTick()](event-loop-timers-and-nexttick/)
+- [Don't Block the Event Loop (or the Worker Pool)](dont-block-the-event-loop/)
+- [Timers in Node.js](timers-in-node/)
+
+
+## Module-related guides
+
+- [Anatomy of an HTTP Transaction](anatomy-of-an-http-transaction/)
+- [Working with Different Filesystems](working-with-different-filesystems/)
+- [Backpressuring in Streams](backpressuring-in-streams/)
+- [Domain Module Postmortem](domain-postmortem/)
+- [How to publish N-API package](publishing-napi-modules/)
+- [ABI Stability](abi-stability/)
diff --git a/locale/fa/docs/guides/nodejs-docker-webapp.md b/locale/fa/docs/guides/nodejs-docker-webapp.md
new file mode 100644
index 0000000000000..434154a2f0535
--- /dev/null
+++ b/locale/fa/docs/guides/nodejs-docker-webapp.md
@@ -0,0 +1,274 @@
+---
+title: Dockerizing a Node.js web app
+layout: docs.hbs
+---
+
+# Dockerizing a Node.js web app
+
+The goal of this example is to show you how to get a Node.js application into a
+Docker container. The guide is intended for development, and *not* for a
+production deployment. The guide also assumes you have a working [Docker
+installation](https://docs.docker.com/engine/installation/) and a basic
+understanding of how a Node.js application is structured.
+
+In the first part of this guide we will create a simple web application in
+Node.js, then we will build a Docker image for that application, and lastly we
+will run the image as a container.
+
+Docker allows you to package an application with all of its dependencies into a
+standardized unit, called a container, for software development. A container is
+a stripped-to-basics version of a Linux operating system. An image is software
+you load into a container.
+
+## Create the Node.js app
+
+First, create a new directory where all the files would live. In this directory
+create a `package.json` file that describes your app and its dependencies:
+
+```json
+{
+  "name": "docker_web_app",
+  "version": "1.0.0",
+  "description": "Node.js on Docker",
+  "author": "First Last <first.last@example.com>",
+  "main": "server.js",
+  "scripts": {
+    "start": "node server.js"
+  },
+  "dependencies": {
+    "express": "^4.16.1"
+  }
+}
+```
+
+With your new `package.json` file, run `npm install`.
If you are using `npm`
+version 5 or later, this will generate a `package-lock.json` file which will be copied
+to your Docker image.
+
+Then, create a `server.js` file that defines a web app using the
+[Express.js](https://expressjs.com/) framework:
+
+```javascript
+'use strict';
+
+const express = require('express');
+
+// Constants
+const PORT = 8080;
+const HOST = '0.0.0.0';
+
+// App
+const app = express();
+app.get('/', (req, res) => {
+  res.send('Hello world\n');
+});
+
+app.listen(PORT, HOST);
+console.log(`Running on http://${HOST}:${PORT}`);
+```
+
+In the next steps, we'll look at how you can run this app inside a Docker
+container using the official Docker image. First, you'll need to build a Docker
+image of your app.
+
+## Creating a Dockerfile
+
+Create an empty file called `Dockerfile`:
+
+```markup
+touch Dockerfile
+```
+
+Open the `Dockerfile` in your favorite text editor.
+
+The first thing we need to do is define what image we want to build from.
+Here we will use the latest LTS (long term support) version `8` of `node`
+available from the [Docker Hub](https://hub.docker.com/):
+
+```docker
+FROM node:8
+```
+
+Next we create a directory to hold the application code inside the image; this
+will be the working directory for your application:
+
+```docker
+# Create app directory
+WORKDIR /usr/src/app
+```
+
+This image comes with Node.js and npm already installed, so the next thing we
+need to do is to install your app dependencies using the `npm` binary. Please
+note that if you are using `npm` version 4 or earlier a `package-lock.json`
+file will *not* be generated.
+
+```docker
+# Install app dependencies
+# A wildcard is used to ensure both package.json AND package-lock.json are copied
+# where available (npm@5+)
+COPY package*.json ./
+
+RUN npm install
+# If you are building your code for production
+# RUN npm install --only=production
+```
+
+Note that, rather than copying the entire working directory, we are only copying
+the `package.json` and `package-lock.json` files. This allows us to take
+advantage of cached Docker layers. bitJudo has a good explanation of this
+[here](http://bitjudo.com/blog/2014/03/13/building-efficient-dockerfiles-node-dot-js/).
+
+To bundle your app's source code inside the Docker image, use the `COPY`
+instruction:
+
+```docker
+# Bundle app source
+COPY . .
+```
+
+Your app binds to port `8080` so you'll use the `EXPOSE` instruction to have it
+mapped by the `docker` daemon:
+
+```docker
+EXPOSE 8080
+```
+
+Last but not least, define the command to run your app using `CMD` which defines
+your runtime. Here we will use the basic `npm start` which will run
+`node server.js` to start your server:
+
+```docker
+CMD [ "npm", "start" ]
+```
+
+Your `Dockerfile` should now look like this:
+
+```docker
+FROM node:8
+
+# Create app directory
+WORKDIR /usr/src/app
+
+# Install app dependencies
+# A wildcard is used to ensure both package.json AND package-lock.json are copied
+# where available (npm@5+)
+COPY package*.json ./
+
+RUN npm install
+# If you are building your code for production
+# RUN npm install --only=production
+
+# Bundle app source
+COPY . .
+
+EXPOSE 8080
+CMD [ "npm", "start" ]
+```
+
+## .dockerignore file
+
+Create a `.dockerignore` file in the same directory as your `Dockerfile`
+with the following content:
+
+```
+node_modules
+npm-debug.log
+```
+
+This will prevent your local modules and debug logs from being copied onto your
+Docker image and possibly overwriting modules installed within your image.
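+
+Depending on your project, you may want to extend this file; for example, a common (and entirely optional) addition is to exclude version-control metadata from the build context as well:
+
+```
+node_modules
+npm-debug.log
+.git
+.gitignore
+```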
+
+## Building your image
+
+Go to the directory that has your `Dockerfile` and run the following command to
+build the Docker image. The `-t` flag lets you tag your image so it's easier to
+find later using the `docker images` command:
+
+```bash
+$ docker build -t <your username>/node-web-app .
+```
+
+Your image will now be listed by Docker:
+
+```bash
+$ docker images
+
+# Example
+REPOSITORY                    TAG     ID            CREATED
+node                          8       1934b0b038d1  5 days ago
+<your username>/node-web-app  latest  d64d3505b0d2  1 minute ago
+```
+
+## Run the image
+
+Running your image with `-d` runs the container in detached mode, leaving the
+container running in the background. The `-p` flag redirects a public port to a
+private port inside the container. Run the image you previously built:
+
+```bash
+$ docker run -p 49160:8080 -d <your username>/node-web-app
+```
+
+Print the output of your app:
+
+```bash
+# Get container ID
+$ docker ps
+
+# Print app output
+$ docker logs <container id>
+
+# Example
+Running on http://localhost:8080
+```
+
+If you need to go inside the container you can use the `exec` command:
+
+```bash
+# Enter the container
+$ docker exec -it <container id> /bin/bash
+```
+
+## Test
+
+To test your app, get the port of your app that Docker mapped:
+
+```bash
+$ docker ps
+
+# Example
+ID            IMAGE                                COMMAND    ...   PORTS
+ecce33b30ebf  <your username>/node-web-app:latest  npm start  ...   49160->8080
+```
+
+In the example above, Docker mapped the `8080` port inside of the container to
+the port `49160` on your machine.
+
+Now you can call your app using `curl` (install if needed via: `sudo apt-get
+install curl`):
+
+```bash
+$ curl -i localhost:49160
+
+HTTP/1.1 200 OK
+X-Powered-By: Express
+Content-Type: text/html; charset=utf-8
+Content-Length: 12
+ETag: W/"c-M6tWOb/Y57lesdjQuHeB1P/qTV0"
+Date: Mon, 13 Nov 2017 20:53:59 GMT
+Connection: keep-alive
+
+Hello world
+```
+
+We hope this tutorial helped you get a simple Node.js application up and running
+on Docker.
+
+You can find more information about Docker and Node.js on Docker in the
+following places:
+
+* [Official Node.js Docker Image](https://hub.docker.com/_/node/)
+* [Node.js Docker Best Practices Guide](https://github.com/nodejs/docker-node/blob/master/docs/BestPractices.md)
+* [Official Docker documentation](https://docs.docker.com/)
+* [Docker Tag on Stack Overflow](https://stackoverflow.com/questions/tagged/docker)
+* [Docker Subreddit](https://reddit.com/r/docker)
diff --git a/locale/fa/docs/guides/publishing-napi-modules.md b/locale/fa/docs/guides/publishing-napi-modules.md
new file mode 100644
index 0000000000000..4e725d3b44c21
--- /dev/null
+++ b/locale/fa/docs/guides/publishing-napi-modules.md
@@ -0,0 +1,57 @@
+---
+title: How to publish N-API package
+layout: docs.hbs
+---
+
+# To publish N-API version of a package alongside a non-N-API version
+
+The following steps are illustrated using the package `iotivity-node`:
+  - First, publish the non-N-API version:
+    - Update the version in `package.json`. For `iotivity-node`, the version
+      becomes `1.2.0-2`.
+    - Go through the release checklist (ensure tests/demos/docs are OK)
+    - `npm publish`
+  - Then, publish the N-API version:
+    - Update the version in `package.json`. In the case of `iotivity-node`,
+      the version becomes `1.2.0-3`. For versioning, we recommend following
+      the pre-release version scheme as described by
+      [semver.org](http://semver.org/#spec-item-9) e.g. `1.2.0-napi`.
+    - Go through the release checklist (ensure tests/demos/docs are OK)
+    - `npm publish --tag n-api`
+
+In this example, tagging the release with `n-api` has ensured that, although
+version 1.2.0-3 is later than the non-N-API published version (1.2.0-2), it
+will not be installed if someone chooses to install `iotivity-node` by simply
+running `npm install iotivity-node`. This will install the non-N-API version
+by default. The user will have to run `npm install iotivity-node@n-api` to
+receive the N-API version. For more information on using tags with npm check
+out ["Using dist-tags"][].
+
+# To introduce a dependency on an N-API version of a package
+
+To add the N-API version of `iotivity-node` as a dependency, the `package.json`
+will look like this:
+
+```json
+"dependencies": {
+  "iotivity-node": "n-api"
+}
+```
+
+**Note:** As explained in
+["Using dist-tags"][], unlike regular versions, tagged versions cannot be
+addressed by version ranges such as `"^2.0.0"` inside `package.json`. The
+reason for this is that the tag refers to exactly one version. So, if the
+package maintainer chooses to tag a later version of the package using the
+same tag, `npm update` will receive the later version. This should be acceptable
+given the currently experimental nature of N-API. To depend on an N-API-enabled
+version other than the latest published, the `package.json` dependency will
+have to refer to the exact version like the following:
+
+```json
+"dependencies": {
+  "iotivity-node": "1.2.0-3"
+}
+```
+
+["Using dist-tags"]: https://docs.npmjs.com/getting-started/using-tags
diff --git a/locale/fa/docs/guides/simple-profiling.md b/locale/fa/docs/guides/simple-profiling.md
new file mode 100644
index 0000000000000..a980721f9dfa3
--- /dev/null
+++ b/locale/fa/docs/guides/simple-profiling.md
@@ -0,0 +1,280 @@
+---
+title: Easy profiling for Node.js Applications
+layout: docs.hbs
+---
+
+# Easy profiling for Node.js Applications
+
+There are many third party tools available for profiling Node.js applications
+but, in many cases, the easiest option is to use the Node.js built-in profiler.
+The built-in profiler uses the [profiler inside V8][] which samples the stack at
+regular intervals during program execution. It records the results of these
+samples, along with important optimization events such as jit compiles, as a
+series of ticks:
+
+```
+code-creation,LazyCompile,0,0x2d5000a337a0,396,"bp native array.js:1153:16",0x289f644df68,~
+code-creation,LazyCompile,0,0x2d5000a33940,716,"hasOwnProperty native v8natives.js:198:30",0x289f64438d0,~
+code-creation,LazyCompile,0,0x2d5000a33c20,284,"ToName native runtime.js:549:16",0x289f643bb28,~
+code-creation,Stub,2,0x2d5000a33d40,182,"DoubleToIStub"
+code-creation,Stub,2,0x2d5000a33e00,507,"NumberToStringStub"
+```
+
+In the past you needed the V8 source code to be able to interpret the ticks.
+Luckily, tools introduced in Node.js 4.4.0 facilitate
+the consumption of this information without separately building V8 from source.
+Let's see how the built-in profiler can help provide insight into application
+performance.
+
+To illustrate the use of the tick profiler, we will work with a simple Express
+application.
Our application will have two handlers, one for adding new users to
+our system:
+
+```javascript
+app.get('/newUser', (req, res) => {
+  let username = req.query.username || '';
+  const password = req.query.password || '';
+
+  username = username.replace(/[!@#$%^&*]/g, '');
+
+  if (!username || !password || users[username]) {
+    return res.sendStatus(400);
+  }
+
+  const salt = crypto.randomBytes(128).toString('base64');
+  const hash = crypto.pbkdf2Sync(password, salt, 10000, 512, 'sha512');
+
+  users[username] = { salt, hash };
+
+  res.sendStatus(200);
+});
+```
+
+and another for validating user authentication attempts:
+
+```javascript
+app.get('/auth', (req, res) => {
+  let username = req.query.username || '';
+  const password = req.query.password || '';
+
+  username = username.replace(/[!@#$%^&*]/g, '');
+
+  if (!username || !password || !users[username]) {
+    return res.sendStatus(400);
+  }
+
+  const { salt, hash } = users[username];
+  const encryptHash = crypto.pbkdf2Sync(password, salt, 10000, 512, 'sha512');
+
+  if (crypto.timingSafeEqual(hash, encryptHash)) {
+    res.sendStatus(200);
+  } else {
+    res.sendStatus(401);
+  }
+});
+```
+
+*Please note that these are NOT recommended handlers for authenticating users in
+your Node.js applications and are used purely for illustration purposes. You
+should not be trying to design your own cryptographic authentication mechanisms
+in general. It is much better to use existing, proven authentication solutions.*
+
+Now assume that we've deployed our application and users are complaining about
+high latency on requests. We can easily run the app with the built-in profiler:
+
+```
+NODE_ENV=production node --prof app.js
+```
+
+and put some load on the server using `ab` (ApacheBench):
+
+```
+curl -X GET "http://localhost:8080/newUser?username=matt&password=password"
+ab -k -c 20 -n 250 "http://localhost:8080/auth?username=matt&password=password"
+```
+
+and get an ab output of:
+
+```
+Concurrency Level: 20
+Time taken for tests: 46.932 seconds
+Complete requests: 250
+Failed requests: 0
+Keep-Alive requests: 250
+Total transferred: 50250 bytes
+HTML transferred: 500 bytes
+Requests per second: 5.33 [#/sec] (mean)
+Time per request: 3754.556 [ms] (mean)
+Time per request: 187.728 [ms] (mean, across all concurrent requests)
+Transfer rate: 1.05 [Kbytes/sec] received
+
+...
+
+Percentage of the requests served within a certain time (ms)
+ 50% 3755
+ 66% 3804
+ 75% 3818
+ 80% 3825
+ 90% 3845
+ 95% 3858
+ 98% 3874
+ 99% 3875
+ 100% 4225 (longest request)
+```
+
+From this output, we see that we're only managing to serve about 5 requests per
+second and that the average request takes just under 4 seconds round trip. In a
+real-world example, we could be doing lots of work in many functions on behalf
+of a user request but even in our simple example, time could be lost compiling
+regular expressions, generating random salts, generating unique hashes from user
+passwords, or inside the Express framework itself.
+
+Since we ran our application using the `--prof` option, a tick file was generated
+in the same directory as your local run of the application. It should have the
+form `isolate-0xnnnnnnnnnnnn-v8.log` (where `n` is a digit).
+
+In order to make sense of this file, we need to use the tick processor bundled
+with the Node.js binary.
To run the processor, use the `--prof-process` flag: + +``` +node --prof-process isolate-0xnnnnnnnnnnnn-v8.log > processed.txt +``` + +Opening processed.txt in your favorite text editor will give you a few different +types of information. The file is broken up into sections which are again broken +up by language. First, we look at the summary section and see: + +``` + [Summary]: + ticks total nonlib name + 79 0.2% 0.2% JavaScript + 36703 97.2% 99.2% C++ + 7 0.0% 0.0% GC + 767 2.0% Shared libraries + 215 0.6% Unaccounted +``` + +This tells us that 97% of all samples gathered occurred in C++ code and that +when viewing other sections of the processed output we should pay most attention +to work being done in C++ (as opposed to JavaScript). With this in mind, we next +find the [C++] section which contains information about which C++ functions are +taking the most CPU time and see: + +``` + [C++]: + ticks total nonlib name + 19557 51.8% 52.9% node::crypto::PBKDF2(v8::FunctionCallbackInfo const&) + 4510 11.9% 12.2% _sha1_block_data_order + 3165 8.4% 8.6% _malloc_zone_malloc +``` + +We see that the top 3 entries account for 72.1% of CPU time taken by the +program. From this output, we immediately see that at least 51.8% of CPU time is +taken up by a function called PBKDF2 which corresponds to our hash generation +from a user's password. However, it may not be immediately obvious how the lower +two entries factor into our application (or if it is we will pretend otherwise +for the sake of example). To better understand the relationship between these +functions, we will next look at the [Bottom up (heavy) profile] section which +provides information about the primary callers of each function. Examining this +section, we find: + +``` + ticks parent name + 19557 51.8% node::crypto::PBKDF2(v8::FunctionCallbackInfo const&) + 19557 100.0% v8::internal::Builtins::~Builtins() + 19557 100.0% LazyCompile: ~pbkdf2 crypto.js:557:16 + + 4510 11.9% _sha1_block_data_order + 4510 100.0% LazyCompile: *pbkdf2 crypto.js:557:16 + 4510 100.0% LazyCompile: *exports.pbkdf2Sync crypto.js:552:30 + + 3165 8.4% _malloc_zone_malloc + 3161 99.9% LazyCompile: *pbkdf2 crypto.js:557:16 + 3161 100.0% LazyCompile: *exports.pbkdf2Sync crypto.js:552:30 +``` + +Parsing this section takes a little more work than the raw tick counts above. +Within each of the "call stacks" above, the percentage in the parent column +tells you the percentage of samples for which the function in the row above was +called by the function in the current row. For example, in the middle "call +stack" above for _sha1_block_data_order, we see that `_sha1_block_data_order` occurred +in 11.9% of samples, which we knew from the raw counts above. However, here, we +can also tell that it was always called by the pbkdf2 function inside the +Node.js crypto module. We see that similarly, `_malloc_zone_malloc` was called +almost exclusively by the same pbkdf2 function. Thus, using the information in +this view, we can tell that our hash computation from the user's password +accounts not only for the 51.8% from above but also for all CPU time in the top +3 most sampled functions since the calls to `_sha1_block_data_order` and +`_malloc_zone_malloc` were made on behalf of the pbkdf2 function. + +At this point, it is very clear that the password based hash generation should +be the target of our optimization. 
Thankfully, you've fully internalized the
+[benefits of asynchronous programming][] and you realize that the work to
+generate a hash from the user's password is being done in a synchronous way and
+thus tying down the event loop. This prevents us from working on other incoming
+requests while computing a hash.
+
+To remedy this issue, you make a small modification to the above handlers to use
+the asynchronous version of the pbkdf2 function:
+
+```javascript
+app.get('/auth', (req, res) => {
+  let username = req.query.username || '';
+  const password = req.query.password || '';
+
+  username = username.replace(/[!@#$%^&*]/g, '');
+
+  if (!username || !password || !users[username]) {
+    return res.sendStatus(400);
+  }
+
+  // the digest ('sha512') must match the one used when the hash was created
+  crypto.pbkdf2(password, users[username].salt, 10000, 512, 'sha512', (err, hash) => {
+    if (users[username].hash.toString() === hash.toString()) {
+      res.sendStatus(200);
+    } else {
+      res.sendStatus(401);
+    }
+  });
+});
+```
+
+A new run of the ab benchmark above with the asynchronous version of your app
+yields:
+
+```
+Concurrency Level: 20
+Time taken for tests: 12.846 seconds
+Complete requests: 250
+Failed requests: 0
+Keep-Alive requests: 250
+Total transferred: 50250 bytes
+HTML transferred: 500 bytes
+Requests per second: 19.46 [#/sec] (mean)
+Time per request: 1027.689 [ms] (mean)
+Time per request: 51.384 [ms] (mean, across all concurrent requests)
+Transfer rate: 3.82 [Kbytes/sec] received
+
+...
+
+Percentage of the requests served within a certain time (ms)
+ 50% 1018
+ 66% 1035
+ 75% 1041
+ 80% 1043
+ 90% 1049
+ 95% 1063
+ 98% 1070
+ 99% 1071
+ 100% 1079 (longest request)
+```
+
+Yay! Your app is now serving about 20 requests per second, roughly 4 times more
+than it was with the synchronous hash generation. Additionally, the average
+latency is down from the 4 seconds before to just over 1 second.
+
+Hopefully, through the performance investigation of this (admittedly contrived)
+example, you've seen how the V8 tick processor can help you gain a better
+understanding of the performance of your Node.js applications.
+
+[profiler inside V8]: https://developers.google.com/v8/profiler_example
+[benefits of asynchronous programming]: https://nodesource.com/blog/why-asynchronous
diff --git a/locale/fa/docs/guides/timers-in-node.md b/locale/fa/docs/guides/timers-in-node.md
new file mode 100644
index 0000000000000..fbf191404df3e
--- /dev/null
+++ b/locale/fa/docs/guides/timers-in-node.md
@@ -0,0 +1,192 @@
+---
+title: Timers in Node.js
+layout: docs.hbs
+---
+
+# Timers in Node.js and beyond
+
+The Timers module in Node.js contains functions that execute code after a set
+period of time. Timers do not need to be imported via `require()`, since
+all the methods are available globally to emulate the browser JavaScript API.
+To fully understand when timer functions will be executed, it's a good idea to
+read up on the Node.js
+[Event Loop](/en/docs/guides/event-loop-timers-and-nexttick/).
+
+## Controlling the Time Continuum with Node.js
+
+The Node.js API provides several ways of scheduling code to execute at
+some point after the present moment. The functions below may seem familiar,
+since they are available in most browsers, but Node.js actually provides
+its own implementation of these methods. Timers integrate very closely
+with the system, and despite the fact that the API mirrors the browser
+API, there are some differences in implementation.
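+
+One difference you can observe directly, for example: in Node.js, `setTimeout()`
+returns a `Timeout` object (discussed below), whereas browsers return a numeric
+ID. A quick sketch:
+
+```js
+const t = setTimeout(() => {}, 1000);
+
+// In Node.js this prints 'object' (a Timeout instance);
+// in browser JavaScript, setTimeout() returns a number instead.
+console.log(typeof t);
+
+clearTimeout(t);
+```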
+ +### "When I say so" Execution ~ *`setTimeout()`* + +`setTimeout()` can be used to schedule code execution after a designated +amount of milliseconds. This function is similar to +[`window.setTimeout()`](https://developer.mozilla.org/en-US/docs/Web/API/WindowTimers/setTimeout) +from the browser JavaScript API, however a string of code cannot be passed +to be executed. + +`setTimeout()` accepts a function to execute as its first argument and the +millisecond delay defined as a number as the second argument. Additional +arguments may also be included and these will be passed on to the function. Here +is an example of that: + +```js +function myFunc(arg) { + console.log(`arg was => ${arg}`); +} + +setTimeout(myFunc, 1500, 'funky'); +``` + +The above function `myFunc()` will execute as close to 1500 +milliseconds (or 1.5 seconds) as possible due to the call of `setTimeout()`. + +The timeout interval that is set cannot be relied upon to execute after +that *exact* number of milliseconds. This is because other executing code that +blocks or holds onto the event loop will push the execution of the timeout +back. The *only* guarantee is that the timeout will not execute *sooner* than +the declared timeout interval. + +`setTimeout()` returns a `Timeout` object that can be used to reference the +timeout that was set. This returned object can be used to cancel the timeout ( +see `clearTimeout()` below) as well as change the execution behavior (see +`unref()` below). + +### "Right after this" Execution ~ *`setImmediate()`* + +`setImmediate()` will execute code at the end of the current event loop cycle. +This code will execute *after* any I/O operations in the current event loop and +*before* any timers scheduled for the next event loop. This code execution +could be thought of as happening "right after this", meaning any code following +the `setImmediate()` function call will execute before the `setImmediate()` +function argument. + +The first argument to `setImmediate()` will be the function to execute. Any +subsequent arguments will be passed to the function when it is executed. +Here's an example: + +```js +console.log('before immediate'); + +setImmediate((arg) => { + console.log(`executing immediate: ${arg}`); +}, 'so immediate'); + +console.log('after immediate'); +``` + +The above function passed to `setImmediate()` will execute after all runnable +code has executed, and the console output will be: + +``` +before immediate +after immediate +executing immediate: so immediate +``` + +`setImmediate()` returns an `Immediate` object, which can be used to cancel +the scheduled immediate (see `clearImmediate()` below). + +Note: Don't get `setImmediate()` confused with `process.nextTick()`. There are +some major ways they differ. The first is that `process.nextTick()` will run +*before* any `Immediate`s that are set as well as before any scheduled I/O. +The second is that `process.nextTick()` is non-clearable, meaning once +code has been scheduled to execute with `process.nextTick()`, the execution +cannot be stopped, just like with a normal function. Refer to [this guide](/en/docs/guides/event-loop-timers-and-nexttick/#process-nexttick) +to better understand the operation of `process.nextTick()`. + +### "Infinite Loop" Execution ~ *`setInterval()`* + +If there is a block of code that should execute multiple times, `setInterval()` +can be used to execute that code. 
`setInterval()` takes a function +argument that will run an infinite number of times with a given millisecond +delay as the second argument. Just like `setTimeout()`, additional arguments +can be added beyond the delay, and these will be passed on to the function call. +Also like `setTimeout()`, the delay cannot be guaranteed because of operations +that may hold on to the event loop, and therefore should be treated as an +approximate delay. See the below example: + +```js +function intervalFunc() { + console.log('Cant stop me now!'); +} + +setInterval(intervalFunc, 1500); +``` +In the above example, `intervalFunc()` will execute about every 1500 +milliseconds, or 1.5 seconds, until it is stopped (see below). + +Just like `setTimeout()`, `setInterval()` also returns a `Timeout` object which +can be used to reference and modify the interval that was set. + +## Clearing the Future + +What can be done if a `Timeout` or `Immediate` object needs to be cancelled? +`setTimeout()`, `setImmediate()`, and `setInterval()` return a timer object +that can be used to reference the set `Timeout` or `Immediate` object. +By passing said object into the respective `clear` function, execution of +that object will be halted completely. The respective functions are +`clearTimeout()`, `clearImmediate()`, and `clearInterval()`. See the example +below for an example of each: + +```js +const timeoutObj = setTimeout(() => { + console.log('timeout beyond time'); +}, 1500); + +const immediateObj = setImmediate(() => { + console.log('immediately executing immediate'); +}); + +const intervalObj = setInterval(() => { + console.log('interviewing the interval'); +}, 500); + +clearTimeout(timeoutObj); +clearImmediate(immediateObj); +clearInterval(intervalObj); +``` + +## Leaving Timeouts Behind + +Remember that `Timeout` objects are returned by `setTimeout` and `setInterval`. +The `Timeout` object provides two functions intended to augment `Timeout` +behavior with `unref()` and `ref()`. If there is a `Timeout` object scheduled +using a `set` function, `unref()` can be called on that object. This will change +the behavior slightly, and not call the `Timeout` object *if it is the last +code to execute*. The `Timeout` object will not keep the process alive, waiting +to execute. + +In similar fashion, a `Timeout` object that has had `unref()` called on it +can remove that behavior by calling `ref()` on that same `Timeout` object, +which will then ensure its execution. Be aware, however, that this does +not *exactly* restore the initial behavior for performance reasons. See +below for examples of both: + +```js +const timerObj = setTimeout(() => { + console.log('will i run?'); +}); + +// if left alone, this statement will keep the above +// timeout from running, since the timeout will be the only +// thing keeping the program from exiting +timerObj.unref(); + +// we can bring it back to life by calling ref() inside +// an immediate +setImmediate(() => { + timerObj.ref(); +}); +``` +## Further Down the Event Loop + +There's much more to the Event Loop and Timers than this guide +has covered. To learn more about the internals of the Node.js +Event Loop and how Timers operate during execution, check out +this Node.js guide: [The Node.js Event Loop, Timers, and +process.nextTick()](/en/docs/guides/event-loop-timers-and-nexttick/). 
diff --git a/locale/fa/docs/guides/working-with-different-filesystems.md b/locale/fa/docs/guides/working-with-different-filesystems.md
new file mode 100644
index 0000000000000..36494b9bb1642
--- /dev/null
+++ b/locale/fa/docs/guides/working-with-different-filesystems.md
@@ -0,0 +1,224 @@
+---
+title: Working with Different Filesystems
+layout: docs.hbs
+---
+
+# Working with Different Filesystems
+
+Node exposes many features of the filesystem. But not all filesystems are alike.
+The following are suggested best practices to keep your code simple and safe
+when working with different filesystems.
+
+## Filesystem Behavior
+
+Before you can work with a filesystem, you need to know how it behaves.
+Different filesystems behave differently and have more or fewer features than
+others: case sensitivity, case insensitivity, case preservation, Unicode form
+preservation, timestamp resolution, extended attributes, inodes, Unix
+permissions, alternate data streams, etc.
+
+Be wary of inferring filesystem behavior from `process.platform`. For example,
+do not assume that because your program is running on Darwin that you are
+therefore working on a case-insensitive filesystem (HFS+), as the user may be
+using a case-sensitive filesystem (HFSX). Similarly, do not assume that because
+your program is running on Linux that you are therefore working on a filesystem
+which supports Unix permissions and inodes, as you may be on a particular
+external drive, USB or network drive which does not.
+
+The operating system may not make it easy to infer filesystem behavior, but all
+is not lost. Instead of keeping a list of every known filesystem and behavior
+(which is always going to be incomplete), you can probe the filesystem to see
+how it actually behaves. The presence or absence of certain features which are
+easy to probe is often enough to infer the behavior of other features which
+are more difficult to probe.
+
+Remember that some users may have different filesystems mounted at various paths
+in the working tree.
+
+## Avoid a Lowest Common Denominator Approach
+
+You might be tempted to make your program act like a lowest common denominator
+filesystem, by normalizing all filenames to uppercase, normalizing all filenames
+to NFC Unicode form, and normalizing all file timestamps to say 1-second
+resolution. This would be the lowest common denominator approach.
+
+Do not do this. You would only be able to interact safely with a filesystem
+which has the exact same lowest common denominator characteristics in every
+respect. You would be unable to work with more advanced filesystems in the way
+that users expect, and you would run into filename or timestamp collisions. You
+would most certainly lose and corrupt user data through a series of complicated
+dependent events, and you would create bugs that would be difficult if not
+impossible to solve.
+
+What happens when you later need to support a filesystem that only has 2-second
+or 24-hour timestamp resolution? What happens when the Unicode standard advances
+to include a slightly different normalization algorithm (as has happened in the
+past)?
+
+A lowest common denominator approach would tend to try to create a portable
+program by using only "portable" system calls. This leads to programs that are
+leaky and not in fact portable.
+
+## Adopt a Superset Approach
+
+Make the best use of each platform you support by adopting a superset approach.
+For example, a portable backup program should sync btimes (the created time of a
+file or folder) correctly between Windows systems, and should not destroy or
+alter btimes, even though btimes are not supported on Linux systems. The same
+portable backup program should sync Unix permissions correctly between Linux
+systems, and should not destroy or alter Unix permissions, even though Unix
+permissions are not supported on Windows systems.
+
+Handle different filesystems by making your program act like a more advanced
+filesystem. Support a superset of all possible features: case-sensitivity,
+case-preservation, Unicode form sensitivity, Unicode form preservation, Unix
+permissions, high-resolution nanosecond timestamps, extended attributes, etc.
+
+Once you have case-preservation in your program, you can always implement
+case-insensitivity if you need to interact with a case-insensitive filesystem.
+But if you forego case-preservation in your program, you cannot interact safely
+with a case-preserving filesystem. The same is true for Unicode form
+preservation and timestamp resolution preservation.
+
+If a filesystem provides you with a filename in a mix of lowercase and
+uppercase, then keep the filename in the exact case given. If a filesystem
+provides you with a filename in mixed Unicode form or NFC or NFD (or NFKC or
+NFKD), then keep the filename in the exact byte sequence given. If a filesystem
+provides you with a millisecond timestamp, then keep the timestamp in
+millisecond resolution.
+
+When you work with a lesser filesystem, you can always downsample appropriately,
+with comparison functions as required by the behavior of the filesystem on which
+your program is running. If you know that the filesystem does not support Unix
+permissions, then you should not expect to read the same Unix permissions you
+write. If you know that the filesystem does not preserve case, then you should
+be prepared to see `ABC` in a directory listing when your program creates `abc`.
+But if you know that the filesystem does preserve case, then you should consider
+`ABC` to be a different filename to `abc`, when detecting file renames or if the
+filesystem is case-sensitive.
+
+## Case Preservation
+
+You may create a directory called `test/abc` and be surprised to see sometimes
+that `fs.readdir('test')` returns `['ABC']`. This is not a bug in Node. Node
+returns the filename as the filesystem stores it, and not all filesystems
+support case-preservation. Some filesystems convert all filenames to uppercase
+(or lowercase).
+
+## Unicode Form Preservation
+
+*Case preservation and Unicode form preservation are similar concepts. To
+understand why Unicode form should be preserved, make sure that you first
+understand why case should be preserved. Unicode form preservation is just as
+simple when understood correctly.*
+
+Unicode can encode the same characters using several different byte sequences.
+Several strings may look the same, but have different byte sequences. When
+working with UTF-8 strings, be careful that your expectations are in line with
+how Unicode works. Just as you would not expect all UTF-8 characters to encode
+to a single byte, you should not expect several UTF-8 strings that look the same
+to the human eye to have the same byte representation. This may be an
+expectation that you can have of ASCII, but not of UTF-8.
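+
+As a quick illustration (the strings below are chosen for the example), two
+strings that render identically can compare as unequal until both are
+normalized to the same Unicode form:
+
+```js
+// 'é' as a single precomposed code point (NFC) vs.
+// 'e' followed by a combining acute accent (NFD)
+const nfc = 'caf\u00e9';
+const nfd = 'cafe\u0301';
+
+console.log(nfc === nfd);        // false: different byte sequences
+console.log(nfc.length);         // 5
+console.log(nfd.length);         // 6
+console.log(nfc.normalize('NFC') === nfd.normalize('NFC')); // true
+```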
+ +You may create a directory called `test/café` (NFC Unicode form with byte +sequence `<63 61 66 c3 a9>` and `string.length === 5`) and be surprised to see +sometimes that `fs.readdir('test')` returns `['café']` (NFD Unicode form with +byte sequence `<63 61 66 65 cc 81>` and `string.length === 6`). This is not a +bug in Node. Node returns the filename as the filesystem stores it, and not all +filesystems support Unicode form preservation. + +HFS+, for example, will normalize all filenames to a form almost always the same +as NFD form. Do not expect HFS+ to behave the same as NTFS or EXT4 and +vice-versa. Do not try to change data permanently through normalization as a +leaky abstraction to paper over Unicode differences between filesystems. This +would create problems without solving any. Rather, preserve Unicode form and use +normalization as a comparison function only. + +## Unicode Form Insensitivity + +Unicode form insensitivity and Unicode form preservation are two different +filesystem behaviors often mistaken for each other. Just as case-insensitivity +has sometimes been incorrectly implemented by permanently normalizing filenames +to uppercase when storing and transmitting filenames, so Unicode form +insensitivity has sometimes been incorrectly implemented by permanently +normalizing filenames to a certain Unicode form (NFD in the case of HFS+) when +storing and transmitting filenames. It is possible and much better to implement +Unicode form insensitivity without sacrificing Unicode form preservation, by +using Unicode normalization for comparison only. + +## Comparing Different Unicode Forms + +Node provides `string.normalize('NFC' / 'NFD')` which you can use to normalize a +UTF-8 string to either NFC or NFD. You should never store the output from this +function but only use it as part of a comparison function to test whether two +UTF-8 strings would look the same to the user. + +You can use `string1.normalize('NFC') === string2.normalize('NFC')` or +`string1.normalize('NFD') === string2.normalize('NFD')` as your comparison +function. Which form you use does not matter. + +Normalization is fast but you may want to use a cache as input to your +comparison function to avoid normalizing the same string many times over. If the +string is not present in the cache then normalize it and cache it. Be careful +not to store or persist the cache, use it only as a cache. + +Note that using `normalize()` requires that your version of Node include ICU +(otherwise `normalize()` will just return the original string). If you download +the latest version of Node from the website then it will include ICU. + +## Timestamp Resolution + +You may set the `mtime` (the modified time) of a file to `1444291759414` +(millisecond resolution) and be surprised to see sometimes that `fs.stat` +returns the new mtime as `1444291759000` (1-second resolution) or +`1444291758000` (2-second resolution). This is not a bug in Node. Node returns +the timestamp as the filesystem stores it, and not all filesystems support +nanosecond, millisecond or 1-second timestamp resolution. Some filesystems even +have very coarse resolution for the atime timestamp in particular, e.g. 24 hours +for some FAT filesystems. + +## Do Not Corrupt Filenames and Timestamps Through Normalization + +Filenames and timestamps are user data. 
Just as you would never automatically +rewrite user file data to uppercase the data or normalize `CRLF` to `LF` +line-endings, so you should never change, interfere or corrupt filenames or +timestamps through case / Unicode form / timestamp normalization. Normalization +should only ever be used for comparison, never for altering data. + +Normalization is effectively a lossy hash code. You can use it to test for +certain kinds of equivalence (e.g. do several strings look the same even though +they have different byte sequences) but you can never use it as a substitute for +the actual data. Your program should pass on filename and timestamp data as is. + +Your program can create new data in NFC (or in any combination of Unicode form +it prefers) or with a lowercase or uppercase filename, or with a 2-second +resolution timestamp, but your program should not corrupt existing user data by +imposing case / Unicode form / timestamp normalization. Rather, adopt a superset +approach and preserve case, Unicode form and timestamp resolution in your +program. That way, you will be able to interact safely with filesystems which do +the same. + +## Use Normalization Comparison Functions Appropriately + +Make sure that you use case / Unicode form / timestamp comparison functions +appropriately. Do not use a case-insensitive filename comparison function if you +are working on a case-sensitive filesystem. Do not use a Unicode form +insensitive comparison function if you are working on a Unicode form sensitive +filesystem (e.g. NTFS and most Linux filesystems which preserve both NFC and NFD +or mixed Unicode forms). Do not compare timestamps at 2-second resolution if you +are working on a nanosecond timestamp resolution filesystem. + +## Be Prepared for Slight Differences in Comparison Functions + +Be careful that your comparison functions match those of the filesystem (or +probe the filesystem if possible to see how it would actually compare). +Case-insensitivity for example is more complex than a simple `toLowerCase()` +comparison. In fact, `toUpperCase()` is usually better than `toLowerCase()` +(since it handles certain foreign language characters differently). But better +still would be to probe the filesystem since every filesystem has its own case +comparison table baked in. + +As an example, Apple's HFS+ normalizes filenames to NFD form but this NFD form +is actually an older version of the current NFD form and may sometimes be +slightly different from the latest Unicode standard's NFD form. Do not expect +HFS+ NFD to be exactly the same as Unicode NFD all the time. diff --git a/locale/fa/docs/index.md b/locale/fa/docs/index.md new file mode 100644 index 0000000000000..7f34bcd7bdb56 --- /dev/null +++ b/locale/fa/docs/index.md @@ -0,0 +1,43 @@ +--- +title: Docs +layout: docs.hbs +labels: + lts: LTS +--- + +# About Docs + +There are several types of documentation available on this website: + +* API reference documentation +* ES6 features +* Guides + +### API Reference Documentation + +The [API reference documentation](/api/) provides detailed information about a function or object in Node.js. This documentation indicates what arguments a method accepts, the return value of that method, and what errors may be related to that method. It also indicates which methods are available for different versions of Node.js. + +This documentation describes the built-in modules provided by Node.js. It does not document modules provided by the community. + +
Looking for API docs of previous releases?
+ +### ES6 Features + +The [ES6 section](/en/docs/es6/) describes the three ES6 feature groups, and details which features are enabled by default in Node.js, alongside explanatory links. It also shows how to find which version of V8 shipped with a particular Node.js release. + +### Guides + +Long-form, in-depth articles about Node.js technical features and capabilities. diff --git a/locale/fa/docs/meta/topics/dependencies.md b/locale/fa/docs/meta/topics/dependencies.md new file mode 100644 index 0000000000000..ccde7e1e50d38 --- /dev/null +++ b/locale/fa/docs/meta/topics/dependencies.md @@ -0,0 +1,102 @@ +--- +title: Dependencies +layout: docs.hbs +--- + +# Dependencies + +There are several dependencies that Node.js relies on to work the way it does. + +- [Libraries](#libraries) + - [V8](#v8) + - [libuv](#libuv) + - [http-parser](#http-parser) + - [c-ares](#c-ares) + - [OpenSSL](#openssl) + - [zlib](#zlib) +- [Tools](#tools) + - [npm](#npm) + - [gyp](#gyp) + - [gtest](#gtest) + +## Libraries + +### V8 + +The V8 library provides Node.js with a JavaScript engine, which Node.js +controls via the V8 C++ API. V8 is maintained by Google, for use in Chrome. + +- [Documentation](https://v8docs.nodesource.com/) + +### libuv + +Another important dependency is libuv, a C library that is used to abstract +non-blocking I/O operations to a consistent interface across all supported +platforms. It provides mechanisms to handle file system, DNS, network, child +processes, pipes, signal handling, polling and streaming. It also includes a +thread pool for offloading work for some things that can't be done +asynchronously at the operating system level. + +- [Documentation](http://docs.libuv.org/) + +### http-parser + +HTTP parsing is handled by a lightweight C library called http-parser. It is +designed to not make any syscalls or allocations, so it has a very small +per-request memory footprint. + +- [Documentation](https://github.com/joyent/http-parser/) + +### c-ares + +For some asynchronous DNS requests, Node.js uses a C library called c-ares. +It is exposed through the DNS module in JavaScript as the resolve() family of +functions. The lookup() function, which is what the rest of core uses, makes +use of threaded getaddrinfo(3) calls in libuv. The reason for this is that +c-ares supports /etc/hosts, /etc/resolv.conf and /etc/svc.conf, but not things +like mDNS. + +- [Documentation](http://c-ares.haxx.se/docs.html) + +### OpenSSL + +OpenSSL is used extensively in both the `tls` and `crypto` modules. It provides +battle-tested implementations of many cryptographic functions that the modern +web relies on for security. + +- [Documentation](https://www.openssl.org/docs/) + +### zlib + +For fast compression and decompression, Node.js relies on the industry-standard +zlib library, also known for its use in gzip and libpng. Node.js uses zlib to +create sync, async and streaming compression and decompression interfaces. + +- [Documentation](http://www.zlib.net/manual.html) + +## Tools + +### npm + +Node.js is all about modularity, and with that comes the need for a quality +package manager; for this purpose, npm was made. With npm comes the largest +selection of community-created packages of any programming ecosystem, +which makes building Node.js apps quick and easy. + +- [Documentation](https://docs.npmjs.com/) + +### gyp + +The build system is handled by gyp, a python-based project generator copied +from V8. It can generate project files for use with build systems across many +platforms. 
Node.js requires a build system because large parts of it — and its +dependencies — are written in languages that require compilation. + +- [Documentation](https://gyp.gsrc.io/docs/UserDocumentation.md) + +### gtest + +Native code can be tested using gtest, which is taken from Chromium. It allows +testing C/C++ without needing an existing node executable to bootstrap from. + +- [Documentation](https://code.google.com/p/googletest/wiki/V1_7_Documentation) diff --git a/locale/fa/download/current.md b/locale/fa/download/current.md new file mode 100644 index 0000000000000..2efef82c6a90d --- /dev/null +++ b/locale/fa/download/current.md @@ -0,0 +1,34 @@ +--- +layout: download-current.hbs +title: Download +download: Download +downloads: + headline: Downloads + lts: LTS + current: Current + tagline-current: Latest Features + tagline-lts: Recommended For Most Users + display-hint: Display downloads for + intro: > + Download the Node.js source code or a pre-built installer for your platform, and start developing today. + currentVersion: Latest Current Version + buildInstructions: Building Node.js from source on supported platforms + WindowsInstaller: Windows Installer + WindowsBinary: Windows Binary + MacOSInstaller: macOS Installer + MacOSBinary: macOS Binary + LinuxBinaries: Linux Binaries + SourceCode: Source Code +additional: + headline: Additional Platforms + intro: > + Members of the Node.js community maintain unofficial builds of Node.js for additional platforms. Note that such builds are not supported by the Node.js core team and may not yet be at the same build level as current Node.js release. + platform: Platform + provider: Provider + SunOSBinaries: SunOS Binaries + DockerImage: Docker Image + officialDockerImage: Official Node.js Docker Image + LinuxPowerSystems: Linux on Power Systems + LinuxSystemZ: Linux on System z + AIXPowerSystems: AIX on Power Systems +--- diff --git a/locale/fa/download/index.md b/locale/fa/download/index.md new file mode 100644 index 0000000000000..6aea1a4fd8a8d --- /dev/null +++ b/locale/fa/download/index.md @@ -0,0 +1,34 @@ +--- +layout: download.hbs +title: Download +download: Download +downloads: + headline: Downloads + lts: LTS + current: Current + tagline-current: Latest Features + tagline-lts: Recommended For Most Users + display-hint: Display downloads for + intro: > + Download the Node.js source code or a pre-built installer for your platform, and start developing today. + currentVersion: Latest LTS Version + buildInstructions: Building Node.js from source on supported platforms + WindowsInstaller: Windows Installer + WindowsBinary: Windows Binary + MacOSInstaller: macOS Installer + MacOSBinary: macOS Binary + LinuxBinaries: Linux Binaries + SourceCode: Source Code +additional: + headline: Additional Platforms + intro: > + Members of the Node.js community maintain unofficial builds of Node.js for additional platforms. Note that such builds are not supported by the Node.js core team and may not yet be at the same build level as current Node.js release. 
+ platform: Platform + provider: Provider + SunOSBinaries: SunOS Binaries + DockerImage: Docker Image + officialDockerImage: Official Node.js Docker Image + LinuxPowerSystems: Linux on Power Systems + LinuxSystemZ: Linux on System z + AIXPowerSystems: AIX on Power Systems +--- diff --git a/locale/fa/download/package-manager.md b/locale/fa/download/package-manager.md new file mode 100644 index 0000000000000..131060da36f7d --- /dev/null +++ b/locale/fa/download/package-manager.md @@ -0,0 +1,419 @@ +--- +layout: page.hbs +title: Installing Node.js via package manager +--- + +# Installing Node.js via package manager + +***Note:*** The packages on this page are maintained and supported by their respective packagers, **not** the Node.js core team. Please report any issues you encounter to the package maintainer. If it turns out your issue is a bug in Node.js itself, the maintainer will report the issue upstream. + +---------------------------- + +* [Android](#android) +* [Arch Linux](#arch-linux) +* [Debian and Ubuntu based Linux distributions](#debian-and-ubuntu-based-linux-distributions) +* [Enterprise Linux and Fedora](#enterprise-linux-and-fedora) +* [FreeBSD](#freebsd) +* [Gentoo](#gentoo) +* [NetBSD](#netbsd) +* [nvm](#nvm) +* [OpenBSD](#openbsd) +* [openSUSE and SLE](#opensuse-and-sle) +* [macOS](#macos) +* [SmartOS and illumos](#smartos-and-illumos) +* [Void Linux](#void-linux) +* [Solus](#solus) +* [Windows](#windows) + +---------------------------- + +## Android + +Android support is still experimental in Node.js, so precompiled binaries are not yet provided by Node.js developers. + +However, there are some third-party solutions. For example, [Termux](https://termux.com/) community provides terminal emulator and Linux environment for Android, as well as own package manager and [extensive collection](https://github.com/termux/termux-packages) of many precompiled applications. This command in Termux app will install the last available Node.js version: + +```bash +pkg install nodejs +``` + +Currently, Termux Node.js binaries are compiled without Inspector support and linked against `system-icu` (depending on `libicu` package). + +## Arch Linux + +Node.js and npm packages are available in the Community Repository. + +```bash +pacman -S nodejs npm +``` + + +## Debian and Ubuntu based Linux distributions + +Also including: **Linux Mint**, **Linux Mint Debian Edition (LMDE)**, **elementaryOS**, **bash on Windows** and others. + +Node.js is available from the [NodeSource](https://nodesource.com) Debian and Ubuntu binary distributions repository (formerly [Chris Lea's](https://github.com/chrislea) Launchpad PPA). Support for this repository, along with its scripts, can be found on GitHub at [nodesource/distributions](https://github.com/nodesource/distributions). + +**NOTE:** If you are using Ubuntu Precise or Debian Wheezy, you might want to read about [running Node.js >= 6.x on older distros](https://github.com/nodesource/distributions/blob/master/OLDER_DISTROS.md). 
+ +```bash +wget -qO- https://deb.nodesource.com/setup_8.x | sudo -E bash - +sudo apt-get install -y nodejs +``` + +Alternatively, for Node.js 10: + +```bash +wget -qO- https://deb.nodesource.com/setup_10.x | sudo -E bash - +sudo apt-get install -y nodejs +``` + +***Optional***: install build tools + +To compile and install native addons from npm you may also need to install build tools: + +```bash +sudo apt-get install -y build-essential +``` + +**Available architectures:** + +* **i386** (32-bit) +* **amd64** (64-bit) +* **armhf** (ARM 32-bit hard-float, ARMv7 and up: _arm-linux-gnueabihf_) + +**Supported Ubuntu versions:** + +* **Ubuntu 14.04 LTS** (Trusty Tahr) +* **Ubuntu 16.04 LTS** (Xenial Xerus) + +**Supported Debian versions:** + +* **Debian 8** (jessie, old-stable) +* **Debian 9 / stable** (stretch) +* **Debian testing** (buster to-be-released-as-next-stable) +* **Debian unstable** (sid never-to-be-released, aka rolling) + +A Node.js package is also available in the [official repo](http://packages.debian.org/search?searchon=names&keywords=nodejs) for Debian Sid (unstable), Jessie (testing) and Wheezy (wheezy-backports) as "nodejs". It only installs a `nodejs` binary. + +The [nodejs-legacy package](http://packages.debian.org/search?searchon=names&keywords=nodejs-legacy) installs a `node` symlink that is needed by many modules to build and run correctly. +The Node.js modules available in the distribution official repositories do not need it. + +**Supported Linux Mint versions:** + +* **Linux Mint 17 "Qiana"** (via Ubuntu 14.04 LTS) +* **Linux Mint 17.1 "Rebecca"** (via Ubuntu 14.04 LTS) +* **Linux Mint 17.2 "Rafaela"** (via Ubuntu 14.04 LTS) +* **Linux Mint Debian Edition (LMDE) 2 "Betsy"** (via Debian 8) + +**Supported elementary OS versions:** + +* **elementary OS Luna** (via Ubuntu 12.04 LTS) +* **elementary OS Freya** (via Ubuntu 14.04 LTS) +* **elementary OS Loki** (via Ubuntu 16.04 LTS) +* **elementary OS Juno** (via Ubuntu 18.04 LTS) + +**Supported Trisquel versions:** + +* **Trisquel 7 "Belenos"** (via Ubuntu 14.04 LTS) + +**Supported BOSS versions:** + +* **BOSS 5.0 "Anokha"** (via Debian 7) + +## Enterprise Linux and Fedora + +Including **Red Hat® Enterprise Linux®** / **RHEL**, **CentOS** and **Fedora**. + +Node.js is available from the [NodeSource](https://nodesource.com) Enterprise Linux and Fedora binary distributions repository. Support for this repository, along with its scripts, can be found on GitHub at [nodesource/distributions](https://github.com/nodesource/distributions). + +Note that the Node.js packages for EL 5 (RHEL5 and CentOS 5) depend on the **[EPEL](https://fedoraproject.org/wiki/EPEL)** repository being available. The setup script will check and provide instructions if it is not installed. 
+ +On RHEL, CentOS or Fedora, for Node.js v8 LTS: + +```bash +curl --silent --location https://rpm.nodesource.com/setup_8.x | sudo bash - +``` + +Alternatively for Node.js 10: + +```bash +curl --silent --location https://rpm.nodesource.com/setup_10.x | sudo bash - +``` + +Then install: + +```bash +sudo yum -y install nodejs +``` + +***Optional***: install build tools + +To compile and install native addons from npm you may also need to install build tools: + +```bash +sudo yum install gcc-c++ make +# or: sudo yum groupinstall 'Development Tools' +``` + +**Available architectures:** + +* **i386** (32-bit, not available for EL7) +* **x86_64** (64-bit) + +**Supported Red Hat® Enterprise Linux® versions:** + +* **RHEL 5** (32-bit and 64-bit) +* **RHEL 6** (32-bit and 64-bit) +* **RHEL 7** (64-bit) + +**Supported CentOS versions:** + +* **CentOS 5** (32-bit and 64-bit) +* **CentOS 6** (32-bit and 64-bit) +* **CentOS 7** (64-bit) + +**Supported CloudLinux versions:** +* **CloudLinux 6** (32-bit and 64-bit) + +**Supported Fedora versions:** + +* **Fedora 21 (Twenty One)** (32-bit and 64-bit) +* **Fedora 20 (Heisenbug)** (32-bit and 64-bit) +* **Fedora 19 (Schrödinger's Cat)** (32-bit and 64-bit) + +**Other distributions known to be supported:** + +* **Oracle Linux** (mirrors RHEL very closely) +* **Amazon Linux** (tested on 2016.03) + +### Alternatives + +Official **Fedora** [Node.js](https://apps.fedoraproject.org/packages/nodejs) and [npm](https://apps.fedoraproject.org/packages/npm) packages are available in Fedora 18 and later. Install with: + +```bash +sudo dnf install nodejs +``` + +In a hurry for the latest updates? [Grab them from updates-testing.](https://fedoraproject.org/wiki/QA:Updates_Testing) + +**Enterprise Linux** (RHEL and CentOS) users may use the Node.js and npm packages from the [EPEL](https://fedoraproject.org/wiki/EPEL) repository. + +Install the appropriate *epel-release* RPM for your version (found on the [EPEL](https://fedoraproject.org/wiki/EPEL) repository homepage), then run: + +```bash +sudo yum install nodejs npm --enablerepo=epel +``` + +In a hurry for the latest updates? [Grab them from epel-testing.](https://fedoraproject.org/wiki/EPEL/testing) + +**Available architectures:** + +* **i686** (32-bit, not available for EL7) +* **x86_64** (64-bit) +* **armv6hl** (Raspberry Pi, [Pidora](http://pidora.ca) only) +* **armv7hl** (32-bit ARM hard-float, ARMv7 and up, Fedora only) + +**Supported Red Hat® Enterprise Linux® versions:** + +* **RHEL 6** (i686/x86_64) +* **RHEL 7** (aarch64/x86_64) + +RHEL 6 is no longer supported through EPEL, you can however use [Red Hat Software Collections](https://www.softwarecollections.org/en/scls/?search=nodejs). + +Additionally, versions of **CentOS** and **Scientific Linux** corresponding to the above RHEL versions are also officially supported by all EPEL packages, including nodejs. Amazon Linux is not officially supported by EPEL due to significant incompatibilities previously reported to the epel-devel mailing list, however you might find that nodejs at least still works. + +**Supported Fedora versions:** + +* **Fedora Rawhide** (i686/x86_64/armv7hl/aarch64/ppc64/ppc64le/s390x) +* **Fedora 27** (i686/x86_64/armv7hl/aarch64/ppc64/ppc64le/s390x) +* **Fedora 26** (i686/x86_64/armv7hl/aarch64/ppc64/ppc64le) + +## FreeBSD + +The most recent release of Node.js is available via the [www/node](http://freshports.org/www/node) port. 
+ +Install a binary package via [pkg](https://www.freebsd.org/cgi/man.cgi?pkg): + +```bash +pkg install node +``` + +Or compile it on your own using [ports](https://www.freebsd.org/cgi/man.cgi?ports): + +```bash +cd /usr/ports/www/node && make install +``` + +## Gentoo + +Node.js is available in the portage tree. + +```bash +emerge nodejs +``` + + +## NetBSD + +Node.js is available in the pkgsrc tree: + +```bash +cd /usr/pkgsrc/lang/nodejs && make install +``` + +Or install a binary package (if available for your platform) using pkgin: + +```bash +pkgin -y install nodejs +``` + +## nvm +Node Version Manager is a bash script used to manage multiple released Node.js versions. It allows +you to perform operations like install, uninstall, switch version, etc. +To install nvm, use this [install script](https://github.com/creationix/nvm#install-script). + +On Unix / OS X systems Node.js built from source can be installed using +[nvm](https://github.com/creationix/nvm) by installing into the location that nvm expects: + +```bash +$ env VERSION=`python tools/getnodeversion.py` make install DESTDIR=`nvm_version_path v$VERSION` PREFIX="" +``` + +After this you can use `nvm` to switch between released versions and versions +built from source. +For example, if the version of Node.js is v8.0.0-pre: + +```bash +$ nvm use 8 +``` + +Once the official release is out you will want to uninstall the version built +from source: + +```bash +$ nvm uninstall 8 +``` + +## OpenBSD + +Node.js is available through the ports system. + +```bash +/usr/ports/lang/node +``` + +Using [pkg_add](http://man.openbsd.org/OpenBSD-current/man1/pkg_add.1) on OpenBSD: + +```bash +pkg_add node +``` + +## openSUSE and SLE + +Node.js is available in the main repositories under the following packages: + +* **openSUSE Leap 42.2**: `nodejs4` +* **openSUSE Leap 42.3**: `nodejs4`, `nodejs6` +* **openSUSE Tumbleweed**: `nodejs4`, `nodejs6`, `nodejs8` +* **SUSE Linux Enterprise Server (SLES) 12**: `nodejs4`, `nodejs6` + (The "Web and Scripting Module" must be [added before installing](https://www.suse.com/documentation/sles-12/book_sle_deployment/data/sec_add-ons_extensions.html).) + +For example, to install Node.js 4.x on openSUSE Leap 42.2, run the following as root: + +```bash +zypper install nodejs4 +``` + +## macOS + +Simply download the [macOS Installer](https://nodejs.org/#download) direct from the [nodejs.org](https://nodejs.org) web site. + +_If you want to download the package with bash:_ + +```bash +curl "https://nodejs.org/dist/latest/node-${VERSION:-$(wget -qO- https://nodejs.org/dist/latest/ | sed -nE 's|.*>node-(.*)\.pkg.*|\1|p')}.pkg" > "$HOME/Downloads/node-latest.pkg" && sudo installer -store -pkg "$HOME/Downloads/node-latest.pkg" -target "/" +``` + +### Alternatives + +Using **[Homebrew](http://brew.sh/)**: + +```bash +brew install node +``` + +Using **[MacPorts](http://www.macports.org/)**: + +```bash +port install nodejs + +# Example +port install nodejs7 +``` + +Using **[pkgsrc](https://pkgsrc.joyent.com/install-on-osx/)**: + +Install the binary package: + +```bash +pkgin -y install nodejs +``` + +Or build manually from pkgsrc: + +```bash +cd pkgsrc/lang/nodejs && bmake install +``` + +## SmartOS and illumos + +SmartOS images come with pkgsrc pre-installed. 
On other illumos distributions, first install **[pkgsrc](https://pkgsrc.joyent.com/install-on-illumos/)**, then you may install the binary package as normal: + +```bash +pkgin -y install nodejs +``` + +Or build manually from pkgsrc: + +```bash +cd pkgsrc/lang/nodejs && bmake install +``` + + +## Void Linux + +Void Linux ships node.js stable in the main repository. + +```bash +xbps-install -Sy nodejs +``` + +## Solus + +Solus provides node.js in its main repository. + +```bash +sudo eopkg install nodejs +``` + + +## Windows + +Simply download the [Windows Installer](https://nodejs.org/#download) directly from the [nodejs.org](https://nodejs.org) web site. + +### Alternatives + +Using **[Chocolatey](http://chocolatey.org)**: + +```bash +cinst nodejs +# or for full install with npm +cinst nodejs.install +``` + +Using **[Scoop](http://scoop.sh/)**: + +```bash +scoop install nodejs +``` diff --git a/locale/fa/download/releases.md b/locale/fa/download/releases.md new file mode 100644 index 0000000000000..543f9837eaf48 --- /dev/null +++ b/locale/fa/download/releases.md @@ -0,0 +1,21 @@ +--- +layout: download-releases.hbs +title: Previous Releases +modules: "NODE_MODULE_VERSION refers to the ABI (application binary interface) version number of Node.js, used to determine which versions of Node.js compiled C++ add-on binaries can be loaded in to without needing to be re-compiled. It used to be stored as hex value in earlier versions, but is now represented as an integer." +--- + +### io.js & Node.js +Releases 1.x through 3.x were called "io.js" as they were part of the io.js fork. As of Node.js 4.0.0 the former release lines of io.js converged with Node.js 0.12.x into unified Node.js releases. + +
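If you need to know which `NODE_MODULE_VERSION` your installed binary reports, you can print it from the standard `process.versions` object:

```bash
node -p process.versions.modules
# prints the ABI version of the running binary, e.g. 57 on Node.js 8.x
```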
Looking for latest release of a version branch?
diff --git a/locale/fa/get-involved/code-and-learn.md b/locale/fa/get-involved/code-and-learn.md new file mode 100644 index 0000000000000..1c9c4bf10ca9b --- /dev/null +++ b/locale/fa/get-involved/code-and-learn.md @@ -0,0 +1,24 @@ +--- +title: Code + Learn +layout: contribute.hbs +--- + +# Code + Learn + +Code & Learn events allow you to get started (or go further) with Node.js core contributions. Experienced contributors help guide you through your first (or second or third or fourth) commit to Node.js core. They also are available to provide impromptu guided tours through specific areas of Node.js core source code. + +## Upcoming Code + Learn Events + +- Vancouver, BC at [Node Interactive](https://events.linuxfoundation.org/events/node-js-interactive-2018/): October 12, 2018 +- Kilkenny, Ireland at [NodeConfEU](https://www.nodeconf.eu/): November 4, 2018 + +## Past Code + Learn Events + +- [Oakland on April 22, 2017](https://medium.com/the-node-js-collection/code-learn-learn-how-to-contribute-to-node-js-core-8a2dbdf9be45) +- Shanghai at JSConf.CN: July 2017 +- Vancouver, BC at [Node Interactive](http://events.linuxfoundation.org/events/node-interactive): October 6, 2017 +- Kilkenny, Ireland at [NodeConfEU](http://www.nodeconf.eu/): November 5, 2017 +- Austin in December 2016 +- Tokyo in November 2016 +- Amsterdam in September 2016 +- Dublin and London in September 2015 diff --git a/locale/fa/get-involved/collab-summit.md b/locale/fa/get-involved/collab-summit.md new file mode 100644 index 0000000000000..bc7ab92e163fa --- /dev/null +++ b/locale/fa/get-involved/collab-summit.md @@ -0,0 +1,29 @@ +--- +title: Collab Summit +layout: contribute.hbs +--- + +# Collab Summit +Collaboration Summit is an un-conference for bringing current and +potential contributors together to discuss Node.js with lively collaboration, +education, and knowledge sharing. Committees and working groups come together +twice per year to make important decisions while also being able to work on some +exciting efforts they want to push forward in-person. + +## Who attends? + +Anyone is welcome to attend Collab Summit. During the +summit, leaders will help onboard new contributors to groups they'd love to help +prior to integrating them into the working sessions. + +This is your opportunity to learn what is happening within the community to jump +in and contribute with the skills you have and would like to hone. + +Working groups will put together a schedule so that people can +familiarize themselves before folks get onsite, having the general collaborator +discussions, and then dive into breakout sessions. + +We'd love to see you at Collab Summit! Check out the [Summit repo](https://github.com/nodejs/summit) +for upcoming and past Collab Summits and have a look at the +[issues filed](https://github.com/nodejs/summit/issues) that share what +individual working groups and committees are looking to discuss in-person. diff --git a/locale/fa/get-involved/contribute.md b/locale/fa/get-involved/contribute.md new file mode 100644 index 0000000000000..04faa28cec2cc --- /dev/null +++ b/locale/fa/get-involved/contribute.md @@ -0,0 +1,47 @@ +--- +title: Contributing +layout: contribute.hbs +--- + +# Contributing + +Thank you for your interest in contributing to Node.js! There are multiple ways and places you can contribute, and we're here to help facilitate that. 
+ +## Asking for General Help + +Because the level of activity in the `nodejs/node` repository is so high, questions or requests for general help using Node.js should be directed at the [Node.js help repository](https://github.com/nodejs/help/issues). + +## Reporting an Issue + +If you have found what you believe to be an issue with Node.js please do not hesitate to file an issue on the GitHub project. When filing your issue please make sure you can express the issue with a reproducible test case, and that test case should not include any external dependencies. That is to say, the test case can be executed without anything more than Node.js itself. + +When reporting an issue we also need as much information about your environment that you can include. We never know what information will be pertinent when trying narrow down the issue. Please include at least the following information: + +* Version of Node +* Platform you're running on (macOS, SunOS, Linux, Windows) +* Architecture you're running on (32bit or 64bit and x86 or ARM) + +The Node.js project is currently managed across a number of separate GitHub repositories, each with their own separate issues database. If possible, please direct any issues you are reporting to the appropriate repository but don't worry if things happen to get put in the wrong place, the community of contributors will be more than happy to help get you pointed in the right direction. + +* To report issues specific to Node.js, please use [nodejs/node](https://github.com/nodejs/node) +* To report issues specific to this website, please use [nodejs/nodejs.org](https://github.com/nodejs/nodejs.org/issues) + +## Code contributions + +If you'd like to fix bugs or add a new feature to Node.js, please make sure you consult the [Node.js Contribution Guidelines](https://github.com/nodejs/node/blob/master/CONTRIBUTING.md#pull-requests). The review process by existing collaborators for all contributions to the project is explained there as well. + +If you are wondering how to start, you can check [Node Todo](http://nodetodo.org/) which may guide you towards your first contribution. + +## Becoming a collaborator + +By becoming a collaborator, contributors can have even more impact on the project. They can help other contributors by reviewing their contributions, triage issues and take an even bigger part in shaping the project's future. Individuals identified by the TSC as making significant and valuable contributions across any Node.js repository may be made Collaborators and given commit access to the project. Activities taken into consideration include (but are not limited to) the quality of: + +* code commits and pull requests +* documentation commits and pull requests +* comments on issues and pull requests +* contributions to the Node.js website +* assistance provided to end users and novice contributors +* participation in Working Groups +* other participation in the wider Node.js community + +If individuals making valuable contributions do not believe they have been considered for commit access, they may [log an issue](https://github.com/nodejs/TSC/issues) or [contact a TSC member](https://github.com/nodejs/TSC#current-members) directly. diff --git a/locale/fa/get-involved/index.md b/locale/fa/get-involved/index.md new file mode 100644 index 0000000000000..b7ef8e1e2fd44 --- /dev/null +++ b/locale/fa/get-involved/index.md @@ -0,0 +1,43 @@ +--- +title: Get involved +layout: contribute.hbs +--- + +# We encourage all kinds of contribution from the community. 
+ +The Node.js community is large, inclusive, and excited to enable as many users to contribute in whatever way they can. If you want to [report an issue](https://github.com/nodejs/node/issues), [help with documentation or contribute to the code base](/en/get-involved/contribute/) of the project, you’ve come to the right place. Explore our community resources to find out how you can help: + +## Community Discussion + +- The [GitHub issues list](https://github.com/nodejs/node/issues) is the place for discussion of Node.js core features. +- For real-time chat about Node development go to `irc.freenode.net` in the `#node.js` channel with an [IRC client](http://en.wikipedia.org/wiki/Comparison_of_Internet_Relay_Chat_clients) or connect in your web browser to the channel using [freenode's WebChat](http://webchat.freenode.net/?channels=node.js). +- The official Node.js Twitter account is [nodejs](https://twitter.com/nodejs). +- [Node.js Everywhere](https://newsletter.nodejs.org) is the official Node.js Monthly Newsletter. +- [Node.js Collection](https://medium.com/the-node-js-collection) is a collection of community-curated content on Medium. +- [NodeUp](http://nodeup.com) is a podcast covering the latest Node news in the community. +- The [Community Committee](https://github.com/nodejs/community-committee) is a top-level committee in the Node.js Foundation focused on community-facing efforts. + + +## Learning + +- [Official API reference documentation](/api) details the Node API. +- [NodeSchool.io](http://nodeschool.io) will teach you Node.js concepts via interactive command-line games. +- [Stack Overflow Node.js tag](http://stackoverflow.com/questions/tagged/node.js) collects new information every day. +- [The DEV Community Node.js tag](https://dev.to/t/node) is a place to share Node.js projects, articles and tutorials as well as start discussions and ask for feedback on Node.js-related topics. Developers of all skill-levels are welcome to take part. +- [Nodeiflux](https://discordapp.com/invite/vUsrbjd) is a friendly community of Node backend developers supporting each other on Discord. +- [How To Node](http://howtonode.org/) has a growing number of useful tutorials. + +## International community sites and projects + +- [Australian Node.js blog & developers listing](http://nodejs.org.au/) +- [Chinese community](http://cnodejs.org) +- [French Google+ Community of Node.js users](https://plus.google.com/communities/113346206415381691435) +- [Hungarian (Magyar) community](http://nodehun.blogspot.com/) +- [Iranian group in Persian](http://nodejs.ir) +- [Israeli Facebook group for Node.js](https://www.facebook.com/groups/node.il/) +- [Japanese user group](http://nodejs.jp/) +- [Korean Node.js community](http://nodejs.github.io/nodejs-ko/) +- [Nicaragua Node.js community](http://nodenica.com/) +- [Spanish language Facebook group for Node.js](https://www.facebook.com/groups/node.es/) +- [Spanish language community](http://nodehispano.com) +- [Việt Nam Node.js community](http://nodejs.vn) diff --git a/locale/fa/index.md b/locale/fa/index.md new file mode 100644 index 0000000000000..06146f19f2f35 --- /dev/null +++ b/locale/fa/index.md @@ -0,0 +1,24 @@ +--- +layout: index.hbs +labels: + banner: انتشار امنیتی مهم! لطفا بروزرسانی نمایید. 
+ current-version: نگارش کنونی + download: دانلود + download-for: دانلود برای + other-downloads: سایر دانلود‌ها + other-lts-downloads: سایر دانلود‌های با پشتیبانی طولانی + other-current-downloads: سایر دانلودهای جاری + current: جاری + lts: پشتیبانی طولانی مدت + tagline-current: آخرین ویژگی‌ها + tagline-lts: پیشنهاد شده برای بیشتر کاربران + changelog: گزارش تغییرات + api: مستندات API + version-schedule-prompt: یا نگاهی بیاندازید به + version-schedule-prompt-link-text: زمان بندی پشتیبانی طولانی مدت + newsletter: درست + newsletter-prefix: نام‌نویسی برای + newsletter-postfix: "، خبرنامهٔ رسمی NodeJs.org" +--- + +Node.js® is a JavaScript runtime built on [Chrome's V8 JavaScript engine](https://developers.google.com/v8/). diff --git a/locale/fa/knowledge/HTTP/clients/how-to-access-query-string-parameters.md b/locale/fa/knowledge/HTTP/clients/how-to-access-query-string-parameters.md new file mode 100644 index 0000000000000..8538a2874e09a --- /dev/null +++ b/locale/fa/knowledge/HTTP/clients/how-to-access-query-string-parameters.md @@ -0,0 +1,26 @@ +--- +title: How to access query string parameters +date: '2011-08-26T10:08:50.000Z' +tags: + - http +difficulty: 1 +layout: knowledge-post.hbs +--- + +In Node.js, functionality to aid in the accessing of URL query string parameters is built into the standard library. The built-in `url.parse` method takes care of most of the heavy lifting for us. Here is an example script using this handy function and an explanation on how it works: + + var fs = require('fs'); + var http = require('http'); + var url = require('url') ; + + http.createServer(function (req, res) { + var queryObject = url.parse(req.url,true).query; + console.log(queryObject); + + res.writeHead(200); + res.end('Feel free to add query parameters to the end of the url'); + }).listen(8080); + +The key part of this whole script is this line: `var queryObject = url.parse(req.url,true).query;`. Let's take a look at things from the inside-out. First off, `req.url` will look like `/app.js?foo=bad&baz=foo`. This is the part that is in the URL bar of the browser. Next, it gets passed to `url.parse` which parses out the various elements of the URL (NOTE: the second paramater is a boolean stating whether the method should parse the query string, so we set it to true). Finally, we access the `.query` property, which returns us a nice, friendly JavaScript object with our query string data. + + diff --git a/locale/fa/knowledge/HTTP/clients/how-to-create-a-HTTP-request.md b/locale/fa/knowledge/HTTP/clients/how-to-create-a-HTTP-request.md new file mode 100644 index 0000000000000..dd4810a13d865 --- /dev/null +++ b/locale/fa/knowledge/HTTP/clients/how-to-create-a-HTTP-request.md @@ -0,0 +1,95 @@ +--- +title: How do I make a http request? +date: '2011-08-26T10:08:50.000Z' +tags: + - core + - http +difficulty: 2 +layout: knowledge-post.hbs +--- + + +Another extremely common programming task is making an HTTP request to a web server. Node.js provides an extremely simple API for this functionality in the form of `http.request`. + +As an example, we are going to preform a GET request to [www.random.org/integers/?num=1&min=1&max=10&col=1&base=10&format=plain&rnd=new](www.random.org/integers/?num=1&min=1&max=10&col=1&base=10&format=plain&rnd=new) (which returns a random integer between 1 and 10) and print the result to the console. 
+ + var http = require('http'); + + //The url we want is: 'www.random.org/integers/?num=1&min=1&max=10&col=1&base=10&format=plain&rnd=new' + var options = { + host: 'www.random.org', + path: '/integers/?num=1&min=1&max=10&col=1&base=10&format=plain&rnd=new' + }; + + callback = function(response) { + var str = ''; + + //another chunk of data has been received, so append it to `str` + response.on('data', function (chunk) { + str += chunk; + }); + + //the whole response has been received, so we just print it out here + response.on('end', function () { + console.log(str); + }); + } + + http.request(options, callback).end(); + + +Making a POST request is just as easy. We will make a POST request to `www.nodejitsu.com:1337` which is running a server that will echo back what we post. The code for making a POST request is almost identical to making a GET request, just a few simple modifications: + + var http = require('http'); + + //The url we want is `www.nodejitsu.com:1337/` + var options = { + host: 'www.nodejitsu.com', + path: '/', + //since we are listening on a custom port, we need to specify it by hand + port: '1337', + //This is what changes the request to a POST request + method: 'POST' + }; + + callback = function(response) { + var str = '' + response.on('data', function (chunk) { + str += chunk; + }); + + response.on('end', function () { + console.log(str); + }); + } + + var req = http.request(options, callback); + //This is the data we are posting, it needs to be a string or a buffer + req.write("hello world!"); + req.end(); + +Throwing in custom headers is just a tiny bit harder. On `www.nodejitsu.com:1338` we are running a server that will print out the `custom` header. So we will just make a quick request to it: + + var http = require('http'); + + var options = { + host: 'www.nodejitsu.com', + path: '/', + port: '1338', + //This is the only line that is new. `headers` is an object with the headers to request + headers: {'custom': 'Custom Header Demo works'} + }; + + callback = function(response) { + var str = '' + response.on('data', function (chunk) { + str += chunk; + }); + + response.on('end', function () { + console.log(str); + }); + } + + var req = http.request(options, callback); + req.end(); diff --git a/locale/fa/knowledge/HTTP/servers/how-to-create-a-HTTP-server.md b/locale/fa/knowledge/HTTP/servers/how-to-create-a-HTTP-server.md new file mode 100644 index 0000000000000..b2e33c71a295b --- /dev/null +++ b/locale/fa/knowledge/HTTP/servers/how-to-create-a-HTTP-server.md @@ -0,0 +1,34 @@ +--- +title: How do I create a HTTP server? +date: '2011-08-26T10:08:50.000Z' +tags: + - http +difficulty: 1 +layout: knowledge-post.hbs +--- + + +Making a simple HTTP server in Node.js has become the de facto 'hello world' for the platform. On the one hand, Node.js provides extremely easy-to-use HTTP APIs; on the other hand, a simple web server also serves as an excellent demonstration of Node's asynchronous strengths. + +Let's take a look at a very simple example: + + var http = require('http'); + var requestListener = function (req, res) { + res.writeHead(200); + res.end('Hello, World!\n'); + } + + var server = http.createServer(requestListener); + server.listen(8080); + +Save this in a file called `server.js` - run `node server.js`, and your program will hang there... it's waiting for connections to respond to, so you'll have to give it one if you want to see it do anything. Try opening up a browser, and typing `localhost:8080` into the location bar. 
If everything has been set up correctly, you should see your server saying hello! + +Let's take a more in-depth look at what the above code is doing. First, a function is defined called `requestListener` that takes a request object and a response object as parameters. + +The request object contains things such as the requested URL, but in this example we ignore it and always return "Hello World". + +The response object is how we send the headers and contents of the response back to the user making the request. Here we return a 200 response code (signaling a successful response) with the body "Hello World". Other headers, such as `Content-type`, would also be set here. + +Next, the `http.createServer` method creates a server that calls `requestListener` whenever a request comes in. The next line, `server.listen(8080)`, calls the `listen` method, which causes the server to wait for incoming requests on the specified port - 8080, in this case. + +There you have it - your most basic Node.js HTTP server. diff --git a/locale/fa/knowledge/HTTP/servers/how-to-create-a-HTTPS-server.md b/locale/fa/knowledge/HTTP/servers/how-to-create-a-HTTPS-server.md new file mode 100644 index 0000000000000..440e3095902b2 --- /dev/null +++ b/locale/fa/knowledge/HTTP/servers/how-to-create-a-HTTPS-server.md @@ -0,0 +1,47 @@ +--- +title: How to create an https server? +date: '2011-08-26T10:08:50.000Z' +tags: + - https +difficulty: 1 +layout: knowledge-post.hbs +--- + +*If you're using [Nodejitsu](http://nodejitsu.com)*, we handle HTTPS for you. Free SSL on jit.su and nodejitsu.com subdomains, and SSL on custom domains for business customers. +*It's never necessary to create an HTTPS server yourself.* + +- - - + +To create an HTTPS server, you need two things: an SSL certificate, and Node's built-in `https` module. + +We need to start out with a word about SSL certificates. Speaking generally, there are two kinds of certificates: those signed by a 'Certificate Authority', or CA, and 'self-signed certificates'. A Certificate Authority is a trusted source for an SSL certificate, and using a certificate from a CA allows your users to be trust the identity of your website. In most cases, you would want to use a CA-signed certificate in a production environment - for testing purposes, however, a self-signed certicate will do just fine. + +To generate a self-signed certificate, run the following in your shell: + + openssl genrsa -out key.pem + openssl req -new -key key.pem -out csr.pem + openssl x509 -req -days 9999 -in csr.pem -signkey key.pem -out cert.pem + rm csr.pem + +This should leave you with two files, `cert.pem` (the certificate) and `key.pem` (the private key). This is all you need for a SSL connection. So now you set up a quick hello world example (the biggest difference between https and [http](/how-do-i-create-a-http-server) is the `options` parameter): + + var https = require('https'); + var fs = require('fs'); + + var options = { + key: fs.readFileSync('key.pem'), + cert: fs.readFileSync('cert.pem') + }; + + var a = https.createServer(options, function (req, res) { + res.writeHead(200); + res.end("hello world\n"); + }).listen(8000); + +NODE PRO TIP: Note `fs.readFileSync` - unlike `fs.readFile`, `fs.readFileSync` will block the entire process until it completes. In situations like this - loading vital configuration data - the `sync` functions are okay. In a busy server, however, using a synchronous function during a request will force the server to deal with the requests one by one! 
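As a sketch of the fully non-blocking alternative (same `key.pem` and `cert.pem` as above), you could read both files asynchronously and start the server only once they are loaded:

    var https = require('https');
    var fs = require('fs');

    // Read the key and certificate in the background, then start the server.
    fs.readFile('key.pem', function (err, key) {
      if (err) throw err;
      fs.readFile('cert.pem', function (err, cert) {
        if (err) throw err;
        https.createServer({ key: key, cert: cert }, function (req, res) {
          res.writeHead(200);
          res.end("hello world\n");
        }).listen(8000);
      });
    });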
+ +Now that your server is set up and started, you should be able to get the file with curl: + + curl -k https://localhost:8000 + +or in your browser, by going to https://localhost:8000 . diff --git a/locale/fa/knowledge/HTTP/servers/how-to-handle-multipart-form-data.md b/locale/fa/knowledge/HTTP/servers/how-to-handle-multipart-form-data.md new file mode 100644 index 0000000000000..9cf3d2e53f10a --- /dev/null +++ b/locale/fa/knowledge/HTTP/servers/how-to-handle-multipart-form-data.md @@ -0,0 +1,63 @@ +--- +title: How to handle multipart form data +date: '2011-09-09T10:08:50.000Z' +tags: + - http + - forms + - multipart + - uploads +difficulty: 3 +layout: knowledge-post.hbs +--- + +Handling form data and file uploads properly is an important and complex problem in HTTP servers. Doing it by hand would involve parsing streaming binary data, writing it to the file system, parsing out other form data, and several other complex concerns - luckily, only a very few people will need to worry about it on that deep level. Felix Geisendorfer, one of the Node.js core committers, wrote a library called `node-formidable` that handles all the hard parts for you. With its friendly API, you can be parsing forms and receiving file uploads in no time. + +This example is taken directly from the `node-formidable` GitHub page, with some additional explanation added. + + var formidable = require('formidable'), + http = require('http'), + util = require('util'); + + http.createServer(function(req, res) { + + // This if statement is here to catch form submissions, and initiate multipart form data parsing. + + if (req.url == '/upload' && req.method.toLowerCase() == 'post') { + + // Instantiate a new formidable form for processing. + + var form = new formidable.IncomingForm(); + + // form.parse analyzes the incoming stream data, picking apart the different fields and files for you. + + form.parse(req, function(err, fields, files) { + if (err) { + + // Check for and handle any errors here. + + console.error(err.message); + return; + } + res.writeHead(200, {'content-type': 'text/plain'}); + res.write('received upload:\n\n'); + + // This last line responds to the form submission with a list of the parsed data and files. + + res.end(util.inspect({fields: fields, files: files})); + }); + return; + } + + // If this is a regular request, and not a form submission, then send the form. + + res.writeHead(200, {'content-type': 'text/html'}); + res.end( + '
form action="/upload" enctype="multipart/form-data" method="post">'+
        '<input type="text" name="title"><br>'+
        '<input type="file" name="upload" multiple="multiple"><br>'+
        '<input type="submit" value="Upload">'+
        '</form>
' + ); + }).listen(8080); + +Try it out for yourself - it's definitely the simpler solution, and `node-formidable` is a battle-hardened, production-ready library. Let userland solve problems like this for you, so that you can get back to writing the rest of your code! \ No newline at end of file diff --git a/locale/fa/knowledge/HTTP/servers/how-to-read-POST-data.md b/locale/fa/knowledge/HTTP/servers/how-to-read-POST-data.md new file mode 100644 index 0000000000000..ab9342c1999fe --- /dev/null +++ b/locale/fa/knowledge/HTTP/servers/how-to-read-POST-data.md @@ -0,0 +1,44 @@ +--- +title: How can I read POST data? +date: '2011-08-26T10:08:50.000Z' +tags: + - http +difficulty: 1 +layout: knowledge-post.hbs +--- + +Reading the data from a POST request (i.e. a form submission) can be a little bit of a pitfall in Node.js, so we're going to go through an example of how to do it properly. The first step, obviously, is to listen for incoming data - the trick is to wait for the data to finish, so that you can process all the form data without losing anything. + +Here is a quick script that shows you how to do exactly that: + + var http = require('http'); + var postHTML = + 'Post Example' + + '' + + '
form method="post">' +
      'Input 1: <input name="input1"><br>' +
      'Input 2: <input name="input2"><br>' +
      '<input type="submit">' +
      '</form>
' + + ''; + + http.createServer(function (req, res) { + var body = ""; + req.on('data', function (chunk) { + body += chunk; + }); + req.on('end', function () { + console.log('POSTed: ' + body); + res.writeHead(200); + res.end(postHTML); + }); + }).listen(8080); + +The variable `postHTML` is a static string containing the HTML for two input boxes and a submit box - this HTML is provided so that you can `POST` example data. This is NOT the right way to serve static HTML - please see [How to Serve Static Files](link) for a more proper example. + +With the HTML out of the way, we [create a server](/how-do-i-create-a-http-server) to listen for requests. It is important to note, when listening for POST data, that the `req` object is also an [Event Emitter](/what-are-event-emitters). `req`, therefore, will emit a `data` event whenever a 'chunk' of incoming data is received; when there is no more incoming data, the `end` event is emitted. So, in our case, we listen for `data` events. Once all the data is received, we log the data to the console and send the response. + +Something important to note is that the event listeners are being added immediately after the request object is received. If you don't immediately set them, then there is a possibility of missing some of the events. If, for example, an event listener was attached from inside a callback, then the `data` and `end` events might be fired in the meantime with no listeners attached! + +You can save this script to `server.js` and run it with `node server.js`. Once you run it you will notice that occasionally you will see lines with no data, e.g. `POSTed: `. This happens because regular `GET` requests go through the same codepath. In a more 'real-world' application, it would be proper practice to check the type of request and handle the different request types differently. + diff --git a/locale/fa/knowledge/HTTP/servers/how-to-serve-static-files.md b/locale/fa/knowledge/HTTP/servers/how-to-serve-static-files.md new file mode 100644 index 0000000000000..840564e1ae751 --- /dev/null +++ b/locale/fa/knowledge/HTTP/servers/how-to-serve-static-files.md @@ -0,0 +1,42 @@ +--- +title: How to serve static files +date: '2011-08-26T10:08:50.000Z' +tags: + - http +difficulty: 1 +layout: knowledge-post.hbs +--- + +A basic necessity for most [http servers](/how-do-i-create-a-https-server) is to be able to serve static files. Thankfully, it is not that hard to do in Node.js. First you [link]read the file, then you serve the file. Here is an example of a script that will serve the files in the current directory: + + var fs = require('fs'), + http = require('http'); + + http.createServer(function (req, res) { + fs.readFile(__dirname + req.url, function (err,data) { + if (err) { + res.writeHead(404); + res.end(JSON.stringify(err)); + return; + } + res.writeHead(200); + res.end(data); + }); + }).listen(8080); + +This example takes the path requested and it serves that path, relative to the local directory. This works fine as a quick solution; however, there are a few problems with this approach. First, this code does not correctly handle mime types. Additionally, a proper static file server should really be taking advantage of client side caching, and should send a "Not Modified" response if nothing has changed. Furthermore, there are security bugs that can enable a malicious user to break out of the current directory. (for example, `GET /../../../`). + +Each of these can be addressed invidually without much difficulty. 
You can send the proper mime type header. You can figure how to utilize the client caches. You can take advantage of `path.normalize` to make sure that requests don't break out of the current directory. But why write all that code when you can just use someone else's library? + +There is a good static file server called [node-static](https://github.com/cloudhead/node-static) written by Alexis Sellier which you can leverage. Here is a script which functions similarly to the previous one: + + var static = require('node-static'); + var http = require('http'); + + var file = new(static.Server)(); + + http.createServer(function (req, res) { + file.serve(req, res); + }).listen(8080); + +This is a fully functional file server that doesn't have any of the bugs previously mentioned. This is just the most basic set up, there are more things you can do if you look at [the api](https://github.com/cloudhead/node-static). Also since it is an open source project, you can always modify it to your needs (and feel free to contribute back to the project!). diff --git a/locale/fa/knowledge/REPL/how-to-create-a-custom-repl.md b/locale/fa/knowledge/REPL/how-to-create-a-custom-repl.md new file mode 100644 index 0000000000000..8cf757f03b2bd --- /dev/null +++ b/locale/fa/knowledge/REPL/how-to-create-a-custom-repl.md @@ -0,0 +1,100 @@ +--- +title: How to create and use a custom REPL +date: '2011-08-26T10:08:50.000Z' +tags: + - core + - repl +difficulty: 2 +layout: knowledge-post.hbs +--- + +Node allows users to create their own REPLs with the [repl module](https://nodejs.org/docs/v0.4.10/api/repl.html). Its basic use looks like this: + + repl.start(prompt, stream); + +`prompt` is a string that's used for the prompt of your REPL and defaults to "> ". `stream` is the stream that the repl listens on and defaults to `process.stdin`. When you run `node` from the command prompt, what it's doing in the background is running `repl.start()` to give you the standard REPL. + +However, the repl is pretty flexible. Here's an example that shows this off: + + #!/usr/bin/env node + + var net = require("net"), + repl = require("repl"); + + var mood = function () { + var m = [ "^__^", "-___-;", ">.<", "<_>" ]; + return m[Math.floor(Math.random()*m.length)]; + }; + + //A remote node repl that you can telnet to! + net.createServer(function (socket) { + var remote = repl.start("node::remote> ", socket); + //Adding "mood" and "bonus" to the remote REPL's context. + remote.context.mood = mood; + remote.context.bonus = "UNLOCKED"; + }).listen(5001); + + console.log("Remote REPL started on port 5001."); + + //A "local" node repl with a custom prompt + var local = repl.start("node::local> "); + + // Exposing the function "mood" to the local REPL's context. + local.context.mood = mood; + +This script creates *two* REPLs: One is normal excepting for its custom prompt, but the *other* is exposed via the net module so I can telnet to it! In addition, it uses the `context` property to expose the function "mood" to both REPLs, and the "bonus" string to the remote REPL only. As you will see, this approach of trying to expose objects to one REPL and not the other *doesn't really work*. + +In addition, all objects in the global scope will also be accessible to your REPLs. + +Here's what happens when I run the script: + + $ node repl.js + Remote REPL started on port 5001. + node::local> .exit + ^Cjosh@pidgey:/tmp/telnet$ node repl.js + Remote REPL started on port 5001. 
+    node::local> mood()
+    '^__^'
+    node::local> bonus
+    ReferenceError: bonus is not defined
+        at [object Context]:1:1
+        at Interface.<anonymous> (repl.js:171:22)
+        at Interface.emit (events.js:64:17)
+        at Interface._onLine (readline.js:153:10)
+        at Interface._line (readline.js:408:8)
+        at Interface._ttyWrite (readline.js:585:14)
+        at ReadStream.<anonymous> (readline.js:73:12)
+        at ReadStream.emit (events.js:81:20)
+        at ReadStream._emitKey (tty_posix.js:307:10)
+        at ReadStream.onData (tty_posix.js:70:12)
+
+As may be seen, the `mood` function is usable within the local REPL, but the
+`bonus` string is not. This is as expected.
+
+Now, here's what happens when I try to telnet to port 5001:
+
+    josh@pidgey:/tmp/telnet$ telnet localhost 5001
+    Trying ::1...
+    Trying 127.0.0.1...
+    Connected to localhost.
+    Escape character is '^]'.
+    node::remote> mood()
+    '>.<'
+    node::remote> bonus
+    'UNLOCKED'
+
+As you can see, the `mood` function is *also* available over telnet! In addition, so is "bonus".
+
+As an interesting consequence of my actions, `bonus` is now also defined on the local REPL:
+
+    node::local> bonus
+    'UNLOCKED'
+
+It seems we "unlocked" the `bonus` string on the local REPL as well. As it turns out, any variables created in one REPL are also available to the other:
+
+    node::local> var node = "AWESOME!"
+
+    node::remote> node
+    'AWESOME!'
+
+As you can see, the node REPL is powerful and flexible.
diff --git a/locale/fa/knowledge/REPL/how-to-use-nodejs-repl.md b/locale/fa/knowledge/REPL/how-to-use-nodejs-repl.md
new file mode 100644
index 0000000000000..de40a3e862cf8
--- /dev/null
+++ b/locale/fa/knowledge/REPL/how-to-use-nodejs-repl.md
@@ -0,0 +1,60 @@
+---
+title: "How do I use node's REPL?"
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - cli
+  - repl
+difficulty: 1
+layout: knowledge-post.hbs
+---
+
+Node.js ships with a REPL, which is short for 'Read-Eval-Print Loop'. It is the Node.js shell; any valid JavaScript which can be written in a script can be passed to the REPL. It can be extremely useful for experimenting with node.js, debugging code, and figuring out some of JavaScript's more eccentric behaviors.
+
+Running it is simple - just run node without a filename.
+
+    docs@nodejitsu:~/$ node
+
+It then drops you into a simple prompt ('>') where you can type any JavaScript command you wish. As in most shells, you can press the up and down arrow keys to scroll through your command history and modify previous commands. The REPL also lets you press `Tab` to have it try to autocomplete the command.
+
+Whenever you type a command, it will print the return value of the command. If you want to reuse the previous return value, you can use the special `_` variable.
+
+For example:
+
+    node
+    > 1+1
+    2
+    > _+1
+    3
+
+One thing worth noting where REPL return values are concerned:
+
+    > x = 10
+    10
+    > var y = 5
+    > x
+    10
+    > y
+    5
+
+When the `var` keyword is used, the value of the expression is stored but *NOT* returned. When a bare identifier is used, the value is returned as well as stored.
+
+If you need to access any of the builtin modules, or any third party modules, they can be accessed with `require`, just like in the rest of Node.
+
+For example:
+
+    node
+    > path = require('path')
+    { resolve: [Function],
+      normalize: [Function],
+      join: [Function],
+      dirname: [Function],
+      basename: [Function],
+      extname: [Function],
+      exists: [Function],
+      existsSync: [Function] }
+    > path.basename("/a/b/c.txt")
+    'c.txt'
+
+Note once again that without the `var` keyword, the contents of the object are returned immediately and displayed to `stdout`.
diff --git a/locale/fa/knowledge/advanced/buffers/how-to-use-buffers.md b/locale/fa/knowledge/advanced/buffers/how-to-use-buffers.md
new file mode 100644
index 0000000000000..2660e5d56e0de
--- /dev/null
+++ b/locale/fa/knowledge/advanced/buffers/how-to-use-buffers.md
@@ -0,0 +1,161 @@
+---
+title: How to Use Buffers in Node.js
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - core
+  - Buffer
+  - buffer
+  - buffers
+  - binary
+difficulty: 3
+layout: knowledge-post.hbs
+---
+
+## Why Buffers?
+
+Pure javascript, while great with unicode-encoded strings, does not handle straight binary data very well. This is fine in the browser, where most data is in the form of strings. However, node.js servers have to also deal with TCP streams and reading and writing to the filesystem, both of which make it necessary to deal with purely binary streams of data.
+
+One way to handle this problem is to just use strings *anyway*, which is exactly what Node.js did at first. However, this approach is extremely problematic to work with; it's slow, makes you work with an API designed for strings and not binary data, and has a tendency to break in strange and mysterious ways.
+
+Don't use binary strings. Use *buffers* instead!
+
+## What Are Buffers?
+
+Buffers are instances of the `Buffer` class in node, which is designed to handle raw binary data. Each buffer corresponds to some raw memory allocated outside V8. Buffers act somewhat like arrays of integers, but aren't resizable and have a whole bunch of methods specifically for binary data. In addition, the "integers" in a buffer each represent a byte and so are limited to values from 0 to 255 (2^8 - 1), inclusive.
+
+## Where You See Buffers:
+
+In the wild, buffers are usually seen in the context of binary data coming from streams, such as `fs.createReadStream`.
+
+## Usage:
+
+### Creating Buffers:
+
+There are a few ways to create new buffers:
+
+    var buffer = new Buffer(8);
+
+This buffer is uninitialized and contains 8 bytes.
+
+    var buffer = new Buffer([ 8, 6, 7, 5, 3, 0, 9]);
+
+This initializes the buffer to the contents of this array. Keep in mind that the contents of the array are integers representing bytes.
+
+    var buffer = new Buffer("I'm a string!", "utf-8")
+
+This initializes the buffer to a binary encoding of the first string as specified by the second argument (in this case, utf-8). **utf-8** is by far the most common encoding used with node, but `Buffer` also supports the following (a quick byte-count comparison appears after this list):
+
+* **"ascii"**: This encoding is very fast, but is limited to the ascii character set. Moreover, it will convert null characters into spaces, unlike the utf-8 encoding.
+* **"ucs2"**: A two-byte, little-endian encoding. Can encode a subset of unicode.
+* **"base64"**: Base64 string encoding.
+* **"binary"**: This is the "binary string" format mentioned earlier, and is in the process of being deprecated. Avoid its use.
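+
+As a quick, hedged illustration of how these encodings differ in size (byte counts assume the era-appropriate `new Buffer` constructor used throughout this article):
+
+    var str = "Hello ☃";
+
+    // The snowman needs 3 bytes in utf-8, so 7 characters become 9 bytes
+    console.log(new Buffer(str, "utf-8").length);   // 9
+
+    // base64 round-trips the same 9 bytes through a text-safe representation
+    var b64 = new Buffer(str, "utf-8").toString("base64");
+    console.log(new Buffer(b64, "base64").length);  // 9
+
+    // ascii is one byte per character, but mangles anything outside ascii
+    console.log(new Buffer(str, "ascii").length);   // 7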
+
+### Writing to Buffers
+
+Given that there is already a buffer created:
+
+    > var buffer = new Buffer(16);
+
+we can start writing strings to it:
+
+    > buffer.write("Hello", "utf-8")
+    5
+
+The first argument to `buffer.write` is the string to write to the buffer, and the second argument is the string encoding. It happens to default to utf-8 so this argument is extraneous.
+
+`buffer.write` returned 5. This means that we wrote five bytes of the buffer. The fact that the string "Hello" is also 5 characters long is coincidental, since each of its characters *just happened* to encode as a single byte in utf-8. This is useful if you want to complete the message:
+
+    > buffer.write(" world!", 5, "utf-8")
+    7
+
+When `buffer.write` has 3 arguments, the second argument indicates an offset, or the index of the buffer to start writing at.
+
+### Reading from Buffers:
+
+#### toString:
+
+Probably the most common way to read buffers is to use the `toString` method, since many buffers contain text:
+
+    > buffer.toString('utf-8')
+    'Hello world!\u0000�k\t'
+
+Again, the first argument is the encoding. In this case, it can be seen that not the entire buffer was used! Luckily, because we know how many bytes we've written to the buffer, we can simply add more arguments to "stringify" the slice that's actually interesting:
+
+    > buffer.toString("utf-8", 0, 12)
+    'Hello world!'
+
+#### Individual octets:
+
+You can also set individual bytes by using an array-like syntax:
+
+    > buffer[12] = buffer[11];
+    33
+    > buffer[13] = "1".charCodeAt();
+    49
+    > buffer[14] = buffer[13];
+    49
+    > buffer[15] = 33
+    33
+    > buffer.toString("utf-8")
+    'Hello world!!11!'
+
+In this example, I set the remaining bytes, by hand, such that they represent utf-8 encoded "!" and "1" characters.
+
+### More Fun With Buffers
+
+#### Buffer.isBuffer(object)
+
+This method checks to see if `object` is a buffer, similar to `Array.isArray`.
+
+#### Buffer.byteLength(string, encoding)
+
+With this function, you can check the number of bytes required to encode a string with a given encoding (which defaults to utf-8). This length is *not* the same as string length, since many characters require more bytes to encode. For example:
+
+    > var snowman = "☃";
+    > snowman.length
+    1
+    > Buffer.byteLength(snowman)
+    3
+
+The unicode snowman is only one character, but takes 3 entire bytes to encode!
+
+#### buffer.length
+
+This is the length of your buffer, and represents how much memory is allocated. It is not the same as the size of the buffer's contents, since a buffer may be half-filled. For example:
+
+    > var buffer = new Buffer(16)
+    > buffer.write(snowman)
+    3
+    > buffer.length
+    16
+
+In this example, the contents written to the buffer only consist of three bytes (since they represent the single-character snowman), but the buffer's length is still 16, as it was initialized.
+
+#### buffer.copy(target, targetStart=0, sourceStart=0, sourceEnd=buffer.length)
+
+`buffer.copy` allows one to copy the contents of one buffer onto another. The first argument is the target buffer on which to copy the contents of `buffer`, and the rest of the arguments allow for copying only a subsection of the source buffer to somewhere in the middle of the target buffer. For example:
+
+    > var frosty = new Buffer(24)
+    > var snowman = new Buffer("☃", "utf-8")
+    > frosty.write("Happy birthday! ", "utf-8")
+    16
+    > snowman.copy(frosty, 16)
+    3
+    > frosty.toString("utf-8", 0, 19)
+    'Happy birthday! ☃'
+
+In this example, I copied the "snowman" buffer, which contains a 3 byte long character, to the "frosty" buffer, whose first 16 bytes I had already written to. Because the snowman character is 3 bytes long, the result takes up 19 bytes of the buffer.
+
+#### buffer.slice(start, end=buffer.length)
+
+This method's API is generally the same as that of `Array.prototype.slice`, but with one very important difference: The slice is **not** a new buffer and merely references a subset of the memory space. *Modifying the slice will also modify the original buffer*! For example:
+
+    > var puddle = frosty.slice(16, 19)
+    > puddle.toString()
+    '☃'
+    > puddle.write("___")
+    3
+    > frosty.toString("utf-8", 0, 19)
+    'Happy birthday! ___'
+
+Now Frosty has been turned into a puddle of underscores. Bummer.
diff --git a/locale/fa/knowledge/advanced/streams/how-to-use-fs-create-read-stream.md b/locale/fa/knowledge/advanced/streams/how-to-use-fs-create-read-stream.md
new file mode 100644
index 0000000000000..d0a4823c575b1
--- /dev/null
+++ b/locale/fa/knowledge/advanced/streams/how-to-use-fs-create-read-stream.md
@@ -0,0 +1,35 @@
+---
+title: How to use fs.createReadStream?
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - core
+  - streams
+  - fs
+difficulty: 3
+layout: knowledge-post.hbs
+---
+
+The function `fs.createReadStream()` allows you to open up a readable stream in a very simple manner. All you have to do is pass the path of the file to start streaming in. It turns out that the response (as well as the request) objects are streams. So we will use this fact to create an http server that streams the files to the client. Since the code is simple enough, it is pretty easy just to read through it and comment why each line is necessary.
+
+    var http = require('http');
+    var fs = require('fs');
+
+    http.createServer(function(req, res) {
+      // The filename simply takes the local directory and tacks on the requested url
+      var filename = __dirname+req.url;
+
+      // This line opens the file as a readable stream
+      var readStream = fs.createReadStream(filename);
+
+      // This will wait until we know the readable stream is actually valid before piping
+      readStream.on('open', function () {
+        // This just pipes the read stream to the response object (which goes to the client)
+        readStream.pipe(res);
+      });
+
+      // This catches any errors that happen while creating the readable stream (usually invalid names)
+      readStream.on('error', function(err) {
+        res.end(JSON.stringify(err));
+      });
+    }).listen(8080);
+
diff --git a/locale/fa/knowledge/advanced/streams/how-to-use-fs-create-write-stream.md b/locale/fa/knowledge/advanced/streams/how-to-use-fs-create-write-stream.md
new file mode 100644
index 0000000000000..8c678f854fc76
--- /dev/null
+++ b/locale/fa/knowledge/advanced/streams/how-to-use-fs-create-write-stream.md
@@ -0,0 +1,34 @@
+---
+title: How to use fs.createWriteStream?
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - core
+  - streams
+  - fs
+difficulty: 3
+layout: knowledge-post.hbs
+---
+
+The function `fs.createWriteStream()` creates a writable stream in a very simple manner. After a call to `fs.createWriteStream` with the filepath, you have a writeable stream to work with. It turns out that the response (as well as the request) objects are streams. So we will stream the `POST` data to the file `output`. Since the code is simple enough, it is pretty easy just to read through it and comment why each line is necessary.
+
+    var http = require('http');
+    var fs = require('fs');
+
+    http.createServer(function(req, res) {
+      // This opens up the writeable stream to `output`
+      var writeStream = fs.createWriteStream('./output');
+
+      // This pipes the POST data to the file
+      req.pipe(writeStream);
+
+      // After all the data is saved, respond with a simple html form so they can post more data
+      req.on('end', function () {
+        res.writeHead(200, {"content-type":"text/html"});
+        res.end('<form method="POST"><input type="text" name="field" /><input type="submit" value="Send" /></form>');
+      });
+
+      // This is here in case any errors occur
+      writeStream.on('error', function (err) {
+        console.log(err);
+      });
+    }).listen(8080);
diff --git a/locale/fa/knowledge/advanced/streams/how-to-use-stream-pipe.md b/locale/fa/knowledge/advanced/streams/how-to-use-stream-pipe.md
new file mode 100644
index 0000000000000..4d0cf3bafac60
--- /dev/null
+++ b/locale/fa/knowledge/advanced/streams/how-to-use-stream-pipe.md
@@ -0,0 +1,88 @@
+---
+title: How to use stream.pipe
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - core
+  - streams
+difficulty: 2
+layout: knowledge-post.hbs
+---
+
+If you've been using node.js for a while, you've definitely run into streams. HTTP connections are streams, open files are streams; stdin, stdout, and stderr are all streams as well. A 'stream' is node's I/O abstraction - if you feel like you still need to understand them better, you can read more about them [here](https://nodejs.org/api/stream.html#stream_stream).
+
+Streams make for quite a handy abstraction, and there's a lot you can do with them - as an example, let's take a look at stream.pipe, the method used to take a readable stream and connect it to a writeable stream. Suppose we wanted to spawn a `node` child process and pipe our stdout and stdin to its corresponding stdout and stdin.
+
+    #!/usr/bin/env node
+
+    var child = require('child_process');
+
+    var myREPL = child.spawn('node');
+
+    myREPL.stdout.pipe(process.stdout, { end: false });
+
+    process.stdin.resume();
+
+    process.stdin.pipe(myREPL.stdin, { end: false });
+
+    myREPL.stdin.on('end', function() {
+      process.stdout.write('REPL stream ended.');
+    });
+
+    myREPL.on('exit', function (code) {
+      process.exit(code);
+    });
+
+There you have it - spawn the node REPL as a child process, and pipe your stdin and stdout to its stdin and stdout. Make sure to listen for the child's 'exit' event, too, or else your program will just hang there when the REPL exits.
+
+Another use for stream.pipe is file streams. In node.js, fs.createReadStream and fs.createWriteStream are used to create a stream to an open file descriptor. Now let's look at how one might use stream.pipe to write to a file. You'll probably recognize most of the code:
+
+    #!/usr/bin/env node
+
+    var child = require('child_process'),
+        fs = require('fs');
+
+    var myREPL = child.spawn('node'),
+        myFile = fs.createWriteStream('myOutput.txt');
+
+    myREPL.stdout.pipe(process.stdout, { end: false });
+    myREPL.stdout.pipe(myFile);
+
+    process.stdin.resume();
+
+    process.stdin.pipe(myREPL.stdin, { end: false });
+    process.stdin.pipe(myFile);
+
+    myREPL.stdin.on("end", function() {
+      process.stdout.write("REPL stream ended.");
+    });
+
+    myREPL.on('exit', function (code) {
+      process.exit(code);
+    });
+
+With those small additions, your stdin and the stdout from your REPL will both be piped to the writeable file stream you opened to 'myOutput.txt'. It's that simple - you can pipe streams to as many places as you want.
+
+Another very important use case for stream.pipe is with HTTP request and response objects.
+Here we have the very simplest kind of proxy:
+
+    #!/usr/bin/env node
+
+    var http = require('http');
+
+    http.createServer(function(request, response) {
+      var proxy = http.createClient(9000, 'localhost');
+      var proxyRequest = proxy.request(request.method, request.url, request.headers);
+      proxyRequest.on('response', function (proxyResponse) {
+        proxyResponse.pipe(response);
+      });
+      request.pipe(proxyRequest);
+    }).listen(8080);
+
+    http.createServer(function (req, res) {
+      res.writeHead(200, { 'Content-Type': 'text/plain' });
+      res.write('request successfully proxied to port 9000!' + '\n' + JSON.stringify(req.headers, true, 2));
+      res.end();
+    }).listen(9000);
+
+One could also use stream.pipe to send incoming requests to a file for logging, or to a child process, or any one of a number of other things.
+
+Hopefully this has shown you the basics of using stream.pipe to easily pass your data streams around. It's truly a powerful little trick in node.js, and its uses are yours to explore. Happy coding, and try not to cross your streams!
diff --git a/locale/fa/knowledge/advanced/streams/what-are-streams.md b/locale/fa/knowledge/advanced/streams/what-are-streams.md
new file mode 100644
index 0000000000000..a2e709b931e74
--- /dev/null
+++ b/locale/fa/knowledge/advanced/streams/what-are-streams.md
@@ -0,0 +1,45 @@
+---
+title: What are streams?
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - core
+  - streams
+difficulty: 3
+layout: knowledge-post.hbs
+---
+
+Streams are another basic construct in node.js that encourages asynchronous coding. Streams allow you to process the data as it is generated or retrieved. Streams can be readable, writeable, or both.
+
+In other words, streams use events to deal with data as it happens, rather than only with a callback at the end. Readable streams emit the event `data` for each chunk of data that comes in, and an `end` event, which is emitted when there is no more data. Writeable streams can be written to with the `write()` function, and closed with the `end()` function. All types of streams emit `error` events when errors arise.
+
+As a quick example, we can write a simple version of `cp` (the unix utility that copies files). We could do that by reading the whole file with standard filesystem calls and then writing it out to a file. Unfortunately, that requires that the whole file be read in before it can be written. For files a gigabyte or two in size, that approach can run out of memory. The biggest advantage that streams give you over their non-stream counterparts is that you can start processing the data before you have all of it. In this case, writing out the file doesn't get sped up, but if we were streaming over the internet or doing cpu processing on it then there could be measurable performance improvements.
+
+Run this script with arguments like `node cp.js src.txt dest.txt`. This would mean, in the code below, that `process.argv[2]` is `src.txt` and `process.argv[3]` is `dest.txt`.
+
+    var fs = require('fs');
+    console.log(process.argv[2], '->', process.argv[3]);
+
+    var readStream = fs.createReadStream(process.argv[2]);
+    var writeStream = fs.createWriteStream(process.argv[3]);
+
+    readStream.on('data', function (chunk) {
+      writeStream.write(chunk);
+    });
+
+    readStream.on('end', function () {
+      writeStream.end();
+    });
+
+    //Some basic error handling
+    readStream.on('error', function (err) {
+      console.log("ERROR", err);
+    });
+
+    writeStream.on('error', function (err) {
+      console.log("ERROR", err);
+    });
+
+This sets up a readable stream from the source file and a writable stream to the destination file. Whenever the readable stream receives data, that data is written to the writeable stream; once the readable stream is finished, the writeable stream is closed. NOTE: it would have been better to use [pipe](/how-to-use-stream-pipe) like `readStream.pipe(writeStream);`; however, to show how streams work, we have done things the long way.
\ No newline at end of file
diff --git a/locale/fa/knowledge/child-processes/how-to-spawn-a-child-process.md b/locale/fa/knowledge/child-processes/how-to-spawn-a-child-process.md
new file mode 100644
index 0000000000000..9f89edb57c25a
--- /dev/null
+++ b/locale/fa/knowledge/child-processes/how-to-spawn-a-child-process.md
@@ -0,0 +1,39 @@
+---
+title: How to spawn a child process - the basics
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - core
+  - child_process
+difficulty: 2
+layout: knowledge-post.hbs
+---
+
+If you find yourself wishing you could have your Node.js process start another program for you, then look no further than the `child_process` module.
+
+The simplest way is the "fire, forget, and buffer" method using `child_process.exec`. It runs your process, buffers its output (up to a default maximum of 200kb), and lets you access it from a callback when it is finished. Let us take a look at an example:
+
+    var childProcess = require('child_process'),
+        ls;
+
+    ls = childProcess.exec('ls -l', function (error, stdout, stderr) {
+      if (error) {
+        console.log(error.stack);
+        console.log('Error code: '+error.code);
+        console.log('Signal received: '+error.signal);
+      }
+      console.log('Child Process STDOUT: '+stdout);
+      console.log('Child Process STDERR: '+stderr);
+    });
+
+    ls.on('exit', function (code) {
+      console.log('Child process exited with exit code '+code);
+    });
+
+NODE PRO TIP: `error.stack` is a stack trace to the point that the [Error object](/what-is-the-error-object) was created.
+
+It should be noted that the `STDERR` of a given process is not exclusively reserved for error messages. Many programs use it as a channel for secondary data instead. As such, when trying to work with a program that you have not previously spawned as a child process, it can be helpful to start out dumping both `STDOUT` and `STDERR`, as shown above, to avoid any surprises.
+
+While `child_process.exec` buffers the output of the child process for you, it also returns a `ChildProcess` object, Node's way of wrapping a still-running process. In the example above, since we are using `ls`, a program that will exit immediately regardless, the only part of the `ChildProcess` object worth worrying about is the `on exit` handler. It is not necessary here - the process will still exit and the error code will still be shown on errors. A sketch of the more stream-oriented `child_process.spawn` follows below.
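+
+Since the title promises spawning: here is a minimal, hedged sketch of `child_process.spawn`, which, unlike `exec`, hands you the child's output as streams instead of buffering it (the `ls -l` command is just an example):
+
+    var spawn = require('child_process').spawn,
+        ls = spawn('ls', ['-l']);
+
+    // Chunks arrive as they are produced, rather than all at once at the end
+    ls.stdout.on('data', function (chunk) {
+      process.stdout.write('stdout chunk: ' + chunk);
+    });
+
+    ls.stderr.on('data', function (chunk) {
+      process.stdout.write('stderr chunk: ' + chunk);
+    });
+
+    ls.on('exit', function (code) {
+      console.log('child process exited with exit code ' + code);
+    });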
+ + diff --git a/locale/fa/knowledge/command-line/how-to-get-colors-on-the-command-line.md b/locale/fa/knowledge/command-line/how-to-get-colors-on-the-command-line.md new file mode 100644 index 0000000000000..fbf7f83af8e70 --- /dev/null +++ b/locale/fa/knowledge/command-line/how-to-get-colors-on-the-command-line.md @@ -0,0 +1,66 @@ +--- +title: How to get colors on the command line +date: '2011-08-26T10:08:50.000Z' +tags: + - cli +difficulty: 1 +layout: knowledge-post.hbs +--- + + +When working on the command line, it can be both fun and extremely useful to colorize one's output. To colorize console output, you need to use ANSI escape codes. The module `colors.js`, available on `npm`, provides an extremely easy to use wrapper that makes adding colors a breeze. + +First, install it to the directory you'd like to work in. + + cd mydir + npm install colors + +Now open up a little test script for yourself, and try something like this: + + var colors = require('colors'), + stringOne = 'This is a plain string.', + stringTwo = 'This string is red.'.red, + stringThree = 'This string is blue.'.blue; + + console.log(stringOne.green); + console.log(stringOne.yellow); + + console.log(stringTwo); + console.log(stringThree); + + console.log(stringTwo.magenta); + console.log(stringThree.grey.bold); + +There are several things to take note of here - first, the string object has been prototyped, so any color may be added simply by adding the property to the string! It works both on string literals and on variables, as shown at the top of the example above. + +Notice, also, from the second pair of `console.log` statements, that once set, a color value persists as part of the string. This is because under the hood, the proper ANSI color tags have been prepended and appended as necessary - anywhere the string gets passed where ANSI color codes are also supported, the color will remain. + +The last pair of `console.log` statements are probably the most important. Because of the way `colors.js` and ANSI color codes work, if more than one color property is set on a string, **only the first color property to be set on the string takes effect.** This is because the colors function as 'state shifts' rather than as tags. + +Let's look at a more explicit example. If you set the following properties with `colors.js`: + + myString.red.blue.green + +You can think of your terminal saying to itself, "Make this green. No, make this blue. No, make this red. No more color codes now? Red it is, then." The codes are read in the reverse order, and the last/'innermost' is applied. This can be extremely useful if you're using a library that sets its own default colors that you don't like - if you set a color code yourself on the string you pass in to the library, it will supersede the other author's color code(s). + +The last thing to note is the final line of the example script. While a color code was set previously, a 'bold' code was not, so the example was made bold, but not given a different color. + +One last thing: the colors can look quite different in different terminals - sometimes, `bold` is bold, sometimes it's just a different color. Try it out and see for yourself! + +For reference, here's the full list of available `colors.js` properties. + +- bold +- italic +- underline +- inverse +- yellow +- cyan +- white +- magenta +- green +- red +- grey +- blue +- rainbow + +Some people may tell you that `colors.js` is haunted, but those people are just trolls... right? 
diff --git a/locale/fa/knowledge/command-line/how-to-parse-command-line-arguments.md b/locale/fa/knowledge/command-line/how-to-parse-command-line-arguments.md new file mode 100644 index 0000000000000..63547f643f95f --- /dev/null +++ b/locale/fa/knowledge/command-line/how-to-parse-command-line-arguments.md @@ -0,0 +1,93 @@ +--- +title: How to parse command line arguments +date: '2011-08-26T10:08:50.000Z' +tags: + - cli +difficulty: 1 +layout: knowledge-post.hbs +--- + + +Passing in arguments via the command line is an extremely basic programming task, and a necessity for anyone trying to write a simple Command-Line Interface (CLI). In Node.js, as in C and many related environments, all command-line arguments received by the shell are given to the process in an array called `argv` (short for 'argument values'). + +Node.js exposes this array for every running process in the form of `process.argv` - let's take a look at an example. Make a file called `argv.js` and add this line: + + console.log(process.argv); + +Now save it, and try the following in your shell: + + $ node argv.js one two three four five + [ 'node', + '/home/avian/argvdemo/argv.js', + 'one', + 'two', + 'three', + 'four', + 'five' ] + +There you have it - an array containing any arguments you passed in. Notice the first two elements - `node` and the path to your script. These will always be present - even if your program takes no arguments of its own, your script's interpreter and path are still considered arguments to the shell you're using. + +Where everyday CLI arguments are concerned, you'll want to skip the first two. Now try this in `argv.js`: + + var myArgs = process.argv.slice(2); + console.log('myArgs: ', myArgs); + +This yields: + + $ node argv.js one two three four + myArgs: [ 'one', 'two', 'three', 'four' ] + +Now let's actually do something with the args: + + var myArgs = process.argv.slice(2); + console.log('myArgs: ', myArgs); + + switch (myArgs[0]) { + case 'insult': + console.log(myArgs[1], 'smells quite badly.'); + break; + case 'compliment': + console.log(myArgs[1], 'is really cool.'); + break; + default: + console.log('Sorry, that is not something I know how to do.'); + } + +JS PRO TIP: Remember to `break` after each `case` - otherwise you'll run the next case too! + +Referring to your command-line arguments by array index isn't very clean, and can quickly turn into a nightmare when you start working with flags and the like - imagine you made a server, and it needed a lot of arguments. Imagine having to deal with something like `myapp -h host -p port -r -v -b --quiet -x -o outfile` - some flags need to know about what comes next, some don't, and most CLIs let users specify arguments in any order they want. Sound like a fun string to parse? + +Luckily, there's a third party module that makes all of this trivial - it's called [Optimist](https://github.com/substack/node-optimist), written by one Mr. James Halliday (aka SubStack). It's available via `npm`. Use this command from your app's base path: + + npm install optimist + +Once you have it, give it a try - it can really be a life-saver. 
+
+    var myArgs = require('optimist').argv,
+        help = 'This would be a great place for real help information.';
+
+    if ((myArgs.h)||(myArgs.help)) {
+      console.log(help);
+      process.exit(0);
+    }
+
+    switch (myArgs._[0]) {
+      case 'insult':
+        console.log(myArgs.n || myArgs.name, 'smells quite badly.');
+        break;
+      case 'compliment':
+        console.log(myArgs.n || myArgs.name, 'is really cool.');
+        break;
+      default:
+        console.log(help);
+    }
+
+    console.log('myArgs: ', myArgs);
+
+The last line was included to let you see how Optimist handles your arguments. Here's a quick reference:
+
+- `argv.$0` contains the first two elements of `process.argv` joined together - "node ./myapp.js".
+- `argv._` is an array containing each element not attached to a flag.
+- Individual flags become properties of `argv`, such as with `myArgs.h` and `myArgs.help`. Note that non-single-letter flags must be passed in as `--flag`.
+
+For more information on Optimist and the many, many other things it can do for your command-line arguments, please visit [https://github.com/substack/node-optimist](https://github.com/substack/node-optimist).
diff --git a/locale/fa/knowledge/command-line/how-to-prompt-for-command-line-input.md b/locale/fa/knowledge/command-line/how-to-prompt-for-command-line-input.md
new file mode 100644
index 0000000000000..c170d9af62ca1
--- /dev/null
+++ b/locale/fa/knowledge/command-line/how-to-prompt-for-command-line-input.md
@@ -0,0 +1,91 @@
+---
+title: How do I prompt users for input from a command-line script?
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - javascript
+  - core
+  - cli
+difficulty: 2
+layout: knowledge-post.hbs
+---
+
+So you've got a little CLI tool, but you want to be able to prompt a user for additional data after the script has started, rather than passing it in as a command line argument or putting it in a file. To do this, you'll need to listen to STDIN ("standard input", i.e. your keyboard), which Node.js exposes for you as `process.stdin`, a readable stream.
+
+Streams are Node's way of dealing with evented I/O - they're a big topic, and you can read more about them (here). For now, we're only going to deal with the Stream methods relevant to working with `process.stdin` so as to keep the examples easy.
+
+The first two Readable Stream methods you'll need to know about here are `pause()` and `resume()`. Not every program needs to care whether or not you're pressing keys at a given moment, so `process.stdin` is paused by default.
+
+Here's a simple example. Try the following in a new file:
+
+    process.stdin.resume();
+    process.stdin.setEncoding('utf8');
+    var util = require('util');
+
+    process.stdin.on('data', function (text) {
+      console.log('received data:', util.inspect(text));
+      if (text === 'quit\n') {
+        done();
+      }
+    });
+
+    function done() {
+      process.stdin.pause();
+      console.log('Now that process.stdin is paused, there is nothing more to do.');
+      process.exit();
+    }
+
+If all of this sounds complicated, or if you want a higher-level interface to this sort of thing, don't worry - as usual, the Node.js community has come to the rescue. One particularly friendly module to use for this is Prompt, maintained by Nodejitsu. It's available on `npm`:
+
+    npm install prompt
+
+Prompt is built to be easy - if your eyes started to glaze over as soon as you saw `Readable Stream`, then this is the section for you.
+Compare the following to the example above:
+
+    var prompt = require('prompt');
+
+    prompt.start();
+
+    prompt.get(['username', 'email'], function (err, result) {
+      if (err) { return onErr(err); }
+      console.log('Command-line input received:');
+      console.log('  Username: ' + result.username);
+      console.log('  Email: ' + result.email);
+    });
+
+    function onErr(err) {
+      console.log(err);
+      return 1;
+    }
+
+NODE PRO TIP: This short script also demonstrates proper error handling in node - errors are a callback's first argument, and `return` is used with the error handler so that the rest of the function doesn't execute when errors happen. For more information, look (here).
+
+Prompt also makes it trivial to handle a certain set of recurring properties that one might want to attach.
+
+    var prompt = require('prompt');
+
+    var properties = [
+      {
+        name: 'username',
+        validator: /^[a-zA-Z\s\-]+$/,
+        warning: 'Username must be only letters, spaces, or dashes'
+      },
+      {
+        name: 'password',
+        hidden: true
+      }
+    ];
+
+    prompt.start();
+
+    prompt.get(properties, function (err, result) {
+      if (err) { return onErr(err); }
+      console.log('Command-line input received:');
+      console.log('  Username: ' + result.username);
+      console.log('  Password: ' + result.password);
+    });
+
+    function onErr(err) {
+      console.log(err);
+      return 1;
+    }
+
+For more information on Prompt, please see [the project's GitHub page](http://github.com/nodejitsu/node-prompt).
diff --git a/locale/fa/knowledge/cryptography/how-to-use-crypto-module.md b/locale/fa/knowledge/cryptography/how-to-use-crypto-module.md
new file mode 100644
index 0000000000000..83e73a12d56b3
--- /dev/null
+++ b/locale/fa/knowledge/cryptography/how-to-use-crypto-module.md
@@ -0,0 +1,126 @@
+---
+title: How to use the crypto module
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - core
+  - crypto
+difficulty: 3
+layout: knowledge-post.hbs
+---
+
+The [crypto](https://nodejs.org/docs/v0.4.10/api/crypto.html) module is a wrapper for [OpenSSL](http://en.wikipedia.org/wiki/Openssl) cryptographic functions. It supports calculating hashes, authentication with HMAC, ciphers, and more!
+
+The crypto module is mostly useful as a tool for implementing [cryptographic protocols](http://en.wikipedia.org/wiki/Cryptographic_protocol) such as [TLS](http://en.wikipedia.org/wiki/Transport_Layer_Security) and [https](http://en.wikipedia.org/wiki/Https). For most users, Node's built-in [tls module](https://nodejs.org/docs/v0.4.10/api/tls.html) and [https module](https://nodejs.org/docs/v0.4.10/api/https.html) should more than suffice. However, for the user that only wants to use small parts of what's needed for full-scale cryptography or is crazy/desperate enough to implement a protocol using OpenSSL and Node: Read on.
+
+## Hashes
+
+### What Is A Hash?
+
+A hash is a fixed-length string of bits that is procedurally and deterministically generated from some arbitrary block of source data. Some important properties of these hashes (the type useful for cryptography) include:
+
+* **Fixed length:** This means that, no matter what the input, the length of the hash is the same. For example, md5 hashes are always 128 bits long whether the input data is a few bits or a few gigabytes.
+
+* **Deterministic:** For the same input, you should expect to be able to calculate exactly the same hash. This makes hashes useful for checksums.
+
+* **Collision-Resistant:** A collision is when the same hash is generated for two different input blocks of data.
Hash algorithms are designed to be extremely unlikely to have collisions -- just how unlikely is a property of the hash algorithm. The importance of this property depends on the use case.
+
+* **Unidirectional:** A good hash algorithm is easy to apply, but hard to undo. This means that, given a hash, there isn't any reasonable way to find out what the original piece of data was.
+
+### Hash Algorithms That Work With Crypto
+
+The hashes that work with crypto are dependent on what your version of OpenSSL supports. If you have a new enough version of OpenSSL, you can get a list of hash types your OpenSSL supports by typing `openssl list-message-digest-algorithms` into the command line. For older versions, simply type `openssl list-message-digest-commands` instead! Some of the most common hash types are:
+
+* [sha1](http://en.wikipedia.org/wiki/Sha1)
+* [md5](http://en.wikipedia.org/wiki/Md5)
+
+### How To Calculate Hashes with Crypto
+
+Crypto has a method called `createHash` which allows you to calculate a hash. Its only argument is a string representing the hash algorithm to use. This example finds the md5 hash for the string, "Man oh man do I love node!":
+
+    require("crypto")
+      .createHash("md5")
+      .update("Man oh man do I love node!")
+      .digest("hex");
+
+The `update` method is used to push data to later be turned into a hash with the `digest` method. `update` can be invoked multiple times to ingest streaming data, such as buffers from a file read stream. The argument for `digest` represents the output format, and may either be "binary", "hex" or "base64". It defaults to binary.
+
+## HMAC
+
+HMAC stands for Hash-based Message Authentication Code, and is a process for applying a hash algorithm to both data and a secret key that results in a single final hash. It is used much like a vanilla hash, but additionally lets you check the *authenticity* of data as *well* as its integrity (as you can with md5 checksums).
+
+The API for hmacs is very similar to that of `createHash`, except that the method is called `createHmac` and it takes a key as a second argument:
+
+    require("crypto").createHmac("md5", "password")
+      .update("If you love node so much why don't you marry it?")
+      .digest("hex");
+
+The resulting md5 hash is unique to both the input data and the key.
+
+## Ciphers
+
+Ciphers allow you to encode and decode messages given a password.
+
+### Cipher Algorithms That Work With Crypto
+
+Like crypto's hash algorithms, the ciphers that work with crypto are dependent on what your version of OpenSSL supports. You can get a list of cipher types your OpenSSL supports by typing `openssl list-cipher-commands` into the command line for older versions, or `openssl list-cipher-algorithms` for newer versions of OpenSSL. OpenSSL supports *many* ciphers; a good and popular one is [AES192](http://en.wikipedia.org/wiki/Aes192).
+
+### How To Use Cipher Algorithms with Crypto:
+
+Crypto comes with two methods for ciphering and deciphering:
+
+* `crypto.createCipher(algorithm, key)`
+* `crypto.createDecipher(algorithm, key)`
+
+Both of these methods take arguments similarly to `createHmac`. They also both have analogous `update` functions. However, each use of `update` returns a chunk of the encoded/decoded data instead of requiring one to call `digest` to get the result. Moreover, after encoding (or decoding) your data, you will likely have to call the `final` method to get the last chunk of encoded information.
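+
+To make the `update`/`final` flow concrete, here is a minimal, hedged sketch of a full round trip (the aes192 algorithm and the password are arbitrary choices):
+
+    var crypto = require("crypto");
+
+    // Encode: each update returns a chunk, and final flushes the last one
+    var cipher = crypto.createCipher("aes192", "popcorn"),
+        encrypted = cipher.update("Man oh man do I love node!", "utf8", "hex");
+    encrypted += cipher.final("hex");
+
+    // Decode: the same password recovers the original message
+    var decipher = crypto.createDecipher("aes192", "popcorn"),
+        decrypted = decipher.update(encrypted, "hex", "utf8");
+    decrypted += decipher.final("utf8");
+
+    console.log(decrypted); // 'Man oh man do I love node!'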
+
+Here's an example, slightly less trivial than previous examples, that uses crypto and [optimist](https://github.com/substack/node-optimist) to encode and decode messages from the command line:
+
+    #!/usr/bin/env node
+
+    var crypto = require("crypto"),
+        argv = require("optimist").argv;
+
+    if (argv.e && argv.password) {
+      var cipher = crypto.createCipher("aes192", argv.password),
+          msg = [];
+
+      argv._.forEach( function (phrase) {
+        msg.push(cipher.update(phrase, "binary", "hex"));
+      });
+
+      msg.push(cipher.final("hex"));
+      console.log(msg.join(""));
+
+    } else if (argv.d && argv.password) {
+      var decipher = crypto.createDecipher("aes192", argv.password),
+          msg = [];
+
+      argv._.forEach( function (phrase) {
+        msg.push(decipher.update(phrase, "hex", "binary"));
+      });
+
+      msg.push(decipher.final("binary"));
+      console.log(msg.join(""));
+    }
+
+Using this script to encode a message looks like this:
+
+    $ ./secretmsg.js -e --password="popcorn" "My treasure is buried behind Carl's Jr. on Telegraph."
+    6df66752b24f0886f8a6c55e56977788c2090bb657ff3bd645097f8abe11099963fb3bd9627986c60fa7e5120d8fead928cff620b37e3e79be8de519f490527a
+
+Now, if I gave somebody the same script, my encoded message and the password, they can decode the message and find out where I buried my treasure:
+
+    $ ./secretmsg.js -d --password="popcorn" 6df66752b24f0886f8a6c55e56977788c2090bb657ff3bd645097f8abe11099963fb3bd9627986c60fa7e5120d8fead928cff620b37e3e79be8de519f490527a
+    My treasure is buried behind Carl's Jr. on Telegraph.
+
+You should know that what I buried behind Carl's Jr. was just a cigarette butt, and that this script is obviously not for serious use.
+
+## Signing and Verification
+
+Crypto has other methods used for dealing with certificates and credentials, as used for TLS:
+
+* `crypto.createCredentials`
+* `crypto.createSign`
+* `crypto.createVerify`
+
+These methods supply the last building blocks for a complete cryptographic protocol, and require an advanced knowledge of real-world cryptographic protocols to be useful. Again, it is recommended that developers use either the [tls](https://nodejs.org/docs/v0.4.10/api/tls.html) module or the [https](https://nodejs.org/docs/v0.4.10/api/https.html) module if applicable.
diff --git a/locale/fa/knowledge/cryptography/how-to-use-the-tls-module.md b/locale/fa/knowledge/cryptography/how-to-use-the-tls-module.md
new file mode 100644
index 0000000000000..b3935ffb3dc07
--- /dev/null
+++ b/locale/fa/knowledge/cryptography/how-to-use-the-tls-module.md
@@ -0,0 +1,160 @@
+---
+title: How To Use The TLS Module
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - core
+  - tls
+  - ssl
+  - secure
+difficulty: 3
+layout: knowledge-post.hbs
+---
+
+## What is TLS?
+
+[Transport Layer Security](http://en.wikipedia.org/wiki/Transport_Layer_Security) (or TLS) is the successor to Secure Sockets Layer (or SSL). Together with SSL, it is one of the de-facto standard cryptographic protocols for secure communications over the web. TLS encrypts communications on top of a network transport layer (typically tcp), and uses public-key cryptography to encrypt messages.
+
+### Public-Key Cryptography
+
+In public-key cryptography, each peer has two keys: A public key, and a private key. The public key is shared with everyone, and the private key is (naturally) kept secret. In order to encrypt a message, a computer requires its private key and the recipient's public key. Then, in order to decrypt the message, the recipient requires its *own* private key and the *sender*'s public key.
+
+In TLS connections, the public key is called a *[certificate](http://en.wikipedia.org/wiki/Digital_certificate)*. This is because it's "[signed](http://en.wikipedia.org/wiki/Digital_signature)" to prove that the public key belongs to its owner. TLS certificates may either be signed by a third-party certificate authority (CA), or they may be [self-signed](http://en.wikipedia.org/wiki/Self-signed_certificate). In the case of Certificate Authorities, Mozilla keeps [a list of trusted root CAs](http://mxr.mozilla.org/mozilla/source/security/nss/lib/ckfw/builtins/certdata.txt) that are generally agreed upon by most web browsers. These root CAs may then issue certificates to other signing authorities, which in turn sign certificates for the general public.
+
+### History of TLS/SSL Support in Node.JS
+
+TLS support in node is relatively new. The first stable version of node.js to support TLS and HTTPS was the v0.4 branch, which was released in early 2011. Since then, the primary focus of the core developers has shifted from TLS/HTTPS to Windows support in the v0.5 branch. As such, the TLS APIs in node are still a little rough around the edges, and documentation leaves something to be desired.
+
+## The tls Module
+
+### tls.createServer
+
+In most ways, the tls module's server api is similar to that of the net module. Besides the fact that it's for encrypted connections, the major difference is that the options object passed to `tls.connect` or `tls.createServer` needs to include information on both the private key and the certificate, in [pem format](http://en.wikipedia.org/wiki/X.509#Certificate_filename_extensions). Here's an example of a tls server:
+
+    var tls = require('tls'),
+        fs = require('fs'),
+        colors = require('colors'),
+        msg = [
+          ".-..-..-. .-. .-. .--. .---. .-. .---. .-.",
+          ": :; :: : : :.-.: :: ,. :: .; :: : : . :: :",
+          ": :: : : :: :: :: :: :: .': : : :: :: :",
+          ": :: :: : : `' `' ;: :; :: :.`.: :__ : :; ::_;",
+          ":_;:_;:_; `.,`.,' `.__.':_;:_;:___.':___.':_;"
+        ].join("\n").cyan;
+
+    var options = {
+      key: fs.readFileSync('private-key.pem'),
+      cert: fs.readFileSync('public-cert.pem')
+    };
+
+    tls.createServer(options, function (s) {
+      s.write(msg+"\n");
+      s.pipe(s);
+    }).listen(8000);
+
+In this example, a "hello world" tls server is created, listening on port 8000. The options object includes two properties: `key` and `cert`. The contents of these properties come directly from the private key and public certificate stored on the filesystem. In this case they are binary buffers, but the tls module can also accept unicode strings.
+
+### Generating Your Private Key And Certificate With OpenSSL:
+
+In order for this example server to work, of course, you will need a private key and a certificate. You can generate both of these with OpenSSL.
+
+First, generate a private key:
+
+    $ openssl genrsa -out private-key.pem 1024
+    Generating RSA private key, 1024 bit long modulus
+    ......................................++++++
+    ........++++++
+    e is 65537 (0x10001)
+
+This creates a suitable private key and writes it to `./private-key.pem`.
+
+Next, create a Certificate Signing Request file using your private key:
+
+    $ openssl req -new -key private-key.pem -out csr.pem
+    You are about to be asked to enter information that will be incorporated
+    into your certificate request.
+    What you are about to enter is what is called a Distinguished Name or a DN.
+    There are quite a few fields but you can leave some blank
+    For some fields there will be a default value,
+    If you enter '.', the field will be left blank.
+    -----
+    Country Name (2 letter code) [AU]:US
+    State or Province Name (full name) [Some-State]:California
+    Locality Name (eg, city) []:Oakland
+    Organization Name (eg, company) [Internet Widgits Pty Ltd]:Panco, Inc.
+    Organizational Unit Name (eg, section) []:
+    Common Name (eg, YOUR name) []:Joshua Holbrook
+    Email Address []:josh.holbrook@gmail.com
+
+    Please enter the following 'extra' attributes
+    to be sent with your certificate request
+    A challenge password []:dangerface
+    An optional company name []:
+
+The purpose of this CSR is to "request" a certificate. That is, if you wanted a CA to sign your certificate, you could give this file to them to process and they would give you back a certificate.
+
+Alternately, however, you may self-sign your certificate, again using your private key:
+
+    $ openssl x509 -req -in csr.pem -signkey private-key.pem -out public-cert.pem
+    Signature ok
+    subject=/C=US/ST=California/L=Oakland/O=Panco, Inc./CN=Joshua Holbrook/emailAddress=josh.holbrook@gmail.com
+    Getting Private key
+
+This generates your certificate. Now you're cooking!
+
+### Trying it out:
+
+One way to test out your new "hello world" server is to again use OpenSSL:
+
+    $ openssl s_client -connect 127.0.0.1:8000
+
+You should see a bunch of output regarding the handshaking process, and then at the very end you should see a big, cyan figlet banner saying, "Hi world!"
+
+### tls.connect
+
+The tls module also supplies tools for connecting to such a server:
+
+    var tls = require('tls'),
+        fs = require('fs');
+
+    var options = {
+      key: fs.readFileSync('private-key.pem'),
+      cert: fs.readFileSync('public-cert.pem')
+    };
+
+    var conn = tls.connect(8000, options, function() {
+      if (conn.authorized) {
+        console.log("Connection authorized by a Certificate Authority.");
+      } else {
+        console.log("Connection not authorized: " + conn.authorizationError);
+      }
+      console.log();
+    });
+
+    conn.on("data", function (data) {
+      console.log(data.toString());
+      conn.end();
+    });
+
+The idea is similar, except that instead of creating a server, this script connects to one. `tls.connect` also takes an options object, but then returns a stream.
+
+`tls.connect` also fires a callback when the connection is made, which allows for checking to see if the connection is authorized---that is, if all the certificates are in order. `conn.authorized` is a boolean, and `conn.authorizationError` is a string containing the reason that the connection is unauthorized.
+
+This is what happens when the client is run (with the server running):
+
+    $ node client.js
+    Connection not authorized: DEPTH_ZERO_SELF_SIGNED_CERT
+
+    .-..-..-. .-. .-. .--. .---. .-. .---. .-.
+    : :; :: : : :.-.: :: ,. :: .; :: : : . :: :
+    : :: : : :: :: :: :: :: .': : : :: :: :
+    : :: :: : : `' `' ;: :; :: :.`.: :__ : :; ::_;
+    :_;:_;:_; `.,`.,' `.__.':_;:_;:___.':___.':_;
+
+Note that self-signing the server certificate results in a non-authorized status because you're not listed as a trusted certificate authority.
+
+## "starttls"
+
+It's entirely possible to "upgrade" an existing tcp connection into a TLS-encrypted one with node. However, node does not have a special function for doing so as of the v0.4 branch. Therefore, it needs to be done "by-hand", using the crypto module and some undocumented tls module functionality.
+Node's documentation points to a third-party "starttls" module, which aims to abstract the process.
diff --git a/locale/fa/knowledge/errors/what-are-the-error-conventions.md b/locale/fa/knowledge/errors/what-are-the-error-conventions.md
new file mode 100644
index 0000000000000..6dd2d64f19d84
--- /dev/null
+++ b/locale/fa/knowledge/errors/what-are-the-error-conventions.md
@@ -0,0 +1,49 @@
+---
+title: What are the error conventions?
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - errors
+  - conventions
+difficulty: 1
+layout: knowledge-post.hbs
+---
+
+In node.js, it is considered standard practice to handle errors in asynchronous functions by returning them as the first argument to the current function's callback. If there is an error, the first parameter is passed an `Error` object with all the details. Otherwise, the first parameter is null.
+
+It's simpler than it sounds; let's demonstrate.
+
+    var isTrue = function(value, callback) {
+      if (value === true) {
+        callback(null, "Value was true.");
+      }
+      else {
+        callback(new Error("Value is not true!"));
+      }
+    }
+
+    var callback = function (error, retval) {
+      if (error) {
+        console.log(error);
+        return;
+      }
+      console.log(retval);
+    }
+
+    // Note: when calling the same asynchronous function twice like this, you are in a race condition.
+    // You have no way of knowing for certain which callback will be called first when calling the functions in this manner.
+
+    isTrue(false, callback);
+    isTrue(true, callback);
+
+This is the output:
+
+    { stack: [Getter/Setter],
+      arguments: undefined,
+      type: undefined,
+      message: 'Value is not true!' }
+    Value was true.
+
+As you can see from the example, the callback is called with null as its first argument if there is no error. However, if there is an error, you create an `Error` object, which then becomes the callback's only parameter.
+
+The `callback` function shows the reason for this: it allows a user to easily know whether or not an error occurred. If `null` was not the first argument passed on success, the user would need to check the object being returned and determine themselves whether or not the object constituted an error - a much more complex and less user-friendly approach.
+
+So to wrap it all up, when using callbacks, if an error comes up, then pass it as the first argument. Otherwise, pass `null` first, and then your return arguments. On the receiving end, inside the callback function, check if the first parameter is non-null; if it is, handle it as an error.
diff --git a/locale/fa/knowledge/errors/what-is-the-error-object.md b/locale/fa/knowledge/errors/what-is-the-error-object.md
new file mode 100644
index 0000000000000..62bba857c8a1e
--- /dev/null
+++ b/locale/fa/knowledge/errors/what-is-the-error-object.md
@@ -0,0 +1,42 @@
+---
+title: What is the error object?
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - errors
+  - builtin
+difficulty: 1
+layout: knowledge-post.hbs
+---
+
+The error object is a built-in object that provides a standard set of useful information when an error occurs, such as a stack trace and the error message. For example:
+
+Code:
+
+    var error = new Error("The error message");
+    console.log(error);
+    console.log(error.stack);
+
+Result:
+
+    { stack: [Getter/Setter],
+      arguments: undefined,
+      type: undefined,
+      message: 'The error message' }
+    Error: The error message
+        at Object.<anonymous> (/home/nico/example.js:1:75)
+        at Module._compile (module.js:407:26)
+        at Object..js (module.js:413:10)
+        at Module.load (module.js:339:31)
+        at Function._load (module.js:298:12)
+        at Array.0 (module.js:426:10)
+        at EventEmitter._tickCallback (node.js:126:26)
+
+`error.stack` shows you where an error came from, as well as a list of the function calls that preceded it - for your convenience, `error.stack` always prints `error.message` as the first line of its output, making `error.stack` a convenient single property to log during debugging.
+
+If you want to add more information to the Error object, you can always add properties, just as with any other JavaScript object:
+
+    var error = new Error("The error message");
+    error.http_code = 404;
+    console.log(error);
+
+For more details on how to use the Error object, check out the [article on error conventions](/articles/errors/what-are-the-error-conventions).
diff --git a/locale/fa/knowledge/errors/what-is-try-catch.md b/locale/fa/knowledge/errors/what-is-try-catch.md
new file mode 100644
index 0000000000000..4dfedd8f16f6b
--- /dev/null
+++ b/locale/fa/knowledge/errors/what-is-try-catch.md
@@ -0,0 +1,49 @@
+---
+title: What is try-catch?
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - errors
+  - builtin
+difficulty: 1
+layout: knowledge-post.hbs
+---
+
+Example:
+
+    console.log("entering try-catch statement");
+
+    try {
+      console.log("entering try block");
+      throw "thrown message";
+      console.log("this message is never seen");
+    }
+    catch (e) {
+      console.log("entering catch block");
+      console.log(e);
+      console.log("leaving catch block");
+    }
+    finally {
+      console.log("entering and leaving the finally block");
+    }
+
+    console.log("leaving try-catch statement");
+
+Results:
+
+    entering try-catch statement
+    entering try block
+    entering catch block
+    thrown message
+    leaving catch block
+    entering and leaving the finally block
+    leaving try-catch statement
+
+JavaScript's `try-catch-finally` statement works very similarly to the `try-catch-finally` encountered in C++ and Java. First, the try block is executed until and unless the code in it throws an exception (whether from an explicit `throw` statement, an uncaught native exception, or a call to a function that throws).
+
+If the code doesn't throw an exception, then the whole try block is executed. If the code threw an exception inside the try block, then the catch block is executed. Last of all, the finally block is always executed, subsequent to the other blocks but prior to any subsequent code located outside of the `try-catch-finally` blocks. The `finally` block will just about always execute, no matter what kind of throwing, catching, or returning one might be trying to do inside the `try` or `catch` blocks.
+
+Note that you can omit the `catch` or `finally` block, but one of them must be present.
+
+## But wait, isn't it Node.js convention to not use try-catch?
+
+In the core node.js libraries, the only place that one really *needs* to use a try-catch is around `JSON.parse()`. All of the other methods use either the standard Error object through the first parameter of the callback or emit an `error` event. Because of this, it is generally considered [standard](/what-are-the-error-conventions) to return errors through the callback rather than to use the `throw` statement.
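+
+As a quick, hedged illustration of that one necessary case (the malformed JSON string is deliberate):
+
+    var config;
+
+    try {
+      config = JSON.parse('{"valid": false');  // missing the closing brace
+    } catch (err) {
+      // JSON.parse throws a SyntaxError rather than using a callback
+      console.log('Could not parse JSON:', err.message);
+    }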
diff --git a/locale/fa/knowledge/file-system/how-to-read-files-in-nodejs.md b/locale/fa/knowledge/file-system/how-to-read-files-in-nodejs.md new file mode 100644 index 0000000000000..91864caf3d988 --- /dev/null +++ b/locale/fa/knowledge/file-system/how-to-read-files-in-nodejs.md @@ -0,0 +1,53 @@
+---
+title: How do I read files in node.js?
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - filesystem
+difficulty: 2
+layout: knowledge-post.hbs
+---
+
+Reading the contents of a file into memory is a very common programming task, and, as with many other things, the Node.js core API provides methods to make this trivial. There are a variety of file system methods, all contained in the `fs` module. The easiest way to read the entire contents of a file is with `fs.readFile`, as follows:
+
+    fs = require('fs');
+    fs.readFile(file, [encoding], [callback]);
+
+    // file = (string) filepath of the file to read
+
+`encoding` is an optional parameter that specifies the character encoding to use when reading the file. Possible encodings are 'ascii', 'utf8', and 'base64'. If no encoding is provided, the default is `null`, and the raw `Buffer` is returned.
+
+`callback` is a function to call when the file has been read and the contents are ready - it is passed two arguments, `error` and `data`. If there is no error, `error` will be `null` and `data` will contain the file contents; otherwise `error` contains the `Error` object.
+
+So if we wanted to read `/etc/hosts` and print it to stdout (just like UNIX `cat`):
+
+    fs = require('fs')
+    fs.readFile('/etc/hosts', 'utf8', function (err,data) {
+      if (err) {
+        return console.log(err);
+      }
+      console.log(data);
+    });
+
+The contents of `/etc/hosts` should now be visible to you, provided you have permission to read the file in the first place.
+
+Let's now take a look at an example of what happens when you try to read an invalid file - the easiest example is one that doesn't exist.
+
+    fs = require('fs');
+    fs.readFile('/doesnt/exist', 'utf8', function (err,data) {
+      if (err) {
+        return console.log(err);
+      }
+      console.log(data);
+    });
+
+This is the output:
+
+    { stack: [Getter/Setter],
+      arguments: undefined,
+      type: undefined,
+      message: 'ENOENT, No such file or directory \'/doesnt/exist\'',
+      errno: 2,
+      code: 'ENOENT',
+      path: '/doesnt/exist' }
+
+This is a basic Node.js [Error object](/what-is-the-error-object) - it can often be useful to log `err.stack` directly, since this contains a stack trace to the location in code at which the Error object was created.
diff --git a/locale/fa/knowledge/file-system/how-to-search-files-and-directories-in-nodejs.md b/locale/fa/knowledge/file-system/how-to-search-files-and-directories-in-nodejs.md new file mode 100644 index 0000000000000..a708eb556f33a --- /dev/null +++ b/locale/fa/knowledge/file-system/how-to-search-files-and-directories-in-nodejs.md @@ -0,0 +1,42 @@
+---
+title: How do I search files and directories?
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - filesystem
+difficulty: 1
+layout: knowledge-post.hbs
+---
+
+Suppose you want to list all the files in the current directory. One approach is to use the builtin `fs.readdir` [method](/how-do-i-read-files-in-node-js). This will get you an array of all the files and directories on the specified path:
+
+    fs = require('fs');
+
+    fs.readdir(process.cwd(), function (err, files) {
+      if (err) {
+        console.log(err);
+        return;
+      }
+      console.log(files);
+    });
+
+Unfortunately, if you want to do a recursive list of files, then things get much more complicated very quickly.
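+To see why, here is a minimal hand-rolled sketch of a recursive walk (the `walk` helper is our own illustrative name, not a standard API, and the error handling is deliberately simplified) - note how you have to `stat` every entry and count when all the parallel callbacks have finished:
+
+    var fs = require('fs');
+    var path = require('path');
+
+    function walk(dir, callback) {
+      var results = [];
+      fs.readdir(dir, function (err, files) {
+        if (err) return callback(err);
+        var pending = files.length;
+        if (!pending) return callback(null, results);
+        files.forEach(function (file) {
+          file = path.join(dir, file);
+          fs.stat(file, function (err, stat) {
+            if (stat && stat.isDirectory()) {
+              // Recurse into subdirectories, merging their results.
+              walk(file, function (err, res) {
+                if (err) return callback(err);
+                results = results.concat(res);
+                if (!--pending) callback(null, results);
+              });
+            } else {
+              results.push(file);
+              if (!--pending) callback(null, results);
+            }
+          });
+        });
+      });
+    }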
To avoid all of this scary complexity, this is one of the places where a Node.js user-land library can save the day. [Node-findit](https://github.com/substack/node-findit), by SubStack, is a helper module to make searching for files easier. It has interfaces that let you work with callbacks, events, or plain old synchronous code (not a good idea most of the time).
+
+To install `node-findit`, simply use npm:
+
+    npm install findit
+
+In the same folder, create a file called `example.js`, and then add this code. Run it with `node example.js`. This example uses the `node-findit` event-based interface.
+
+    //This sets up the file finder
+    var finder = require('findit').find(__dirname);
+
+    //This listens for directories found
+    finder.on('directory', function (dir) {
+      console.log('Directory: ' + dir + '/');
+    });
+
+    //This listens for files found
+    finder.on('file', function (file) {
+      console.log('File: ' + file);
+    });
diff --git a/locale/fa/knowledge/file-system/how-to-store-local-config-data.md b/locale/fa/knowledge/file-system/how-to-store-local-config-data.md new file mode 100644 index 0000000000000..4e6aba75d9ed0 --- /dev/null +++ b/locale/fa/knowledge/file-system/how-to-store-local-config-data.md @@ -0,0 +1,85 @@
+---
+title: How to store local configuration data
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - conventions
+  - filesystem
+difficulty: 1
+layout: knowledge-post.hbs
+---
+
+
+Storing your Node.js application's configuration data is quite simple - every object in JavaScript can be easily rendered as [JSON](/what-is-json), which in turn is just string data that can be sent or saved any way you'd like. The simplest way to do this involves the built-in `JSON.parse()` and `JSON.stringify()` methods.
+
+Let's take a look at a very simple (and contrived) example. First, to save some very simple data:
+
+    var fs = require('fs');
+
+    var myOptions = {
+      name: 'Avian',
+      dessert: 'cake',
+      flavor: 'chocolate',
+      beverage: 'coffee'
+    };
+
+    var data = JSON.stringify(myOptions);
+
+    fs.writeFile('./config.json', data, function (err) {
+      if (err) {
+        console.log('There has been an error saving your configuration data.');
+        console.log(err.message);
+        return;
+      }
+      console.log('Configuration saved successfully.');
+    });
+
+It's really that simple - just `JSON.stringify()` and then save it however you'd like.
+
+Now let's load some configuration data:
+
+    var fs = require('fs');
+
+    var data = fs.readFileSync('./config.json'),
+        myObj;
+
+    try {
+      myObj = JSON.parse(data);
+      console.dir(myObj);
+    }
+    catch (err) {
+      console.log('There has been an error parsing your JSON.');
+      console.log(err);
+    }
+
+NODE PRO TIP: Even if you don't like using `try/catch`, this is a place to use it. `JSON.parse` is a very strict JSON parser, and errors are common - most importantly, though, `JSON.parse` uses the `throw` statement rather than giving a callback, so `try/catch` is the only way to guard against the error.
+
+Using the built-in `JSON` methods can take you far, but as with so many other problems you might be looking to solve with Node.js, there is already a solution in Userland that can take you much further. The solution, in this case, is `nconf`. Written by Charlie Robbins, it's a configuration manager for Node.js, supporting in-memory storage and local file storage, as well as a `redis` backend, provided in a separate module.
+
+Let's take a look now at how we'd perform some local configuration access with `nconf`.
First, you'll need to install it to your project's working directory:
+
+    npm install nconf
+
+After that, the syntax is a breeze. Have a look at an example:
+
+    var nconf = require('nconf');
+
+    nconf.use('file', { file: './config.json' });
+    nconf.load();
+    nconf.set('name', 'Avian');
+    nconf.set('dessert:name', 'Ice Cream');
+    nconf.set('dessert:flavor', 'chocolate');
+
+    console.log(nconf.get('dessert'));
+
+    nconf.save(function (err) {
+      if (err) {
+        console.error(err.message);
+        return;
+      }
+      console.log('Configuration saved successfully.');
+    });
+
+The only tricky thing to notice here is the delimiter - ':'. When accessing nested properties with `nconf`, a colon is used to delimit the namespaces of key names. If a specific sub-key is not provided, the whole object is set or returned.
+
+When using `nconf` to store your configuration data to a file, `nconf.save()` and `nconf.load()` are the only times that any actual file interaction will happen. All other access is performed on an in-memory copy of your data, which will not persist without a call to `nconf.save()`. Similarly, if you're trying to bring back configuration data from the last time your application ran, it will not exist in memory without a call to `nconf.load()`, as shown above.
diff --git a/locale/fa/knowledge/file-system/how-to-use-the-path-module.md b/locale/fa/knowledge/file-system/how-to-use-the-path-module.md new file mode 100644 index 0000000000000..584f0ff4583dd --- /dev/null +++ b/locale/fa/knowledge/file-system/how-to-use-the-path-module.md @@ -0,0 +1,68 @@
+---
+title: How to use the path module?
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - core
+  - filesystem
+difficulty: 1
+layout: knowledge-post.hbs
+---
+
+The path module contains several helper functions that make path manipulation easier.
+
+The first function worth mentioning is `path.normalize`. This function takes a path (in the form of a string) and strips it of duplicate slashes and normalizes directory abbreviations, like '.' for 'this directory' and '..' for 'one level up'. For example:
+
+    > var path = require('path');
+    > path.normalize('/a/.///b/d/../c/')
+    '/a/b/c/'
+
+A closely related function to `normalize` is `join`. This function takes a variable number of arguments, joins them together, and normalizes the path.
+
+    > var path = require('path');
+    > path.join('/a/.', './//b/', 'd/../c/')
+    '/a/b/c'
+
+A possible use of `join` is to manipulate paths when serving urls:
+
+    > var path = require('path');
+    > var url = '/index.html';
+    > path.join(process.cwd(), 'static', url);
+    '/home/nico/static/index.html'
+
+There are three functions which are used to extract the various parts of the path name: `basename`, `extname`, and `dirname`.
+- `basename` returns the last portion of the path passed in.
+- `extname` returns the extension of the last portion. Generally for directories, `extname` just returns ''.
+- Finally, `dirname` returns everything that `basename` does not return.
+For example:
+
+    > var path = require('path')
+    > var a = '/a/b/c.html'
+    > path.basename(a)
+    'c.html'
+    > path.extname(a)
+    '.html'
+    > path.dirname(a)
+    '/a/b'
+
+Note that `basename` has an optional second parameter that will strip out the extension if you pass the correct extension.
+
+    > var path = require('path')
+    > var a = '/a/b/c.html'
+    > path.basename(a, path.extname(a))
+    'c'
+
+Lastly, the `path` module provides methods to check whether or not a given path exists: `exists` and `existsSync`. They both take the path of a file for the first parameter.
+
+`exists` takes a callback as its second parameter, to which is returned a boolean representing the existence of the file.
+
+`existsSync`, on the other hand, checks the given path synchronously, returning the boolean directly. In Node.js, you will typically want to use the asynchronous functions for most file system I/O - the synchronous versions will block your entire process until they finish.
+
+Blocking isn't always a bad thing. Checking the existence of a vital configuration file synchronously makes sense, for example - it doesn't matter much if your process is blocking for something it can't run without! Conversely, though, in a busy HTTP server, any per-request file I/O **MUST** be asynchronous, or else you'll be responding to requests one by one. See the article on [asynchronous operations](/how-to-write-asynchronous-code) for more details.
+
+    > var path = require('path')
+    > path.exists('/etc', function(exists){console.log("Does the file exist?", exists)})
+    > Does the file exist? true
+
+    > path.existsSync('/etc')
+    true
+
diff --git a/locale/fa/knowledge/file-system/how-to-write-files-in-nodejs.md b/locale/fa/knowledge/file-system/how-to-write-files-in-nodejs.md new file mode 100644 index 0000000000000..b45699d4d7883 --- /dev/null +++ b/locale/fa/knowledge/file-system/how-to-write-files-in-nodejs.md @@ -0,0 +1,50 @@
+---
+title: How do I write files in node.js?
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - filesystem
+difficulty: 2
+layout: knowledge-post.hbs
+---
+
+Writing to a file is another of the basic programming tasks that one usually needs to know about - luckily, this task is very simple in Node.js. We can use the handy `writeFile` method inside the standard library's `fs` module, which can save all sorts of time and trouble.
+
+    fs = require('fs');
+    fs.writeFile(filename, data, [encoding], [callback])
+
+`filename = (string)` filepath of the file to write to
+
+`data = (string or buffer)` the data you want to write to the file
+
+`encoding = (optional string)` the encoding of the `data`. Possible encodings are 'ascii', 'utf8', and 'base64'. If no encoding is provided, then 'utf8' is assumed.
+
+`callback = (optional function (err) {})` If there is no error, `err === null`; otherwise `err` contains the `Error` object.
+
+So if we wanted to write "Hello World" to `helloworld.txt`:
+
+    fs = require('fs');
+    fs.writeFile('helloworld.txt', 'Hello World!', function (err) {
+      if (err) return console.log(err);
+      console.log('Hello World > helloworld.txt');
+    });
+
+    [contents of helloworld.txt]:
+    Hello World!
+
+If we purposely want to cause an error, we can try to write to a file that we don't have permission to access (note that, unlike `readFile`, the `writeFile` callback receives only an error argument - there is no data to hand back):
+
+    fs = require('fs')
+    fs.writeFile('/etc/doesntexist', 'abc', function (err) {
+      if (err) {
+        return console.log(err);
+      }
+      console.log('This write should not have succeeded.');
+    });
+
+    { stack: [Getter/Setter],
+      arguments: undefined,
+      type: undefined,
+      message: 'EACCES, Permission denied \'/etc/doesntexist\'',
+      errno: 13,
+      code: 'EACCES',
+      path: '/etc/doesntexist' }
diff --git a/locale/fa/knowledge/file-system/security/introduction.md b/locale/fa/knowledge/file-system/security/introduction.md new file mode 100644 index 0000000000000..8803a3e8964b4 --- /dev/null +++ b/locale/fa/knowledge/file-system/security/introduction.md @@ -0,0 +1,50 @@
+---
+title: How can I secure my code?
+date: null
+tags:
+  - filesystem
+  - security
+difficulty: 3
+layout: knowledge-post.hbs
+---
+
+Sometimes, you might want to let users read or write files on your server.
For example, maybe you want to write forum software without using an actual database. The problem is that you do not want your users to be able to modify or read arbitrary files on your server, and there are sometimes ways to get around restrictions that should prevent it. Read on to see how you can secure your code against evil attackers trying to mess with your files.
+
+Poison Null Bytes
+=================
+Poison null bytes are a way to trick your code into seeing a different filename from the one that will actually be opened. This can in many cases be used to circumvent directory traversal protections, to trick servers into delivering files with the wrong file types, and to circumvent restrictions on the file names that may be used. [A more detailed description is here.](http://groups.google.com/group/nodejs/browse_thread/thread/51f66075e249d767/85f647474b564fde) Always use code like this when accessing files with user-supplied names:
+
+    if (filename.indexOf('\0') !== -1) {
+      return respond('That was evil.');
+    }
+
+Whitelisting
+============
+You won't always be able to use whitelisting, but if you are, do it - it's very easy to implement and hard to get wrong. For example, if you know that all filenames are lowercase alphanumeric strings:
+
+    if (!/^[a-z0-9]+$/.test(filename)) {
+      return respond('illegal character');
+    }
+
+However, note that whitelisting alone isn't sufficient anymore as soon as you allow dots and slashes - people could enter things like `../../etc/passwd` in order to get files from outside the allowed folder.
+
+Preventing Directory Traversal
+==============================
+Directory traversal means that an attacker tries to access files outside of the folder you want to allow them to access. You can prevent this by using Node's built-in `path` module. **Do not reimplement the things the path module already does yourself** - for example, when someone runs your code on a Windows server, failing to handle backslashes like slashes will allow attackers to do directory traversal.
+
+This example assumes that you already checked the `userSuppliedFilename` variable as described in the "Poison Null Bytes" section above.
+
+    var rootDirectory = '/var/www/';
+
+Make sure that you have a slash at the end of the allowed folder's name - you don't want people to be able to access `/var/www-secret/`, do you?
+
+    var path = require('path');
+    var filename = path.join(rootDirectory, userSuppliedFilename);
+
+Now `filename` contains an absolute path and doesn't contain `..` sequences anymore - `path.join` takes care of that. However, it might be something like `/etc/passwd` now, so you have to check whether it starts with the `rootDirectory`:
+
+    if (filename.indexOf(rootDirectory) !== 0) {
+      return respond('trying to sneak out of the web root?');
+    }
+
+Now the `filename` variable should contain the name of a file or directory that's inside the allowed directory (unless it doesn't exist).
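+Putting it all together, here is a minimal sketch of the whole check as a single helper (the `safeFilename` name is ours, purely for illustration, and `rootDirectory` is assumed as above):
+
+    var path = require('path');
+    var rootDirectory = '/var/www/';
+
+    function safeFilename(userSuppliedFilename) {
+      // 1. Reject poison null bytes.
+      if (userSuppliedFilename.indexOf('\0') !== -1) {
+        return null;
+      }
+      // 2. Resolve the path relative to the allowed root.
+      var filename = path.join(rootDirectory, userSuppliedFilename);
+      // 3. Make sure the result is still inside the root.
+      if (filename.indexOf(rootDirectory) !== 0) {
+        return null;
+      }
+      return filename;
+    }
+
+Anything that makes it past a check like this can then be handed to the `fs` methods with much more confidence.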
diff --git a/locale/fa/knowledge/getting-started/control-flow/how-to-write-asynchronous-code.md b/locale/fa/knowledge/getting-started/control-flow/how-to-write-asynchronous-code.md new file mode 100644 index 0000000000000..0f24e1baf418e --- /dev/null +++ b/locale/fa/knowledge/getting-started/control-flow/how-to-write-asynchronous-code.md @@ -0,0 +1,80 @@
+---
+title: How to write asynchronous code
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - asynchronous
+  - callbacks
+  - event-emitters
+difficulty: 1
+layout: knowledge-post.hbs
+---
+
+Node.js promotes an asynchronous coding style from the ground up, in contrast to many of the most popular web frameworks. There are a number of important things to be aware of when learning to write asynchronous code - otherwise, you will often find your code executing in extremely unexpected ways. Take this (general) rule to heart:
+
+### Use the asynchronous functions, avoid the synchronous ones!
+
+Many of the functions in Node.js core have both synchronous and asynchronous versions. Under most circumstances, it will be far better for you to use the asynchronous functions - otherwise, why are you using Node.js?
+
+As a quick example comparing and contrasting the two, using `fs.readFile`:
+
+    var fs = require('fs');
+
+    fs.readFile('example.file', 'utf8', function (err, data) {
+      if (err) {
+        return console.log(err);
+      }
+      console.log(data);
+    });
+
+    //====================
+
+    var data = fs.readFileSync('example.file','utf8');
+    console.log(data);
+
+Just looking at these two blocks of code, the synchronous version appears to be more concise. However, the asynchronous version is more complicated for a very good reason. In the synchronous version, the world is paused until the file is finished reading - your process will just sit there, waiting for the OS (which handles all file system tasks).
+
+The asynchronous version, on the other hand, does not stop time - instead, the callback function gets called when the file is finished reading. This leaves your process free to execute other code in the meantime.
+
+When only reading a file or two, or saving something quickly, the difference between synchronous and asynchronous file I/O can be quite small. On the other hand, though, when you have multiple requests coming in per second that require file or database IO, trying to do that IO synchronously would be disastrous for performance.
+
+
+### Callbacks
+Callbacks are a basic idiom in node.js for asynchronous operations. When most people talk about callbacks, they mean the function that is passed as the last parameter to an asynchronous function. The callback is then later called with any return value or error message that the function produced. For more details, see the article on [callbacks](/articles/getting-started/control-flow/what-are-callbacks).
+
+### Event Emitters
+Event Emitters are another basic idiom in node.js. A constructor is provided in Node.js core: `require('events').EventEmitter`. An Event Emitter is typically used when there will be multiple parts to the response (since usually you only want to call a callback once).
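+A minimal sketch of the pattern (the 'data' and 'end' event names here are just illustrative, not anything special):
+
+    var EventEmitter = require('events').EventEmitter;
+    var emitter = new EventEmitter();
+
+    // Each emitted 'data' event represents one part of a multi-part response.
+    emitter.on('data', function (chunk) {
+      console.log('got a piece of the response: ' + chunk);
+    });
+    emitter.on('end', function () {
+      console.log('no more pieces coming');
+    });
+
+    emitter.emit('data', 'first part');
+    emitter.emit('data', 'second part');
+    emitter.emit('end');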
For more details, see the article on [EventEmitters](/articles/getting-started/control-flow/what-are-event-emitters).
+
+### A gotcha with asynchronous code
+A common mistake in asynchronous code with JavaScript is to write code that does something like this:
+
+    for (var i = 0; i < 5; i++) {
+      setTimeout(function () {
+        console.log(i);
+      }, i);
+    }
+
+The unexpected output is then:
+
+    5
+    5
+    5
+    5
+    5
+
+The reason this happens is that each timeout is created and then `i` is incremented. Then, when the callback is called, it looks up the value of `i`, and it is 5. The solution is to create a closure so that the current value of `i` is stored. For example:
+
+    for (var i = 0; i < 5; i++) {
+      (function(i) {
+        setTimeout(function () {
+          console.log(i);
+        }, i);
+      })(i);
+    }
+
+This gives the proper output:
+
+    0
+    1
+    2
+    3
+    4
diff --git a/locale/fa/knowledge/getting-started/control-flow/what-are-callbacks.md b/locale/fa/knowledge/getting-started/control-flow/what-are-callbacks.md new file mode 100644 index 0000000000000..18515791498ea --- /dev/null +++ b/locale/fa/knowledge/getting-started/control-flow/what-are-callbacks.md @@ -0,0 +1,54 @@
+---
+title: What are callbacks?
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - javascript
+  - core
+  - asynchronous
+  - callbacks
+difficulty: 1
+layout: knowledge-post.hbs
+---
+
+
+In a synchronous program, you would write something along the lines of:
+
+    function processData () {
+      var data = fetchData();
+      data += 1;
+      return data;
+    }
+
+This works just fine and is very typical in other development environments. However, if `fetchData` takes a long time to load the data (maybe it is streaming it off the drive or the internet), then this causes the whole program to 'block' - otherwise known as sitting still and waiting - until it loads the data. Node.js, being an asynchronous platform, doesn't wait around for things like file I/O to finish - Node.js uses callbacks. A callback is a function called at the completion of a given task; this prevents any blocking, and allows other code to be run in the meantime.
+
+The node.js way to deal with the above would look a bit more like this (note that, following the error-first convention described below, the callback receives `null` as its first argument on success):
+
+    function processData (callback) {
+      fetchData(function (err, data) {
+        if (err) {
+          console.log("An error has occurred. Abort everything!");
+          return callback(err);
+        }
+        data += 1;
+        callback(null, data);
+      });
+    }
+
+At first glance, it may look unnecessarily complicated, but callbacks are the foundation of Node.js. Callbacks give you an interface with which to say, "and when you're done doing that, do all this." This allows you to have as many IO operations as your OS can handle happening at the same time. For example, in a web server with hundreds or thousands of pending requests with multiple blocking queries, performing the blocking queries asynchronously gives you the ability to continue working and not just sit still and wait until the blocking operations come back. This is a major improvement.
+
+The typical convention with asynchronous functions (which almost all of your functions should be):
+
+    function asyncOperation ( a, b, c, callback ) {
+      // ... lots of hard work ...
+      if ( /* an error occurs */ ) {
+        return callback(new Error("An error has occurred"));
+      }
+      // ... more work ...
+      callback(null, d, e, f);
+    }
+
+    asyncOperation ( params.., function ( err, returnValues..
) {
+      //This code gets run after the async operation gets run
+    });
+
+You will almost always want to follow the [error callback convention](/articles/errors/what-are-the-error-conventions), since most Node.js users will expect your project to follow it. The general idea is that the callback is the last parameter. The callback gets called after the function is done with all of its operations. Traditionally, the first parameter of the callback is the `error` value. If the function hits an error, it typically calls the callback with the first parameter being an Error object. If it cleanly exits, it will call the callback with the first parameter being null and the rest being the return value(s).
diff --git a/locale/fa/knowledge/getting-started/control-flow/what-are-event-emitters.md b/locale/fa/knowledge/getting-started/control-flow/what-are-event-emitters.md new file mode 100644 index 0000000000000..474297e34af2f --- /dev/null +++ b/locale/fa/knowledge/getting-started/control-flow/what-are-event-emitters.md @@ -0,0 +1,103 @@
+---
+title: What are Event Emitters?
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - core
+  - asynchronous
+  - event-emitters
+difficulty: 2
+layout: knowledge-post.hbs
+---
+
+
+In node.js an event can be described simply as a string with a corresponding callback. An event can be "emitted" (or, in other words, have its corresponding callback called) multiple times, or you can choose to only listen for the first time it is emitted. Here is a simple example run on the node [REPL](/articles/REPL/how-to-use-nodejs-repl):
+
+    var example_emitter = new (require('events').EventEmitter);
+    example_emitter.on("test", function () { console.log("test"); });
+    example_emitter.on("print", function (message) { console.log(message); });
+    example_emitter.emit("test");
+    example_emitter.emit("print", "message");
+    example_emitter.emit("unhandled");
+
+    > var example_emitter = new (require('events').EventEmitter);
+    {}
+    > example_emitter.on("test", function () { console.log("test"); });
+    { _events: { test: [Function] } }
+    > example_emitter.on("print", function (message) { console.log(message); });
+    { _events: { test: [Function], print: [Function] } }
+    > example_emitter.emit("test");
+    test //console.log'd
+    true //return value
+    > example_emitter.emit("print", "message");
+    message //console.log'd
+    true //return value
+    > example_emitter.emit("unhandled");
+    false //return value
+
+This demonstrates all the basic functionality of an EventEmitter. The `on` or `addListener` method (basically the subscription method) allows you to choose the event to watch for and the callback to be called. The `emit` method (the publish method), on the other hand, allows you to "emit" an event, which causes all callbacks registered to the event to 'fire' (get called).
+
+So in the example, we first subscribe to both the `test` and `print` events. Then we emit the `test`, `print`, and `unhandled` events. Since `unhandled` has no callback, it just returns false; the other two run all the attached callbacks and return true.
+
+In the `print` event, note that we pass an extra parameter - all the extra parameters passed to 'emit' get passed to the callback function as arguments.
+
+If you use the method `once` instead of `on`, after the callback is fired, it is removed from the list of callbacks. A handy little function if you want to detect only the first time an event has been emitted.
+
+If you want to remove a specific callback, you can use `removeListener`.
If you want to remove all callbacks to a specific event, you can use `removeAllListeners`.
+
+    var EventEmitter = require('events').EventEmitter,
+        ee = new EventEmitter();
+
+    function callback() {
+      console.log("Callback has been called!");
+    }
+
+    ee.once("event", callback);
+    ee.emit("event");
+    ee.emit("event");
+
+    ee.on("event", callback);
+    ee.emit("event");
+    ee.emit("event");
+    ee.removeListener("event", callback);
+    ee.emit("event");
+
+    ee.on("event", callback);
+    ee.emit("event");
+    ee.removeAllListeners("event");
+    ee.emit("event");
+
+    > var ee = new (require('events').EventEmitter);
+    > var callback = function () { console.log("Callbacked!"); }
+    > ee.once("event", callback);
+    { _events: { event: { [Function: g] listener: [Function] } } }
+    > ee.emit("event");
+    Callbacked! //console.log'd
+    true
+    > ee.emit("event");
+    false
+
+    > ee.on("event", callback);
+    { _events: { event: [Function] } }
+    > ee.emit("event");
+    Callbacked! //console.log'd
+    true
+    > ee.emit("event");
+    Callbacked! //console.log'd
+    true
+    > ee.removeListener("event", callback);
+    { _events: {} }
+    > ee.emit("event");
+    false
+
+    > ee.on("event", callback);
+    { _events: { event: [Function] } }
+    > ee.emit("event");
+    Callbacked! //console.log'd
+    true
+    > ee.removeAllListeners("event");
+    { _events: { event: null } }
+    > ee.emit("event");
+    false
+
+NOTE: If you want to create more than 10 listeners on a single event, you will have to make a call to `ee.setMaxListeners(n)`, where `n` is the maximum number of listeners (with zero meaning an unlimited number of listeners). This is used to make sure you aren't accidentally leaking event listeners.
diff --git a/locale/fa/knowledge/getting-started/globals-in-node-js.md b/locale/fa/knowledge/getting-started/globals-in-node-js.md new file mode 100644 index 0000000000000..8a2f2d83ede17 --- /dev/null +++ b/locale/fa/knowledge/getting-started/globals-in-node-js.md @@ -0,0 +1,27 @@
+---
+title: The built-in globals in Node.js
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - core
+  - globals
+difficulty: 1
+layout: knowledge-post.hbs
+---
+
+Node.js has a number of built-in global identifiers that every Node.js developer should have some familiarity with. Some of these are true globals, being visible everywhere; others exist at the module level, but are inherent to every module, thus being pseudo-globals.
+
+First, let's go through the list of 'true globals':
+
+- `global` - The global namespace. Setting a property to this namespace makes it globally visible within the running process.
+- `process` - Node's built-in `process` module, which provides interaction with the current Node process. [Read More](/articles/getting-started/the-process-module)
+- `console` - Node's built-in `console` module, which wraps various STDIO functionality in a browser-like way. [Read More](/articles/getting-started/the-console-module)
+- `setTimeout()`, `clearTimeout()`, `setInterval()`, `clearInterval()` - The built-in timer functions are globals. [Read More](/articles/javascript-conventions/what-are-the-built-in-timer-functions)
+
+As mentioned above, there are also a number of 'pseudo-globals' included at the module level in every module:
+
+- `module`, `module.exports`, `exports` - These objects all pertain to Node's module system. [Read More](/articles/getting-started/what-is-require)
+- `__filename` - The `__filename` keyword contains the path of the currently executing file. Note that this is not defined while running the [Node REPL](/articles/REPL/how-to-use-nodejs-repl).
+- `__dirname` - Like `__filename`, the `__dirname` keyword contains the directory path of the currently executing file. Also not present in the Node REPL.
+- `require()` - The `require()` function is a built-in function, exposed per-module, that allows other valid modules to be included. [Read More](/articles/getting-started/what-is-require)
+
+Much of this functionality can be extremely useful for a Node.js developer's daily life - but at the very least, remember these as bad names to use for your own functions!
diff --git a/locale/fa/knowledge/getting-started/how-to-debug-nodejs-applications.md b/locale/fa/knowledge/getting-started/how-to-debug-nodejs-applications.md new file mode 100644 index 0000000000000..f20cdc8d67c4f --- /dev/null +++ b/locale/fa/knowledge/getting-started/how-to-debug-nodejs-applications.md @@ -0,0 +1,93 @@
+---
+title: How to debug a node application
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - debug
+difficulty: 1
+layout: knowledge-post.hbs
+---
+
+Oftentimes, not just in the Node.js community but in software at large, people debug simply with a liberal sprinkle of standard output statements. This allows you to track down where unexpected values are being generated. However, this method can be tedious, or worse yet, not robust enough to detect the real problem.
+
+
+### Set up
+
+Thankfully, through the use of `node-inspector`, we can harness the power of the WebKit debugger to work with our node.js code. The process itself is simple.
+
+First, ensure that node-inspector is installed:
+
+    npm install node-inspector -g
+
+A good example application to experiment with is a basic 'hello world' server with a counter (copied from the `node-inspector` repo):
+
+    var http = require('http');
+
+    var x = 0;
+    http.createServer(function (req, res) {
+      x += 1;
+      res.writeHead(200, {'Content-Type': 'text/plain'});
+      res.end('Hello World ' + x);
+    }).listen(8124);
+    console.log('Server running at http://127.0.0.1:8124/');
+
+Next, start your node program with debugging enabled.
+
+    node --debug app.js
+
+which should print something along the lines of `debugger listening on port 5858` to stderr. Take note of the port number; it is the port that the debugger is running on.
+
+Then, start up `node-inspector`. If your program uses port 8080, you may have to pass it a custom port.
+
+    node-inspector [--web-port=]
+
+Finally, fire up a WebKit browser such as Chrome or Safari, and go to `127.0.0.1:8080/debug?port=5858`. Note that if the debugger is listening on a port other than `5858`, you will need to change it. Also, if you passed a custom web port to node-inspector, then you will have to modify the `8080`.
+
+At this point, you will be met with a fairly empty screen with the `scripts`, `profiles`, and `console` tabs.
+
+### Scripts tab
+
+This is just like most WebKit/Firebug debuggers. It has a list of all the javascript files (including node.js core and third party libraries) which you can select and dive into. To stop the interpreter on a specific line, you set a breakpoint by clicking on the number of the desired line. When the execution is frozen, by a breakpoint or by manually pausing interpretation by pressing the pause button, you can check the callstack and examine all the local, closure, and global variables. You can also modify the code to try and fix behavior. Note that when you modify the code through the script tab, it does not get saved to the file, so you will need to transfer the modifications back by hand.
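+A related trick worth knowing - this is a standard V8 feature rather than anything specific to node-inspector - is that you can also set a breakpoint directly in your source with a `debugger;` statement, which pauses execution whenever a debugger is attached. A minimal sketch, reusing the server from above:
+
+    var http = require('http');
+
+    var x = 0;
+    http.createServer(function (req, res) {
+      x += 1;
+      debugger; // execution pauses here whenever a debugger is attached
+      res.writeHead(200, {'Content-Type': 'text/plain'});
+      res.end('Hello World ' + x);
+    }).listen(8124);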
+
+### Profiles tab
+
+To use the profile tab, you need a library called `v8-profiler`:
+
+    npm install v8-profiler
+
+Next, you have to require it inside the file you are debugging:
+
+    var profiler = require('v8-profiler');
+
+Now you can finally use the `profiles` tab. Unfortunately, all you can do directly from this screen is take a heap snapshot. From the code, you select where you want to start the CPU profiler, and you can choose more precise locations for heap snapshots.
+
+To take a heap snapshot, just insert this line in the desired location and optionally pass it a name.
+
+    var snapshot = profiler.takeSnapshot(name);
+
+To take a CPU profile, just surround the code that you are profiling with the two lines shown below. Optionally, a name can be included to identify the CPU profile.
+
+    profiler.startProfiling(name);
+    //..lots and lots of methods and code called..//
+    var cpuProfile = profiler.stopProfiling([name]);
+
+As an example of how to use these, here is the code given earlier, modified to take a CPU profile on every request and a heap snapshot after the server is created:
+
+    var http = require('http');
+    var profiler = require('v8-profiler');
+
+    var x = 0;
+    http.createServer(function (req, res) {
+      x += 1;
+      profiler.startProfiling('request '+x);
+      res.writeHead(200, {'Content-Type': 'text/plain'});
+      res.end('Hello World ' + x);
+      profiler.stopProfiling('request '+x);
+    }).listen(8124);
+    profiler.takeSnapshot('Post-Server Snapshot');
+    console.log('Server running at http://127.0.0.1:8124/');
+
+Note that despite these APIs returning objects, it is much easier to sort through the data through the node-inspector interface. Hopefully, with these tools, you can make more informed decisions about memory leaks and bottlenecks.
+
+### Console tab
+
+Finally, the console tab allows you to use node's REPL in your program's global scope. This has a few gotchas, since it means you cannot access local variables. Thus, the variables you can read or write are variables that were defined without a `var` statement. The other gotcha is that `console.log` here refers to node's `console.log` and not WebKit's. This means the output goes to stdout and not to your console tab. Otherwise, it is a very straightforward node REPL.
diff --git a/locale/fa/knowledge/getting-started/how-to-use-util-inspect.md b/locale/fa/knowledge/getting-started/how-to-use-util-inspect.md new file mode 100644 index 0000000000000..9f9844ddebd74 --- /dev/null +++ b/locale/fa/knowledge/getting-started/how-to-use-util-inspect.md @@ -0,0 +1,49 @@
+---
+title: How to use util.inspect
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - core
+  - debug
+difficulty: 1
+layout: knowledge-post.hbs
+---
+
+Node provides a utility function, for debugging purposes, that returns a string representation of an object. `util.inspect()` can be a true lifesaver while working with properties of large, complex objects.
+
+Let's provide a basic example. `util.inspect()` can be used on any object - a good demonstration will be one of Node's built-in objects. Try this in the REPL (type `node` at your command line with no arguments):
+
+    var util = require('util');
+    util.inspect(console);
+
+The output will be:
+
+    '{ log: [Function], info: [Function], warn: [Function], error: [Function], dir: [Function], time: [Function], timeEnd: [Function], trace: [Function], assert: [Function] }'
+
+This is a listing of all the enumerable properties of the `console` object.
It is also worth noting that `console.dir` is a wrapper around `util.inspect` that uses its default arguments.
+
+In the REPL, `util.inspect` will immediately display its return value - this is not usually the case. In the context of normal Node.js code in a file, something must be done with the output. The simplest thing to do:
+
+    console.log(util.inspect(myObj));
+
+`util.inspect` can also be passed several optional arguments, shown here with their defaults:
+
+    util.inspect(object, showHidden=false, depth=2, colorize=false);
+
+For example, `util.inspect(myObj, true, 7, true)` would inspect `myObj`, showing all the hidden and non-hidden properties up to a depth of `7` and colorizing the output. Let's go over the arguments individually.
+
+The `depth` argument is the number of levels deep into a nested object to recurse - it defaults to 2. Setting it to `null` will cause it to recurse 'all the way', showing every level. Compare the size of the outputs of these two `util.inspect` statements in the REPL:
+
+    var http = require('http');
+    util.inspect(http, true, 1);
+    util.inspect(http, true, 3);
+
+The optional argument `showHidden` is a boolean that determines whether or not the 'non-enumerable' properties of an object will be displayed - it defaults to `false`, which tends to result in vastly more readable output. This isn't something a beginner needs to worry about most of the time, but it's worth demonstrating briefly. Once more, try the following in the REPL:
+
+    var util = require('util');
+    util.inspect(console, true);
+
+Finally, the optional argument `colorize` is a boolean that adds ANSI escape codes to the string output. When logged to a terminal window, it should be pretty printed with colors.
+
+    var util = require('util');
+    console.log(util.inspect({a:1, b:"b"}, false, 2, true));
+
diff --git a/locale/fa/knowledge/getting-started/npm/how-to-access-module-package-info.md b/locale/fa/knowledge/getting-started/npm/how-to-access-module-package-info.md new file mode 100644 index 0000000000000..7c4b67ae7cf4a --- /dev/null +++ b/locale/fa/knowledge/getting-started/npm/how-to-access-module-package-info.md @@ -0,0 +1,32 @@
+---
+title: How to access module package info
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - core
+  - npm
+difficulty: 1
+layout: knowledge-post.hbs
+---
+
+
+There are many situations in the world of software development where using the wrong version of a dependency or submodule can cause all sorts of pain and anguish - luckily for you, node.js has a module available called pkginfo that can help keep these sorts of troubles at bay.
+
+Let's take a look at pkginfo - first, install via npm:
+
+    npm install pkginfo
+
+Now all we need to do is require it, and invoke it.
+
+    var pkginfo = require('pkginfo')(module);
+
+    console.dir(module.exports);
+
+That would show us the entire contents of the package.json, neatly displayed to our console. If we only wanted certain pieces of information, we just specify them like so:
+
+    var pkginfo = require('pkginfo')(module, 'version', 'author');
+
+    console.dir(module.exports);
+
+And only the fields we specify will be shown to us.
+
+For more information, see http://github.com/indexzero/ .
diff --git a/locale/fa/knowledge/getting-started/npm/what-is-npm.md b/locale/fa/knowledge/getting-started/npm/what-is-npm.md new file mode 100644 index 0000000000000..32312603dc4ee --- /dev/null +++ b/locale/fa/knowledge/getting-started/npm/what-is-npm.md @@ -0,0 +1,26 @@
+---
+title: What is npm?
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - npm
+difficulty: 1
+layout: knowledge-post.hbs
+---
+
+`npm`, short for Node Package Manager, is two things: first and foremost, it is an online repository for the publishing of open-source Node.js projects; second, it is a command-line utility for interacting with said repository that aids in package installation, version management, and dependency management. A plethora of node.js libraries and applications are published on npm, and many more are added every day. These applications can be searched for on http://search.npmjs.org/. Once you have a package you want to install, it can be installed with a single command.
+
+
+Let's say you're hard at work one day, developing the Next Great Application. You come across a problem, and you decide that it's time to use that cool library you keep hearing about - let's use Caolan McMahon's [async](http://github.com/caolan/async) as an example. Thankfully, `npm` is very simple to use: you only have to run `npm install async`, and the specified module will be installed in the current directory under `./node_modules/`. Once installed to your `node_modules` folder, you'll be able to use `require()` on it just as if it were a built-in.
+
+Let's look at an example of a global install - let's say `coffee-script`. The npm command is simple: `npm install coffee-script -g`. This will typically install the program and put a symlink to it in `/usr/local/bin/`. This will then allow you to run the program from the console just like any other CLI tool. In this case, running `coffee` will now allow you to use the coffee-script REPL.
+
+Another important use for npm is dependency management. When you have a node project with a [package.json](/articles/getting-started/npm/what-is-the-file-package-json) file, you can run `npm install` from the project root and npm will install all the dependencies listed in the package.json. This makes installing a Node project from a git repo much easier! For example, `vows`, one of Node's testing frameworks, can be installed from git, and its single dependency, `eyes`, can be automatically handled:
+
+Example:
+
+    git clone https://github.com/cloudhead/vows.git
+    cd vows
+    npm install
+
+After running those commands, you will see a `node_modules` folder containing all of the project dependencies specified in the package.json.
+
diff --git a/locale/fa/knowledge/getting-started/npm/what-is-the-file-package-json.md b/locale/fa/knowledge/getting-started/npm/what-is-the-file-package-json.md new file mode 100644 index 0000000000000..9cd4ce1c0fe03 --- /dev/null +++ b/locale/fa/knowledge/getting-started/npm/what-is-the-file-package-json.md @@ -0,0 +1,46 @@
+---
+title: What is the file `package.json`?
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - npm
+  - conventions
+  - core
+difficulty: 2
+layout: knowledge-post.hbs
+---
+
+All npm packages contain a file, usually in the project root, called `package.json` - this file holds various metadata relevant to the project. This file is used to give information to `npm` that allows it to identify the project as well as handle the project's dependencies. It can also contain other metadata such as a project description, the version of the project in a particular distribution, license information, even configuration data - all of which can be vital to both `npm` and to the end users of the package. The `package.json` file is normally located at the root directory of a Node.js project.
+
+Node itself is only aware of two fields in the `package.json`:
+
+    {
+      "name" : "barebones",
+      "version" : "0.0.0"
+    }
+
+The `name` field should explain itself: this is the name of your project. The `version` field is used by npm to make sure the right version of the package is being installed. Generally, it takes the form of `major.minor.patch` where `major`, `minor`, and `patch` are integers which increase after each new release. For more details, look at this spec: http://semver.org .
+
+For a more complete package.json, we can check out `underscore`:
+
+    {
+      "name" : "underscore",
+      "description" : "JavaScript's functional programming helper library.",
+      "homepage" : "http://documentcloud.github.com/underscore/",
+      "keywords" : ["util", "functional", "server", "client", "browser"],
+      "author" : "Jeremy Ashkenas ",
+      "contributors" : [],
+      "dependencies" : [],
+      "repository" : {"type": "git", "url": "git://github.com/documentcloud/underscore.git"},
+      "main" : "underscore.js",
+      "version" : "1.1.6"
+    }
+
+As you can see, there are fields for the `description` and `keywords` of your project. This allows people who find your project to understand what it is in just a few words. The `author`, `contributors`, `homepage` and `repository` fields can all be used to credit the people who contributed to the project, show how to contact the author/maintainer, and give links for additional references.
+
+The file listed in the `main` field is the main entry point for the library; when someone calls `require()` on your package, that call resolves to the file listed here.
+
+Finally, the `dependencies` field is used to list all the dependencies of your project that are available on `npm`. When someone installs your project through `npm`, all the dependencies listed will be installed as well. Additionally, if someone runs `npm install` in the root directory of your project, it will install all the dependencies to `./node_modules`.
+
+It is also possible to add a `devDependencies` field to your `package.json` - these are dependencies not required for normal operation, but required/recommended if you want to patch or modify the project. If you built your unit tests using a testing framework, for example, it would be appropriate to put the testing framework you used in your `devDependencies` field. To install a project's `devDependencies`, simply pass the `--dev` option when you use `npm install`.
+
+For even more options, you can look through the [online docs](https://github.com/npm/npm/blob/master/doc/files/package.json.md) or run `npm help json`.
diff --git a/locale/fa/knowledge/getting-started/the-console-module.md b/locale/fa/knowledge/getting-started/the-console-module.md new file mode 100644 index 0000000000000..a95a62b07e477 --- /dev/null +++ b/locale/fa/knowledge/getting-started/the-console-module.md @@ -0,0 +1,53 @@
+---
+title: The built-in console module
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - core
+  - cli
+  - globals
+difficulty: 1
+layout: knowledge-post.hbs
+---
+
+
+Anyone familiar with browser-side development has probably used `console.log` for debugging purposes - Node.js has implemented a built-in `console` object to mimic much of this experience. Since we're working server-side, however, it wraps `stdout`, `stdin`, and `stderr` instead of the browser's debugging console.
+
+Because of this browser parallel, the `console` module has become home to quite a bit of Node's standard output functionality. The simplest is `console.log()`.
+
+    console.log('Hi, everybody!');
+    console.log('This script is:', __filename);
+    console.log(__filename, process.title, process.argv);
+
+The first, simplest example just prints the provided string to `stdout`. It can also be used to output the contents of variables, as in the second example; furthermore, `console.dir()` is called on any objects passed in as arguments, enumerating their properties.
+
+NODE.JS PRO TIP:
+`console.log()` accepts three format characters, `%s`, `%d`, and `%j`. These format characters can be used to insert string, integer, or JSON data into your output - the order of format characters must match the order of arguments.
+
+    var name = 'Harry',
+        number = 17,
+        myObj = {
+          propOne: 'stuff',
+          propTwo: 'more stuff'
+        };
+    console.log('My name is %s, my number is %d, my object is %j', name, number, myObj);
+
+A gotcha with `console.log`, and all functions that depend on it, is that it buffers the output. So if your process ends suddenly, whether it be from an exception or from `process.exit()`, it is entirely possible that the buffered output will never reach the screen. This can cause a great deal of frustration, so watch out for this unfortunate situation.
+
+`console.error()` works the same as `console.log`, except that the output is sent to `stderr` instead of `stdout`. This is actually an extremely important difference, as `stderr` is always written to synchronously. Any use of `console.error`, or any of the other functions in Node.js core that write to `stderr`, will block your process until the output has all been written. This is useful for error messages - you get them exactly when they occur - but if used everywhere, can greatly slow down your process.
+
+`console.dir()`, as mentioned above, is an alias for `util.inspect()` - it is used to enumerate object properties. [Read More](/articles/getting-started/how-to-use-util-inspect)
+
+That covers the basic `console` module functionality, but there are a few other methods worth mentioning as well. First, the `console` module allows for the marking of time via `console.time()` and `console.timeEnd()`. Here is an example:
+
+    console.time('myTimer');
+    var string = '';
+    for (var i = 0; i < 300; i++) {
+      (function (i) {
+        string += 'aaaa' + i.toString();
+      })(i);
+    }
+    console.timeEnd('myTimer');
+
+This would determine the amount of time taken to perform the actions in between the `console.time` and `console.timeEnd` calls.
+
+One last function worth mentioning is `console.trace()`, which prints a stack trace to its location in your code without throwing an error. This can occasionally be useful if you'd like to figure out where a particular failing function was called from.
diff --git a/locale/fa/knowledge/getting-started/the-process-module.md b/locale/fa/knowledge/getting-started/the-process-module.md new file mode 100644 index 0000000000000..90158e568a80b --- /dev/null +++ b/locale/fa/knowledge/getting-started/the-process-module.md @@ -0,0 +1,104 @@
+---
+title: How to use the global process module
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - core
+  - globals
+difficulty: 2
+layout: knowledge-post.hbs
+---
+
+
+Each Node.js process has a set of built-in functionality, accessible through the global `process` module. The `process` module doesn't need to be required - it is somewhat literally a wrapper around the currently executing process, and many of the methods it exposes are actually wrappers around calls into some of Node's core C libraries.
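+As a trivial illustration of that point (no assumptions here beyond a running Node process):
+
+    // No require needed; `process` is a true global.
+    console.log(process.pid, process.version);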
+
+## Events
+
+There are two built-in events worth noting in the `process` module, `exit` and `uncaughtException`.
+
+The `exit` event fires whenever the process is about to exit.
+
+    var fs = require('fs');
+
+    process.on('exit', function () {
+      fs.writeFileSync('/tmp/myfile', 'This MUST be saved on exit.');
+    });
+
+Code like the above can occasionally be useful for saving some kind of final report before you exit. Note the use of a synchronous file system call - this is to make sure the I/O finishes before the process actually exits.
+
+The other built-in event is called `uncaughtException`. It fires, as you might guess, whenever an exception has occurred that hasn't been caught or dealt with somewhere else in your program. It's not the ideal way to handle errors, but it can be very useful as a last line of defense if a program needs to stay running indefinitely.
+
+    process.on('uncaughtException', function (err) {
+      console.error('An uncaught error occurred!');
+      console.error(err.stack);
+    });
+
+The default behavior on `uncaughtException` is to print a stack trace and exit - using the above, your program will display the message provided and the stack trace, but will **not** exit.
+
+## Streams
+
+The `process` object also provides wrappings for the three `STDIO` streams, `stdin`, `stdout`, and `stderr`. Put briefly, `stdin` is a readable stream (where one would read input from the user), `stdout` is a non-blocking writeable stream (writes to `stdout` are asynchronous, in other words), and `stderr` is a blocking (synchronous) writeable stream.
+
+The simplest one to describe is `process.stdout`. Technically, most output in Node is accomplished by using `process.stdout.write()` - though most people would never know it. The following is from `console.js` in Node core:
+
+    exports.log = function() {
+      process.stdout.write(format.apply(this, arguments) + '\n');
+    };
+
+Since most people are used to the `console.log` syntax from browser development, it was provided as a convenient wrapper.
+
+Next we have `process.stderr`, which is very similar to `process.stdout` with one key exception - it blocks. When you write to `stderr`, your process blocks until the write is completed. Node.js provides a number of alias functions for output, most of which either end up using `stdout` or `stderr` under the hood. Here's a quick reference list:
+
+STDOUT, or non-blocking functions: `console.log`, `console.info`, `util.puts`, `util.print`
+
+STDERR, or blocking functions: `console.warn`, `console.error`, `util.debug`
+
+Lastly, `process.stdin` is a readable stream for getting user input. See [more on cli input](/articles/command-line/how-to-prompt-for-command-line-input).
+
+## Other Properties
+
+The `process` object additionally contains a variety of properties that allow you to access information about the running process. Let's run through a few quick examples with the help of the REPL:
+
+    > process.pid
+    3290
+    > process.version
+    'v0.4.9'
+    > process.platform
+    'linux'
+    > process.title
+    'node'
+
+The `pid` is the OS Process ID, `platform` is something general like 'linux' or 'darwin', and `version` refers to your Node version. `process.title` is a little bit different - while set to `node` by default, it can be set to anything you want, and will be what gets displayed in lists of running processes.
+
+The `process` module also exposes `process.argv`, an array containing the command-line arguments to the current process.
Read more on [how to parse command line arguments](/articles/command-line/how-to-parse-command-line-arguments).
+
+`process.execPath` will return the absolute path of the executable that started this process.
+
+`process.env` contains your environment variables. Try `process.env.HOME`, for example.
+
+## Methods
+
+There are also a variety of methods attached to the `process` object, many of which deal with quite advanced aspects of a program. We'll take a look at a few of the more commonly useful ones, while leaving the more advanced parts for another article.
+
+`process.exit` exits the process. If you call an asynchronous function and then call `process.exit()` immediately afterwards, you will be in a race condition - the asynchronous call may or may not complete before the process is exited. `process.exit` accepts one optional argument - an integer exit code. `0`, by convention, is an exit with no errors.
+
+`process.cwd` returns the 'current working directory' of the process - this is often the directory from which the command to start the process was issued.
+
+`process.chdir` is used to change the current working directory. For example:
+
+    > process.cwd()
+    '/home/avian/dev'
+    > process.chdir('/home/avian')
+    > process.cwd()
+    '/home/avian'
+
+Finally, on a more advanced note, we have `process.nextTick`. This method accepts one argument - a callback - and places it at the top of the next iteration of the event loop. Some people do something like this:
+
+    setTimeout(function () {
+      // code here
+    }, 0)
+
+This, however, is not ideal. In Node.js, this should be used instead:
+
+    process.nextTick(function () {
+      console.log('Next trip around the event loop, wheeee!');
+    });
+
+It is much more efficient, and much more accurate.
diff --git a/locale/fa/knowledge/getting-started/what-is-node-core-verus-userland.md b/locale/fa/knowledge/getting-started/what-is-node-core-verus-userland.md new file mode 100644 index 0000000000000..8407feda81661 --- /dev/null +++ b/locale/fa/knowledge/getting-started/what-is-node-core-verus-userland.md @@ -0,0 +1,39 @@
+---
+title: What is node core versus userland
+date: '2011-08-26T10:08:50.000Z'
+tags:
+  - npm
+  - core
+  - userland
+  - terminology
+difficulty: 1
+layout: knowledge-post.hbs
+---
+
+Occasionally, in discussions on the NodeJS mailing lists and IRC channels, you may hear things referred to as "node-core" and "userland".
+
+Of course, traditionally, "userland" or "userspace" refer to everything outside the operating system kernel. In that sense, Node itself is a "userland" program.
+
+However, in the context of NodeJS, "core" refers to the modules and bindings that are compiled into NodeJS. In general, they provide a hook into very well-understood low-level functionality which almost all networking programs are going to require: TCP, HTTP, DNS, the File System, child processes, and a few other things. If something is fancy enough to argue about, there's a good chance it won't be part of node-core. HTTP is about as big as it gets, and if it wasn't so popular, it'd certainly not be a part of node.
+
+There are also some things in node-core that are simply too painful to do without in a JavaScript environment, or which have been created to implement some BOM constructs which are not part of the JavaScript language, but may as well be (e.g. `setTimeout`, `setInterval`, and `console`).
+
+Everything else is "userland". This includes: npm, express, request, coffee-script, mysql clients, redis clients, and so on.
You can often install these programs using [npm](http://npmjs.org/).

The question of what is properly "node-core" and what belongs in "userland" is a constant battleground. In general, node is based on the philosophy that it should *not* come with "batteries included". It is easier to move things out of node-core than it is to move them in, which means that core modules must continually "pay rent" in terms of providing necessary functionality that nearly everyone finds valuable.

## This is a Good Thing.

One goal of node's minimal core library is to encourage people to implement things in creative ways, without forcing their ideas onto everyone. With a tiny core and a vibrant user space, we can all flourish and experiment without the onerous burden of having to agree all the time.

## Userland isn't Less

If anything, it's more. Building functionality in userland rather than in node-core means:

* You have a lot more freedom to iterate on the idea.
* Everyone who wants your module can install it easily enough (if you publish it with npm).
* You have freedom to break node conventions if that makes sense for your use-case.

If you believe that something *really* just *needs* to be part of node's core library set, you should *still* build it as a module! It's much more likely to be pulled into node-core if people have a chance to see your great ideas in action, and if its core principles are iterated and polished and tested with real-world use.

Changing functionality that is included in node-core is very costly. We do it sometimes, but it's not easy, and carries a high risk of regressions. Better to experiment outside, and then pull it into node-core once it's stable. Once it's usable as a userland package, you may even find that it's less essential to node-core than you first thought. \ No newline at end of file diff --git a/locale/fa/knowledge/getting-started/what-is-require.md b/locale/fa/knowledge/getting-started/what-is-require.md new file mode 100644 index 0000000000000..8f5367b00ca45 --- /dev/null +++ b/locale/fa/knowledge/getting-started/what-is-require.md @@ -0,0 +1,60 @@ +--- +title: What is require? +date: '2011-08-26T10:08:50.000Z' +tags: + - npm + - core + - globals + - builtin +difficulty: 1 +layout: knowledge-post.hbs +---

Node.js follows the CommonJS module system, and the builtin `require` function is the easiest way to include modules that exist in separate files. The basic functionality of `require` is that it reads a JavaScript file, executes the file, and then proceeds to return the `exports` object. An example module:

    console.log("evaluating example.js");

    var invisible = function () {
      console.log("invisible");
    }

    exports.message = "hi";

    exports.say = function () {
      console.log(exports.message);
    }

So if you run `var example = require('./example.js')`, then `example.js` will get evaluated, and `example` will be an object equal to:

    {
      message: "hi",
      say: [Function]
    }

If you want to set the exports object to a function or a new object, you have to use the `module.exports` object. For example:

    module.exports = function () {
      console.log("hello world")
    }

    require('./example2.js')() // require the module and invoke the exported function

It is worth noting that each time you subsequently require an already-required file, the `exports` object is cached and reused.
To illustrate this point:

    node> require('./example.js')
    evaluating example.js
    { message: 'hi', say: [Function] }
    node> require('./example.js')
    { message: 'hi', say: [Function] }
    node> require('./example.js').message = "hey" // set the message to "hey"
    'hey'
    node> require('./example.js') // One might think that this "reloads" the file...
    { message: 'hey', say: [Function] } // ...but the message is still "hey" because of the module cache.


As you can see from the above, `example.js` is evaluated the first time, but all subsequent calls to `require()` only hit the module cache, rather than reading the file again. This can occasionally produce side effects.

The rules of where `require` finds the files can be a little complex, but a simple rule of thumb is that if the file doesn't start with "./" or "/", then it is either considered a core module (and the local Node path is checked), or a dependency in the local `node_modules` folder. If the file starts with "./" it is considered a relative file to the file that called `require`. If the file starts with "/", it is considered an absolute path. NOTE: you can omit ".js" and `require` will automatically append it if needed. For more detailed information, see [the official docs](https://nodejs.org/docs/v0.4.2/api/modules.html#all_Together...)

An extra note: if the filename passed to `require` is actually a directory, it will first look for `package.json` in the directory and load the file referenced in the `main` property. Otherwise, it will look for an `index.js`.

diff --git a/locale/fa/knowledge/index.md b/locale/fa/knowledge/index.md new file mode 100644 index 0000000000000..e6ee74d2e977d --- /dev/null +++ b/locale/fa/knowledge/index.md @@ -0,0 +1,6 @@ +--- +title: Knowledge Base +layout: knowledge-base-index.hbs +--- + +# Knowledge Base diff --git a/locale/fa/knowledge/intermediate/how-to-log.md b/locale/fa/knowledge/intermediate/how-to-log.md new file mode 100644 index 0000000000000..560226ac429b1 --- /dev/null +++ b/locale/fa/knowledge/intermediate/how-to-log.md @@ -0,0 +1,92 @@ +--- +title: How to log in node.js +date: '2011-08-26T10:08:50.000Z' +tags: + - logging +difficulty: 2 +layout: knowledge-post.hbs +---

Many processes, including most servers, write logs in one form or another. Reasons for logging include debugging, keeping track of users and resource usage, and reporting application state.

### Simple Logging

The simplest form of logging involves simply using `console.log` or one of the other standard output methods. In this approach, any information is printed to `stdout`, where it can either be read by the developer as it occurs or, for example, redirected to a log file.

    console.log('Web Server started, waiting for connections...');

Because it's so simple, `console.log` is by far the most common way of logging data in node.js.

### Custom Logging

Logging only with functions such as `console.log` is not ideal for every use case, however. Many applications have some sort of 'debugging mode', for example, that shows the user much more output than normal execution. To do something like this, a better idea is to write your own simple logger, and use it instead of `console.log`.

Here is an example of a basic custom logging module with configurable debugging levels.
+

    var logger = exports;
    logger.debugLevel = 'warn';
    logger.log = function(level, message) {
      var levels = ['info', 'warn', 'error'];
      // only log messages at or above the configured debug level
      if (levels.indexOf(level) >= levels.indexOf(logger.debugLevel)) {
        if (typeof message !== 'string') {
          message = JSON.stringify(message);
        }
        console.log(level + ': ' + message);
      }
    }

Usage would then look like the following:

    var logger = require('./logger');
    logger.debugLevel = 'warn';
    logger.log('info', 'Everything started properly.');
    logger.log('warn', 'Running out of memory...');
    logger.log('error', { error: 'flagrant'});

Because `logger.debugLevel` was set to `warn`, the warning message and the error would both be displayed, but the `info` message would not be.

The advantage here is that the behavior of our logging mechanisms can now be modified and controlled from a central part of our code. In this case, logging levels were added, and messages are converted to JSON if they aren't already in string form. There is a lot more that could be done here - saving logs to a file, pushing them to a database, setting custom colors and formatting the output - but by the time you want that much functionality from your custom logging function, it might be time to use an already-existing library.

### Winston - multi-transport logging made easy

[Winston](https://github.com/indexzero/winston) is a multi-transport, asynchronous logging library for node.js. It is conceptually similar to our custom logger, but comes with a wide variety of useful features and functionality baked in. In addition, `winston` is battle-hardened by internal use at Nodejitsu!

Here is an example of setting up a `winston` logger. This example includes most of the transports one could ever possibly want - please note that most use cases will only warrant a few of these.

    var winston = require('winston');

    require('winston-riak').Riak;
    require('winston-mongo').Mongo;
    require('winston-couchdb').Couchdb;

    var logger = new (winston.Logger)({
      transports: [
        new winston.transports.Console(),
        new winston.transports.File({ filename: 'path/to/all-logs.log' }),
        new winston.transports.Couchdb({ 'host': 'localhost', 'db': 'logs' }),
        new winston.transports.Riak({ bucket: 'logs' }),
        new winston.transports.MongoDB({ db: 'db', level: 'info'})
      ],
      exceptionHandlers: [
        new winston.transports.File({ filename: 'path/to/exceptions.log' })
      ]
    });

Here, we have instantiated a new `winston` logger, and provided a number of logging transports. Winston has built-in support for configurable logging levels, and provides alias methods for each configured logging level. For example, `winston.warn(x)` is an alias for `winston.log('warn', x)`. Thus, the following:

    logger.warn('Hull Breach Detected on Deck 7!');

Would output to the screen:

    warn: Hull Breach Detected on Deck 7!

Because of the file transport we set up, winston also logged the warning to `path/to/all-logs.log`. After the `logger.warn` call we just used, that log file would contain the following output:

    $ cat path/to/all-logs.log
    {"level":"warn","message":"Hull Breach Detected on Deck 7!"}

Note that winston's file transport formats the logs differently (JSON in this case) than the console transport does.

Winston also supports logging to Riak, CouchDB, MongoDB and [many other transports](https://github.com/winstonjs/winston/blob/master/docs/transports.md).
The `logger.warn` call we used before also put the same message into each database, according to the options we gave to each transport.

For further information, please see the [thorough documentation for Winston](https://github.com/indexzero/winston). diff --git a/locale/fa/knowledge/javascript-conventions/how-to-create-default-parameters-for-functions.md b/locale/fa/knowledge/javascript-conventions/how-to-create-default-parameters-for-functions.md new file mode 100644 index 0000000000000..c522dac0eca75 --- /dev/null +++ b/locale/fa/knowledge/javascript-conventions/how-to-create-default-parameters-for-functions.md @@ -0,0 +1,68 @@ +--- +title: How To Create Default Parameters for Functions +date: '2011-08-26T10:08:50.000Z' +tags: + - javascript + - builtin +difficulty: 1 +layout: knowledge-post.hbs +---

Usually a function will take a set number of parameters, and require that all of them be present before it can be executed successfully. However, you will sometimes run into situations where you want to provide a default value for a parameter or take a variable number of parameters. Unfortunately, JavaScript does not have a builtin way to do that; over time, however, people have developed idioms to compensate.

The first idiom is giving a default value for the last parameter. This is done by checking if the last parameter is `undefined` and setting it to a default value if it is. Sometimes people use the idiom: `optionalParameter = optionalParameter || defaultValue`. This can have some undesirable behavior when values that coerce to false are passed, such as `false`, `0`, and `""`. So a better way to do this is by explicitly checking that the optional parameter is `undefined`. Here is some code showing the two styles and the differing behavior:

    example = function (optionalArg) {
      optionalArg = optionalArg || "No parameter was passed";
      console.log(optionalArg);
    }

    betterExample = function (optionalArg) {
      if (typeof optionalArg === 'undefined') {
        optionalArg = "No parameter was passed";
      }
      console.log(optionalArg);
    }

    console.log("Without parameter:");
    example();
    betterExample();

    console.log("\nWith parameter:");
    example("parameter was passed");
    betterExample("parameter was passed");

    console.log("\nEmpty String:");
    example("");
    betterExample("");


If the optional value is in the middle, it can cause some undesired effects, since all the parameters are shifted over. The optional parameter is not the `undefined` value in this case - the last parameter is the `undefined` one. So you have to check if the last parameter is `undefined` and then manually fix all the other parameters before continuing in the code. This example shows you how to do that:

    example = function (param1, optParam, callback) {
      if (typeof callback === 'undefined') {
        // only two parameters were passed, so the callback is actually in `optParam`
        callback = optParam;

        // give `optParam` a default value
        optParam = "and a default parameter";
      }
      callback(param1, optParam);
    }

    example("This is a necessary parameter", console.log);
    example("This is a necessary parameter", "and an optional parameter", console.log);

More complicated cases require more code and can obscure the meaning of what you are trying to do. It then becomes a good idea to use helper functions - for example, suppose we wanted to take a variable number of parameters and pass them all to the callback.
You could try to accomplish this by manipulating the `arguments` object yourself - however, it is easier to use the [vargs](https://github.com/cloudhead/vargs) library. As this code shows, it makes the whole process a little simpler:

    var Args = require("vargs").Constructor;

    example = function () {
      var args = new Args(arguments);
      args.callback.apply({},args.all);
    }

    example("The first parameter", console.log);
    example("The first parameter", "and second parameter", console.log);
    example("The first parameter", "and second parameter", "and third parameter", console.log);
    example("The first parameter", "and second parameter", "and third parameter", "etc", console.log);

diff --git a/locale/fa/knowledge/javascript-conventions/using-ECMA5-in-nodejs.md b/locale/fa/knowledge/javascript-conventions/using-ECMA5-in-nodejs.md new file mode 100644 index 0000000000000..57e10e05bd18c --- /dev/null +++ b/locale/fa/knowledge/javascript-conventions/using-ECMA5-in-nodejs.md @@ -0,0 +1,97 @@ +--- +title: Using ECMA5 in node.js +date: '2011-08-26T10:08:50.000Z' +tags: + - core + - builtin + - globals +difficulty: 2 +layout: knowledge-post.hbs +---

When developing in the browser, there are many wonderful built-in JavaScript functions that we can't use because certain browsers don't implement them. As a result, most developers never use them. In Node, however, we can assume that everyone has the same JavaScript implementation, and as such we can use these wonderful functions and not implement them over and over in our own libraries.

The following is a list of some interesting API bits that aren't considered safe to use in a web setting but are built in to node's V8 engine.

Note that V8 implements all of ECMA 3rd edition and parts of the new stuff in the [ECMA 5th edition](http://www.ecma-international.org/publications/standards/Ecma-262.htm).

## Syntax extensions

 * `var obj = { get a() { return "something" }, set a(value) { /* do nothing */ } }` getter/setter syntax (note that a setter must declare exactly one parameter)

## Array

 * `Array.isArray(array)` - Returns true if the passed argument is an array.

## Array.prototype

 * `indexOf(value)` - Returns the first (least) index of an element within the array equal to the specified value, or -1 if none is found.
 * `lastIndexOf(value)` - Returns the last (greatest) index of an element within the array equal to the specified value, or -1 if none is found.
 * `filter(callback)` - Creates a new array with all of the elements of this array for which the provided filtering function returns true.
 * `forEach(callback)` - Calls a function for each element in the array.
 * `every(callback)` - Returns true if every element in this array satisfies the provided testing function.
 * `map(callback)` - Creates a new array with the results of calling a provided function on every element in this array.
 * `some(callback)` - Returns true if at least one element in this array satisfies the provided testing function.
 * `reduce(callback[, initialValue])` - Apply a function against pairs of values of the array (from left-to-right) so as to reduce it to a single value.
 * `reduceRight(callback[, initialValue])` - Apply a function against pairs of values of the array (from right-to-left) so as to reduce it to a single value.

## Date

 * `Date.now()` - Returns the numeric value corresponding to the current time.
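For instance, `Date.now()` makes quick-and-dirty timing measurements easy. A minimal sketch (the loop is just stand-in work):

    var start = Date.now(); // milliseconds since the epoch
    for (var i = 0; i < 1e6; i++) {} // some stand-in work
    console.log('took ' + (Date.now() - start) + 'ms');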
+

## Date.prototype

 * `toISOString()` - Returns a string representing the date in the ISO 8601 format.

## Object

 * `Object.create(proto, props)` - Creates a new object whose prototype is the passed in parent object and whose properties are those specified by props.
 * `Object.keys(obj)` - Returns a list of the own properties of an object that are enumerable.
 * `Object.defineProperty(obj, prop, desc)` - Defines a property on an object with the given descriptor.
 * `Object.defineProperties(obj, props)` - Adds own properties and/or updates the attributes of existing own properties of an object.
 * `Object.getOwnPropertyNames(obj)` - Returns a list of the own properties of an object, including ones that are not enumerable.
 * `Object.getPrototypeOf(obj)` - Returns the prototype of an object.
 * `Object.getOwnPropertyDescriptor(obj, property)` - Returns an object with keys describing the description of a property (value, writable, enumerable, configurable).
 * `Object.preventExtensions(obj)` - Prevents any new properties from being added to the given object.
 * `Object.isExtensible(obj)` - Checks if Object.preventExtensions() has been called on this object.
 * `Object.seal(obj)` - Prevents code from adding or deleting properties, or changing the descriptors of any property on an object. Property values can be changed, however.
 * `Object.isSealed(obj)` - Checks if Object.seal() has been called on this object.
 * `Object.freeze(obj)` - Same as Object.seal, except property values cannot be changed.
 * `Object.isFrozen(obj)` - Checks if Object.freeze() has been called on this object.

## Object.prototype

 * `__defineGetter__(name, callback)` - (Mozilla extension, not ECMAScript 5) Associates a function with a property that, when accessed, executes that function and returns its return value.
 * `__defineSetter__(name, callback)` - (Mozilla extension, not ECMAScript 5) Associates a function with a property that, when set, executes that function which modifies the property.
 * `__lookupGetter__(name)` - (Mozilla extension, not ECMAScript 5) Returns the function associated with the specified property by the __defineGetter__ method.
 * `__lookupSetter__(name)` - (Mozilla extension, not ECMAScript 5) Returns the function associated with the specified property by the __defineSetter__ method.
 * `isPrototypeOf(obj)` - (ECMAScript 3 and 5) Returns true if `this` is a prototype of the passed in object.

## Function.prototype

 * `bind(thisArg[, arg1[, argN]])` - Sets the value of 'this' inside the function to always be the value of thisArg when the function is called. Optionally, function arguments can be specified (arg1, arg2, etc) that will automatically be prepended to the argument list whenever the function is called.

## JSON

 * `JSON.stringify(obj [, replacer [, space]])` - Takes any serializable object and returns the JSON representation as a string. [More info](https://developer.mozilla.org/En/Using_JSON_in_Firefox)
 * `JSON.parse(string)` - Takes a well-formed JSON string and returns the corresponding JavaScript object.
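As a quick illustration, here is a small sketch (the object and its values are invented for the example) that combines a few of the methods listed above:

    var user = { name: "avian", admin: false, visits: 3 };

    // Object.keys + Array.prototype.filter
    var truthyKeys = Object.keys(user).filter(function (key) {
      return user[key]; // keep only keys whose values are truthy
    });
    console.log(truthyKeys); // [ 'name', 'visits' ]

    // a round trip through JSON.stringify and JSON.parse
    var copy = JSON.parse(JSON.stringify(user));
    console.log(copy.name); // 'avian'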
+

## String.prototype

 * `trim()` - Trims whitespace from both ends of the string
 * `trimRight()` - Trims whitespace from the right side of the string
 * `trimLeft()` - Trims whitespace from the left side of the string

## Property Descriptor Defaults

 * `value` - undefined
 * `get` - undefined
 * `set` - undefined
 * `writable` - false
 * `enumerable` - false
 * `configurable` - false

## Missing features

 * `Object.__noSuchMethod__` (Mozilla extension, not ECMAScript 5)
 * `"use strict";` syntax extension ([v8 issue](http://code.google.com/p/v8/issues/detail?id=919)) \ No newline at end of file diff --git a/locale/fa/knowledge/javascript-conventions/what-are-the-built-in-timer-functions.md b/locale/fa/knowledge/javascript-conventions/what-are-the-built-in-timer-functions.md new file mode 100644 index 0000000000000..1159525e33f7e --- /dev/null +++ b/locale/fa/knowledge/javascript-conventions/what-are-the-built-in-timer-functions.md @@ -0,0 +1,68 @@ +--- +title: What are the built-in timer functions? +date: '2011-08-26T10:08:50.000Z' +tags: + - core + - builtin + - globals +difficulty: 1 +layout: knowledge-post.hbs +---

There are two built-in timer functions, `setTimeout` and `setInterval`, which can be used to call a function at a later time. An example usage:

    setTimeout(function() { console.log("setTimeout: It's been one second!"); }, 1000);
    setInterval(function() { console.log("setInterval: It's been one second!"); }, 1000);

An example output is:

    setTimeout: It's been one second!
    setInterval: It's been one second!
    setInterval: It's been one second!
    setInterval: It's been one second!
    setInterval: It's been one second!
    ...

As you can see, the parameters to both are the same. The second parameter says how long, in milliseconds, to wait before calling the function passed as the first parameter. The difference between the two functions is that `setTimeout` calls the callback only once, while `setInterval` will call it over and over again.

Typically you want to be careful with `setInterval`, because it can cause some undesirable effects. If, for example, you wanted to make sure your server was up by pinging it every second, you might think to try something like this:

    setInterval(ping, 1000);

This can cause problems, however, if your server is slow and it takes, say, 3 seconds to respond to the first request. In the time it takes to get back the response, you would have sent off 3 more requests - not exactly desirable! This isn't the end of the world when serving small static files, but if you're doing an expensive operation such as a database query or any complicated computation, this can have some undesirable results. A common solution looks like this:

    var recursive = function () {
      console.log("It has been one second!");
      setTimeout(recursive,1000);
    }
    recursive();

As you can see, it makes a call to the `recursive` function which, as it completes, makes a call to `setTimeout(recursive, 1000)`, which calls `recursive` again in 1 second - thus having nearly the same effect as `setInterval`, while being resilient to the unintended errors that can pile up.

You can clear the timers you set with `clearTimeout` and `clearInterval`.
Their usage is very simple:

    function never_call () {
      console.log("You should never call this function");
    }

    var id1 = setTimeout(never_call,1000);
    var id2 = setInterval(never_call,1000);

    clearTimeout(id1);
    clearInterval(id2);

So if you keep track of the return values of the timers, you can easily unhook them.

The final trick for the timer functions is that you can pass parameters to the callback by passing more parameters to `setTimeout` and `setInterval`:

    setTimeout(console.log, 1000, "This", "has", 4, "parameters");
    setInterval(console.log, 1000, "This only has one");


    This has 4 parameters
    This only has one
    This only has one
    This only has one
    This only has one
    This only has one
    ...

diff --git a/locale/fa/knowledge/javascript-conventions/what-are-truthy-and-falsy-values.md b/locale/fa/knowledge/javascript-conventions/what-are-truthy-and-falsy-values.md new file mode 100644 index 0000000000000..70af00eb00fcf --- /dev/null +++ b/locale/fa/knowledge/javascript-conventions/what-are-truthy-and-falsy-values.md @@ -0,0 +1,74 @@ +--- +date: '2011-08-26T10:08:50.000Z' +tags: + - truthy + - falsy + - types + - coercion +title: 'What are "truthy" and "falsy" values?' +difficulty: 4 +layout: knowledge-post.hbs +---


JavaScript is a weakly typed language. That means different types can be used in operations, and the language will try to convert the types until the operation makes sense.

    console.log("1" > 0); // true, "1" converted to number
    console.log(1 + "1"); // 11, 1 converted to string

Type conversion also applies when values are used in unary boolean operations, most notably `if` statements. If a value converts to the boolean true, then it is said to be "truthy". If it converts to false, it is "falsy".

    var myval = "value";
    if(myval) {
      console.log("This value is truthy");
    }

    myval = 0;
    if(!myval) {
      console.log("This value is falsy");
    }

Since most values in JavaScript are truthy, e.g. objects, arrays, most numbers and strings, it's easier to identify all of the falsy values. These are:

    false // obviously
    0 // The only falsy number
    "" // the empty string
    null
    undefined
    NaN

Note that all objects and arrays are truthy, even empty ones.

Truthiness and falsiness also come into play with logical operators. When using logical AND/OR, the values are converted based on truthiness or falsiness, and the expression resolves to the value of the last operand evaluated - short-circuit rules apply. Here's an extended example.

    var first = "truthy"
      , second = "also truthy";

    var myvalue = first && second;
    console.log(myvalue); // "also truthy"

    first = null;
    second = "truthy";

    myvalue = first || second;
    console.log(myvalue); // "truthy"

    myvalue2 = second || first;
    console.log(myvalue2); // "truthy"

    var truthy = "truthy"
      , falsy = 0;

    myvalue = truthy ? true : false;
    myvalue = falsy ? true : false; diff --git a/locale/fa/knowledge/javascript-conventions/what-is-json.md b/locale/fa/knowledge/javascript-conventions/what-is-json.md new file mode 100644 index 0000000000000..c13840be48a8b --- /dev/null +++ b/locale/fa/knowledge/javascript-conventions/what-is-json.md @@ -0,0 +1,126 @@ +--- +date: '2011-08-26T10:08:50.000Z' +tags: + - json + - stringify + - parse +title: What is JSON? +difficulty: 5 +layout: knowledge-post.hbs +---

JavaScript Object Notation, or JSON, is a lightweight data format that has become the de facto standard for the web.
JSON can be represented as either a list of values, e.g. an Array, or a hash of properties and values, e.g. an Object.

    // a JSON array
    ["one", "two", "three"]

    // a JSON object
    { "one": 1, "two": 2, "three": 3 }

## Encoding and Decoding

JavaScript provides two methods for encoding data structures to JSON and decoding JSON back to JavaScript objects and arrays. They are both available on the `JSON` object that is available in the global scope.

`JSON.stringify` takes a JavaScript object or array and returns a serialized string in the JSON format.

    var data = {
      name: "John Doe"
      , age: 32
      , title: "Vice President of JavaScript"
    }

    var jsonStr = JSON.stringify(data);

    console.log(jsonStr);

    // prints '{"name":"John Doe","age":32,"title":"Vice President of JavaScript"}'

`JSON.parse` takes a JSON string and decodes it to a JavaScript data structure.

    var jsonStr = '{"name":"John Doe","age":32,"title":"Vice President of JavaScript"}';

    var data = JSON.parse(jsonStr);

    console.log(data.title);

    // prints 'Vice President of JavaScript'

## What is valid JSON?

There are a few rules to remember when dealing with data in JSON format. There are several gotchas that can produce invalid JSON as well.

* Empty objects and arrays are okay
* Strings can contain any Unicode character; this includes object property names
* `null` is a valid JSON value on its own
* All object properties should always be double quoted
* Object property values must be one of the following: String, Number, Boolean, Object, Array, null
* Number values must be in decimal format, no octal or hex representations
* Trailing commas on arrays are not allowed

These are all examples of valid JSON.

    {"name":"John Doe","age":32,"title":"Vice President of JavaScript"}

    ["one", "two", "three"]

    // nesting valid values is okay
    {"names": ["John Doe", "Jane Doe"] }

    [ { "name": "John Doe"}, {"name": "Jane Doe"} ]

    {} // empty hash

    [] // empty list

    null

    { "key": "\uFDD0" } // unicode escape codes

These are all examples of bad JSON formatting.

    { name: "John Doe", 'age': 32 } // name and age should be in double quotes

    [32, 64, 128, 0xFFF] // hex numbers are not allowed

    { "name": "John Doe", age: undefined } // undefined is an invalid value

    // functions and dates are not allowed
    { "name": "John Doe"
      , "birthday": new Date('Fri, 26 Aug 2011 07:13:10 GMT')
      , "getName": function() {
          return this.name;
      }
    }

Calling `JSON.parse` with an invalid JSON string will result in a SyntaxError being thrown. If you are not sure of the validity of your JSON data, you can anticipate errors by wrapping the call in a try/catch block.

Notice that the only complex values allowed in JSON are objects and arrays. Functions, dates and other types are excluded. This may not seem to make sense at first, but remember that JSON is a data format, not a format for transferring complex JavaScript objects along with their functionality.

## JSON in other languages

Although JSON was inspired by the simplicity of JavaScript data structures, its use is not limited to the JavaScript language. Many other languages have methods of transferring native hashes and lists into stringified JSON objects. Here's a quick example in Ruby.
+

    require 'json'

    data = { :one => 1 }
    puts data.to_json

    # prints {"one":1}

diff --git a/locale/fa/knowledge/javascript-conventions/what-is-the-arguments-object.md b/locale/fa/knowledge/javascript-conventions/what-is-the-arguments-object.md new file mode 100644 index 0000000000000..2f6bde9db5261 --- /dev/null +++ b/locale/fa/knowledge/javascript-conventions/what-is-the-arguments-object.md @@ -0,0 +1,64 @@ +--- +date: '2011-08-26T10:08:50.000Z' +tags: + - truthy + - falsy + - types + - coercion +title: What is the arguments object? +difficulty: 4 +layout: knowledge-post.hbs +---

The `arguments` object is a special construct available inside all function calls. It represents the list of arguments that were passed in when invoking the function. Since JavaScript allows functions to be called with any number of arguments, we need a way to dynamically discover and access them.

The `arguments` object is an array-like object. It has a `length` property that corresponds to the number of arguments passed into the function. You can access these values by indexing into the array, e.g. `arguments[0]` is the first argument. The only other standard property of `arguments` is `callee`. This always refers to the function currently being executed. Here's an example that illustrates the properties of `arguments`.

    var myfunc = function(one) {
      arguments.callee === myfunc;
      arguments[0] === one;
      arguments[1] === 2;
      arguments.length === 3;
    }

    myfunc(1, 2, 3);

This construct is very useful and gives JavaScript functions a lot of flexibility. But there is an important gotcha. The `arguments` object behaves like an array, but it is not an actual array. It does not have Array in its prototype chain and it does not respond to any array methods, e.g. `arguments.sort()` raises a TypeError. Instead you need to copy the values into a true array first. Since a normal for loop works, this is pretty easy.

    var args = [];
    for(var i = 0; i < arguments.length; i++) {
      args.push(arguments[i]);
    }

In certain cases you can still treat `arguments` as an array. You can use `arguments` in dynamic function invocations using apply. And most native Array methods will also accept `arguments` when dynamically invoked using call or apply. This technique also suggests another way to convert `arguments` into a true array using the Array#slice method.

    myfunc.apply(obj, arguments);

    // concat arguments onto a real array
    Array.prototype.concat.apply([1,2,3], arguments);

    // turn arguments into a true array
    var args = Array.prototype.slice.call(arguments);

    // cut out the first argument
    args = Array.prototype.slice.call(arguments, 1);

diff --git a/locale/fa/knowledge/other-resources/tutorials-and-how-tos.md b/locale/fa/knowledge/other-resources/tutorials-and-how-tos.md new file mode 100644 index 0000000000000..6eec2ce73fed7 --- /dev/null +++ b/locale/fa/knowledge/other-resources/tutorials-and-how-tos.md @@ -0,0 +1,30 @@ +--- +title: Tutorials and How-Tos +date: '2011-09-15T23:04:37.000Z' +tags: + - help + - resources +difficulty: 1 +layout: knowledge-post.hbs +---

## [Nodejitsu Blog](http://blog.nodejitsu.com)

Nodejitsu's developers blog regularly about how to use node.js libraries, best practices *and* community news!

## [How To Node](http://howtonode.org)

How To Node is a community-driven blog with lots of node.js tutorials, complete with code snippets.
+

## [The Node Beginner Book](http://nodebeginner.org/)

The Node Beginner Book is a free book that teaches how to program with JavaScript and node.js by having the reader build a fully-functional bare-bones web application.

## [Node.js Guide](http://nodeguide.com/)

Felix Geisendörfer, one of the very first people to use Node in production, assembled an unofficial guide not only on writing node apps, but also on coding style, the Node community and on convincing the boss.

## [Node Tuts](http://nodetuts.com/)

Not only does Node Tuts have nearly two dozen screencasts on node, it is also the home of the e-book "Hands-On Node.js"!

diff --git a/locale/fa/security.md b/locale/fa/security.md new file mode 100644 index 0000000000000..d7f1c31fd92c6 --- /dev/null +++ b/locale/fa/security.md @@ -0,0 +1,72 @@ +--- +layout: security.hbs +title: Security +---

# Security

## Reporting a Bug in Node.js

All security bugs in Node.js are taken seriously and should be reported via [HackerOne](https://hackerone.com/nodejs) or by emailing [security@nodejs.org](mailto:security@nodejs.org). Reports will be delivered to a subset of the core team who handle security issues.

Your report will be acknowledged within 24 hours, and you’ll receive a more detailed response to your report within 48 hours indicating the next steps in handling your submission.

After the initial reply to your report, the security team will endeavor to keep you informed of the progress being made towards a fix and full announcement, and may ask for additional information or guidance surrounding the reported issue. These updates will be sent at least every five days; in practice, this is more likely to be every 24-48 hours.

### Node.js Bug Bounty Program

The Node.js project engages in an official bug bounty program for security researchers and responsible public disclosures.

The program is managed through the HackerOne platform at [https://hackerone.com/nodejs](https://hackerone.com/nodejs), where further details are available.

## Reporting a Bug in a Third-Party Module

Security bugs in third-party modules should be reported to their respective maintainers and should also be coordinated through the [Node Ecosystem Security Team](https://hackerone.com/nodejs-ecosystem) or by emailing [security-ecosystem@nodejs.org](mailto:security-ecosystem@nodejs.org).

Details regarding this process can be found in the [Security Working Group repository](https://github.com/nodejs/security-wg/blob/master/processes/third_party_vuln_process.md).

Thank you for improving the security of Node.js and its ecosystem. Your efforts and responsible disclosure are greatly appreciated and will be acknowledged.

## Disclosure Policy

Here is the security disclosure policy for Node.js:

- The security report is received and is assigned a primary handler. This person will coordinate the fix and release process. The problem is confirmed and a list of all affected versions is determined. Code is audited to find any potential similar problems. Fixes are prepared for all releases which are still under maintenance. These fixes are not committed to the public repository but rather held locally pending the announcement.

- A suggested embargo date for this vulnerability is chosen and a CVE (Common Vulnerabilities and Exposures) identifier is requested for the vulnerability.

- On the embargo date, the Node.js security mailing list is sent a copy of the announcement.
The changes are pushed to the public repository and new builds are deployed to nodejs.org. Within 6 hours of the mailing list being notified, a copy of the advisory will be published on the Node.js blog.

- Typically the embargo date will be set 72 hours from the time the CVE is issued. However, this may vary depending on the severity of the bug or difficulty in applying a fix.

- This process can take some time, especially when coordination is required with maintainers of other projects. Every effort will be made to handle the bug in as timely a manner as possible; however, it’s important that we follow the release process above to ensure that the disclosure is handled in a consistent manner.

## Receiving Security Updates

Security notifications will be distributed via the following methods.

- [https://groups.google.com/group/nodejs-sec](https://groups.google.com/group/nodejs-sec)
- [https://nodejs.org/en/blog](https://nodejs.org/en/blog)

## Comments on this Policy

If you have suggestions on how this process could be improved, please submit a [pull request](https://github.com/nodejs/nodejs.org) or [file an issue](https://github.com/nodejs/security-wg/issues/new) to discuss. diff --git a/locale/fa/site.json b/locale/fa/site.json new file mode 100644 index 0000000000000..e092084b14919 --- /dev/null +++ b/locale/fa/site.json @@ -0,0 +1,149 @@ +{ + "title": "Node.js", + "author": "Node.js Foundation", + "url": "https://nodejs.org/en/", + "locale": "en", + "scrollToTop": "Scroll to top", + "reportNodeIssue": "Report Node.js issue", + "reportWebsiteIssue": "Report website issue", + "getHelpIssue": "Get Help", + "by": "by", + "all-downloads": "All download options", + "nightly": "Nightly builds", + "chakracore-nightly": "Node-ChakraCore Nightly builds", + "previous": "Previous", + "next": "Next", + "feeds": [ + { + "link": "feed/blog.xml", + "text": "وبلاگ نودجی‌اس" + }, + { + "link": "feed/releases.xml", + "text": "بلاگ نودجی‌اس: انتشارها" + }, + { + "link": "feed/vulnerability.xml", + "text": "بلاگ نودجی‌اس: مسائل امنیتی" + } + ], + "home": { + "text": "خانه" + }, + "about": { + "link": "about", + "text": "دربارهٔ...", + "governance": { + "link": "about/governance", + "text": "مدیریت" + }, + "community": { + "link": "about/community", + "text": "اجتماع" + }, + "workinggroups": { + "link": "about/working-groups", + "text": "گروه‌های کاری" + }, + "releases": { + "link": "about/releases", + "text": "انتشارها" + }, + "resources": { + "link": "about/resources", + "text": "منابع" + }, + "trademark": { + "link": "about/trademark", + "text": "نشان بازرگانی" + }, + "privacy": { + "link": "about/privacy", + "text": "سیاست‌های حریم شخصی" + } + }, + "download": { + "link": "download", + "text": "دانلود‌ها", + "releases": { + "link": "download/releases", + "text": "انتشارهای پیشین" + }, + "package-manager": { + "link": "download/package-manager", + "text": "نصب نودجی‌اس با package manager" + }, + "shasums": { + "link": "SHASUMS256.txt.asc", + "text": "Signed SHASUMS for release files", + "verify-link": "https://github.com/nodejs/node#verifying-binaries", + "verify-text": "چگونه راستی‌آزمایی کنیم؟" + } + }, + "docs": { + "link": "docs", + "text": "اسناد", + "es6": { + "link": "docs/es6", + "text": "ES6 و فراتر" + }, + "api-lts": { + "link": "/dist/latest-%ver-major%/docs/api", + "subtext": "LTS", + "text": "%ver% API" + }, + "api-current": { + "link": "/dist/latest-%ver-major%/docs/api", + "text": "%ver% API" + }, + "guides": { + "link": "docs/guides", + "text": "راهنمایی‌ها" + },
+ "dependencies": { + "link": "docs/meta/topics/dependencies", + "text": "وابستگی‌ها" + } + }, + "getinvolved": { + "link": "get-involved", + "text": "مشارکت جستن", + "code-and-learn": { + "link": "get-involved/code-and-learn", + "text": "کد + یادگیری" + }, + "collab-summit": { + "link": "get-involved/collab-summit", + "text": "Collab Summit" + }, + "contribute": { + "link": "get-involved/contribute", + "text": "Contribute" + }, + "conduct": { + "link": "https://github.com/nodejs/node/blob/master/CONTRIBUTING.md#code-of-conduct", + "text": "Code of Conduct" + } + }, + "security": { + "link": "security", + "text": "امنیت" + }, + "blog": { + "link": "blog", + "text": "بلاگ" + }, + "foundation": { + "link": "https://foundation.nodejs.org/", + "text": "بنیاد" + }, + "releases": { + "title": "تاریخچهٔ انتشارها", + "downloads": "Downloads" + }, + "links": { + "pages": { + "changelog": "Changelog" + } + } +}